summaryrefslogtreecommitdiff
path: root/chromium/base
diff options
context:
space:
mode:
Diffstat (limited to 'chromium/base')
-rw-r--r--chromium/base/BUILD.gn89
-rw-r--r--chromium/base/DEPS11
-rw-r--r--chromium/base/allocator/allocator_interception_mac.mm2
-rw-r--r--chromium/base/allocator/partition_allocator/address_space_randomization_unittest.cc12
-rw-r--r--chromium/base/allocator/partition_allocator/memory_reclaimer.cc153
-rw-r--r--chromium/base/allocator/partition_allocator/memory_reclaimer.h84
-rw-r--r--chromium/base/allocator/partition_allocator/memory_reclaimer_unittest.cc169
-rw-r--r--chromium/base/allocator/partition_allocator/partition_alloc.cc1
-rw-r--r--chromium/base/allocator/partition_allocator/partition_alloc.h23
-rw-r--r--chromium/base/android/jni_generator/BUILD.gn2
-rw-r--r--chromium/base/android/linker/BUILD.gn3
-rw-r--r--chromium/base/base_switches.cc89
-rw-r--r--chromium/base/base_switches.h1
-rw-r--r--chromium/base/bind.h148
-rw-r--r--chromium/base/bind_helpers.h2
-rw-r--r--chromium/base/bind_unittest.cc61
-rw-r--r--chromium/base/bind_unittest.nc73
-rw-r--r--chromium/base/build_time.cc4
-rw-r--r--chromium/base/build_time_unittest.cc6
-rw-r--r--chromium/base/callback.h12
-rw-r--r--chromium/base/callback_helpers_unittest.cc17
-rw-r--r--chromium/base/callback_internal.h8
-rw-r--r--chromium/base/callback_unittest.cc8
-rw-r--r--chromium/base/callback_unittest.nc25
-rw-r--r--chromium/base/cancelable_callback.h11
-rw-r--r--chromium/base/cancelable_callback_unittest.cc48
-rw-r--r--chromium/base/command_line.cc54
-rw-r--r--chromium/base/command_line.h7
-rw-r--r--chromium/base/command_line_unittest.cc85
-rw-r--r--chromium/base/cpu.cc6
-rw-r--r--chromium/base/cpu.h2
-rw-r--r--chromium/base/cpu_unittest.cc4
-rw-r--r--chromium/base/debug/activity_analyzer.cc9
-rw-r--r--chromium/base/debug/activity_analyzer_unittest.cc32
-rw-r--r--chromium/base/debug/activity_tracker.cc20
-rw-r--r--chromium/base/debug/debugger_posix.cc5
-rw-r--r--chromium/base/debug/debugger_win.cc3
-rw-r--r--chromium/base/debug/elf_reader_unittest.cc6
-rw-r--r--chromium/base/debug/stack_trace.h7
-rw-r--r--chromium/base/debug/stack_trace_posix.cc10
-rw-r--r--chromium/base/debug/task_trace.cc37
-rw-r--r--chromium/base/feature_list.cc37
-rw-r--r--chromium/base/feature_list.h7
-rw-r--r--chromium/base/files/file_descriptor_watcher_posix.cc5
-rw-r--r--chromium/base/files/file_descriptor_watcher_posix.h9
-rw-r--r--chromium/base/files/file_path.cc15
-rw-r--r--chromium/base/files/file_path_unittest.cc11
-rw-r--r--chromium/base/files/file_path_watcher_linux.cc21
-rw-r--r--chromium/base/files/file_proxy_unittest.cc5
-rw-r--r--chromium/base/files/file_util.h10
-rw-r--r--chromium/base/files/file_util_posix.cc8
-rw-r--r--chromium/base/files/file_util_unittest.cc67
-rw-r--r--chromium/base/files/file_util_win.cc84
-rw-r--r--chromium/base/files/important_file_writer.cc3
-rw-r--r--chromium/base/files/important_file_writer.h2
-rw-r--r--chromium/base/fuchsia/default_context.cc24
-rw-r--r--chromium/base/fuchsia/default_context.h25
-rw-r--r--chromium/base/fuchsia/filtered_service_directory.cc49
-rw-r--r--chromium/base/fuchsia/filtered_service_directory.h26
-rw-r--r--chromium/base/fuchsia/filtered_service_directory_unittest.cc25
-rw-r--r--chromium/base/fuchsia/scoped_service_binding.h106
-rw-r--r--chromium/base/fuchsia/scoped_service_binding_unittest.cc110
-rw-r--r--chromium/base/fuchsia/service_directory.cc130
-rw-r--r--chromium/base/fuchsia/service_directory.h60
-rw-r--r--chromium/base/fuchsia/service_directory_test_base.cc54
-rw-r--r--chromium/base/fuchsia/service_directory_test_base.h18
-rw-r--r--chromium/base/fuchsia/service_directory_unittest.cc118
-rw-r--r--chromium/base/fuchsia/service_provider_impl.cc12
-rw-r--r--chromium/base/fuchsia/service_provider_impl.h9
-rw-r--r--chromium/base/fuchsia/service_provider_impl_unittest.cc42
-rw-r--r--chromium/base/fuchsia/startup_context.cc98
-rw-r--r--chromium/base/fuchsia/startup_context.h35
-rw-r--r--chromium/base/hash/sha1_boringssl.cc28
-rw-r--r--chromium/base/hash/sha1_perftest.cc60
-rw-r--r--chromium/base/i18n/streaming_utf8_validator_unittest.cc2
-rw-r--r--chromium/base/i18n/time_formatting.h4
-rw-r--r--chromium/base/immediate_crash.h178
-rw-r--r--chromium/base/immediate_crash_unittest.cc40
-rw-r--r--chromium/base/ios/crb_protocol_observers.mm2
-rw-r--r--chromium/base/ios/ios_util.h3
-rw-r--r--chromium/base/ios/ios_util.mm5
-rw-r--r--chromium/base/location.cc19
-rw-r--r--chromium/base/location.h24
-rw-r--r--chromium/base/location_unittest.cc39
-rw-r--r--chromium/base/logging.cc39
-rw-r--r--chromium/base/logging.h10
-rw-r--r--chromium/base/logging_unittest.cc197
-rw-r--r--chromium/base/mac/foundation_util.h3
-rw-r--r--chromium/base/mac/mac_util.h82
-rw-r--r--chromium/base/mac/mac_util.mm16
-rw-r--r--chromium/base/mac/mac_util_unittest.mm190
-rw-r--r--chromium/base/mac/mach_port_broker.h110
-rw-r--r--chromium/base/mac/mach_port_broker.mm206
-rw-r--r--chromium/base/mac/mach_port_broker_unittest.cc133
-rw-r--r--chromium/base/mac/mach_port_rendezvous.cc14
-rw-r--r--chromium/base/mac/mach_port_rendezvous.h9
-rw-r--r--chromium/base/mac/mach_port_rendezvous_unittest.cc22
-rw-r--r--chromium/base/mac/mach_port_util.cc136
-rw-r--r--chromium/base/mac/mach_port_util.h48
-rw-r--r--chromium/base/mac/objc_release_properties_unittest.mm2
-rw-r--r--chromium/base/memory/discardable_shared_memory.cc14
-rw-r--r--chromium/base/memory/fake_memory_pressure_monitor.cc2
-rw-r--r--chromium/base/memory/fake_memory_pressure_monitor.h2
-rw-r--r--chromium/base/memory/memory_pressure_monitor.h2
-rw-r--r--chromium/base/memory/memory_pressure_monitor_chromeos.cc478
-rw-r--r--chromium/base/memory/memory_pressure_monitor_chromeos.h145
-rw-r--r--chromium/base/memory/memory_pressure_monitor_chromeos_unittest.cc292
-rw-r--r--chromium/base/memory/memory_pressure_monitor_mac.cc2
-rw-r--r--chromium/base/memory/memory_pressure_monitor_mac.h2
-rw-r--r--chromium/base/memory/memory_pressure_monitor_notifying_chromeos.cc312
-rw-r--r--chromium/base/memory/memory_pressure_monitor_notifying_chromeos.h130
-rw-r--r--chromium/base/memory/memory_pressure_monitor_notifying_chromeos_unittest.cc234
-rw-r--r--chromium/base/memory/memory_pressure_monitor_win.cc2
-rw-r--r--chromium/base/memory/memory_pressure_monitor_win.h2
-rw-r--r--chromium/base/memory/platform_shared_memory_region.h44
-rw-r--r--chromium/base/memory/platform_shared_memory_region_android.cc21
-rw-r--r--chromium/base/memory/platform_shared_memory_region_fuchsia.cc3
-rw-r--r--chromium/base/memory/platform_shared_memory_region_posix.cc57
-rw-r--r--chromium/base/memory/read_only_shared_memory_region.h2
-rw-r--r--chromium/base/memory/shared_memory.h45
-rw-r--r--chromium/base/memory/shared_memory_android.cc16
-rw-r--r--chromium/base/memory/shared_memory_fuchsia.cc10
-rw-r--r--chromium/base/memory/shared_memory_helper.cc4
-rw-r--r--chromium/base/memory/shared_memory_mac.cc5
-rw-r--r--chromium/base/memory/shared_memory_mapping.cc12
-rw-r--r--chromium/base/memory/shared_memory_mapping.h14
-rw-r--r--chromium/base/memory/shared_memory_nacl.cc8
-rw-r--r--chromium/base/memory/shared_memory_posix.cc152
-rw-r--r--chromium/base/memory/shared_memory_unittest.cc250
-rw-r--r--chromium/base/memory/shared_memory_win.cc117
-rw-r--r--chromium/base/memory/weak_ptr.cc2
-rw-r--r--chromium/base/memory/weak_ptr.h13
-rw-r--r--chromium/base/memory/weak_ptr_unittest.cc4
-rw-r--r--chromium/base/message_loop/message_loop_current.cc5
-rw-r--r--chromium/base/message_loop/message_loop_current.h5
-rw-r--r--chromium/base/message_loop/message_loop_unittest.cc89
-rw-r--r--chromium/base/message_loop/message_pump.cc8
-rw-r--r--chromium/base/message_loop/message_pump.h6
-rw-r--r--chromium/base/message_loop/message_pump_fuchsia.cc18
-rw-r--r--chromium/base/message_loop/message_pump_perftest.cc24
-rw-r--r--chromium/base/message_loop/message_pump_unittest.cc34
-rw-r--r--chromium/base/metrics/field_trial.cc117
-rw-r--r--chromium/base/metrics/field_trial.h45
-rw-r--r--chromium/base/metrics/field_trial_param_associator.cc4
-rw-r--r--chromium/base/metrics/field_trial_params.cc89
-rw-r--r--chromium/base/metrics/field_trial_params.h10
-rw-r--r--chromium/base/metrics/field_trial_unittest.cc8
-rw-r--r--chromium/base/metrics/histogram_base.cc14
-rw-r--r--chromium/base/metrics/histogram_functions.cc22
-rw-r--r--chromium/base/metrics/histogram_functions.h82
-rw-r--r--chromium/base/metrics/histogram_macros.h19
-rw-r--r--chromium/base/metrics/histogram_snapshot_manager_unittest.cc5
-rw-r--r--chromium/base/metrics/persistent_sample_map.cc2
-rw-r--r--chromium/base/metrics/statistics_recorder_unittest.cc4
-rw-r--r--chromium/base/metrics/ukm_source_id.h15
-rw-r--r--chromium/base/numerics/clamped_math.h2
-rw-r--r--chromium/base/numerics/safe_conversions.h7
-rw-r--r--chromium/base/numerics/safe_math_shared_impl.h13
-rw-r--r--chromium/base/observer_list_threadsafe.h2
-rw-r--r--chromium/base/observer_list_threadsafe_unittest.cc15
-rw-r--r--chromium/base/observer_list_types.cc2
-rw-r--r--chromium/base/observer_list_types.h2
-rw-r--r--chromium/base/one_shot_event.cc1
-rw-r--r--chromium/base/one_shot_event_unittest.cc4
-rw-r--r--chromium/base/optional.h22
-rw-r--r--chromium/base/optional_unittest.cc1
-rw-r--r--chromium/base/optional_unittest.nc101
-rw-r--r--chromium/base/pickle_fuzzer.cc4
-rw-r--r--chromium/base/power_monitor/power_monitor.cc63
-rw-r--r--chromium/base/power_monitor/power_monitor.h56
-rw-r--r--chromium/base/power_monitor/power_monitor_device_source.cc5
-rw-r--r--chromium/base/power_monitor/power_monitor_device_source.h2
-rw-r--r--chromium/base/power_monitor/power_monitor_device_source_android.cc2
-rw-r--r--chromium/base/power_monitor/power_monitor_source.cc18
-rw-r--r--chromium/base/power_monitor/power_monitor_source.h8
-rw-r--r--chromium/base/power_monitor/power_monitor_unittest.cc15
-rw-r--r--chromium/base/process/kill.cc2
-rw-r--r--chromium/base/process/launch_win.cc5
-rw-r--r--chromium/base/process/process_fuchsia.cc3
-rw-r--r--chromium/base/process/process_metrics.h34
-rw-r--r--chromium/base/process/process_metrics_fuchsia.cc6
-rw-r--r--chromium/base/process/process_metrics_linux.cc60
-rw-r--r--chromium/base/process/process_metrics_mac.cc48
-rw-r--r--chromium/base/process/process_metrics_posix.cc9
-rw-r--r--chromium/base/process/process_metrics_win.cc12
-rw-r--r--chromium/base/process/process_posix.cc5
-rw-r--r--chromium/base/process/process_win.cc17
-rw-r--r--chromium/base/profiler/metadata_recorder.cc145
-rw-r--r--chromium/base/profiler/metadata_recorder.h205
-rw-r--r--chromium/base/profiler/metadata_recorder_unittest.cc117
-rw-r--r--chromium/base/profiler/profile_builder.cc7
-rw-r--r--chromium/base/profiler/profile_builder.h20
-rw-r--r--chromium/base/profiler/sample_metadata_unittest.cc24
-rw-r--r--chromium/base/profiler/stack_sampler_impl.cc9
-rw-r--r--chromium/base/profiler/stack_sampler_impl_unittest.cc8
-rw-r--r--chromium/base/profiler/stack_sampling_profiler_test_util.cc2
-rw-r--r--chromium/base/profiler/stack_sampling_profiler_unittest.cc6
-rw-r--r--chromium/base/run_loop.cc134
-rw-r--r--chromium/base/run_loop.h44
-rw-r--r--chromium/base/run_loop_unittest.cc5
-rw-r--r--chromium/base/sampling_heap_profiler/lock_free_address_hash_set.cc1
-rw-r--r--chromium/base/sampling_heap_profiler/lock_free_address_hash_set.h22
-rw-r--r--chromium/base/sampling_heap_profiler/poisson_allocation_sampler.cc9
-rw-r--r--chromium/base/sampling_heap_profiler/poisson_allocation_sampler.h14
-rw-r--r--chromium/base/sampling_heap_profiler/sampling_heap_profiler.cc17
-rw-r--r--chromium/base/sampling_heap_profiler/sampling_heap_profiler.h7
-rw-r--r--chromium/base/scoped_generic_unittest.cc24
-rw-r--r--chromium/base/scoped_observer.h4
-rw-r--r--chromium/base/sequence_token.cc34
-rw-r--r--chromium/base/stl_util.h301
-rw-r--r--chromium/base/stl_util_unittest.cc175
-rw-r--r--chromium/base/strings/string_number_conversions_fuzzer.cc51
-rw-r--r--chromium/base/synchronization/cancellation_flag.h20
-rw-r--r--chromium/base/syslog_logging.cc20
-rw-r--r--chromium/base/system/sys_info.cc18
-rw-r--r--chromium/base/system/sys_info_fuchsia.cc65
-rw-r--r--chromium/base/system/sys_info_posix.cc21
-rw-r--r--chromium/base/system/sys_info_unittest.cc3
-rw-r--r--chromium/base/task/OWNERS1
-rw-r--r--chromium/base/task/README.md1
-rw-r--r--chromium/base/task/common/task_annotator_unittest.cc6
-rw-r--r--chromium/base/task/lazy_task_runner.cc8
-rw-r--r--chromium/base/task/lazy_task_runner.h10
-rw-r--r--chromium/base/task/lazy_task_runner_unittest.cc14
-rw-r--r--chromium/base/task/post_task.cc117
-rw-r--r--chromium/base/task/post_task.h124
-rw-r--r--chromium/base/task/post_task_unittest.cc96
-rw-r--r--chromium/base/task/promise/abstract_promise.cc583
-rw-r--r--chromium/base/task/promise/abstract_promise.h592
-rw-r--r--chromium/base/task/promise/abstract_promise_unittest.cc671
-rw-r--r--chromium/base/task/promise/all_container_executor.h92
-rw-r--r--chromium/base/task/promise/all_tuple_executor.h21
-rw-r--r--chromium/base/task/promise/dependent_list.cc310
-rw-r--r--chromium/base/task/promise/dependent_list.h263
-rw-r--r--chromium/base/task/promise/dependent_list_unittest.cc139
-rw-r--r--chromium/base/task/promise/finally_executor.cc3
-rw-r--r--chromium/base/task/promise/finally_executor.h26
-rw-r--r--chromium/base/task/promise/helpers.cc64
-rw-r--r--chromium/base/task/promise/helpers.h154
-rw-r--r--chromium/base/task/promise/helpers_unittest.cc122
-rw-r--r--chromium/base/task/promise/no_op_promise_executor.cc24
-rw-r--r--chromium/base/task/promise/no_op_promise_executor.h8
-rw-r--r--chromium/base/task/promise/post_task_executor.h95
-rw-r--r--chromium/base/task/promise/post_task_executor_unittest.cc68
-rw-r--r--chromium/base/task/promise/promise.h295
-rw-r--r--chromium/base/task/promise/promise_executor.cc50
-rw-r--r--chromium/base/task/promise/promise_executor.h222
-rw-r--r--chromium/base/task/promise/promise_unittest.cc275
-rw-r--r--chromium/base/task/promise/promise_unittest.nc3
-rw-r--r--chromium/base/task/promise/then_and_catch_executor.cc12
-rw-r--r--chromium/base/task/promise/then_and_catch_executor.h71
-rw-r--r--chromium/base/task/sequence_manager/OWNERS1
-rw-r--r--chromium/base/task/sequence_manager/enqueue_order.h31
-rw-r--r--chromium/base/task/sequence_manager/enqueue_order_generator.cc (renamed from chromium/base/task/sequence_manager/enqueue_order.cc)7
-rw-r--r--chromium/base/task/sequence_manager/enqueue_order_generator.h43
-rw-r--r--chromium/base/task/sequence_manager/sequence_manager.cc10
-rw-r--r--chromium/base/task/sequence_manager/sequence_manager.h41
-rw-r--r--chromium/base/task/sequence_manager/sequence_manager_impl.cc96
-rw-r--r--chromium/base/task/sequence_manager/sequence_manager_impl.h26
-rw-r--r--chromium/base/task/sequence_manager/sequence_manager_impl_unittest.cc501
-rw-r--r--chromium/base/task/sequence_manager/sequence_manager_perftest.cc4
-rw-r--r--chromium/base/task/sequence_manager/sequenced_task_source.h3
-rw-r--r--chromium/base/task/sequence_manager/task_queue.cc33
-rw-r--r--chromium/base/task/sequence_manager/task_queue.h19
-rw-r--r--chromium/base/task/sequence_manager/task_queue_impl.cc315
-rw-r--r--chromium/base/task/sequence_manager/task_queue_impl.h64
-rw-r--r--chromium/base/task/sequence_manager/task_queue_selector.cc11
-rw-r--r--chromium/base/task/sequence_manager/task_queue_selector.h16
-rw-r--r--chromium/base/task/sequence_manager/task_queue_selector_unittest.cc110
-rw-r--r--chromium/base/task/sequence_manager/tasks.cc4
-rw-r--r--chromium/base/task/sequence_manager/tasks.h10
-rw-r--r--chromium/base/task/sequence_manager/thread_controller_impl.cc6
-rw-r--r--chromium/base/task/sequence_manager/thread_controller_impl.h2
-rw-r--r--chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl.cc15
-rw-r--r--chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl_unittest.cc195
-rw-r--r--chromium/base/task/sequence_manager/time_domain.h8
-rw-r--r--chromium/base/task/sequence_manager/work_queue_unittest.cc4
-rw-r--r--chromium/base/task/single_thread_task_executor.cc21
-rw-r--r--chromium/base/task/task_executor.h20
-rw-r--r--chromium/base/task/task_features.cc3
-rw-r--r--chromium/base/task/task_features.h4
-rw-r--r--chromium/base/task/task_traits.h89
-rw-r--r--chromium/base/task/task_traits_extension.h2
-rw-r--r--chromium/base/task/task_traits_extension_unittest.cc8
-rw-r--r--chromium/base/task/task_traits_unittest.cc66
-rw-r--r--chromium/base/task/thread_pool/delayed_task_manager.cc20
-rw-r--r--chromium/base/task/thread_pool/delayed_task_manager.h34
-rw-r--r--chromium/base/task/thread_pool/delayed_task_manager_unittest.cc29
-rw-r--r--chromium/base/task/thread_pool/job_task_source.cc90
-rw-r--r--chromium/base/task/thread_pool/job_task_source.h69
-rw-r--r--chromium/base/task/thread_pool/job_task_source_unittest.cc146
-rw-r--r--chromium/base/task/thread_pool/pooled_single_thread_task_runner_manager.cc35
-rw-r--r--chromium/base/task/thread_pool/pooled_single_thread_task_runner_manager.h6
-rw-r--r--chromium/base/task/thread_pool/pooled_single_thread_task_runner_manager_unittest.cc188
-rw-r--r--chromium/base/task/thread_pool/pooled_task_runner_delegate.h7
-rw-r--r--chromium/base/task/thread_pool/priority_queue.cc27
-rw-r--r--chromium/base/task/thread_pool/priority_queue.h12
-rw-r--r--chromium/base/task/thread_pool/priority_queue_unittest.cc61
-rw-r--r--chromium/base/task/thread_pool/sequence.cc32
-rw-r--r--chromium/base/task/thread_pool/sequence.h7
-rw-r--r--chromium/base/task/thread_pool/sequence_unittest.cc92
-rw-r--r--chromium/base/task/thread_pool/service_thread.cc18
-rw-r--r--chromium/base/task/thread_pool/task.cc10
-rw-r--r--chromium/base/task/thread_pool/task_source.cc52
-rw-r--r--chromium/base/task/thread_pool/task_source.h203
-rw-r--r--chromium/base/task/thread_pool/task_tracker.cc90
-rw-r--r--chromium/base/task/thread_pool/task_tracker.h47
-rw-r--r--chromium/base/task/thread_pool/task_tracker_posix_unittest.cc6
-rw-r--r--chromium/base/task/thread_pool/task_tracker_unittest.cc231
-rw-r--r--chromium/base/task/thread_pool/test_task_factory.cc1
-rw-r--r--chromium/base/task/thread_pool/test_utils.cc134
-rw-r--r--chromium/base/task/thread_pool/test_utils.h38
-rw-r--r--chromium/base/task/thread_pool/thread_group.cc122
-rw-r--r--chromium/base/task/thread_pool/thread_group.h79
-rw-r--r--chromium/base/task/thread_pool/thread_group_impl.cc192
-rw-r--r--chromium/base/task/thread_pool/thread_group_impl.h25
-rw-r--r--chromium/base/task/thread_pool/thread_group_impl_unittest.cc238
-rw-r--r--chromium/base/task/thread_pool/thread_group_native.cc63
-rw-r--r--chromium/base/task/thread_pool/thread_group_native.h11
-rw-r--r--chromium/base/task/thread_pool/thread_group_unittest.cc333
-rw-r--r--chromium/base/task/thread_pool/thread_pool.h2
-rw-r--r--chromium/base/task/thread_pool/thread_pool_clock.cc35
-rw-r--r--chromium/base/task/thread_pool/thread_pool_clock.h40
-rw-r--r--chromium/base/task/thread_pool/thread_pool_impl.cc92
-rw-r--r--chromium/base/task/thread_pool/thread_pool_impl.h42
-rw-r--r--chromium/base/task/thread_pool/thread_pool_impl_unittest.cc335
-rw-r--r--chromium/base/task/thread_pool/thread_pool_perftest.cc6
-rw-r--r--chromium/base/task/thread_pool/worker_thread.cc13
-rw-r--r--chromium/base/task/thread_pool/worker_thread.h4
-rw-r--r--chromium/base/task/thread_pool/worker_thread_stack.cc2
-rw-r--r--chromium/base/task/thread_pool/worker_thread_stack_unittest.cc6
-rw-r--r--chromium/base/task/thread_pool/worker_thread_unittest.cc80
-rw-r--r--chromium/base/task_runner.cc41
-rw-r--r--chromium/base/template_util.h6
-rw-r--r--chromium/base/template_util_unittest.cc13
-rw-r--r--chromium/base/test/BUILD.gn4
-rw-r--r--chromium/base/test/clang_coverage.h7
-rw-r--r--chromium/base/threading/platform_thread_android.cc2
-rw-r--r--chromium/base/threading/platform_thread_win.cc9
-rw-r--r--chromium/base/threading/platform_thread_win_unittest.cc4
-rw-r--r--chromium/base/threading/scoped_thread_priority.cc48
-rw-r--r--chromium/base/threading/scoped_thread_priority.h43
-rw-r--r--chromium/base/threading/scoped_thread_priority_unittest.cc84
-rw-r--r--chromium/base/threading/sequence_bound.h6
-rw-r--r--chromium/base/threading/sequence_bound_unittest.cc21
-rw-r--r--chromium/base/threading/sequenced_task_runner_handle_unittest.cc13
-rw-r--r--chromium/base/threading/thread.h2
-rw-r--r--chromium/base/threading/thread_id_name_manager.cc19
-rw-r--r--chromium/base/threading/thread_id_name_manager.h26
-rw-r--r--chromium/base/threading/thread_local_storage_perftest.cc200
-rw-r--r--chromium/base/threading/thread_restrictions.cc19
-rw-r--r--chromium/base/threading/thread_restrictions.h27
-rw-r--r--chromium/base/time/default_clock.cc6
-rw-r--r--chromium/base/time/time.h11
-rw-r--r--chromium/base/time/time_exploded_posix.cc74
-rw-r--r--chromium/base/time/time_fuchsia.cc53
-rw-r--r--chromium/base/time/time_unittest.cc22
-rw-r--r--chromium/base/time/time_win.cc37
-rw-r--r--chromium/base/timer/hi_res_timer_manager_unittest.cc61
-rw-r--r--chromium/base/timer/hi_res_timer_manager_win.cc9
-rw-r--r--chromium/base/timer/lap_timer_unittest.cc9
-rw-r--r--chromium/base/timer/timer.h11
-rw-r--r--chromium/base/timer/timer_unittest.cc6
-rw-r--r--chromium/base/trace_event/blame_context.cc3
-rw-r--r--chromium/base/trace_event/blame_context.h2
-rw-r--r--chromium/base/trace_event/builtin_categories.h3
-rw-r--r--chromium/base/trace_event/common/trace_event_common.h49
-rw-r--r--chromium/base/trace_event/cpufreq_monitor_android.cc5
-rw-r--r--chromium/base/trace_event/event_name_filter_unittest.cc5
-rw-r--r--chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc5
-rw-r--r--chromium/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc3
-rw-r--r--chromium/base/trace_event/memory_allocator_dump_unittest.cc5
-rw-r--r--chromium/base/trace_event/memory_dump_manager_unittest.cc2
-rw-r--r--chromium/base/trace_event/memory_infra_background_whitelist.cc1
-rw-r--r--chromium/base/trace_event/trace_event.h58
-rw-r--r--chromium/base/trace_event/trace_event_impl.cc17
-rw-r--r--chromium/base/trace_event/trace_event_impl.h15
-rw-r--r--chromium/base/trace_event/trace_event_unittest.cc9
-rw-r--r--chromium/base/trace_event/trace_log.cc81
-rw-r--r--chromium/base/trace_event/trace_log.h11
-rw-r--r--chromium/base/util/type_safety/BUILD.gn18
-rw-r--r--chromium/base/util/type_safety/OWNERS2
-rw-r--r--chromium/base/util/type_safety/pass_key.h48
-rw-r--r--chromium/base/util/type_safety/pass_key_unittest.cc46
-rw-r--r--chromium/base/util/type_safety/pass_key_unittest.nc73
-rw-r--r--chromium/base/util/type_safety/strong_alias.h7
-rw-r--r--chromium/base/util/type_safety/strong_alias_unittest.cc10
-rw-r--r--chromium/base/values.cc2
-rw-r--r--chromium/base/values.h18
-rw-r--r--chromium/base/win/async_operation.h80
-rw-r--r--chromium/base/win/com_init_check_hook.cc4
-rw-r--r--chromium/base/win/enum_variant.cc53
-rw-r--r--chromium/base/win/enum_variant.h15
-rw-r--r--chromium/base/win/enum_variant_unittest.cc20
-rw-r--r--chromium/base/win/hstring_compare.cc40
-rw-r--r--chromium/base/win/hstring_compare.h28
-rw-r--r--chromium/base/win/hstring_compare_unittest.cc74
-rw-r--r--chromium/base/win/i18n.cc159
-rw-r--r--chromium/base/win/i18n_unittest.cc8
-rw-r--r--chromium/base/win/registry.cc25
-rw-r--r--chromium/base/win/registry.h7
-rw-r--r--chromium/base/win/scoped_variant.cc11
-rw-r--r--chromium/base/win/scoped_variant.h6
-rw-r--r--chromium/base/win/scoped_variant_unittest.cc13
-rw-r--r--chromium/base/win/startup_information.cc72
-rw-r--r--chromium/base/win/startup_information.h3
-rw-r--r--chromium/base/win/vector.h69
-rw-r--r--chromium/base/win/win_util.cc33
-rw-r--r--chromium/base/win/win_util.h3
-rw-r--r--chromium/base/win/win_util_unittest.cc2
-rw-r--r--chromium/base/win/windows_version.cc26
-rw-r--r--chromium/base/win/windowsx_shim.h1
-rw-r--r--chromium/base/win/winrt_foundation_helpers.h151
-rw-r--r--chromium/base/win/wmi.cc6
414 files changed, 13525 insertions, 7804 deletions
diff --git a/chromium/base/BUILD.gn b/chromium/base/BUILD.gn
index 610e69eae24..1351225a7d5 100644
--- a/chromium/base/BUILD.gn
+++ b/chromium/base/BUILD.gn
@@ -68,7 +68,8 @@ assert(!enable_mutex_priority_inheritance || is_chromecast,
"Do not enable PI mutexes without consulting the security team")
# Determines whether libevent should be dep.
-dep_libevent = !is_fuchsia && !is_win && !(is_nacl && !is_nacl_nonsfi)
+dep_libevent =
+ !is_fuchsia && !is_win && !is_mac && !(is_nacl && !is_nacl_nonsfi)
# Determines whether message_pump_libevent should be used.
use_libevent = dep_libevent && !is_ios
@@ -307,8 +308,6 @@ jumbo_component("base") {
"guid.h",
"hash/hash.cc",
"hash/hash.h",
- "hash/sha1.cc",
- "hash/sha1.h",
"immediate_crash.h",
"ios/block_types.h",
"ios/crb_protocol_observers.h",
@@ -371,12 +370,8 @@ jumbo_component("base") {
"mac/mac_util.mm",
"mac/mach_logging.cc",
"mac/mach_logging.h",
- "mac/mach_port_broker.h",
- "mac/mach_port_broker.mm",
"mac/mach_port_rendezvous.cc",
"mac/mach_port_rendezvous.h",
- "mac/mach_port_util.cc",
- "mac/mach_port_util.h",
"mac/objc_release_properties.h",
"mac/objc_release_properties.mm",
"mac/os_crash_dumps.cc",
@@ -424,8 +419,6 @@ jumbo_component("base") {
"memory/memory_pressure_monitor_chromeos.h",
"memory/memory_pressure_monitor_mac.cc",
"memory/memory_pressure_monitor_mac.h",
- "memory/memory_pressure_monitor_notifying_chromeos.cc",
- "memory/memory_pressure_monitor_notifying_chromeos.h",
"memory/memory_pressure_monitor_win.cc",
"memory/memory_pressure_monitor_win.h",
"memory/platform_shared_memory_region.cc",
@@ -623,6 +616,7 @@ jumbo_component("base") {
"profiler/native_unwinder_mac.h",
"profiler/native_unwinder_win.cc",
"profiler/native_unwinder_win.h",
+ "profiler/profile_builder.cc",
"profiler/profile_builder.h",
"profiler/register_context.h",
"profiler/sample_metadata.cc",
@@ -715,7 +709,6 @@ jumbo_component("base") {
"sync_socket_win.cc",
"synchronization/atomic_flag.cc",
"synchronization/atomic_flag.h",
- "synchronization/cancellation_flag.h",
"synchronization/condition_variable.h",
"synchronization/condition_variable_win.cc",
"synchronization/lock.cc",
@@ -761,10 +754,14 @@ jumbo_component("base") {
"task/promise/dependent_list.h",
"task/promise/finally_executor.cc",
"task/promise/finally_executor.h",
+ "task/promise/helpers.cc",
"task/promise/helpers.h",
"task/promise/no_op_promise_executor.cc",
- "task/promise/no_op_promise_executor.h",
+ "task/promise/post_task_executor.h",
+ "task/promise/promise.h",
"task/promise/promise.h",
+ "task/promise/promise_executor.cc",
+ "task/promise/promise_executor.h",
"task/promise/promise_result.h",
"task/promise/then_and_catch_executor.cc",
"task/promise/then_and_catch_executor.h",
@@ -774,8 +771,9 @@ jumbo_component("base") {
"task/sequence_manager/associated_thread_id.h",
"task/sequence_manager/atomic_flag_set.cc",
"task/sequence_manager/atomic_flag_set.h",
- "task/sequence_manager/enqueue_order.cc",
"task/sequence_manager/enqueue_order.h",
+ "task/sequence_manager/enqueue_order_generator.cc",
+ "task/sequence_manager/enqueue_order_generator.h",
"task/sequence_manager/lazily_deallocated_deque.h",
"task/sequence_manager/lazy_now.cc",
"task/sequence_manager/lazy_now.h",
@@ -826,6 +824,8 @@ jumbo_component("base") {
"task/thread_pool/environment_config.h",
"task/thread_pool/initialization_util.cc",
"task/thread_pool/initialization_util.h",
+ "task/thread_pool/job_task_source.cc",
+ "task/thread_pool/job_task_source.h",
"task/thread_pool/pooled_parallel_task_runner.cc",
"task/thread_pool/pooled_parallel_task_runner.h",
"task/thread_pool/pooled_sequenced_task_runner.cc",
@@ -860,6 +860,8 @@ jumbo_component("base") {
"task/thread_pool/thread_group_native_win.h",
"task/thread_pool/thread_pool.cc",
"task/thread_pool/thread_pool.h",
+ "task/thread_pool/thread_pool_clock.cc",
+ "task/thread_pool/thread_pool_clock.h",
"task/thread_pool/thread_pool_impl.cc",
"task/thread_pool/thread_pool_impl.h",
"task/thread_pool/tracked_ref.h",
@@ -895,6 +897,8 @@ jumbo_component("base") {
"threading/post_task_and_reply_impl.h",
"threading/scoped_blocking_call.cc",
"threading/scoped_blocking_call.h",
+ "threading/scoped_thread_priority.cc",
+ "threading/scoped_thread_priority.h",
"threading/sequence_bound.h",
"threading/sequence_local_storage_map.cc",
"threading/sequence_local_storage_map.h",
@@ -1054,6 +1058,8 @@ jumbo_component("base") {
"win/event_trace_controller.h",
"win/event_trace_provider.cc",
"win/event_trace_provider.h",
+ "win/hstring_compare.cc",
+ "win/hstring_compare.h",
"win/hstring_reference.cc",
"win/hstring_reference.h",
"win/i18n.cc",
@@ -1119,6 +1125,7 @@ jumbo_component("base") {
"win/windows_version.cc",
"win/windows_version.h",
"win/windowsx_shim.h",
+ "win/winrt_foundation_helpers.h",
"win/winrt_storage_util.cc",
"win/winrt_storage_util.h",
"win/wmi.cc",
@@ -1373,7 +1380,6 @@ jumbo_component("base") {
"android/jni_weak_ref.h",
"android/library_loader/anchor_functions.cc",
"android/library_loader/anchor_functions.h",
- "android/library_loader/library_load_from_apk_status_codes.h",
"android/library_loader/library_loader_hooks.cc",
"android/library_loader/library_loader_hooks.h",
"android/library_loader/library_prefetcher.cc",
@@ -1491,6 +1497,8 @@ jumbo_component("base") {
"files/file_posix.cc",
"files/file_util_posix.cc",
"files/memory_mapped_file_posix.cc",
+ "fuchsia/default_context.cc",
+ "fuchsia/default_context.h",
"fuchsia/default_job.cc",
"fuchsia/default_job.h",
"fuchsia/file_utils.cc",
@@ -1542,7 +1550,6 @@ jumbo_component("base") {
"synchronization/waitable_event_posix.cc",
"synchronization/waitable_event_watcher_posix.cc",
"system/sys_info_fuchsia.cc",
- "system/sys_info_posix.cc",
"task/thread_pool/task_tracker_posix.cc",
"task/thread_pool/task_tracker_posix.h",
"threading/platform_thread_fuchsia.cc",
@@ -1562,16 +1569,18 @@ jumbo_component("base") {
"//third_party/fuchsia-sdk/sdk:fdio",
"//third_party/fuchsia-sdk/sdk:fidl_cpp",
"//third_party/fuchsia-sdk/sdk:io",
+ "//third_party/fuchsia-sdk/sdk:sys_cpp",
"//third_party/fuchsia-sdk/sdk:zx",
]
deps += [
"//third_party/fuchsia-sdk/sdk:async_default",
"//third_party/fuchsia-sdk/sdk:async_loop_cpp",
+ "//third_party/fuchsia-sdk/sdk:deprecatedtimezone",
"//third_party/fuchsia-sdk/sdk:fidl",
- "//third_party/fuchsia-sdk/sdk:svc",
"//third_party/fuchsia-sdk/sdk:sys",
"//third_party/fuchsia-sdk/sdk:syslog",
+ "//third_party/fuchsia-sdk/sdk:vfs_cpp",
]
}
@@ -1584,16 +1593,19 @@ jumbo_component("base") {
"hash/md5.h",
"hash/md5_constexpr.h",
"hash/md5_constexpr_internal.h",
+ "hash/sha1.h",
]
if (is_nacl) {
sources += [
"hash/md5_nacl.cc",
"hash/md5_nacl.h",
+ "hash/sha1.cc",
]
} else {
sources += [
"hash/md5_boringssl.cc",
"hash/md5_boringssl.h",
+ "hash/sha1_boringssl.cc",
]
public_deps += [ "//third_party/boringssl" ]
}
@@ -1688,6 +1700,8 @@ jumbo_component("base") {
# PartitionAlloc uses SpinLock, which doesn't work in NaCl (see below).
"allocator/partition_allocator/address_space_randomization.cc",
"allocator/partition_allocator/address_space_randomization.h",
+ "allocator/partition_allocator/memory_reclaimer.cc",
+ "allocator/partition_allocator/memory_reclaimer.h",
"allocator/partition_allocator/oom.h",
"allocator/partition_allocator/oom_callback.cc",
"allocator/partition_allocator/oom_callback.h",
@@ -1776,6 +1790,12 @@ jumbo_component("base") {
":base_win_linker_flags",
"//tools/win/DebugVisualizers:chrome",
]
+ inputs = [
+ # chrome.natvis listed as an input here instead of in
+ # //tools/win/DebugVisualizers:chrome to prevent unnecessary size increase
+ # in generated build files.
+ "//tools/win/DebugVisualizers/chrome.natvis",
+ ]
}
# Desktop Mac.
@@ -2211,11 +2231,13 @@ component("i18n") {
test("base_perftests") {
sources = [
+ "hash/sha1_perftest.cc",
"message_loop/message_pump_perftest.cc",
"observer_list_perftest.cc",
"strings/string_util_perftest.cc",
"task/sequence_manager/sequence_manager_perftest.cc",
"task/thread_pool/thread_pool_perftest.cc",
+ "threading/thread_local_storage_perftest.cc",
# "test/run_all_unittests.cc",
"json/json_perftest.cc",
@@ -2424,6 +2446,7 @@ test("base_unittests") {
"android/application_status_listener_unittest.cc",
"android/child_process_unittest.cc",
"android/content_uri_utils_unittest.cc",
+ "android/java_handler_thread_unittest.cc",
"android/jni_android_unittest.cc",
"android/jni_array_unittest.cc",
"android/jni_string_unittest.cc",
@@ -2520,13 +2543,13 @@ test("base_unittests") {
"json/json_writer_unittest.cc",
"json/string_escape_unittest.cc",
"lazy_instance_unittest.cc",
+ "location_unittest.cc",
"logging_unittest.cc",
"mac/bind_objc_block_unittest.mm",
"mac/call_with_eh_frame_unittest.mm",
"mac/dispatch_source_mach_unittest.cc",
"mac/foundation_util_unittest.mm",
"mac/mac_util_unittest.mm",
- "mac/mach_port_broker_unittest.cc",
"mac/mach_port_rendezvous_unittest.cc",
"mac/objc_release_properties_unittest.mm",
"mac/scoped_mach_vm_unittest.cc",
@@ -2538,7 +2561,6 @@ test("base_unittests") {
"memory/memory_pressure_listener_unittest.cc",
"memory/memory_pressure_monitor_chromeos_unittest.cc",
"memory/memory_pressure_monitor_mac_unittest.cc",
- "memory/memory_pressure_monitor_notifying_chromeos_unittest.cc",
"memory/memory_pressure_monitor_unittest.cc",
"memory/memory_pressure_monitor_win_unittest.cc",
"memory/platform_shared_memory_region_unittest.cc",
@@ -2654,6 +2676,7 @@ test("base_unittests") {
"task/promise/abstract_promise_unittest.cc",
"task/promise/dependent_list_unittest.cc",
"task/promise/helpers_unittest.cc",
+ "task/promise/post_task_executor_unittest.cc",
"task/promise/promise_unittest.cc",
"task/scoped_set_task_priority_for_current_thread_unittest.cc",
"task/sequence_manager/atomic_flag_set_unittest.cc",
@@ -2672,6 +2695,7 @@ test("base_unittests") {
"task/thread_pool/can_run_policy_test.h",
"task/thread_pool/delayed_task_manager_unittest.cc",
"task/thread_pool/environment_config_unittest.cc",
+ "task/thread_pool/job_task_source_unittest.cc",
"task/thread_pool/pooled_single_thread_task_runner_manager_unittest.cc",
"task/thread_pool/priority_queue_unittest.cc",
"task/thread_pool/sequence_sort_key_unittest.cc",
@@ -2690,8 +2714,8 @@ test("base_unittests") {
"task/thread_pool/worker_thread_unittest.cc",
"task_runner_util_unittest.cc",
"template_util_unittest.cc",
+ "test/gmock_callback_support_unittest.cc",
"test/launcher/test_launcher_unittest.cc",
- "test/launcher/test_results_tracker_unittest.cc",
"test/launcher/unit_test_launcher_unittest.cc",
"test/metrics/histogram_enum_reader_unittest.cc",
"test/metrics/histogram_tester_unittest.cc",
@@ -2710,6 +2734,7 @@ test("base_unittests") {
"threading/platform_thread_win_unittest.cc",
"threading/post_task_and_reply_impl_unittest.cc",
"threading/scoped_blocking_call_unittest.cc",
+ "threading/scoped_thread_priority_unittest.cc",
"threading/sequence_bound_unittest.cc",
"threading/sequence_local_storage_map_unittest.cc",
"threading/sequence_local_storage_slot_unittest.cc",
@@ -2770,6 +2795,7 @@ test("base_unittests") {
"win/event_trace_consumer_unittest.cc",
"win/event_trace_controller_unittest.cc",
"win/event_trace_provider_unittest.cc",
+ "win/hstring_compare_unittest.cc",
"win/hstring_reference_unittest.cc",
"win/i18n_unittest.cc",
"win/iunknown_impl_unittest.cc",
@@ -2889,7 +2915,6 @@ test("base_unittests") {
"sync_socket_unittest.cc",
"synchronization/waitable_event_watcher_unittest.cc",
"test/launcher/test_launcher_unittest.cc",
- "test/launcher/test_results_tracker_unittest.cc",
"test/launcher/unit_test_launcher_unittest.cc",
]
@@ -2911,6 +2936,7 @@ test("base_unittests") {
if (use_partition_alloc) {
sources += [
"allocator/partition_allocator/address_space_randomization_unittest.cc",
+ "allocator/partition_allocator/memory_reclaimer_unittest.cc",
"allocator/partition_allocator/page_allocator_unittest.cc",
"allocator/partition_allocator/partition_alloc_unittest.cc",
"allocator/partition_allocator/spin_lock_unittest.cc",
@@ -2962,9 +2988,9 @@ test("base_unittests") {
"files/file_descriptor_watcher_posix_unittest.cc",
"fuchsia/file_utils_unittest.cc",
"fuchsia/filtered_service_directory_unittest.cc",
+ "fuchsia/scoped_service_binding_unittest.cc",
"fuchsia/service_directory_test_base.cc",
"fuchsia/service_directory_test_base.h",
- "fuchsia/service_directory_unittest.cc",
"fuchsia/service_provider_impl_unittest.cc",
"message_loop/message_loop_io_posix_unittest.cc",
"posix/file_descriptor_shuffle_unittest.cc",
@@ -3018,16 +3044,6 @@ test("base_unittests") {
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
configs += [ "//build/config/compiler:no_size_t_to_int_warning" ]
-
- # Symbols for crashes when running tests on swarming.
- if (symbol_level > 0) {
- if (is_win) {
- data += [ "$root_out_dir/base_unittests.exe.pdb" ]
- } else if (is_mac) {
- # TODO(crbug.com/330301): make this conditional on mac_strip_release.
- # data += [ "$root_out_dir/base_unittests.dSYM/" ]
- }
- }
}
action("build_date") {
@@ -3119,12 +3135,9 @@ if (is_android) {
public_deps = [
":android_runtime_jni_headers",
]
-
- jni_package = "base"
}
generate_jar_jni("android_runtime_jni_headers") {
- jni_package = "base"
classes = [
"java/lang/Runnable.class",
"java/lang/Runtime.class",
@@ -3225,6 +3238,7 @@ if (is_android) {
"android/java/src/org/chromium/base/compat/ApiHelperForO.java",
"android/java/src/org/chromium/base/compat/ApiHelperForOMR1.java",
"android/java/src/org/chromium/base/compat/ApiHelperForP.java",
+ "android/java/src/org/chromium/base/library_loader/LegacyLinker.java",
"android/java/src/org/chromium/base/library_loader/LibraryLoader.java",
"android/java/src/org/chromium/base/library_loader/LibraryPrefetcher.java",
"android/java/src/org/chromium/base/library_loader/Linker.java",
@@ -3422,6 +3436,7 @@ if (is_android) {
"android/junit/src/org/chromium/base/metrics/test/ShadowRecordHistogram.java",
"test/android/junit/src/org/chromium/base/task/test/BackgroundShadowAsyncTask.java",
"test/android/junit/src/org/chromium/base/task/test/CustomShadowAsyncTask.java",
+ "test/android/junit/src/org/chromium/base/task/test/ShadowPostTask.java",
"test/android/junit/src/org/chromium/base/test/BaseRobolectricTestRunner.java",
"test/android/junit/src/org/chromium/base/test/util/TestRunnerTestRule.java",
"//third_party/robolectric/custom_asynctask/java/src/org/chromium/base/task/test/ShadowAsyncTask.java",
@@ -3481,7 +3496,6 @@ if (is_android) {
sources = [
"android/application_status_listener.h",
"android/child_process_binding_types.h",
- "android/library_loader/library_load_from_apk_status_codes.h",
"android/library_loader/library_loader_hooks.h",
"android/task_scheduler/task_runner_android.h",
"memory/memory_pressure_listener.h",
@@ -3495,11 +3509,8 @@ if (is_android) {
use_final_fields = false
}
- java_cpp_template("base_native_libraries_gen") {
- sources = [
- "android/java/templates/NativeLibraries.template",
- ]
- package_path = "org/chromium/base/library_loader"
+ write_native_libraries_java("base_native_libraries_gen") {
+ use_final_fields = false
}
android_library("base_java_unittest_support") {
diff --git a/chromium/base/DEPS b/chromium/base/DEPS
index 133105cdf36..62b4a89f807 100644
--- a/chromium/base/DEPS
+++ b/chromium/base/DEPS
@@ -1,5 +1,4 @@
include_rules = [
- "+jni",
"+third_party/ashmem",
"+third_party/apple_apsl",
"+third_party/boringssl/src/include",
@@ -14,4 +13,14 @@ include_rules = [
# ICU dependendencies must be separate from the rest of base.
"-i18n",
+
+ # //base/util can use //base but not vice versa.
+ "-util",
]
+
+specific_include_rules = {
+ # Dependencies specific for fuzz targets and other fuzzing-related code.
+ ".*fuzz.*": [
+ "+third_party/libFuzzer/src/utils", # This contains FuzzedDataProvider.
+ ],
+}
diff --git a/chromium/base/allocator/allocator_interception_mac.mm b/chromium/base/allocator/allocator_interception_mac.mm
index 0f5f8608ca5..9db4dc5d48a 100644
--- a/chromium/base/allocator/allocator_interception_mac.mm
+++ b/chromium/base/allocator/allocator_interception_mac.mm
@@ -211,7 +211,7 @@ void* oom_killer_memalign_purgeable(struct _malloc_zone_t* zone,
// === Core Foundation CFAllocators ===
bool CanGetContextForCFAllocator() {
- return !base::mac::IsOSLaterThan10_14_DontCallThis();
+ return !base::mac::IsOSLaterThan10_15_DontCallThis();
}
CFAllocatorContext* ContextForCFAllocator(CFAllocatorRef allocator) {
diff --git a/chromium/base/allocator/partition_allocator/address_space_randomization_unittest.cc b/chromium/base/allocator/partition_allocator/address_space_randomization_unittest.cc
index 3fe46ba64e3..395567a056d 100644
--- a/chromium/base/allocator/partition_allocator/address_space_randomization_unittest.cc
+++ b/chromium/base/allocator/partition_allocator/address_space_randomization_unittest.cc
@@ -180,9 +180,10 @@ void RandomBitCorrelation(int random_bit) {
// Chi squared analysis for k = 2 (2, states: same/not-same) and one
// degree of freedom (k - 1).
double chi_squared = ChiSquared(m, kRepeats);
- // For 1 degree of freedom this corresponds to 1 in a million. We are
- // running ~8000 tests, so that would be surprising.
- CHECK_GE(24, chi_squared);
+ // For k=2 probability of Chi^2 < 35 is p=3.338e-9. This condition is
+ // tested ~19000 times, so probability of it failing randomly per one
+ // base_unittests run is (1 - (1 - p) ^ 19000) ~= 6e-5.
+ CHECK_LE(chi_squared, 35.0);
// If the predictor bit is a fixed 0 or 1 then it makes no sense to
// repeat the test with a different age.
if (predictor_bit < 0)
@@ -191,9 +192,6 @@ void RandomBitCorrelation(int random_bit) {
}
}
-// TODO(crbug.com/811881): These are flaky on Fuchsia
-#if !defined(OS_FUCHSIA)
-
// Tests are fairly slow, so give each random bit its own test.
#define TEST_RANDOM_BIT(BIT) \
TEST(AddressSpaceRandomizationTest, RandomBitCorrelations##BIT) { \
@@ -242,8 +240,6 @@ TEST_RANDOM_BIT(48)
// No platforms have more than 48 address bits.
#endif // defined(ARCH_CPU_64_BITS)
-#endif // defined(OS_FUCHSIA)
-
#undef TEST_RANDOM_BIT
} // namespace base
diff --git a/chromium/base/allocator/partition_allocator/memory_reclaimer.cc b/chromium/base/allocator/partition_allocator/memory_reclaimer.cc
new file mode 100644
index 00000000000..991576c1fb5
--- /dev/null
+++ b/chromium/base/allocator/partition_allocator/memory_reclaimer.cc
@@ -0,0 +1,153 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/memory_reclaimer.h"
+
+#include "base/allocator/partition_allocator/partition_alloc.h"
+#include "base/bind.h"
+#include "base/location.h"
+#include "base/metrics/histogram_functions.h"
+#include "base/timer/elapsed_timer.h"
+#include "base/trace_event/trace_event.h"
+
+namespace base {
+
+namespace internal {
+
+// TODO(crbug.com/942512): Remove the feature after the M77 branch.
+const Feature kPartitionAllocPeriodicDecommit{"PartitionAllocPeriodicDecommit",
+ FEATURE_ENABLED_BY_DEFAULT};
+
+} // namespace internal
+
+namespace {
+
+bool IsDeprecatedDecommitEnabled() {
+ return !FeatureList::IsEnabled(internal::kPartitionAllocPeriodicDecommit);
+}
+
+} // namespace
+
+constexpr TimeDelta PartitionAllocMemoryReclaimer::kStatsRecordingTimeDelta;
+
+// static
+PartitionAllocMemoryReclaimer* PartitionAllocMemoryReclaimer::Instance() {
+ static NoDestructor<PartitionAllocMemoryReclaimer> instance;
+ return instance.get();
+}
+
+void PartitionAllocMemoryReclaimer::RegisterPartition(
+ internal::PartitionRootBase* partition) {
+ AutoLock lock(lock_);
+ DCHECK(partition);
+ auto it_and_whether_inserted = partitions_.insert(partition);
+ DCHECK(it_and_whether_inserted.second);
+}
+
+void PartitionAllocMemoryReclaimer::UnregisterPartition(
+ internal::PartitionRootBase* partition) {
+ AutoLock lock(lock_);
+ DCHECK(partition);
+ size_t erased_count = partitions_.erase(partition);
+ DCHECK_EQ(1u, erased_count);
+}
+
+void PartitionAllocMemoryReclaimer::Start(
+ scoped_refptr<SequencedTaskRunner> task_runner) {
+ DCHECK(!timer_);
+ DCHECK(task_runner);
+
+ {
+ AutoLock lock(lock_);
+ DCHECK(!partitions_.empty());
+ }
+
+ if (!FeatureList::IsEnabled(internal::kPartitionAllocPeriodicDecommit))
+ return;
+
+ // This does not need to run on the main thread, however there are a few
+ // reasons to do it there:
+ // - Most of PartitionAlloc's usage is on the main thread, hence PA's metadata
+ // is more likely in cache when executing on the main thread.
+ // - Memory reclaim takes the partition lock for each partition. As a
+ // consequence, while reclaim is running, the main thread is unlikely to be
+ // able to make progress, as it would be waiting on the lock.
+ // - Finally, this runs in idle time only, so there should be no visible
+ // impact.
+ //
+ // From local testing, time to reclaim is 100us-1ms, and reclaiming every few
+ // seconds is useful. Since this is meant to run during idle time only, it is
+  // a reasonable starting point balancing effectiveness vs cost. See
+ // crbug.com/942512 for details and experimental results.
+ constexpr TimeDelta kInterval = TimeDelta::FromSeconds(4);
+
+ timer_ = std::make_unique<RepeatingTimer>();
+ timer_->SetTaskRunner(task_runner);
+ // Here and below, |Unretained(this)| is fine as |this| lives forever, as a
+ // singleton.
+ timer_->Start(
+ FROM_HERE, kInterval,
+ BindRepeating(&PartitionAllocMemoryReclaimer::Reclaim, Unretained(this)));
+
+ task_runner->PostDelayedTask(
+ FROM_HERE,
+ BindOnce(&PartitionAllocMemoryReclaimer::RecordStatistics,
+ Unretained(this)),
+ kStatsRecordingTimeDelta);
+}
+
+PartitionAllocMemoryReclaimer::PartitionAllocMemoryReclaimer() = default;
+PartitionAllocMemoryReclaimer::~PartitionAllocMemoryReclaimer() = default;
+
+void PartitionAllocMemoryReclaimer::Reclaim() {
+ TRACE_EVENT0("base", "PartitionAllocMemoryReclaimer::Reclaim()");
+ // Reclaim will almost always call into the kernel, so tail latency of this
+ // task would likely be affected by descheduling.
+ //
+ // On Linux (and Android) at least, ThreadTicks also includes kernel time, so
+ // this is a good measure of the true cost of decommit.
+ ElapsedThreadTimer timer;
+ constexpr int kFlags =
+ PartitionPurgeDecommitEmptyPages | PartitionPurgeDiscardUnusedSystemPages;
+
+ {
+ AutoLock lock(lock_); // Has to protect from concurrent (Un)Register calls.
+ for (auto* partition : partitions_)
+ partition->PurgeMemory(kFlags);
+ }
+
+ has_called_reclaim_ = true;
+ if (timer.is_supported())
+ total_reclaim_thread_time_ += timer.Elapsed();
+}
+
+void PartitionAllocMemoryReclaimer::DeprecatedReclaim() {
+ if (!IsDeprecatedDecommitEnabled())
+ return;
+
+ Reclaim();
+}
+
+void PartitionAllocMemoryReclaimer::RecordStatistics() {
+ if (!ElapsedThreadTimer().is_supported())
+ return;
+ if (!has_called_reclaim_)
+ return;
+
+ UmaHistogramTimes("Memory.PartitionAlloc.MainThreadTime.5min",
+ total_reclaim_thread_time_);
+ has_called_reclaim_ = false;
+ total_reclaim_thread_time_ = TimeDelta();
+}
+
+void PartitionAllocMemoryReclaimer::ResetForTesting() {
+ AutoLock lock(lock_);
+
+ has_called_reclaim_ = false;
+ total_reclaim_thread_time_ = TimeDelta();
+ timer_ = nullptr;
+ partitions_.clear();
+}
+
+} // namespace base
diff --git a/chromium/base/allocator/partition_allocator/memory_reclaimer.h b/chromium/base/allocator/partition_allocator/memory_reclaimer.h
new file mode 100644
index 00000000000..eca30a7d1e0
--- /dev/null
+++ b/chromium/base/allocator/partition_allocator/memory_reclaimer.h
@@ -0,0 +1,84 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_MEMORY_RECLAIMER_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_MEMORY_RECLAIMER_H_
+
+#include <memory>
+#include <set>
+
+#include "base/bind.h"
+#include "base/callback.h"
+#include "base/feature_list.h"
+#include "base/location.h"
+#include "base/no_destructor.h"
+#include "base/single_thread_task_runner.h"
+#include "base/thread_annotations.h"
+#include "base/time/time.h"
+#include "base/timer/elapsed_timer.h"
+#include "base/timer/timer.h"
+
+namespace base {
+
+namespace internal {
+
+struct PartitionRootBase;
+
+BASE_EXPORT extern const Feature kPartitionAllocPeriodicDecommit;
+
+} // namespace internal
+
+// Posts and handles memory reclaim tasks for PartitionAlloc.
+//
+// Thread safety: |RegisterPartition()| and |UnregisterPartition()| can be
+// called from any thread, concurrently with reclaim. Reclaim itself runs in the
+// context of the provided |SequencedTaskRunner|, meaning that the caller must
+// take care of this runner being compatible with the various partitions.
+//
+// Singleton as this runs as long as the process is alive, and
+// having multiple instances would be wasteful.
+class BASE_EXPORT PartitionAllocMemoryReclaimer {
+ public:
+ static PartitionAllocMemoryReclaimer* Instance();
+
+ // Internal. Do not use.
+ // Registers a partition to be tracked by the reclaimer.
+ void RegisterPartition(internal::PartitionRootBase* partition);
+ // Internal. Do not use.
+ // Unregisters a partition to be tracked by the reclaimer.
+ void UnregisterPartition(internal::PartitionRootBase* partition);
+ // Starts the periodic reclaim. Should be called once.
+ void Start(scoped_refptr<SequencedTaskRunner> task_runner);
+ // Triggers an explicit reclaim now.
+ void Reclaim();
+ // Triggers a reclaim. Do not add new callers.
+ void DeprecatedReclaim();
+
+ static constexpr TimeDelta kStatsRecordingTimeDelta =
+ TimeDelta::FromMinutes(5);
+
+ private:
+ PartitionAllocMemoryReclaimer();
+ ~PartitionAllocMemoryReclaimer();
+ void ReclaimAndReschedule();
+ void RecordStatistics();
+ void ResetForTesting();
+
+ // Total time spent in |Reclaim()|.
+ bool has_called_reclaim_ = false;
+ TimeDelta total_reclaim_thread_time_;
+ // Schedules periodic |Reclaim()|.
+ std::unique_ptr<RepeatingTimer> timer_;
+
+ Lock lock_;
+ std::set<internal::PartitionRootBase*> partitions_ GUARDED_BY(lock_);
+
+ friend class NoDestructor<PartitionAllocMemoryReclaimer>;
+ friend class PartitionAllocMemoryReclaimerTest;
+ DISALLOW_COPY_AND_ASSIGN(PartitionAllocMemoryReclaimer);
+};
+
+} // namespace base
+
+#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_MEMORY_RECLAIMER_H_
diff --git a/chromium/base/allocator/partition_allocator/memory_reclaimer_unittest.cc b/chromium/base/allocator/partition_allocator/memory_reclaimer_unittest.cc
new file mode 100644
index 00000000000..a897ed6120b
--- /dev/null
+++ b/chromium/base/allocator/partition_allocator/memory_reclaimer_unittest.cc
@@ -0,0 +1,169 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/memory_reclaimer.h"
+
+#include <memory>
+#include <utility>
+
+#include "base/allocator/partition_allocator/partition_alloc.h"
+#include "base/test/metrics/histogram_tester.h"
+#include "base/test/scoped_feature_list.h"
+#include "base/test/scoped_task_environment.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// Otherwise, PartitionAlloc doesn't allocate any memory, and the tests are
+// meaningless.
+#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+
+namespace base {
+
+class PartitionAllocMemoryReclaimerTest : public ::testing::Test {
+ public:
+ PartitionAllocMemoryReclaimerTest()
+ : ::testing::Test(),
+ task_environment_(test::ScopedTaskEnvironment::TimeSource::MOCK_TIME),
+ allocator_() {}
+
+ protected:
+ void SetUp() override {
+ PartitionAllocMemoryReclaimer::Instance()->ResetForTesting();
+ allocator_ = std::make_unique<PartitionAllocatorGeneric>();
+ allocator_->init();
+ }
+
+ void TearDown() override {
+ allocator_ = nullptr;
+ PartitionAllocMemoryReclaimer::Instance()->ResetForTesting();
+ task_environment_.FastForwardUntilNoTasksRemain();
+ }
+
+ void StartReclaimer() {
+ auto* memory_reclaimer = PartitionAllocMemoryReclaimer::Instance();
+ memory_reclaimer->Start(task_environment_.GetMainThreadTaskRunner());
+ }
+
+ void AllocateAndFree() {
+ void* data = allocator_->root()->Alloc(1, "");
+ allocator_->root()->Free(data);
+ }
+
+ size_t GetExpectedTasksCount() const {
+ // Includes the stats recording task.
+ if (ElapsedThreadTimer().is_supported())
+ return 2;
+ return 1;
+ }
+
+ test::ScopedTaskEnvironment task_environment_;
+ std::unique_ptr<PartitionAllocatorGeneric> allocator_;
+};
+
+TEST_F(PartitionAllocMemoryReclaimerTest, Simple) {
+ test::ScopedFeatureList scoped_feature_list;
+ scoped_feature_list.InitAndEnableFeature(
+ internal::kPartitionAllocPeriodicDecommit);
+
+ StartReclaimer();
+
+ EXPECT_EQ(GetExpectedTasksCount(),
+ task_environment_.GetPendingMainThreadTaskCount());
+ EXPECT_TRUE(task_environment_.NextTaskIsDelayed());
+}
+
+TEST_F(PartitionAllocMemoryReclaimerTest, IsEnabledByDefault) {
+ StartReclaimer();
+ EXPECT_EQ(2u, task_environment_.GetPendingMainThreadTaskCount());
+}
+
+TEST_F(PartitionAllocMemoryReclaimerTest, CanBeDisabled) {
+ test::ScopedFeatureList scoped_feature_list;
+ scoped_feature_list.InitAndDisableFeature(
+ internal::kPartitionAllocPeriodicDecommit);
+ StartReclaimer();
+ EXPECT_EQ(0u, task_environment_.GetPendingMainThreadTaskCount());
+}
+
+TEST_F(PartitionAllocMemoryReclaimerTest, FreesMemory) {
+ PartitionRootGeneric* root = allocator_->root();
+
+ size_t committed_initially = root->total_size_of_committed_pages;
+ AllocateAndFree();
+ size_t committed_before = root->total_size_of_committed_pages;
+
+ EXPECT_GT(committed_before, committed_initially);
+
+ StartReclaimer();
+ task_environment_.FastForwardBy(
+ task_environment_.NextMainThreadPendingTaskDelay());
+ size_t committed_after = root->total_size_of_committed_pages;
+ EXPECT_LT(committed_after, committed_before);
+ EXPECT_LE(committed_initially, committed_after);
+}
+
+TEST_F(PartitionAllocMemoryReclaimerTest, Reclaim) {
+ PartitionRootGeneric* root = allocator_->root();
+ size_t committed_initially = root->total_size_of_committed_pages;
+
+ {
+ AllocateAndFree();
+
+ size_t committed_before = root->total_size_of_committed_pages;
+ EXPECT_GT(committed_before, committed_initially);
+ PartitionAllocMemoryReclaimer::Instance()->Reclaim();
+ size_t committed_after = root->total_size_of_committed_pages;
+
+ EXPECT_LT(committed_after, committed_before);
+ EXPECT_LE(committed_initially, committed_after);
+ }
+}
+
+TEST_F(PartitionAllocMemoryReclaimerTest, DeprecatedReclaim) {
+ PartitionRootGeneric* root = allocator_->root();
+
+ // Deprecated reclaim is disabled by default.
+ {
+ AllocateAndFree();
+ size_t committed_before = root->total_size_of_committed_pages;
+ PartitionAllocMemoryReclaimer::Instance()->DeprecatedReclaim();
+ size_t committed_after = root->total_size_of_committed_pages;
+ EXPECT_EQ(committed_after, committed_before);
+
+ PartitionAllocMemoryReclaimer::Instance()->Reclaim();
+ }
+
+ // Deprecated reclaim works when periodic reclaim is disabled.
+ {
+ test::ScopedFeatureList scoped_feature_list;
+ scoped_feature_list.InitAndDisableFeature(
+ internal::kPartitionAllocPeriodicDecommit);
+ AllocateAndFree();
+ size_t committed_before = root->total_size_of_committed_pages;
+ PartitionAllocMemoryReclaimer::Instance()->DeprecatedReclaim();
+ size_t committed_after = root->total_size_of_committed_pages;
+ EXPECT_LT(committed_after, committed_before);
+ }
+}
+
+TEST_F(PartitionAllocMemoryReclaimerTest, StatsRecording) {
+  // No stats reported if the thread timer is not supported.
+ if (!ElapsedThreadTimer().is_supported())
+ return;
+
+ HistogramTester histogram_tester;
+ StartReclaimer();
+ EXPECT_EQ(GetExpectedTasksCount(),
+ task_environment_.GetPendingMainThreadTaskCount());
+
+ task_environment_.FastForwardBy(
+ PartitionAllocMemoryReclaimer::kStatsRecordingTimeDelta);
+ // Hard to make sure that the total time is >1ms, so cannot assert that the
+ // value is not 0.
+ histogram_tester.ExpectTotalCount("Memory.PartitionAlloc.MainThreadTime.5min",
+ 1);
+}
+
+} // namespace base
+#endif // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc.cc b/chromium/base/allocator/partition_allocator/partition_alloc.cc
index 6537f509fa6..4b6d55fdf35 100644
--- a/chromium/base/allocator/partition_allocator/partition_alloc.cc
+++ b/chromium/base/allocator/partition_allocator/partition_alloc.cc
@@ -55,7 +55,6 @@ PartitionRoot::~PartitionRoot() = default;
PartitionRootGeneric::PartitionRootGeneric() = default;
PartitionRootGeneric::~PartitionRootGeneric() = default;
PartitionAllocatorGeneric::PartitionAllocatorGeneric() = default;
-PartitionAllocatorGeneric::~PartitionAllocatorGeneric() = default;
subtle::SpinLock& GetLock() {
static NoDestructor<subtle::SpinLock> s_initialized_lock;
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc.h b/chromium/base/allocator/partition_allocator/partition_alloc.h
index 3cb0f1027c2..f992a849fc4 100644
--- a/chromium/base/allocator/partition_allocator/partition_alloc.h
+++ b/chromium/base/allocator/partition_allocator/partition_alloc.h
@@ -63,6 +63,7 @@
#include <limits.h>
#include <string.h>
+#include "base/allocator/partition_allocator/memory_reclaimer.h"
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_bucket.h"
@@ -499,10 +500,17 @@ class SizeSpecificPartitionAllocator {
memset(actual_buckets_, 0,
sizeof(internal::PartitionBucket) * base::size(actual_buckets_));
}
- ~SizeSpecificPartitionAllocator() = default;
+ ~SizeSpecificPartitionAllocator() {
+ PartitionAllocMemoryReclaimer::Instance()->UnregisterPartition(
+ &partition_root_);
+ }
static const size_t kMaxAllocation = N - kAllocationGranularity;
static const size_t kNumBuckets = N / kAllocationGranularity;
- void init() { partition_root_.Init(kNumBuckets, kMaxAllocation); }
+ void init() {
+ partition_root_.Init(kNumBuckets, kMaxAllocation);
+ PartitionAllocMemoryReclaimer::Instance()->RegisterPartition(
+ &partition_root_);
+ }
ALWAYS_INLINE PartitionRoot* root() { return &partition_root_; }
private:
@@ -513,9 +521,16 @@ class SizeSpecificPartitionAllocator {
class BASE_EXPORT PartitionAllocatorGeneric {
public:
PartitionAllocatorGeneric();
- ~PartitionAllocatorGeneric();
+ ~PartitionAllocatorGeneric() {
+ PartitionAllocMemoryReclaimer::Instance()->UnregisterPartition(
+ &partition_root_);
+ }
- void init() { partition_root_.Init(); }
+ void init() {
+ partition_root_.Init();
+ PartitionAllocMemoryReclaimer::Instance()->RegisterPartition(
+ &partition_root_);
+ }
ALWAYS_INLINE PartitionRootGeneric* root() { return &partition_root_; }
private:
diff --git a/chromium/base/android/jni_generator/BUILD.gn b/chromium/base/android/jni_generator/BUILD.gn
index 0f058501d89..691e584087d 100644
--- a/chromium/base/android/jni_generator/BUILD.gn
+++ b/chromium/base/android/jni_generator/BUILD.gn
@@ -12,7 +12,6 @@ generate_jni("jni_sample_header") {
"java/src/org/chromium/example/jni_generator/SampleForAnnotationProcessor.java",
"java/src/org/chromium/example/jni_generator/SampleForTests.java",
]
- jni_package = "example"
}
android_library("jni_sample_java") {
@@ -65,6 +64,7 @@ shared_library("jni_sample_lib") {
android_apk("sample_jni_apk") {
apk_name = "SampleJni"
android_manifest = "AndroidManifest.xml"
+ target_sdk_version = 24
deps = [
":jni_sample_java",
"//base:base_java",
diff --git a/chromium/base/android/linker/BUILD.gn b/chromium/base/android/linker/BUILD.gn
index dd5a0d6f31e..1f1821d2e80 100644
--- a/chromium/base/android/linker/BUILD.gn
+++ b/chromium/base/android/linker/BUILD.gn
@@ -9,7 +9,10 @@ assert(is_android)
shared_library("chromium_android_linker") {
sources = [
+ "legacy_linker_jni.cc",
+ "legacy_linker_jni.h",
"linker_jni.cc",
+ "linker_jni.h",
]
# The NDK contains the crazy_linker here:
diff --git a/chromium/base/base_switches.cc b/chromium/base/base_switches.cc
index 89e303d97b6..a0cf9cc5727 100644
--- a/chromium/base/base_switches.cc
+++ b/chromium/base/base_switches.cc
@@ -7,7 +7,7 @@
namespace switches {
-// Delays execution of base::TaskPriority::BEST_EFFORT tasks until shutdown.
+// Delays execution of TaskPriority::BEST_EFFORT tasks until shutdown.
const char kDisableBestEffortTasks[] = "disable-best-effort-tasks";
// Disables the crash reporting.
@@ -16,6 +16,9 @@ const char kDisableBreakpad[] = "disable-breakpad";
// Comma-separated list of feature names to disable. See also kEnableFeatures.
const char kDisableFeatures[] = "disable-features";
+// Force disabling of low-end device mode when set.
+const char kDisableLowEndDeviceMode[] = "disable-low-end-device-mode";
+
// Indicates that crash reporting should be enabled. On platforms where helper
// processes cannot access to files needed to make this decision, this flag is
// generated internally.
@@ -24,15 +27,9 @@ const char kEnableCrashReporter[] = "enable-crash-reporter";
// Comma-separated list of feature names to enable. See also kDisableFeatures.
const char kEnableFeatures[] = "enable-features";
-// Generates full memory crash dump.
-const char kFullMemoryCrashReport[] = "full-memory-crash-report";
-
// Force low-end device mode when set.
const char kEnableLowEndDeviceMode[] = "enable-low-end-device-mode";
-// Force disabling of low-end device mode when set.
-const char kDisableLowEndDeviceMode[] = "disable-low-end-device-mode";
-
// This option can be used to force field trials when testing changes locally.
// The argument is a list of name and value pairs, separated by slashes. If a
// trial name is prefixed with an asterisk, that trial will start activated.
@@ -43,21 +40,60 @@ const char kDisableLowEndDeviceMode[] = "disable-low-end-device-mode";
// FieldTrialList::CreateTrialsFromString() in field_trial.h for details.
const char kForceFieldTrials[] = "force-fieldtrials";
+// Generates full memory crash dump.
+const char kFullMemoryCrashReport[] = "full-memory-crash-report";
+
+// Logs information about all tasks posted with TaskPriority::BEST_EFFORT. Use
+// this to diagnose issues that are thought to be caused by
+// TaskPriority::BEST_EFFORT execution fences. Note: Tasks posted to a
+// non-BEST_EFFORT UpdateableSequencedTaskRunner whose priority is later lowered
+// to BEST_EFFORT are not logged.
+const char kLogBestEffortTasks[] = "log-best-effort-tasks";
+
// Suppresses all error dialogs when present.
const char kNoErrorDialogs[] = "noerrdialogs";
+// Starts the sampling based profiler for the browser process at startup. This
+// will only work if chrome has been built with the gn arg enable_profiling =
+// true. The output will go to the value of kProfilingFile.
+const char kProfilingAtStart[] = "profiling-at-start";
+
+// Specifies a location for profiling output. This will only work if chrome has
+// been built with the gyp variable profiling=1 or gn arg enable_profiling=true.
+//
+// {pid} if present will be replaced by the pid of the process.
+// {count} if present will be incremented each time a profile is generated
+// for this process.
+// The default is chrome-profile-{pid} for the browser and test-profile-{pid}
+// for tests.
+const char kProfilingFile[] = "profiling-file";
+
+// Controls whether profile data is periodically flushed to a file. Normally
+// the data gets written on exit but cases exist where chromium doesn't exit
+// cleanly (especially when using single-process). A time in seconds can be
+// specified.
+const char kProfilingFlush[] = "profiling-flush";
+
// When running certain tests that spawn child processes, this switch indicates
// to the test framework that the current process is a child process.
-const char kTestChildProcess[] = "test-child-process";
+const char kTestChildProcess[] = "test-child-process";
// When running certain tests that spawn child processes, this switch indicates
// to the test framework that the current process should not initialize ICU to
// avoid creating any scoped handles too early in startup.
-const char kTestDoNotInitializeIcu[] = "test-do-not-initialize-icu";
+const char kTestDoNotInitializeIcu[] = "test-do-not-initialize-icu";
+
+// Sends trace events from these categories to a file.
+// --trace-to-file on its own sends to default categories.
+const char kTraceToFile[] = "trace-to-file";
+
+// Specifies the file name for --trace-to-file. If unspecified, it will
+// go to a default file name.
+const char kTraceToFileName[] = "trace-to-file-name";
// Gives the default maximal active V-logging level; 0 is the default.
// Normally positive values are used for V-logging levels.
-const char kV[] = "v";
+const char kV[] = "v";
// Gives the per-module maximal V-logging levels to override the value
// given by --v. E.g. "my_module=2,foo*=3" would change the logging
@@ -68,39 +104,10 @@ const char kV[] = "v";
// against the whole pathname and not just the module. E.g.,
// "*/foo/bar/*=2" would change the logging level for all code in
// source files under a "foo/bar" directory.
-const char kVModule[] = "vmodule";
+const char kVModule[] = "vmodule";
// Will wait for 60 seconds for a debugger to come to attach to the process.
-const char kWaitForDebugger[] = "wait-for-debugger";
-
-// Sends trace events from these categories to a file.
-// --trace-to-file on its own sends to default categories.
-const char kTraceToFile[] = "trace-to-file";
-
-// Specifies the file name for --trace-to-file. If unspecified, it will
-// go to a default file name.
-const char kTraceToFileName[] = "trace-to-file-name";
-
-// Starts the sampling based profiler for the browser process at startup. This
-// will only work if chrome has been built with the gn arg enable_profiling =
-// true. The output will go to the value of kProfilingFile.
-const char kProfilingAtStart[] = "profiling-at-start";
-
-// Specifies a location for profiling output. This will only work if chrome has
-// been built with the gyp variable profiling=1 or gn arg enable_profiling=true.
-//
-// {pid} if present will be replaced by the pid of the process.
-// {count} if present will be incremented each time a profile is generated
-// for this process.
-// The default is chrome-profile-{pid} for the browser and test-profile-{pid}
-// for tests.
-const char kProfilingFile[] = "profiling-file";
-
-// Controls whether profile data is periodically flushed to a file. Normally
-// the data gets written on exit but cases exist where chromium doesn't exit
-// cleanly (especially when using single-process). A time in seconds can be
-// specified.
-const char kProfilingFlush[] = "profiling-flush";
+const char kWaitForDebugger[] = "wait-for-debugger";
#if defined(OS_WIN)
// Disables the USB keyboard detection for blocking the OSK on Win8+.
diff --git a/chromium/base/base_switches.h b/chromium/base/base_switches.h
index 62397a9c4cd..51d6e6bb2d3 100644
--- a/chromium/base/base_switches.h
+++ b/chromium/base/base_switches.h
@@ -20,6 +20,7 @@ extern const char kEnableFeatures[];
extern const char kEnableLowEndDeviceMode[];
extern const char kForceFieldTrials[];
extern const char kFullMemoryCrashReport[];
+extern const char kLogBestEffortTasks[];
extern const char kNoErrorDialogs[];
extern const char kProfilingAtStart[];
extern const char kProfilingFile[];
diff --git a/chromium/base/bind.h b/chromium/base/bind.h
index 3c3d5d3a825..7a400af63fa 100644
--- a/chromium/base/bind.h
+++ b/chromium/base/bind.h
@@ -7,6 +7,7 @@
#include <functional>
#include <memory>
+#include <type_traits>
#include <utility>
#include "base/bind_internal.h"
@@ -41,7 +42,7 @@
// class C : public base::RefCounted<C> { void F(); };
// auto instance = base::MakeRefCounted<C>();
// auto cb = base::BindOnce(&C::F, instance);
-// cb.Run(); // Identical to instance->F()
+// std::move(cb).Run(); // Identical to instance->F()
//
// base::Bind is currently a type alias for base::BindRepeating(). In the
// future, we expect to flip this to default to base::BindOnce().
@@ -179,26 +180,39 @@ template <bool is_once, bool is_method, typename... Args>
using MakeUnwrappedTypeList =
typename MakeUnwrappedTypeListImpl<is_once, is_method, Args...>::Type;
-} // namespace internal
+// Used below in BindImpl to determine whether to use Invoker::Run or
+// Invoker::RunOnce.
+// Note: Simply using `kIsOnce ? &Invoker::RunOnce : &Invoker::Run` does not
+// work, since the compiler needs to check whether both expressions are
+// well-formed. Using `Invoker::Run` with a OnceCallback triggers a
+// static_assert, which is why the ternary expression does not compile.
+// TODO(crbug.com/752720): Remove this indirection once we have `if constexpr`.
+template <bool is_once, typename Invoker>
+struct InvokeFuncImpl;
+
+template <typename Invoker>
+struct InvokeFuncImpl<true, Invoker> {
+ static constexpr auto Value = &Invoker::RunOnce;
+};
-// Bind as OnceCallback.
-template <typename Functor, typename... Args>
-inline OnceCallback<MakeUnboundRunType<Functor, Args...>>
-BindOnce(Functor&& functor, Args&&... args) {
- static_assert(!internal::IsOnceCallback<std::decay_t<Functor>>() ||
- (std::is_rvalue_reference<Functor&&>() &&
- !std::is_const<std::remove_reference_t<Functor>>()),
- "BindOnce requires non-const rvalue for OnceCallback binding."
- " I.e.: base::BindOnce(std::move(callback)).");
+template <typename Invoker>
+struct InvokeFuncImpl<false, Invoker> {
+ static constexpr auto Value = &Invoker::Run;
+};
+template <template <typename> class CallbackT,
+ typename Functor,
+ typename... Args>
+decltype(auto) BindImpl(Functor&& functor, Args&&... args) {
// This block checks if each |args| matches to the corresponding params of the
// target function. This check does not affect the behavior of Bind, but its
// error message should be more readable.
+ static constexpr bool kIsOnce = IsOnceCallback<CallbackT<void()>>::value;
using Helper = internal::BindTypeHelper<Functor, Args...>;
using FunctorTraits = typename Helper::FunctorTraits;
using BoundArgsList = typename Helper::BoundArgsList;
using UnwrappedArgsList =
- internal::MakeUnwrappedTypeList<true, FunctorTraits::is_method,
+ internal::MakeUnwrappedTypeList<kIsOnce, FunctorTraits::is_method,
Args&&...>;
using BoundParamsList = typename Helper::BoundParamsList;
static_assert(internal::AssertBindArgsValidity<
@@ -209,13 +223,13 @@ BindOnce(Functor&& functor, Args&&... args) {
using BindState = internal::MakeBindStateType<Functor, Args...>;
using UnboundRunType = MakeUnboundRunType<Functor, Args...>;
using Invoker = internal::Invoker<BindState, UnboundRunType>;
- using CallbackType = OnceCallback<UnboundRunType>;
+ using CallbackType = CallbackT<UnboundRunType>;
// Store the invoke func into PolymorphicInvoke before casting it to
// InvokeFuncStorage, so that we can ensure its type matches to
// PolymorphicInvoke, to which CallbackType will cast back.
using PolymorphicInvoke = typename CallbackType::PolymorphicInvoke;
- PolymorphicInvoke invoke_func = &Invoker::RunOnce;
+ PolymorphicInvoke invoke_func = InvokeFuncImpl<kIsOnce, Invoker>::Value;
using InvokeFuncStorage = internal::BindStateBase::InvokeFuncStorage;
return CallbackType(BindState::Create(
@@ -223,6 +237,23 @@ BindOnce(Functor&& functor, Args&&... args) {
std::forward<Functor>(functor), std::forward<Args>(args)...));
}
+} // namespace internal
+
+// Bind as OnceCallback.
+template <typename Functor, typename... Args>
+inline OnceCallback<MakeUnboundRunType<Functor, Args...>> BindOnce(
+ Functor&& functor,
+ Args&&... args) {
+ static_assert(!internal::IsOnceCallback<std::decay_t<Functor>>() ||
+ (std::is_rvalue_reference<Functor&&>() &&
+ !std::is_const<std::remove_reference_t<Functor>>()),
+ "BindOnce requires non-const rvalue for OnceCallback binding."
+ " I.e.: base::BindOnce(std::move(callback)).");
+
+ return internal::BindImpl<OnceCallback>(std::forward<Functor>(functor),
+ std::forward<Args>(args)...);
+}
+
// Bind as RepeatingCallback.
template <typename Functor, typename... Args>
inline RepeatingCallback<MakeUnboundRunType<Functor, Args...>>
@@ -231,36 +262,8 @@ BindRepeating(Functor&& functor, Args&&... args) {
!internal::IsOnceCallback<std::decay_t<Functor>>(),
"BindRepeating cannot bind OnceCallback. Use BindOnce with std::move().");
- // This block checks if each |args| matches to the corresponding params of the
- // target function. This check does not affect the behavior of Bind, but its
- // error message should be more readable.
- using Helper = internal::BindTypeHelper<Functor, Args...>;
- using FunctorTraits = typename Helper::FunctorTraits;
- using BoundArgsList = typename Helper::BoundArgsList;
- using UnwrappedArgsList =
- internal::MakeUnwrappedTypeList<false, FunctorTraits::is_method,
- Args&&...>;
- using BoundParamsList = typename Helper::BoundParamsList;
- static_assert(internal::AssertBindArgsValidity<
- std::make_index_sequence<Helper::num_bounds>, BoundArgsList,
- UnwrappedArgsList, BoundParamsList>::ok,
- "The bound args need to be convertible to the target params.");
-
- using BindState = internal::MakeBindStateType<Functor, Args...>;
- using UnboundRunType = MakeUnboundRunType<Functor, Args...>;
- using Invoker = internal::Invoker<BindState, UnboundRunType>;
- using CallbackType = RepeatingCallback<UnboundRunType>;
-
- // Store the invoke func into PolymorphicInvoke before casting it to
- // InvokeFuncStorage, so that we can ensure its type matches to
- // PolymorphicInvoke, to which CallbackType will cast back.
- using PolymorphicInvoke = typename CallbackType::PolymorphicInvoke;
- PolymorphicInvoke invoke_func = &Invoker::Run;
-
- using InvokeFuncStorage = internal::BindStateBase::InvokeFuncStorage;
- return CallbackType(BindState::Create(
- reinterpret_cast<InvokeFuncStorage>(invoke_func),
- std::forward<Functor>(functor), std::forward<Args>(args)...));
+ return internal::BindImpl<RepeatingCallback>(std::forward<Functor>(functor),
+ std::forward<Args>(args)...);
}
// Unannotated Bind.
@@ -290,7 +293,7 @@ Callback<Signature> Bind(Callback<Signature> closure) {
return closure;
}
-// Unretained() allows Bind() to bind a non-refcounted class, and to disable
+// Unretained() allows binding a non-refcounted class, and disabling
// refcounting on arguments that are refcounted objects.
//
// EXAMPLE OF Unretained():
@@ -302,9 +305,9 @@ Callback<Signature> Bind(Callback<Signature> closure) {
//
// // In some function somewhere.
// Foo foo;
-// Closure foo_callback =
-// Bind(&Foo::func, Unretained(&foo));
-// foo_callback.Run(); // Prints "Foo:f".
+// OnceClosure foo_callback =
+// BindOnce(&Foo::func, Unretained(&foo));
+// std::move(foo_callback).Run(); // Prints "Foo:f".
//
// Without the Unretained() wrapper on |&foo|, the above call would fail
// to compile because Foo does not support the AddRef() and Release() methods.
@@ -321,13 +324,13 @@ static inline internal::UnretainedWrapper<T> Unretained(T* o) {
// void foo(RefCountedBytes* bytes) {}
//
// scoped_refptr<RefCountedBytes> bytes = ...;
-// Closure callback = Bind(&foo, base::RetainedRef(bytes));
-// callback.Run();
+// OnceClosure callback = BindOnce(&foo, base::RetainedRef(bytes));
+// std::move(callback).Run();
//
// Without RetainedRef, the scoped_refptr would try to implicitly convert to
// a raw pointer and fail compilation:
//
-// Closure callback = Bind(&foo, bytes); // ERROR!
+// OnceClosure callback = BindOnce(&foo, bytes); // ERROR!
template <typename T>
static inline internal::RetainedRefWrapper<T> RetainedRef(T* o) {
return internal::RetainedRefWrapper<T>(o);
@@ -337,26 +340,26 @@ static inline internal::RetainedRefWrapper<T> RetainedRef(scoped_refptr<T> o) {
return internal::RetainedRefWrapper<T>(std::move(o));
}
-// Owned() transfers ownership of an object to the Callback resulting from
-// bind; the object will be deleted when the Callback is deleted.
+// Owned() transfers ownership of an object to the callback resulting from
+// bind; the object will be deleted when the callback is deleted.
//
// EXAMPLE OF Owned():
//
// void foo(int* arg) { cout << *arg << endl }
//
// int* pn = new int(1);
-// Closure foo_callback = Bind(&foo, Owned(pn));
+// RepeatingClosure foo_callback = BindRepeating(&foo, Owned(pn));
//
// foo_callback.Run(); // Prints "1"
// foo_callback.Run(); // Prints "1"
-// *n = 2;
+// *pn = 2;
// foo_callback.Run(); // Prints "2"
//
// foo_callback.Reset(); // |pn| is deleted. Also will happen when
// // |foo_callback| goes out of scope.
//
// Without Owned(), someone would have to know to delete |pn| when the last
-// reference to the Callback is deleted.
+// reference to the callback is deleted.
template <typename T>
static inline internal::OwnedWrapper<T> Owned(T* o) {
return internal::OwnedWrapper<T>(o);
@@ -368,9 +371,9 @@ static inline internal::OwnedWrapper<T> Owned(std::unique_ptr<T>&& ptr) {
}
// Passed() is for transferring movable-but-not-copyable types (eg. unique_ptr)
-// through a Callback. Logically, this signifies a destructive transfer of
-// the state of the argument into the target function. Invoking
-// Callback::Run() twice on a Callback that was created with a Passed()
+// through a RepeatingCallback. Logically, this signifies a destructive transfer
+// of the state of the argument into the target function. Invoking
+// RepeatingCallback::Run() twice on a callback that was created with a Passed()
// argument will CHECK() because the first invocation would have already
// transferred ownership to the target function.
//
@@ -387,22 +390,22 @@ static inline internal::OwnedWrapper<T> Owned(std::unique_ptr<T>&& ptr) {
//
// // |cb| is given ownership of Foo(). |f| is now NULL.
// // You can use std::move(f) in place of &f, but it's more verbose.
-// Closure cb = Bind(&TakesOwnership, Passed(&f));
+// RepeatingClosure cb = BindRepeating(&TakesOwnership, Passed(&f));
//
// // Run was never called so |cb| still owns Foo() and deletes
// // it on Reset().
// cb.Reset();
//
// // |cb| is given a new Foo created by CreateFoo().
-// cb = Bind(&TakesOwnership, Passed(CreateFoo()));
+// cb = BindRepeating(&TakesOwnership, Passed(CreateFoo()));
//
// // |arg| in TakesOwnership() is given ownership of Foo(). |cb|
// // no longer owns Foo() and, if reset, would not delete Foo().
// cb.Run(); // Foo() is now transferred to |arg| and deleted.
// cb.Run(); // This CHECK()s since Foo() already been used once.
//
-// We offer 2 syntaxes for calling Passed(). The first takes an rvalue and
-// is best suited for use with the return value of a function or other temporary
+// We offer 2 syntaxes for calling Passed(). The first takes an rvalue and is
+// best suited for use with the return value of a function or other temporary
// rvalues. The second takes a pointer to the scoper and is just syntactic sugar
// to avoid having to write Passed(std::move(scoper)).
//
@@ -418,21 +421,21 @@ static inline internal::PassedWrapper<T> Passed(T* scoper) {
return internal::PassedWrapper<T>(std::move(*scoper));
}
-// IgnoreResult() is used to adapt a function or Callback with a return type to
+// IgnoreResult() is used to adapt a function or callback with a return type to
// one with a void return. This is most useful if you have a function with,
// say, a pesky ignorable bool return that you want to use with PostTask or
-// something else that expect a Callback with a void return.
+// something else that expects a callback with a void return.
//
// EXAMPLE OF IgnoreResult():
//
// int DoSomething(int arg) { cout << arg << endl; }
//
-// // Assign to a Callback with a void return type.
-// Callback<void(int)> cb = Bind(IgnoreResult(&DoSomething));
-// cb->Run(1); // Prints "1".
+// // Assign to a callback with a void return type.
+// OnceCallback<void(int)> cb = BindOnce(IgnoreResult(&DoSomething));
+// std::move(cb).Run(1); // Prints "1".
//
-// // Prints "1" on |ml|.
-// ml->PostTask(FROM_HERE, BindOnce(IgnoreResult(&DoSomething), 1);
+// // Prints "2" on |ml|.
+// ml->PostTask(FROM_HERE, BindOnce(IgnoreResult(&DoSomething), 2));
template <typename T>
static inline internal::IgnoreResultHelper<T> IgnoreResult(T data) {
return internal::IgnoreResultHelper<T>(std::move(data));
@@ -447,8 +450,9 @@ static inline internal::IgnoreResultHelper<T> IgnoreResult(T data) {
// EXAMPLE OF RetainBlock():
//
// // Wrap the block and bind it to a callback.
-// Callback<void(int)> cb = Bind(RetainBlock(^(int n) { NSLog(@"%d", n); }));
-// cb.Run(1); // Logs "1".
+// OnceCallback<void(int)> cb =
+// BindOnce(RetainBlock(^(int n) { NSLog(@"%d", n); }));
+// std::move(cb).Run(1); // Logs "1".
template <typename R, typename... Args>
base::mac::ScopedBlock<R (^)(Args...)> RetainBlock(R (^block)(Args...)) {
return base::mac::ScopedBlock<R (^)(Args...)>(block,
diff --git a/chromium/base/bind_helpers.h b/chromium/base/bind_helpers.h
index 15961e6059e..37065a07ab5 100644
--- a/chromium/base/bind_helpers.h
+++ b/chromium/base/bind_helpers.h
@@ -16,7 +16,7 @@
#include "build/build_config.h"
// This defines a set of simple functions and utilities that people want when
-// using Callback<> and Bind().
+// using {Once,Repeating}Callback<> and Bind{Once,Repeating}().
namespace base {
diff --git a/chromium/base/bind_unittest.cc b/chromium/base/bind_unittest.cc
index 4dd03e5803a..9bf85aba898 100644
--- a/chromium/base/bind_unittest.cc
+++ b/chromium/base/bind_unittest.cc
@@ -311,7 +311,7 @@ int FunctionWithScopedRefptrFirstParam(const scoped_refptr<HasRef>& o, int n) {
return n;
}
-void TakesACallback(const Closure& callback) {
+void TakesACallback(const RepeatingClosure& callback) {
callback.Run();
}
@@ -363,31 +363,32 @@ int IntFunc0() {
}
TEST_F(BindTest, BasicTest) {
- Callback<int(int, int, int)> cb = Bind(&Sum, 32, 16, 8);
+ RepeatingCallback<int(int, int, int)> cb = BindRepeating(&Sum, 32, 16, 8);
EXPECT_EQ(92, cb.Run(13, 12, 11));
- Callback<int(int, int, int, int, int, int)> c1 = Bind(&Sum);
+ RepeatingCallback<int(int, int, int, int, int, int)> c1 = BindRepeating(&Sum);
EXPECT_EQ(69, c1.Run(14, 13, 12, 11, 10, 9));
- Callback<int(int, int, int)> c2 = Bind(c1, 32, 16, 8);
+ RepeatingCallback<int(int, int, int)> c2 = BindRepeating(c1, 32, 16, 8);
EXPECT_EQ(86, c2.Run(11, 10, 9));
- Callback<int()> c3 = Bind(c2, 4, 2, 1);
+ RepeatingCallback<int()> c3 = BindRepeating(c2, 4, 2, 1);
EXPECT_EQ(63, c3.Run());
}
-// Test that currying the rvalue result of another Bind() works correctly.
-// - rvalue should be usable as argument to Bind().
-// - multiple runs of resulting Callback remain valid.
+// Test that currying the rvalue result of another BindRepeating() works
+// correctly.
+// - rvalue should be usable as argument to BindRepeating().
+// - multiple runs of resulting RepeatingCallback remain valid.
TEST_F(BindTest, CurryingRvalueResultOfBind) {
int n = 0;
RepeatingClosure cb = BindRepeating(&TakesACallback,
BindRepeating(&PtrArgSet, &n));
- // If we implement Bind() such that the return value has auto_ptr-like
- // semantics, the second call here will fail because ownership of
- // the internal BindState<> would have been transfered to a *temporary*
- // constructon of a Callback object on the first call.
+ // If we implement BindRepeating() such that the return value has
+ // auto_ptr-like semantics, the second call here will fail because ownership
+ // of the internal BindState<> would have been transferred to a *temporary*
+ // construction of a RepeatingCallback object on the first call.
cb.Run();
EXPECT_EQ(2, n);
@@ -633,8 +634,8 @@ TEST_F(BindTest, WeakPtrForOnce) {
BindOnce(&NoRef::VoidConstMethod0, const_weak_factory.GetWeakPtr());
OnceClosure const_method_const_ptr_cb =
BindOnce(&NoRef::VoidConstMethod0, const_weak_factory.GetWeakPtr());
- Callback<int(int)> normal_func_cb =
- Bind(&FunctionWithWeakFirstParam, weak_factory.GetWeakPtr());
+ OnceCallback<int(int)> normal_func_cb =
+ BindOnce(&FunctionWithWeakFirstParam, weak_factory.GetWeakPtr());
weak_factory.InvalidateWeakPtrs();
const_weak_factory.InvalidateWeakPtrs();
@@ -1056,7 +1057,8 @@ TYPED_TEST(BindMoveOnlyTypeTest, PassedToBoundCallback) {
int deletes = 0;
TypeParam ptr(new DeleteCounter(&deletes));
- Callback<TypeParam()> callback = Bind(&PassThru<TypeParam>, Passed(&ptr));
+ RepeatingCallback<TypeParam()> callback =
+ BindRepeating(&PassThru<TypeParam>, Passed(&ptr));
EXPECT_FALSE(ptr.get());
EXPECT_EQ(0, deletes);
@@ -1067,7 +1069,7 @@ TYPED_TEST(BindMoveOnlyTypeTest, PassedToBoundCallback) {
TYPED_TEST(BindMoveOnlyTypeTest, PassedWithRvalue) {
int deletes = 0;
- Callback<TypeParam()> callback = Bind(
+ RepeatingCallback<TypeParam()> callback = BindRepeating(
&PassThru<TypeParam>, Passed(TypeParam(new DeleteCounter(&deletes))));
EXPECT_EQ(0, deletes);
@@ -1080,8 +1082,8 @@ TYPED_TEST(BindMoveOnlyTypeTest, PassedWithRvalue) {
TYPED_TEST(BindMoveOnlyTypeTest, ReturnMoveOnlyType) {
int deletes = 0;
DeleteCounter* counter = new DeleteCounter(&deletes);
- Callback<TypeParam()> callback =
- Bind(&PassThru<TypeParam>, Passed(TypeParam(counter)));
+ RepeatingCallback<TypeParam()> callback =
+ BindRepeating(&PassThru<TypeParam>, Passed(TypeParam(counter)));
TypeParam result = callback.Run();
ASSERT_EQ(counter, result.get());
EXPECT_EQ(0, deletes);
@@ -1099,7 +1101,8 @@ TYPED_TEST(BindMoveOnlyTypeTest, UnboundForwarding) {
int deletes = 0;
TypeParam ptr(new DeleteCounter(&deletes));
// Test unbound argument forwarding.
- Callback<TypeParam(TypeParam)> cb_unbound = Bind(&PassThru<TypeParam>);
+ RepeatingCallback<TypeParam(TypeParam)> cb_unbound =
+ BindRepeating(&PassThru<TypeParam>);
cb_unbound.Run(std::move(ptr));
EXPECT_EQ(1, deletes);
}
@@ -1123,14 +1126,14 @@ TEST_F(BindTest, BindMoveOnlyVector) {
v.push_back(std::make_unique<int>(12345));
// Early binding should work:
- base::Callback<MoveOnlyVector()> bound_cb =
- base::Bind(&AcceptAndReturnMoveOnlyVector, Passed(&v));
+ base::RepeatingCallback<MoveOnlyVector()> bound_cb =
+ base::BindRepeating(&AcceptAndReturnMoveOnlyVector, Passed(&v));
MoveOnlyVector intermediate_result = bound_cb.Run();
VerifyVector(intermediate_result);
// As should passing it as an argument to Run():
- base::Callback<MoveOnlyVector(MoveOnlyVector)> unbound_cb =
- base::Bind(&AcceptAndReturnMoveOnlyVector);
+ base::RepeatingCallback<MoveOnlyVector(MoveOnlyVector)> unbound_cb =
+ base::BindRepeating(&AcceptAndReturnMoveOnlyVector);
MoveOnlyVector final_result = unbound_cb.Run(std::move(intermediate_result));
VerifyVector(final_result);
}
@@ -1311,8 +1314,8 @@ TEST_F(BindTest, CapturelessLambda) {
EXPECT_EQ(42, Bind([](int i) { return i * 7; }, 6).Run());
int x = 1;
- base::Callback<void(int)> cb =
- Bind([](int* x, int i) { *x *= i; }, Unretained(&x));
+ base::RepeatingCallback<void(int)> cb =
+ BindRepeating([](int* x, int i) { *x *= i; }, Unretained(&x));
cb.Run(6);
EXPECT_EQ(6, x);
cb.Run(7);
@@ -1496,10 +1499,10 @@ int __stdcall StdCallFunc(int n) {
// - Can bind a __fastcall function.
// - Can bind a __stdcall function.
TEST_F(BindTest, WindowsCallingConventions) {
- Callback<int()> fastcall_cb = Bind(&FastCallFunc, 1);
+ RepeatingCallback<int()> fastcall_cb = BindRepeating(&FastCallFunc, 1);
EXPECT_EQ(1, fastcall_cb.Run());
- Callback<int()> stdcall_cb = Bind(&StdCallFunc, 2);
+ RepeatingCallback<int()> stdcall_cb = BindRepeating(&StdCallFunc, 2);
EXPECT_EQ(2, stdcall_cb.Run());
}
#endif
@@ -1565,9 +1568,9 @@ TEST_F(BindTest, BindNoexcept) {
// Test null callbacks cause a DCHECK.
TEST(BindDeathTest, NullCallback) {
- base::Callback<void(int)> null_cb;
+ base::RepeatingCallback<void(int)> null_cb;
ASSERT_TRUE(null_cb.is_null());
- EXPECT_DCHECK_DEATH(base::Bind(null_cb, 42));
+ EXPECT_DCHECK_DEATH(base::BindRepeating(null_cb, 42));
}
TEST(BindDeathTest, BanFirstOwnerOfRefCountedType) {
diff --git a/chromium/base/bind_unittest.nc b/chromium/base/bind_unittest.nc
index 349d38bd6d3..77a9a9ce759 100644
--- a/chromium/base/bind_unittest.nc
+++ b/chromium/base/bind_unittest.nc
@@ -86,8 +86,8 @@ struct NonEmptyFunctor {
void WontCompile() {
HasRef has_ref;
const HasRef* const_has_ref_ptr_ = &has_ref;
- Callback<void()> method_to_const_cb =
- Bind(&HasRef::VoidMethod0, const_has_ref_ptr_);
+ RepeatingCallback<void()> method_to_const_cb =
+ BindRepeating(&HasRef::VoidMethod0, const_has_ref_ptr_);
method_to_const_cb.Run();
}
@@ -99,8 +99,8 @@ void WontCompile() {
// We require refcounts unless you have Unretained().
void WontCompile() {
NoRef no_ref;
- Callback<void()> no_ref_cb =
- Bind(&NoRef::VoidMethod0, &no_ref);
+ RepeatingCallback<void()> no_ref_cb =
+ BindRepeating(&NoRef::VoidMethod0, &no_ref);
no_ref_cb.Run();
}
@@ -111,8 +111,8 @@ void WontCompile() {
// We require refcounts unless you have Unretained().
void WontCompile() {
NoRef no_ref;
- Callback<void()> no_ref_const_cb =
- Bind(&NoRef::VoidConstMethod0, &no_ref);
+ RepeatingCallback<void()> no_ref_const_cb =
+ BindRepeating(&NoRef::VoidConstMethod0, &no_ref);
no_ref_const_cb.Run();
}
@@ -123,8 +123,8 @@ void WontCompile() {
// This is just a const-correctness check.
void WontCompile() {
const NoRef* const_no_ref_ptr;
- Callback<NoRef*()> pointer_same_cb =
- Bind(&PolymorphicIdentity<NoRef*>, const_no_ref_ptr);
+ RepeatingCallback<NoRef*()> pointer_same_cb =
+ BindRepeating(&PolymorphicIdentity<NoRef*>, const_no_ref_ptr);
pointer_same_cb.Run();
}
@@ -135,8 +135,8 @@ void WontCompile() {
// This is just a const-correctness check.
void WontCompile() {
const NoRefChild* const_child_ptr;
- Callback<NoRefParent*()> pointer_super_cb =
- Bind(&PolymorphicIdentity<NoRefParent*>, const_child_ptr);
+ RepeatingCallback<NoRefParent*()> pointer_super_cb =
+ BindRepeating(&PolymorphicIdentity<NoRefParent*>, const_child_ptr);
pointer_super_cb.Run();
}
@@ -153,7 +153,7 @@ void WontCompile() {
// accidentally have the function be modifying a temporary, or a copy.
void WontCompile() {
Parent p;
- Callback<int(Parent&)> ref_arg_cb = Bind(&UnwrapParentRef);
+ RepeatingCallback<int(Parent&)> ref_arg_cb = BindRepeating(&UnwrapParentRef);
ref_arg_cb.Run(p);
}
@@ -164,7 +164,7 @@ void WontCompile() {
// See comment in NCTEST_DISALLOW_NON_CONST_REF_PARAM
void WontCompile() {
Parent p;
- Callback<int()> ref_cb = Bind(&UnwrapParentRef, p);
+ RepeatingCallback<int()> ref_cb = BindRepeating(&UnwrapParentRef, p);
ref_cb.Run();
}
@@ -177,8 +177,8 @@ void WontCompile() {
// implicitly convert an array type to a pointer type.
void WontCompile() {
HasRef p[10];
- Callback<void()> method_bound_to_array_cb =
- Bind(&HasRef::VoidMethod0, p);
+ RepeatingCallback<void()> method_bound_to_array_cb =
+ BindRepeating(&HasRef::VoidMethod0, p);
method_bound_to_array_cb.Run();
}
@@ -188,10 +188,10 @@ void WontCompile() {
void WontCompile() {
HasRef for_raw_ptr;
int a;
- Callback<void()> ref_count_as_raw_ptr_a =
- Bind(&VoidPolymorphic1<int*>, &a);
- Callback<void()> ref_count_as_raw_ptr =
- Bind(&VoidPolymorphic1<HasRef*>, &for_raw_ptr);
+ RepeatingCallback<void()> ref_count_as_raw_ptr_a =
+ BindRepeating(&VoidPolymorphic1<int*>, &a);
+ RepeatingCallback<void()> ref_count_as_raw_ptr =
+ BindRepeating(&VoidPolymorphic1<HasRef*>, &for_raw_ptr);
}
#elif defined(NCTEST_NO_LVALUE_RAW_PTR_FOR_REFCOUNTED_TYPES) // [r"fatal error: static_assert failed due to requirement '!HasRefCountedTypeAsRawPtr<base::HasRef \*>::value' \"A parameter is a refcounted type and needs scoped_refptr.\""]
@@ -199,8 +199,8 @@ void WontCompile() {
// Refcounted types should not be bound as a raw pointer.
void WontCompile() {
HasRef* for_raw_ptr = nullptr;
- Callback<void()> ref_count_as_raw_ptr =
- Bind(&VoidPolymorphic1<HasRef*>, for_raw_ptr);
+ RepeatingCallback<void()> ref_count_as_raw_ptr =
+ BindRepeating(&VoidPolymorphic1<HasRef*>, for_raw_ptr);
}
#elif defined(NCTEST_NO_RVALUE_CONST_RAW_PTR_FOR_REFCOUNTED_TYPES) // [r"fatal error: static_assert failed due to requirement '!HasRefCountedTypeAsRawPtr<const base::HasRef \*>::value' \"A parameter is a refcounted type and needs scoped_refptr.\""]
@@ -208,8 +208,8 @@ void WontCompile() {
// Refcounted types should not be bound as a raw pointer.
void WontCompile() {
const HasRef for_raw_ptr;
- Callback<void()> ref_count_as_raw_ptr =
- Bind(&VoidPolymorphic1<const HasRef*>, &for_raw_ptr);
+ RepeatingCallback<void()> ref_count_as_raw_ptr =
+ BindRepeating(&VoidPolymorphic1<const HasRef*>, &for_raw_ptr);
}
#elif defined(NCTEST_NO_LVALUE_CONST_RAW_PTR_FOR_REFCOUNTED_TYPES) // [r"fatal error: static_assert failed due to requirement '!HasRefCountedTypeAsRawPtr<const base::HasRef \*>::value' \"A parameter is a refcounted type and needs scoped_refptr.\""]
@@ -217,8 +217,8 @@ void WontCompile() {
// Refcounted types should not be bound as a raw pointer.
void WontCompile() {
const HasRef* for_raw_ptr = nullptr;
- Callback<void()> ref_count_as_raw_ptr =
- Bind(&VoidPolymorphic1<const HasRef*>, for_raw_ptr);
+ RepeatingCallback<void()> ref_count_as_raw_ptr =
+ BindRepeating(&VoidPolymorphic1<const HasRef*>, for_raw_ptr);
}
#elif defined(NCTEST_WEAKPTR_BIND_MUST_RETURN_VOID) // [r"fatal error: static_assert failed due to requirement 'std::is_void<int>::value' \"weak_ptrs can only bind to methods without return values\""]
@@ -227,50 +227,51 @@ void WontCompile() {
void WontCompile() {
NoRef no_ref;
WeakPtrFactory<NoRef> weak_factory(&no_ref);
- Callback<int()> weak_ptr_with_non_void_return_type =
- Bind(&NoRef::IntMethod0, weak_factory.GetWeakPtr());
+ RepeatingCallback<int()> weak_ptr_with_non_void_return_type =
+ BindRepeating(&NoRef::IntMethod0, weak_factory.GetWeakPtr());
weak_ptr_with_non_void_return_type.Run();
}
-#elif defined(NCTEST_DISALLOW_ASSIGN_DIFFERENT_TYPES) // [r"fatal error: no viable conversion from 'Callback<MakeUnboundRunType<void \(\*\)\(int\)>>' to 'Callback<void \(\)>'"]
+#elif defined(NCTEST_DISALLOW_ASSIGN_DIFFERENT_TYPES) // [r"fatal error: no viable conversion from 'RepeatingCallback<MakeUnboundRunType<void \(\*\)\(int\)>>' to 'RepeatingCallback<void \(\)>'"]
// Bind result cannot be assigned to Callbacks with a mismatching type.
void WontCompile() {
- Closure callback_mismatches_bind_type = Bind(&VoidPolymorphic1<int>);
+ RepeatingClosure callback_mismatches_bind_type =
+ BindRepeating(&VoidPolymorphic1<int>);
}
#elif defined(NCTEST_DISALLOW_CAPTURING_LAMBDA) // [r"fatal error: implicit instantiation of undefined template 'base::internal::FunctorTraits<\(lambda at (\.\./)+base/bind_unittest.nc:[0-9]+:[0-9]+\), void>'"]
void WontCompile() {
int i = 0, j = 0;
- Bind([i,&j]() {j = i;});
+ BindOnce([i,&j]() {j = i;});
}
#elif defined(NCTEST_DISALLOW_ONCECALLBACK_RUN_ON_LVALUE) // [r"fatal error: static_assert failed due to requirement '!sizeof \(\*this\)' \"OnceCallback::Run\(\) may only be invoked on a non-const rvalue, i\.e\. std::move\(callback\).Run\(\).\""]
void WontCompile() {
- OnceClosure cb = Bind([] {});
+ OnceClosure cb = BindOnce([] {});
cb.Run();
}
#elif defined(NCTEST_DISALLOW_ONCECALLBACK_RUN_ON_CONST_LVALUE) // [r"fatal error: static_assert failed due to requirement '!sizeof \(\*this\)' \"OnceCallback::Run\(\) may only be invoked on a non-const rvalue, i\.e\. std::move\(callback\).Run\(\).\""]
void WontCompile() {
- const OnceClosure cb = Bind([] {});
+ const OnceClosure cb = BindOnce([] {});
cb.Run();
}
#elif defined(NCTEST_DISALLOW_ONCECALLBACK_RUN_ON_CONST_RVALUE) // [r"fatal error: static_assert failed due to requirement '!sizeof \(\*this\)' \"OnceCallback::Run\(\) may only be invoked on a non-const rvalue, i\.e\. std::move\(callback\).Run\(\).\""]
void WontCompile() {
- const OnceClosure cb = Bind([] {});
+ const OnceClosure cb = BindOnce([] {});
std::move(cb).Run();
}
#elif defined(NCTEST_DISALLOW_BIND_ONCECALLBACK) // [r"fatal error: static_assert failed due to requirement '!base::internal::IsOnceCallback<base::OnceCallback<void \(int\)> >\(\)' \"BindRepeating cannot bind OnceCallback. Use BindOnce with std::move\(\).\""]
void WontCompile() {
- Bind(BindOnce([](int) {}), 42);
+ BindRepeating(BindOnce([](int) {}), 42);
}
#elif defined(NCTEST_DISALLOW_BINDONCE_LVALUE_ONCECALLBACK) // [r"fatal error: static_assert failed due to requirement '!internal::IsOnceCallback<std::decay_t<OnceCallback<void (int)> &> >() || (std::is_rvalue_reference<OnceCallback<void (int)> &>() && !std::is_const<std::remove_reference_t<OnceCallback<void (int)> &> >())' \"BindOnce requires non-const rvalue for OnceCallback binding. I.e.: base::BindOnce(std::move(callback)).\""]
@@ -297,20 +298,20 @@ void WontCompile() {
void WontCompile() {
std::unique_ptr<int> x;
- Bind(&TakesMoveOnly, x);
+ BindRepeating(&TakesMoveOnly, x);
}
#elif defined(NCTEST_BIND_MOVEONLY_TYPE_WITH_STDMOVE) // [r"Bound argument \|i\| is move-only but will be forwarded by copy\. Ensure \|Arg\| is bound using base::Passed\(\), not std::move\(\)."]
void WontCompile() {
std::unique_ptr<int> x;
- Bind(&TakesMoveOnly, std::move(x));
+ BindRepeating(&TakesMoveOnly, std::move(x));
}
#elif defined(NCTEST_BIND_NON_EMPTY_FUNCTOR) // [r"fatal error: implicit instantiation of undefined template 'base::internal::FunctorTraits<base::NonEmptyFunctor, void>'"]
void WontCompile() {
- Bind(NonEmptyFunctor());
+ BindRepeating(NonEmptyFunctor());
}
#elif defined(NCTEST_DISALLOW_BINDLAMBDAFORTESTING_LVALUE_MUTABLE_LAMBDA) // [r"BindLambdaForTesting requires non-const rvalue for mutable lambda binding\. I\.e\.: base::BindLambdaForTesting\(std::move\(lambda\)\)."]
diff --git a/chromium/base/build_time.cc b/chromium/base/build_time.cc
index 834b0412018..c7462b15fb1 100644
--- a/chromium/base/build_time.cc
+++ b/chromium/base/build_time.cc
@@ -15,8 +15,8 @@ namespace base {
Time GetBuildTime() {
Time integral_build_time;
// BUILD_DATE is exactly "Mmm DD YYYY HH:MM:SS".
- // See //build/write_build_date_header.py. "HH:MM:SS" is normally expected to
- // be "05:00:00" but is not enforced here.
+ // See //build/write_build_date_header.py. "HH:MM:SS" is expected to
+ // be "05:00:00" in non-official builds but is not enforced here.
bool result = Time::FromUTCString(BUILD_DATE, &integral_build_time);
DCHECK(result);
return integral_build_time;
diff --git a/chromium/base/build_time_unittest.cc b/chromium/base/build_time_unittest.cc
index a9cc44590d1..f032788a63d 100644
--- a/chromium/base/build_time_unittest.cc
+++ b/chromium/base/build_time_unittest.cc
@@ -15,14 +15,20 @@ TEST(BuildTime, DateLooksValid) {
EXPECT_EQ(' ', build_date[3]);
EXPECT_EQ(' ', build_date[6]);
EXPECT_EQ(' ', build_date[11]);
+#if !defined(OFFICIAL_BUILD)
EXPECT_EQ('0', build_date[12]);
EXPECT_EQ('5', build_date[13]);
+#endif
EXPECT_EQ(':', build_date[14]);
+#if !defined(OFFICIAL_BUILD)
EXPECT_EQ('0', build_date[15]);
EXPECT_EQ('0', build_date[16]);
+#endif
EXPECT_EQ(':', build_date[17]);
+#if !defined(OFFICIAL_BUILD)
EXPECT_EQ('0', build_date[18]);
EXPECT_EQ('0', build_date[19]);
+#endif
}
TEST(BuildTime, InThePast) {
diff --git a/chromium/base/callback.h b/chromium/base/callback.h
index 4aad5e8ee5d..e08f9b872b7 100644
--- a/chromium/base/callback.h
+++ b/chromium/base/callback.h
@@ -2,8 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
-// NOTE: Header files that do not require the full definition of Callback or
-// Closure should #include "base/callback_forward.h" instead of this file.
+// NOTE: Header files that do not require the full definition of
+// base::{Once,Repeating}Callback or base::{Once,Repeating}Closure should
+// #include "base/callback_forward.h" instead of this file.
#ifndef BASE_CALLBACK_H_
#define BASE_CALLBACK_H_
@@ -42,7 +43,7 @@
//
// Callbacks also support cancellation. A common use is binding the receiver
// object as a WeakPtr<T>. If that weak pointer is invalidated, calling Run()
-// will be a no-op. Note that |is_cancelled()| and |is_null()| are distinct:
+// will be a no-op. Note that |IsCancelled()| and |is_null()| are distinct:
// simply cancelling a callback will not also make it null.
//
// base::Callback is currently a type alias for base::RepeatingCallback. In the
@@ -125,11 +126,6 @@ class RepeatingCallback<R(Args...)> : public internal::CallbackBaseCopyable {
return !operator==(other);
}
- // TODO(http://crbug.com/937566): Deprecated, use == or != instead.
- bool Equals(const RepeatingCallback& other) const {
- return EqualsInternal(other);
- }
-
R Run(Args... args) const & {
PolymorphicInvoke f =
reinterpret_cast<PolymorphicInvoke>(this->polymorphic_invoke());
diff --git a/chromium/base/callback_helpers_unittest.cc b/chromium/base/callback_helpers_unittest.cc
index 5a7e2c320c1..00b2ef59ebe 100644
--- a/chromium/base/callback_helpers_unittest.cc
+++ b/chromium/base/callback_helpers_unittest.cc
@@ -50,7 +50,7 @@ void Increment(int* value) {
TEST(CallbackHelpersTest, TestScopedClosureRunnerExitScope) {
int run_count = 0;
{
- base::ScopedClosureRunner runner(base::Bind(&Increment, &run_count));
+ base::ScopedClosureRunner runner(base::BindOnce(&Increment, &run_count));
EXPECT_EQ(0, run_count);
}
EXPECT_EQ(1, run_count);
@@ -60,7 +60,7 @@ TEST(CallbackHelpersTest, TestScopedClosureRunnerRelease) {
int run_count = 0;
base::OnceClosure c;
{
- base::ScopedClosureRunner runner(base::Bind(&Increment, &run_count));
+ base::ScopedClosureRunner runner(base::BindOnce(&Increment, &run_count));
c = runner.Release();
EXPECT_EQ(0, run_count);
}
@@ -74,8 +74,8 @@ TEST(CallbackHelpersTest, TestScopedClosureRunnerReplaceClosure) {
int run_count_2 = 0;
{
base::ScopedClosureRunner runner;
- runner.ReplaceClosure(base::Bind(&Increment, &run_count_1));
- runner.ReplaceClosure(base::Bind(&Increment, &run_count_2));
+ runner.ReplaceClosure(base::BindOnce(&Increment, &run_count_1));
+ runner.ReplaceClosure(base::BindOnce(&Increment, &run_count_2));
EXPECT_EQ(0, run_count_1);
EXPECT_EQ(0, run_count_2);
}
@@ -86,7 +86,7 @@ TEST(CallbackHelpersTest, TestScopedClosureRunnerReplaceClosure) {
TEST(CallbackHelpersTest, TestScopedClosureRunnerRunAndReset) {
int run_count_3 = 0;
{
- base::ScopedClosureRunner runner(base::Bind(&Increment, &run_count_3));
+ base::ScopedClosureRunner runner(base::BindOnce(&Increment, &run_count_3));
EXPECT_EQ(0, run_count_3);
runner.RunAndReset();
EXPECT_EQ(1, run_count_3);
@@ -98,7 +98,7 @@ TEST(CallbackHelpersTest, TestScopedClosureRunnerMoveConstructor) {
int run_count = 0;
{
std::unique_ptr<base::ScopedClosureRunner> runner(
- new base::ScopedClosureRunner(base::Bind(&Increment, &run_count)));
+ new base::ScopedClosureRunner(base::BindOnce(&Increment, &run_count)));
base::ScopedClosureRunner runner2(std::move(*runner));
runner.reset();
EXPECT_EQ(0, run_count);
@@ -110,9 +110,10 @@ TEST(CallbackHelpersTest, TestScopedClosureRunnerMoveAssignment) {
int run_count_1 = 0;
int run_count_2 = 0;
{
- base::ScopedClosureRunner runner(base::Bind(&Increment, &run_count_1));
+ base::ScopedClosureRunner runner(base::BindOnce(&Increment, &run_count_1));
{
- base::ScopedClosureRunner runner2(base::Bind(&Increment, &run_count_2));
+ base::ScopedClosureRunner runner2(
+ base::BindOnce(&Increment, &run_count_2));
runner = std::move(runner2);
EXPECT_EQ(0, run_count_1);
EXPECT_EQ(0, run_count_2);
diff --git a/chromium/base/callback_internal.h b/chromium/base/callback_internal.h
index 390461c9aa5..777397d88a3 100644
--- a/chromium/base/callback_internal.h
+++ b/chromium/base/callback_internal.h
@@ -19,9 +19,12 @@ struct FakeBindState;
namespace internal {
+class BindStateBase;
class FinallyExecutorCommon;
class ThenAndCatchExecutorCommon;
-class BindStateBase;
+
+template <typename ReturnType>
+class PostTaskExecutor;
template <typename Functor, typename... BoundArgs>
struct BindState;
@@ -140,6 +143,9 @@ class BASE_EXPORT CallbackBase {
friend class FinallyExecutorCommon;
friend class ThenAndCatchExecutorCommon;
+ template <typename ReturnType>
+ friend class PostTaskExecutor;
+
using InvokeFuncStorage = BindStateBase::InvokeFuncStorage;
// Returns true if this callback equals |other|. |other| may be null.
diff --git a/chromium/base/callback_unittest.cc b/chromium/base/callback_unittest.cc
index d8aff7dff7b..10c1c183458 100644
--- a/chromium/base/callback_unittest.cc
+++ b/chromium/base/callback_unittest.cc
@@ -165,11 +165,15 @@ TEST_F(CallbackTest, MaybeValidInvalidateWeakPtrsOnSameSequence) {
RepeatingCallback<void()> cb =
BindRepeating(&ClassWithAMethod::TheMethod, ptr);
EXPECT_TRUE(cb.MaybeValid());
+ EXPECT_FALSE(cb.IsCancelled());
factory.InvalidateWeakPtrs();
- // MaybeValid() should be false because InvalidateWeakPtrs() was called on
- // the same thread.
+ // MaybeValid() should be false and IsCancelled() should become true because
+ // InvalidateWeakPtrs() was called on the same thread.
EXPECT_FALSE(cb.MaybeValid());
+ EXPECT_TRUE(cb.IsCancelled());
+ // is_null() is not affected by the invalidated WeakPtr.
+ EXPECT_FALSE(cb.is_null());
}
TEST_F(CallbackTest, MaybeValidInvalidateWeakPtrsOnOtherSequence) {
diff --git a/chromium/base/callback_unittest.nc b/chromium/base/callback_unittest.nc
index 32615293410..df681843433 100644
--- a/chromium/base/callback_unittest.nc
+++ b/chromium/base/callback_unittest.nc
@@ -15,36 +15,37 @@ class Parent {
class Child : Parent {
};
-#if defined(NCTEST_EQUALS_REQUIRES_SAMETYPE) // [r"fatal error: no viable conversion from 'RepeatingCallback<int \(\)>' to 'const RepeatingCallback<void \(\)>'"]
+#if defined(NCTEST_EQUALS_REQUIRES_SAMETYPE) // [r"fatal error: invalid operands to binary expression \('RepeatingCallback<void \(\)>' and 'RepeatingCallback<int \(\)>'\)"]
// Attempting to call comparison function on two callbacks of different type.
//
// This should be a compile time failure because each callback type should be
// considered distinct.
void WontCompile() {
- Closure c1;
- Callback<int()> c2;
- c1.Equals(c2);
+ RepeatingCallback<void()> c1;
+ RepeatingCallback<int()> c2;
+ c1 == c2;
}
-#elif defined(NCTEST_CONSTRUCTION_FROM_SUBTYPE) // [r"fatal error: no viable conversion from 'Callback<base::Parent \(\)>' to 'Callback<base::Child \(\)>'"]
+#elif defined(NCTEST_CONSTRUCTION_FROM_SUBTYPE) // [r"fatal error: no viable conversion from 'RepeatingCallback<base::Parent \(\)>' to 'RepeatingCallback<base::Child \(\)>'"]
-// Construction of Callback<A> from Callback<B> if A is supertype of B.
+// Construction of RepeatingCallback<A> from RepeatingCallback<B> if A is
+// supertype of B.
//
// While this is technically safe, most people aren't used to it when coding
// C++ so if this is happening, it is almost certainly an error.
void WontCompile() {
- Callback<Parent()> cb_a;
- Callback<Child()> cb_b = cb_a;
+ RepeatingCallback<Parent()> cb_a;
+ RepeatingCallback<Child()> cb_b = cb_a;
}
#elif defined(NCTEST_ASSIGNMENT_FROM_SUBTYPE) // [r"fatal error: no viable overloaded '='"]
-// Assignment of Callback<A> from Callback<B> if A is supertype of B.
-// See explanation for NCTEST_CONSTRUCTION_FROM_SUBTYPE
+// Assignment of RepeatingCallback<A> from RepeatingCallback<B> if A is
+// supertype of B. See explanation for NCTEST_CONSTRUCTION_FROM_SUBTYPE.
void WontCompile() {
- Callback<Parent()> cb_a;
- Callback<Child()> cb_b;
+ RepeatingCallback<Parent()> cb_a;
+ RepeatingCallback<Child()> cb_b;
cb_a = cb_b;
}
diff --git a/chromium/base/cancelable_callback.h b/chromium/base/cancelable_callback.h
index a98101a162d..b4c40b1175f 100644
--- a/chromium/base/cancelable_callback.h
+++ b/chromium/base/cancelable_callback.h
@@ -33,7 +33,8 @@
// run_loop.QuitWhenIdle();
// }
//
-// CancelableClosure timeout(base::Bind(&TimeoutCallback, "Test timed out."));
+// CancelableOnceClosure timeout(
+// base::BindOnce(&TimeoutCallback, "Test timed out."));
// ThreadTaskRunnerHandle::Get()->PostDelayedTask(FROM_HERE, timeout.callback(),
// TimeDelta::FromSeconds(4));
// RunIntensiveTest();
@@ -61,11 +62,11 @@ namespace internal {
template <typename CallbackType>
class CancelableCallbackImpl {
public:
- CancelableCallbackImpl() : weak_ptr_factory_(this) {}
+ CancelableCallbackImpl() {}
// |callback| must not be null.
explicit CancelableCallbackImpl(CallbackType callback)
- : callback_(std::move(callback)), weak_ptr_factory_(this) {
+ : callback_(std::move(callback)) {
DCHECK(callback_);
}
@@ -128,7 +129,7 @@ class CancelableCallbackImpl {
// The stored closure that may be cancelled.
CallbackType callback_;
- mutable base::WeakPtrFactory<CancelableCallbackImpl> weak_ptr_factory_;
+ mutable base::WeakPtrFactory<CancelableCallbackImpl> weak_ptr_factory_{this};
DISALLOW_COPY_AND_ASSIGN(CancelableCallbackImpl);
};
@@ -145,7 +146,7 @@ using CancelableOnceClosure = CancelableOnceCallback<void()>;
template <typename Signature>
using CancelableRepeatingCallback =
internal::CancelableCallbackImpl<RepeatingCallback<Signature>>;
-using CancelableRepeatingClosure = CancelableOnceCallback<void()>;
+using CancelableRepeatingClosure = CancelableRepeatingCallback<void()>;
template <typename Signature>
using CancelableCallback = CancelableRepeatingCallback<Signature>;
diff --git a/chromium/base/cancelable_callback_unittest.cc b/chromium/base/cancelable_callback_unittest.cc
index da1a957caa4..da6c7bb64d5 100644
--- a/chromium/base/cancelable_callback_unittest.cc
+++ b/chromium/base/cancelable_callback_unittest.cc
@@ -38,10 +38,10 @@ void OnMoveOnlyReceived(int* value, std::unique_ptr<int> result) {
// - After Cancel(), Run() completes but has no effect.
TEST(CancelableCallbackTest, Cancel) {
int count = 0;
- CancelableClosure cancelable(
- base::Bind(&Increment, base::Unretained(&count)));
+ CancelableRepeatingClosure cancelable(
+ base::BindRepeating(&Increment, base::Unretained(&count)));
- base::Closure callback = cancelable.callback();
+ base::RepeatingClosure callback = cancelable.callback();
callback.Run();
EXPECT_EQ(1, count);
@@ -59,11 +59,11 @@ TEST(CancelableCallbackTest, Cancel) {
// - After Cancel(), callback() returns a null callback.
TEST(CancelableCallbackTest, MultipleCancel) {
int count = 0;
- CancelableClosure cancelable(
- base::Bind(&Increment, base::Unretained(&count)));
+ CancelableRepeatingClosure cancelable(
+ base::BindRepeating(&Increment, base::Unretained(&count)));
- base::Closure callback1 = cancelable.callback();
- base::Closure callback2 = cancelable.callback();
+ base::RepeatingClosure callback1 = cancelable.callback();
+ base::RepeatingClosure callback2 = cancelable.callback();
cancelable.Cancel();
callback1.Run();
@@ -76,7 +76,7 @@ TEST(CancelableCallbackTest, MultipleCancel) {
cancelable.Cancel();
// callback() of a cancelled callback is null.
- base::Closure callback3 = cancelable.callback();
+ base::RepeatingClosure callback3 = cancelable.callback();
EXPECT_TRUE(callback3.is_null());
}
@@ -84,11 +84,11 @@ TEST(CancelableCallbackTest, MultipleCancel) {
// - Destruction of CancelableCallback cancels outstanding callbacks.
TEST(CancelableCallbackTest, CallbackCanceledOnDestruction) {
int count = 0;
- base::Closure callback;
+ base::RepeatingClosure callback;
{
- CancelableClosure cancelable(
- base::Bind(&Increment, base::Unretained(&count)));
+ CancelableRepeatingClosure cancelable(
+ base::BindRepeating(&Increment, base::Unretained(&count)));
callback = cancelable.callback();
callback.Run();
@@ -105,7 +105,8 @@ TEST(CancelableCallbackTest, CancelDropsCallback) {
scoped_refptr<TestRefCounted> ref_counted = new TestRefCounted;
EXPECT_TRUE(ref_counted->HasOneRef());
- CancelableClosure cancelable(base::Bind(RefCountedParam, ref_counted));
+ CancelableOnceClosure cancelable(
+ base::BindOnce(RefCountedParam, ref_counted));
EXPECT_FALSE(cancelable.IsCancelled());
EXPECT_TRUE(ref_counted.get());
EXPECT_FALSE(ref_counted->HasOneRef());
@@ -122,10 +123,10 @@ TEST(CancelableCallbackTest, CancelDropsCallback) {
// - Reset() deactivates outstanding callbacks.
TEST(CancelableCallbackTest, Reset) {
int count = 0;
- CancelableClosure cancelable(
- base::Bind(&Increment, base::Unretained(&count)));
+ CancelableRepeatingClosure cancelable(
+ base::BindRepeating(&Increment, base::Unretained(&count)));
- base::Closure callback = cancelable.callback();
+ base::RepeatingClosure callback = cancelable.callback();
callback.Run();
EXPECT_EQ(1, count);
@@ -133,7 +134,7 @@ TEST(CancelableCallbackTest, Reset) {
EXPECT_EQ(2, count);
cancelable.Reset(
- base::Bind(&IncrementBy, base::Unretained(&count), 3));
+ base::BindRepeating(&IncrementBy, base::Unretained(&count), 3));
EXPECT_FALSE(cancelable.IsCancelled());
// The stale copy of the cancelable callback is non-null.
@@ -143,7 +144,7 @@ TEST(CancelableCallbackTest, Reset) {
callback.Run();
EXPECT_EQ(2, count);
- base::Closure callback2 = cancelable.callback();
+ base::RepeatingClosure callback2 = cancelable.callback();
ASSERT_FALSE(callback2.is_null());
callback2.Run();
@@ -153,12 +154,11 @@ TEST(CancelableCallbackTest, Reset) {
// IsCanceled().
// - Cancel() transforms the CancelableCallback into a cancelled state.
TEST(CancelableCallbackTest, IsNull) {
- CancelableClosure cancelable;
+ CancelableOnceClosure cancelable;
EXPECT_TRUE(cancelable.IsCancelled());
int count = 0;
- cancelable.Reset(base::Bind(&Increment,
- base::Unretained(&count)));
+ cancelable.Reset(base::BindOnce(&Increment, base::Unretained(&count)));
EXPECT_FALSE(cancelable.IsCancelled());
cancelable.Cancel();
@@ -171,8 +171,8 @@ TEST(CancelableCallbackTest, PostTask) {
test::ScopedTaskEnvironment scoped_task_environment;
int count = 0;
- CancelableClosure cancelable(base::Bind(&Increment,
- base::Unretained(&count)));
+ CancelableRepeatingClosure cancelable(
+ base::BindRepeating(&Increment, base::Unretained(&count)));
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, cancelable.callback());
RunLoop().RunUntilIdle();
@@ -194,8 +194,8 @@ TEST(CancelableCallbackTest, MoveOnlyType) {
const int kExpectedResult = 42;
int result = 0;
- CancelableCallback<void(std::unique_ptr<int>)> cb(
- base::Bind(&OnMoveOnlyReceived, base::Unretained(&result)));
+ CancelableRepeatingCallback<void(std::unique_ptr<int>)> cb(
+ base::BindRepeating(&OnMoveOnlyReceived, base::Unretained(&result)));
cb.callback().Run(std::make_unique<int>(kExpectedResult));
EXPECT_EQ(kExpectedResult, result);
diff --git a/chromium/base/command_line.cc b/chromium/base/command_line.cc
index 3f13db20a76..281b3dedcf9 100644
--- a/chromium/base/command_line.cc
+++ b/chromium/base/command_line.cc
@@ -45,10 +45,10 @@ const CommandLine::CharType* const kSwitchPrefixes[] = {"--", "-"};
#endif
size_t switch_prefix_count = size(kSwitchPrefixes);
-size_t GetSwitchPrefixLength(const CommandLine::StringType& string) {
+size_t GetSwitchPrefixLength(CommandLine::StringPieceType string) {
for (size_t i = 0; i < switch_prefix_count; ++i) {
CommandLine::StringType prefix(kSwitchPrefixes[i]);
- if (string.compare(0, prefix.length(), prefix) == 0)
+ if (string.substr(0, prefix.length()) == prefix)
return prefix.length();
}
return 0;
@@ -72,6 +72,19 @@ bool IsSwitch(const CommandLine::StringType& string,
return true;
}
+// Returns true iff |string| represents a switch with key
+// |switch_key_without_prefix|, regardless of value.
+bool IsSwitchWithKey(CommandLine::StringPieceType string,
+ CommandLine::StringPieceType switch_key_without_prefix) {
+ size_t prefix_length = GetSwitchPrefixLength(string);
+ if (prefix_length == 0 || prefix_length == string.length())
+ return false;
+
+ const size_t equals_position = string.find(kSwitchValueSeparator);
+ return string.substr(prefix_length, equals_position - prefix_length) ==
+ switch_key_without_prefix;
+}
+
// Append switches and arguments, keeping switches before arguments.
void AppendSwitchesAndArguments(CommandLine* command_line,
const CommandLine::StringVector& argv) {
@@ -286,7 +299,7 @@ void CommandLine::SetProgram(const FilePath& program) {
bool CommandLine::HasSwitch(const StringPiece& switch_string) const {
DCHECK_EQ(ToLowerASCII(switch_string), switch_string);
- return ContainsKey(switches_, switch_string);
+ return Contains(switches_, switch_string);
}
bool CommandLine::HasSwitch(const char switch_constant[]) const {
@@ -362,9 +375,38 @@ void CommandLine::AppendSwitchASCII(const std::string& switch_string,
#endif
}
-void CommandLine::RemoveSwitch(const StringPiece& switch_string) {
- DCHECK_EQ(ToLowerASCII(switch_string), switch_string);
- switches_.erase(switch_string.as_string());
+void CommandLine::RemoveSwitch(base::StringPiece switch_key_without_prefix) {
+#if defined(OS_WIN)
+ StringType switch_key_native = base::ASCIIToUTF16(switch_key_without_prefix);
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+ StringType switch_key_native = switch_key_without_prefix.as_string();
+#endif
+
+ DCHECK_EQ(ToLowerASCII(switch_key_without_prefix), switch_key_without_prefix);
+ DCHECK_EQ(0u, GetSwitchPrefixLength(switch_key_native));
+ size_t erased_from_switches =
+ switches_.erase(switch_key_without_prefix.as_string());
+ DCHECK(erased_from_switches <= 1);
+ if (!erased_from_switches)
+ return;
+
+ // Also erase from the switches section of |argv_| and update |begin_args_|
+ // accordingly.
+ // Switches in |argv_| have indices [1, begin_args_).
+ auto argv_switches_begin = argv_.begin() + 1;
+ auto argv_switches_end = argv_.begin() + begin_args_;
+ DCHECK(argv_switches_begin <= argv_switches_end);
+ DCHECK(argv_switches_end <= argv_.end());
+ auto arg_iter = std::find_if(argv_switches_begin, argv_switches_end,
+ [&switch_key_native](const StringType& arg) {
+ return IsSwitchWithKey(arg, switch_key_native);
+ });
+ if (arg_iter == argv_switches_end) {
+ NOTREACHED();
+ return;
+ }
+ argv_.erase(arg_iter);
+ --begin_args_;
}
void CommandLine::CopySwitchesFrom(const CommandLine& source,
diff --git a/chromium/base/command_line.h b/chromium/base/command_line.h
index 75c13c25c7e..cd32efbc046 100644
--- a/chromium/base/command_line.h
+++ b/chromium/base/command_line.h
@@ -35,8 +35,10 @@ class BASE_EXPORT CommandLine {
#if defined(OS_WIN)
// The native command line string type.
using StringType = string16;
+ using StringPieceType = base::StringPiece16;
#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
using StringType = std::string;
+ using StringPieceType = base::StringPiece;
#endif
using CharType = StringType::value_type;
@@ -183,8 +185,9 @@ class BASE_EXPORT CommandLine {
void AppendSwitchASCII(const std::string& switch_string,
const std::string& value);
- // Removes a switch.
- void RemoveSwitch(const StringPiece& switch_string);
+ // Removes the switch that matches |switch_key_without_prefix|, regardless of
+ // prefix and value. If no such switch is present, this has no effect.
+ void RemoveSwitch(const base::StringPiece switch_key_without_prefix);
// Copy a set of switches (and any values) from another command line.
// Commonly used when launching a subprocess.
diff --git a/chromium/base/command_line_unittest.cc b/chromium/base/command_line_unittest.cc
index 26a045b212e..d4194fce7ac 100644
--- a/chromium/base/command_line_unittest.cc
+++ b/chromium/base/command_line_unittest.cc
@@ -13,6 +13,7 @@
#include "base/strings/strcat.h"
#include "base/strings/utf_string_conversions.h"
#include "build/build_config.h"
+#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
@@ -441,9 +442,9 @@ TEST(CommandLineTest, PrependComplexWrapper) {
}
TEST(CommandLineTest, RemoveSwitch) {
- std::string switch1 = "switch1";
- std::string switch2 = "switch2";
- std::string value2 = "value";
+ const std::string switch1 = "switch1";
+ const std::string switch2 = "switch2";
+ const std::string value2 = "value";
CommandLine cl(FilePath(FILE_PATH_LITERAL("Program")));
@@ -453,12 +454,90 @@ TEST(CommandLineTest, RemoveSwitch) {
EXPECT_TRUE(cl.HasSwitch(switch1));
EXPECT_TRUE(cl.HasSwitch(switch2));
EXPECT_EQ(value2, cl.GetSwitchValueASCII(switch2));
+ EXPECT_THAT(cl.argv(),
+ testing::ElementsAre(FILE_PATH_LITERAL("Program"),
+ FILE_PATH_LITERAL("--switch1"),
+ FILE_PATH_LITERAL("--switch2=value")));
cl.RemoveSwitch(switch1);
EXPECT_FALSE(cl.HasSwitch(switch1));
EXPECT_TRUE(cl.HasSwitch(switch2));
EXPECT_EQ(value2, cl.GetSwitchValueASCII(switch2));
+ EXPECT_THAT(cl.argv(),
+ testing::ElementsAre(FILE_PATH_LITERAL("Program"),
+ FILE_PATH_LITERAL("--switch2=value")));
+}
+
+TEST(CommandLineTest, RemoveSwitchWithValue) {
+ const std::string switch1 = "switch1";
+ const std::string switch2 = "switch2";
+ const std::string value2 = "value";
+
+ CommandLine cl(FilePath(FILE_PATH_LITERAL("Program")));
+
+ cl.AppendSwitch(switch1);
+ cl.AppendSwitchASCII(switch2, value2);
+
+ EXPECT_TRUE(cl.HasSwitch(switch1));
+ EXPECT_TRUE(cl.HasSwitch(switch2));
+ EXPECT_EQ(value2, cl.GetSwitchValueASCII(switch2));
+ EXPECT_THAT(cl.argv(),
+ testing::ElementsAre(FILE_PATH_LITERAL("Program"),
+ FILE_PATH_LITERAL("--switch1"),
+ FILE_PATH_LITERAL("--switch2=value")));
+
+ cl.RemoveSwitch(switch2);
+
+ EXPECT_TRUE(cl.HasSwitch(switch1));
+ EXPECT_FALSE(cl.HasSwitch(switch2));
+ EXPECT_THAT(cl.argv(), testing::ElementsAre(FILE_PATH_LITERAL("Program"),
+ FILE_PATH_LITERAL("--switch1")));
+}
+
+TEST(CommandLineTest, AppendAndRemoveSwitchWithDefaultPrefix) {
+ CommandLine cl(FilePath(FILE_PATH_LITERAL("Program")));
+
+ cl.AppendSwitch("foo");
+ EXPECT_THAT(cl.argv(), testing::ElementsAre(FILE_PATH_LITERAL("Program"),
+ FILE_PATH_LITERAL("--foo")));
+ EXPECT_EQ(0u, cl.GetArgs().size());
+
+ cl.RemoveSwitch("foo");
+ EXPECT_THAT(cl.argv(), testing::ElementsAre(FILE_PATH_LITERAL("Program")));
+ EXPECT_EQ(0u, cl.GetArgs().size());
+}
+
+TEST(CommandLineTest, AppendAndRemoveSwitchWithAlternativePrefix) {
+ CommandLine cl(FilePath(FILE_PATH_LITERAL("Program")));
+
+ cl.AppendSwitch("-foo");
+ EXPECT_THAT(cl.argv(), testing::ElementsAre(FILE_PATH_LITERAL("Program"),
+ FILE_PATH_LITERAL("-foo")));
+ EXPECT_EQ(0u, cl.GetArgs().size());
+
+ cl.RemoveSwitch("foo");
+ EXPECT_THAT(cl.argv(), testing::ElementsAre(FILE_PATH_LITERAL("Program")));
+ EXPECT_EQ(0u, cl.GetArgs().size());
+}
+
+TEST(CommandLineTest, AppendAndRemoveSwitchPreservesOtherSwitchesAndArgs) {
+ CommandLine cl(FilePath(FILE_PATH_LITERAL("Program")));
+
+ cl.AppendSwitch("foo");
+ cl.AppendSwitch("bar");
+ cl.AppendArg("arg");
+ EXPECT_THAT(cl.argv(), testing::ElementsAre(FILE_PATH_LITERAL("Program"),
+ FILE_PATH_LITERAL("--foo"),
+ FILE_PATH_LITERAL("--bar"),
+ FILE_PATH_LITERAL("arg")));
+ EXPECT_THAT(cl.GetArgs(), testing::ElementsAre(FILE_PATH_LITERAL("arg")));
+
+ cl.RemoveSwitch("foo");
+ EXPECT_THAT(cl.argv(), testing::ElementsAre(FILE_PATH_LITERAL("Program"),
+ FILE_PATH_LITERAL("--bar"),
+ FILE_PATH_LITERAL("arg")));
+ EXPECT_THAT(cl.GetArgs(), testing::ElementsAre(FILE_PATH_LITERAL("arg")));
}
TEST(CommandLineTest, MultipleSameSwitch) {
diff --git a/chromium/base/cpu.cc b/chromium/base/cpu.cc
index d9ce2f0d66c..f42e2473238 100644
--- a/chromium/base/cpu.cc
+++ b/chromium/base/cpu.cc
@@ -48,6 +48,7 @@ CPU::CPU()
has_avx2_(false),
has_aesni_(false),
has_non_stop_time_stamp_counter_(false),
+ is_running_in_vm_(false),
cpu_vendor_("unknown") {
Initialize();
}
@@ -156,7 +157,6 @@ void CPU::Initialize() {
memcpy(cpu_string, &cpu_info[1], kVendorNameSize);
cpu_string[kVendorNameSize] = '\0';
cpu_vendor_ = cpu_string;
- bool hypervisor = false;
// Interpret CPU feature information.
if (num_ids > 0) {
@@ -186,7 +186,7 @@ void CPU::Initialize() {
// This is checking for any hypervisor. Hypervisors may choose not to
// announce themselves. Hypervisors trap CPUID and sometimes return
// different results to underlying hardware.
- hypervisor = (cpu_info[2] & 0x80000000) != 0;
+ is_running_in_vm_ = (cpu_info[2] & 0x80000000) != 0;
// AVX instructions will generate an illegal instruction exception unless
// a) they are supported by the CPU,
@@ -235,7 +235,7 @@ void CPU::Initialize() {
has_non_stop_time_stamp_counter_ = (cpu_info[3] & (1 << 8)) != 0;
}
- if (!has_non_stop_time_stamp_counter_ && hypervisor) {
+ if (!has_non_stop_time_stamp_counter_ && is_running_in_vm_) {
int cpu_info_hv[4] = {};
__cpuid(cpu_info_hv, 0x40000000);
if (cpu_info_hv[1] == 0x7263694D && // Micr
diff --git a/chromium/base/cpu.h b/chromium/base/cpu.h
index 2c6caeafdd0..0c2f5fc79c9 100644
--- a/chromium/base/cpu.h
+++ b/chromium/base/cpu.h
@@ -52,6 +52,7 @@ class BASE_EXPORT CPU final {
bool has_non_stop_time_stamp_counter() const {
return has_non_stop_time_stamp_counter_;
}
+ bool is_running_in_vm() const { return is_running_in_vm_; }
IntelMicroArchitecture GetIntelMicroArchitecture() const;
const std::string& cpu_brand() const { return cpu_brand_; }
@@ -79,6 +80,7 @@ class BASE_EXPORT CPU final {
bool has_avx2_;
bool has_aesni_;
bool has_non_stop_time_stamp_counter_;
+ bool is_running_in_vm_;
std::string cpu_vendor_;
std::string cpu_brand_;
};
diff --git a/chromium/base/cpu_unittest.cc b/chromium/base/cpu_unittest.cc
index 8a68ea07817..b6403af2380 100644
--- a/chromium/base/cpu_unittest.cc
+++ b/chromium/base/cpu_unittest.cc
@@ -129,6 +129,6 @@ TEST(CPU, RunExtendedInstructions) {
// For https://crbug.com/249713
TEST(CPU, BrandAndVendorContainsNoNUL) {
base::CPU cpu;
- EXPECT_FALSE(base::ContainsValue(cpu.cpu_brand(), '\0'));
- EXPECT_FALSE(base::ContainsValue(cpu.vendor_name(), '\0'));
+ EXPECT_FALSE(base::Contains(cpu.cpu_brand(), '\0'));
+ EXPECT_FALSE(base::Contains(cpu.vendor_name(), '\0'));
}
diff --git a/chromium/base/debug/activity_analyzer.cc b/chromium/base/debug/activity_analyzer.cc
index 1bd60262a9b..bbb45fcc9cb 100644
--- a/chromium/base/debug/activity_analyzer.cc
+++ b/chromium/base/debug/activity_analyzer.cc
@@ -12,6 +12,7 @@
#include "base/files/memory_mapped_file.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
+#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_macros.h"
#include "base/stl_util.h"
#include "base/strings/string_util.h"
@@ -34,8 +35,8 @@ enum AnalyzerCreationError {
};
void LogAnalyzerCreationError(AnalyzerCreationError error) {
- UMA_HISTOGRAM_ENUMERATION("ActivityTracker.Collect.AnalyzerCreationError",
- error, kAnalyzerCreationErrorMax);
+ UmaHistogramEnumeration("ActivityTracker.Collect.AnalyzerCreationError",
+ error, kAnalyzerCreationErrorMax);
}
} // namespace
@@ -354,7 +355,7 @@ void GlobalActivityAnalyzer::PrepareAllAnalyzers() {
// Add this analyzer to the map of known ones, indexed by a unique
// thread
// identifier.
- DCHECK(!base::ContainsKey(analyzers_, analyzer->GetThreadKey()));
+ DCHECK(!base::Contains(analyzers_, analyzer->GetThreadKey()));
analyzer->allocator_reference_ = ref;
analyzers_[analyzer->GetThreadKey()] = std::move(analyzer);
} break;
@@ -364,7 +365,7 @@ void GlobalActivityAnalyzer::PrepareAllAnalyzers() {
int64_t process_id;
int64_t create_stamp;
ActivityUserData::GetOwningProcessId(base, &process_id, &create_stamp);
- DCHECK(!base::ContainsKey(process_data_, process_id));
+ DCHECK(!base::Contains(process_data_, process_id));
// Create a snapshot of the data. This can fail if the data is somehow
// corrupted or the process shutdown and the memory being released.
diff --git a/chromium/base/debug/activity_analyzer_unittest.cc b/chromium/base/debug/activity_analyzer_unittest.cc
index 15b08f9d651..4fdc2d27f2b 100644
--- a/chromium/base/debug/activity_analyzer_unittest.cc
+++ b/chromium/base/debug/activity_analyzer_unittest.cc
@@ -294,22 +294,22 @@ TEST_F(ActivityAnalyzerTest, UserDataSnapshotTest) {
const ActivityUserData::Snapshot& user_data =
analyzer_snapshot.user_data_stack.at(1);
EXPECT_EQ(8U, user_data.size());
- ASSERT_TRUE(ContainsKey(user_data, "raw2"));
+ ASSERT_TRUE(Contains(user_data, "raw2"));
EXPECT_EQ("foo2", user_data.at("raw2").Get().as_string());
- ASSERT_TRUE(ContainsKey(user_data, "string2"));
+ ASSERT_TRUE(Contains(user_data, "string2"));
EXPECT_EQ("bar2", user_data.at("string2").GetString().as_string());
- ASSERT_TRUE(ContainsKey(user_data, "char2"));
+ ASSERT_TRUE(Contains(user_data, "char2"));
EXPECT_EQ('2', user_data.at("char2").GetChar());
- ASSERT_TRUE(ContainsKey(user_data, "int2"));
+ ASSERT_TRUE(Contains(user_data, "int2"));
EXPECT_EQ(-2222, user_data.at("int2").GetInt());
- ASSERT_TRUE(ContainsKey(user_data, "uint2"));
+ ASSERT_TRUE(Contains(user_data, "uint2"));
EXPECT_EQ(2222U, user_data.at("uint2").GetUint());
- ASSERT_TRUE(ContainsKey(user_data, "bool2"));
+ ASSERT_TRUE(Contains(user_data, "bool2"));
EXPECT_FALSE(user_data.at("bool2").GetBool());
- ASSERT_TRUE(ContainsKey(user_data, "ref2"));
+ ASSERT_TRUE(Contains(user_data, "ref2"));
EXPECT_EQ(string2a, user_data.at("ref2").GetReference().data());
EXPECT_EQ(sizeof(string2a), user_data.at("ref2").GetReference().size());
- ASSERT_TRUE(ContainsKey(user_data, "sref2"));
+ ASSERT_TRUE(Contains(user_data, "sref2"));
EXPECT_EQ(string2b, user_data.at("sref2").GetStringReference().data());
EXPECT_EQ(strlen(string2b),
user_data.at("sref2").GetStringReference().size());
@@ -372,22 +372,22 @@ TEST_F(ActivityAnalyzerTest, GlobalUserDataTest) {
DCHECK_EQ(pid, first_pid);
const ActivityUserData::Snapshot& snapshot =
global_analyzer.GetProcessDataSnapshot(pid);
- ASSERT_TRUE(ContainsKey(snapshot, "raw"));
+ ASSERT_TRUE(Contains(snapshot, "raw"));
EXPECT_EQ("foo", snapshot.at("raw").Get().as_string());
- ASSERT_TRUE(ContainsKey(snapshot, "string"));
+ ASSERT_TRUE(Contains(snapshot, "string"));
EXPECT_EQ("bar", snapshot.at("string").GetString().as_string());
- ASSERT_TRUE(ContainsKey(snapshot, "char"));
+ ASSERT_TRUE(Contains(snapshot, "char"));
EXPECT_EQ('9', snapshot.at("char").GetChar());
- ASSERT_TRUE(ContainsKey(snapshot, "int"));
+ ASSERT_TRUE(Contains(snapshot, "int"));
EXPECT_EQ(-9999, snapshot.at("int").GetInt());
- ASSERT_TRUE(ContainsKey(snapshot, "uint"));
+ ASSERT_TRUE(Contains(snapshot, "uint"));
EXPECT_EQ(9999U, snapshot.at("uint").GetUint());
- ASSERT_TRUE(ContainsKey(snapshot, "bool"));
+ ASSERT_TRUE(Contains(snapshot, "bool"));
EXPECT_TRUE(snapshot.at("bool").GetBool());
- ASSERT_TRUE(ContainsKey(snapshot, "ref"));
+ ASSERT_TRUE(Contains(snapshot, "ref"));
EXPECT_EQ(string1, snapshot.at("ref").GetReference().data());
EXPECT_EQ(sizeof(string1), snapshot.at("ref").GetReference().size());
- ASSERT_TRUE(ContainsKey(snapshot, "sref"));
+ ASSERT_TRUE(Contains(snapshot, "sref"));
EXPECT_EQ(string2, snapshot.at("sref").GetStringReference().data());
EXPECT_EQ(strlen(string2), snapshot.at("sref").GetStringReference().size());
}
diff --git a/chromium/base/debug/activity_tracker.cc b/chromium/base/debug/activity_tracker.cc
index f5ee9c48fe5..2a73ac24c93 100644
--- a/chromium/base/debug/activity_tracker.cc
+++ b/chromium/base/debug/activity_tracker.cc
@@ -1363,23 +1363,14 @@ ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() {
// because the underlying allocator wasn't given enough memory to satisfy
// to all possible requests.
NOTREACHED();
- // Report the thread-count at which the allocator was full so that the
- // failure can be seen and underlying memory resized appropriately.
- UMA_HISTOGRAM_COUNTS_1000(
- "ActivityTracker.ThreadTrackers.MemLimitTrackerCount",
- thread_tracker_count_.load(std::memory_order_relaxed));
+
// Return null, just as if tracking wasn't enabled.
return nullptr;
}
// Convert the memory block found above into an actual memory address.
// Doing the conversion as a Header object enacts the 32/64-bit size
- // consistency checks which would not otherwise be done. Unfortunately,
- // some older compilers and MSVC don't have standard-conforming definitions
- // of std::atomic which cause it not to be plain-old-data. Don't check on
- // those platforms assuming that the checks on other platforms will be
- // sufficient.
- // TODO(bcwhite): Review this after major compiler releases.
+ // consistency checks which would not otherwise be done.
DCHECK(mem_reference);
void* mem_base;
mem_base =
@@ -1395,10 +1386,7 @@ ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() {
DCHECK(tracker->IsValid());
auto* tracker_raw = tracker.get();
this_thread_tracker_.Set(std::move(tracker));
- int old_count = thread_tracker_count_.fetch_add(1, std::memory_order_relaxed);
-
- UMA_HISTOGRAM_EXACT_LINEAR("ActivityTracker.ThreadTrackers.Count",
- old_count + 1, static_cast<int>(kMaxThreadCount));
+ thread_tracker_count_.fetch_add(1, std::memory_order_relaxed);
return tracker_raw;
}
@@ -1427,7 +1415,7 @@ void GlobalActivityTracker::RecordProcessLaunch(
DCHECK_NE(0, pid);
base::AutoLock lock(global_tracker_lock_);
- if (base::ContainsKey(known_processes_, pid)) {
+ if (base::Contains(known_processes_, pid)) {
// TODO(bcwhite): Measure this in UMA.
NOTREACHED() << "Process #" << process_id
<< " was previously recorded as \"launched\""
diff --git a/chromium/base/debug/debugger_posix.cc b/chromium/base/debug/debugger_posix.cc
index 674a78bf612..45bf1da4176 100644
--- a/chromium/base/debug/debugger_posix.cc
+++ b/chromium/base/debug/debugger_posix.cc
@@ -19,7 +19,6 @@
#include "base/clang_coverage_buildflags.h"
#include "base/stl_util.h"
-#include "base/test/clang_coverage.h"
#include "base/threading/platform_thread.h"
#include "base/time/time.h"
#include "build/build_config.h"
@@ -52,6 +51,10 @@
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_piece.h"
+#if BUILDFLAG(CLANG_COVERAGE)
+#include "base/test/clang_coverage.h"
+#endif
+
#if defined(USE_SYMBOLIZE)
#include "base/third_party/symbolize/symbolize.h"
#endif
diff --git a/chromium/base/debug/debugger_win.cc b/chromium/base/debug/debugger_win.cc
index 53dfcd2b872..fd10b4b8957 100644
--- a/chromium/base/debug/debugger_win.cc
+++ b/chromium/base/debug/debugger_win.cc
@@ -8,7 +8,10 @@
#include <windows.h>
#include "base/clang_coverage_buildflags.h"
+
+#if BUILDFLAG(CLANG_COVERAGE)
#include "base/test/clang_coverage.h"
+#endif
namespace base {
namespace debug {
diff --git a/chromium/base/debug/elf_reader_unittest.cc b/chromium/base/debug/elf_reader_unittest.cc
index c247d75c322..358e4bde98c 100644
--- a/chromium/base/debug/elf_reader_unittest.cc
+++ b/chromium/base/debug/elf_reader_unittest.cc
@@ -23,10 +23,10 @@ namespace debug {
// builds.
#if defined(OFFICIAL_BUILD) || defined(OS_FUCHSIA)
-#if defined(OS_FUCHSIA)
-constexpr size_t kExpectedBuildIdStringLength = 16; // 64-bit int in hex.
-#else
+#if defined(OFFICIAL_BUILD)
constexpr size_t kExpectedBuildIdStringLength = 40; // SHA1 hash in hex.
+#else
+constexpr size_t kExpectedBuildIdStringLength = 16; // 64-bit int in hex.
#endif
TEST(ElfReaderTest, ReadElfBuildIdUppercase) {
diff --git a/chromium/base/debug/stack_trace.h b/chromium/base/debug/stack_trace.h
index f9db705f9d9..0d617ff8910 100644
--- a/chromium/base/debug/stack_trace.h
+++ b/chromium/base/debug/stack_trace.h
@@ -16,6 +16,9 @@
#include "build/build_config.h"
#if defined(OS_POSIX)
+#if !defined(OS_NACL)
+#include <signal.h>
+#endif
#include <unistd.h>
#endif
@@ -38,7 +41,7 @@ namespace debug {
// done in official builds because it has security implications).
BASE_EXPORT bool EnableInProcessStackDumping();
-#if defined(OS_POSIX)
+#if defined(OS_POSIX) && !defined(OS_NACL)
// Sets a first-chance callback for the stack dump signal handler. This callback
// is called at the beginning of the signal handler to handle special kinds of
// signals, like out-of-bounds memory accesses in WebAssembly (WebAssembly Trap
@@ -47,7 +50,7 @@ BASE_EXPORT bool EnableInProcessStackDumping();
// has been set correctly. It returns {false} if the stack dump signal handler
// has not been registered with the OS, e.g. because of ASAN.
BASE_EXPORT bool SetStackDumpFirstChanceCallback(bool (*handler)(int,
- void*,
+ siginfo_t*,
void*));
#endif
diff --git a/chromium/base/debug/stack_trace_posix.cc b/chromium/base/debug/stack_trace_posix.cc
index de2f35633f5..f4ddf9c1e0a 100644
--- a/chromium/base/debug/stack_trace_posix.cc
+++ b/chromium/base/debug/stack_trace_posix.cc
@@ -63,7 +63,9 @@ namespace {
volatile sig_atomic_t in_signal_handler = 0;
-bool (*try_handle_signal)(int, void*, void*) = nullptr;
+#if !defined(OS_NACL)
+bool (*try_handle_signal)(int, siginfo_t*, void*) = nullptr;
+#endif
#if !defined(USE_SYMBOLIZE)
// The prefix used for mangled symbols, per the Itanium C++ ABI:
@@ -228,6 +230,7 @@ void StackDumpSignalHandler(int signal, siginfo_t* info, void* void_context) {
// NOTE: This code MUST be async-signal safe.
// NO malloc or stdio is allowed here.
+#if !defined(OS_NACL)
// Give a registered callback a chance to recover from this signal
//
// V8 uses guard regions to guarantee memory safety in WebAssembly. This means
@@ -248,6 +251,7 @@ void StackDumpSignalHandler(int signal, siginfo_t* info, void* void_context) {
sigaction(signal, &action, nullptr);
return;
}
+#endif
// Do not take the "in signal handler" code path on Mac in a DCHECK-enabled
// build, as this prevents seeing a useful (symbolized) stack trace on a crash
@@ -803,7 +807,8 @@ bool EnableInProcessStackDumping() {
return success;
}
-bool SetStackDumpFirstChanceCallback(bool (*handler)(int, void*, void*)) {
+#if !defined(OS_NACL)
+bool SetStackDumpFirstChanceCallback(bool (*handler)(int, siginfo_t*, void*)) {
DCHECK(try_handle_signal == nullptr || handler == nullptr);
try_handle_signal = handler;
@@ -823,6 +828,7 @@ bool SetStackDumpFirstChanceCallback(bool (*handler)(int, void*, void*)) {
#endif
return true;
}
+#endif
size_t CollectStackTrace(void** trace, size_t count) {
// NOTE: This code MUST be async-signal safe (it's used by in-process
diff --git a/chromium/base/debug/task_trace.cc b/chromium/base/debug/task_trace.cc
index ad5276a2155..bea5f1bef18 100644
--- a/chromium/base/debug/task_trace.cc
+++ b/chromium/base/debug/task_trace.cc
@@ -4,15 +4,50 @@
#include "base/debug/task_trace.h"
+#include "build/build_config.h"
+
+#if defined(OS_ANDROID)
+#include <android/log.h>
+#endif // OS_ANDROID
+
#include <algorithm>
#include <iostream>
#include <sstream>
+#if defined(OS_ANDROID)
+#include "base/no_destructor.h"
+#endif
+
#include "base/pending_task.h"
#include "base/task/common/task_annotator.h"
namespace base {
namespace debug {
+namespace {
+#if defined(OS_ANDROID)
+// Android sends stdout and stderr to /dev/null; logging should be done through
+// the __android_log_write() function. Here we create an override of
+// std::stringbuf that writes to the Android log.
+class AndroidErrBuffer : public std::stringbuf {
+ protected:
+ int sync() override {
+ __android_log_write(ANDROID_LOG_ERROR, "chromium", str().c_str());
+ return 0;
+ }
+};
+
+std::ostream& DefaultOutputStream() {
+ static NoDestructor<AndroidErrBuffer> buf;
+ static NoDestructor<std::ostream> out(buf.get());
+ return *out;
+}
+#else
+// Use stderr by default.
+std::ostream& DefaultOutputStream() {
+ return std::cerr;
+}
+#endif // OS_ANDROID
+} // namespace
TaskTrace::TaskTrace() {
const PendingTask* current_task = TaskAnnotator::CurrentTaskForThread();
@@ -36,7 +71,7 @@ bool TaskTrace::empty() const {
}
void TaskTrace::Print() const {
- OutputToStream(&std::cerr);
+ OutputToStream(&DefaultOutputStream());
}
void TaskTrace::OutputToStream(std::ostream* os) const {
diff --git a/chromium/base/feature_list.cc b/chromium/base/feature_list.cc
index a92ec44a4a5..d602ce94eca 100644
--- a/chromium/base/feature_list.cc
+++ b/chromium/base/feature_list.cc
@@ -15,6 +15,7 @@
#include "base/pickle.h"
#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
+#include "build/build_config.h"
namespace base {
@@ -78,7 +79,7 @@ bool IsValidFeatureOrFieldTrialName(const std::string& name) {
#if defined(DCHECK_IS_CONFIGURABLE)
const Feature kDCheckIsFatalFeature{"DcheckIsFatal",
- base::FEATURE_DISABLED_BY_DEFAULT};
+ FEATURE_DISABLED_BY_DEFAULT};
#endif // defined(DCHECK_IS_CONFIGURABLE)
FeatureList::FeatureList() = default;
@@ -150,7 +151,7 @@ void FeatureList::RegisterFieldTrialOverride(const std::string& feature_name,
OverrideState override_state,
FieldTrial* field_trial) {
DCHECK(field_trial);
- DCHECK(!ContainsKey(overrides_, feature_name) ||
+ DCHECK(!Contains(overrides_, feature_name) ||
!overrides_.find(feature_name)->second.field_trial)
<< "Feature " << feature_name
<< " has conflicting field trial overrides: "
@@ -214,8 +215,8 @@ FieldTrial* FeatureList::GetFieldTrial(const Feature& feature) {
}
// static
-std::vector<base::StringPiece> FeatureList::SplitFeatureListString(
- base::StringPiece input) {
+std::vector<StringPiece> FeatureList::SplitFeatureListString(
+ StringPiece input) {
return SplitStringPiece(input, ",", TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
}
@@ -244,9 +245,9 @@ bool FeatureList::InitializeInstance(const std::string& enable_features,
instance_existed_before = true;
}
- std::unique_ptr<base::FeatureList> feature_list(new base::FeatureList);
+ std::unique_ptr<FeatureList> feature_list(new FeatureList);
feature_list->InitializeFromCommandLine(enable_features, disable_features);
- base::FeatureList::SetInstance(std::move(feature_list));
+ FeatureList::SetInstance(std::move(feature_list));
return !instance_existed_before;
}
@@ -268,8 +269,8 @@ void FeatureList::SetInstance(std::unique_ptr<FeatureList> instance) {
// DCHECK is also forced to be FATAL if we are running a death-test.
// TODO(asvitkine): If we find other use-cases that need integrating here
// then define a proper API/hook for the purpose.
- if (base::FeatureList::IsEnabled(kDCheckIsFatalFeature) ||
- base::CommandLine::ForCurrentProcess()->HasSwitch(
+ if (FeatureList::IsEnabled(kDCheckIsFatalFeature) ||
+ CommandLine::ForCurrentProcess()->HasSwitch(
"gtest_internal_run_death_test")) {
logging::LOG_DCHECK = logging::LOG_FATAL;
} else {
@@ -283,7 +284,7 @@ std::unique_ptr<FeatureList> FeatureList::ClearInstanceForTesting() {
FeatureList* old_instance = g_feature_list_instance;
g_feature_list_instance = nullptr;
g_initialized_from_accessor = false;
- return base::WrapUnique(old_instance);
+ return WrapUnique(old_instance);
}
// static
@@ -296,6 +297,8 @@ void FeatureList::RestoreInstanceForTesting(
void FeatureList::FinalizeInitialization() {
DCHECK(!initialized_);
+ // Store the field trial list pointer for DCHECKing.
+ field_trial_list_ = FieldTrialList::GetInstance();
initialized_ = true;
}
@@ -341,14 +344,19 @@ void FeatureList::RegisterOverridesFromCommandLine(
OverrideState overridden_state) {
for (const auto& value : SplitFeatureListString(feature_list)) {
StringPiece feature_name = value;
- base::FieldTrial* trial = nullptr;
+ FieldTrial* trial = nullptr;
// The entry may be of the form FeatureName<FieldTrialName - in which case,
// this splits off the field trial name and associates it with the override.
std::string::size_type pos = feature_name.find('<');
if (pos != std::string::npos) {
feature_name.set(value.data(), pos);
- trial = base::FieldTrialList::Find(value.substr(pos + 1).as_string());
+ trial = FieldTrialList::Find(value.substr(pos + 1).as_string());
+#if !defined(OS_NACL)
+ // If the below DCHECK fires, it means a non-existent trial name was
+ // specified via the "Feature<Trial" command-line syntax.
+ DCHECK(trial) << "trial=" << value.substr(pos + 1);
+#endif // !defined(OS_NACL)
}
RegisterOverride(feature_name, overridden_state, trial);
@@ -380,6 +388,13 @@ void FeatureList::GetFeatureOverridesImpl(std::string* enable_overrides,
bool command_line_only) {
DCHECK(initialized_);
+ // Check that the FieldTrialList this is associated with, if any, is the
+ // active one. If not, it likely indicates that this FeatureList has override
+ // entries from a freed FieldTrial, which may be caused by an incorrect test
+ // set up.
+ if (field_trial_list_)
+ DCHECK_EQ(field_trial_list_, FieldTrialList::GetInstance());
+
enable_overrides->clear();
disable_overrides->clear();
diff --git a/chromium/base/feature_list.h b/chromium/base/feature_list.h
index 74e4e9ae4ce..883ac9247a8 100644
--- a/chromium/base/feature_list.h
+++ b/chromium/base/feature_list.h
@@ -21,6 +21,7 @@
namespace base {
class FieldTrial;
+class FieldTrialList;
// Specifies whether a given feature is enabled or disabled by default.
enum FeatureState {
@@ -296,6 +297,12 @@ class BASE_EXPORT FeatureList {
Lock feature_identity_tracker_lock_;
std::map<std::string, const Feature*> feature_identity_tracker_;
+ // Tracks the associated FieldTrialList for DCHECKs. This is used to catch
+ // the scenario where multiple FieldTrialList are used with the same
+ // FeatureList - which can lead to overrides pointing to invalid FieldTrial
+ // objects.
+ base::FieldTrialList* field_trial_list_ = nullptr;
+
// Whether this object has been fully initialized. This gets set to true as a
// result of FinalizeInitialization().
bool initialized_ = false;
diff --git a/chromium/base/files/file_descriptor_watcher_posix.cc b/chromium/base/files/file_descriptor_watcher_posix.cc
index f4e885e9f3f..04579d1adce 100644
--- a/chromium/base/files/file_descriptor_watcher_posix.cc
+++ b/chromium/base/files/file_descriptor_watcher_posix.cc
@@ -156,8 +156,7 @@ FileDescriptorWatcher::Controller::Controller(MessagePumpForIO::Mode mode,
const RepeatingClosure& callback)
: callback_(callback),
io_thread_task_runner_(
- tls_fd_watcher.Get().Get()->io_thread_task_runner()),
- weak_factory_(this) {
+ tls_fd_watcher.Get().Get()->io_thread_task_runner()) {
DCHECK(!callback_.is_null());
DCHECK(io_thread_task_runner_);
watcher_ = std::make_unique<Watcher>(weak_factory_.GetWeakPtr(), mode, fd);
@@ -175,7 +174,7 @@ FileDescriptorWatcher::Controller::~Controller() {
// thread. This ensures that the file descriptor is never accessed after
// this destructor returns.
//
- // Use a ScopedClosureRunner to ensure that |done| is signalled even if the
+ // Use a ScopedClosureRunner to ensure that |done| is signaled even if the
// thread doesn't run any more tasks (if PostTask returns true, it means
// that the task was queued, but it doesn't mean that a RunLoop will run the
// task before the queue is deleted).
diff --git a/chromium/base/files/file_descriptor_watcher_posix.h b/chromium/base/files/file_descriptor_watcher_posix.h
index 3c2cb9dc9bd..6eff314fea7 100644
--- a/chromium/base/files/file_descriptor_watcher_posix.h
+++ b/chromium/base/files/file_descriptor_watcher_posix.h
@@ -79,7 +79,7 @@ class BASE_EXPORT FileDescriptorWatcher {
// instantiated.
SequenceChecker sequence_checker_;
- WeakPtrFactory<Controller> weak_factory_;
+ WeakPtrFactory<Controller> weak_factory_{this};
DISALLOW_COPY_AND_ASSIGN(Controller);
};
@@ -97,11 +97,16 @@ class BASE_EXPORT FileDescriptorWatcher {
// Registers |callback| to be posted on the current sequence when |fd| is
// readable or writable without blocking. |callback| is unregistered when the
// returned Controller is deleted (deletion must happen on the current
- // sequence). To call these methods, a FileDescriptorWatcher must have been
+ // sequence).
+ // Usage note: To call these methods, a FileDescriptorWatcher must have been
// instantiated on the current thread and SequencedTaskRunnerHandle::IsSet()
// must return true (these conditions are met at least on all ThreadPool
// threads as well as on threads backed by a MessageLoopForIO). |fd| must
// outlive the returned Controller.
+ // Shutdown note: notifications aren't guaranteed to be emitted once the bound
+ // (current) SequencedTaskRunner enters its shutdown phase (i.e.
+ // ThreadPool::Shutdown() or Thread::Stop()) regardless of the
+ // SequencedTaskRunner's TaskShutdownBehavior.
static std::unique_ptr<Controller> WatchReadable(
int fd,
const RepeatingClosure& callback);
diff --git a/chromium/base/files/file_path.cc b/chromium/base/files/file_path.cc
index 45f78a26333..f1ad6880cb0 100644
--- a/chromium/base/files/file_path.cc
+++ b/chromium/base/files/file_path.cc
@@ -1260,7 +1260,6 @@ int FilePath::CompareIgnoreCase(StringPieceType string1,
// GetHFSDecomposedForm() returns an empty string in an error case.
if (hfs1.empty() || hfs2.empty()) {
- NOTREACHED();
ScopedCFTypeRef<CFStringRef> cfstring1(
CFStringCreateWithBytesNoCopy(
NULL,
@@ -1277,6 +1276,20 @@ int FilePath::CompareIgnoreCase(StringPieceType string1,
kCFStringEncodingUTF8,
false,
kCFAllocatorNull));
+ // If neither GetHFSDecomposedForm nor CFStringCreateWithBytesNoCopy
+ // succeed, fall back to strcmp. This can occur when the input string is
+ // invalid UTF-8.
+ if (!cfstring1 || !cfstring2) {
+ int comparison =
+ memcmp(string1.as_string().c_str(), string2.as_string().c_str(),
+ std::min(string1.length(), string2.length()));
+ if (comparison < 0)
+ return -1;
+ if (comparison > 0)
+ return 1;
+ return 0;
+ }
+
return CFStringCompare(cfstring1,
cfstring2,
kCFCompareCaseInsensitive);
diff --git a/chromium/base/files/file_path_unittest.cc b/chromium/base/files/file_path_unittest.cc
index afe70916a89..c3cbc9a01ca 100644
--- a/chromium/base/files/file_path_unittest.cc
+++ b/chromium/base/files/file_path_unittest.cc
@@ -1317,6 +1317,17 @@ TEST_F(FilePathTest, GetHFSDecomposedFormWithInvalidInput) {
EXPECT_TRUE(observed.empty());
}
}
+
+TEST_F(FilePathTest, CompareIgnoreCaseWithInvalidInput) {
+ const FilePath::CharType* cases[] = {
+ FPL("\xc3\x28"), FPL("\xe2\x82\x28"), FPL("\xe2\x28\xa1"),
+ FPL("\xf0\x28\x8c\xbc"), FPL("\xf0\x28\x8c\x28"),
+ };
+ for (auto* invalid_input : cases) {
+ // All example inputs will be greater than the string "fixed".
+ EXPECT_EQ(FilePath::CompareIgnoreCase(invalid_input, FPL("fixed")), 1);
+ }
+}
#endif
} // namespace base
diff --git a/chromium/base/files/file_path_watcher_linux.cc b/chromium/base/files/file_path_watcher_linux.cc
index 0ff8924fed4..4b727c37c4f 100644
--- a/chromium/base/files/file_path_watcher_linux.cc
+++ b/chromium/base/files/file_path_watcher_linux.cc
@@ -252,7 +252,7 @@ class FilePathWatcherImpl : public FilePathWatcher::PlatformDelegate {
// appear after it, that is not possible.
WeakPtr<FilePathWatcherImpl> weak_ptr_;
- WeakPtrFactory<FilePathWatcherImpl> weak_factory_;
+ WeakPtrFactory<FilePathWatcherImpl> weak_factory_{this};
DISALLOW_COPY_AND_ASSIGN(FilePathWatcherImpl);
};
@@ -391,7 +391,7 @@ void InotifyReader::OnInotifyEvent(const inotify_event* event) {
}
}
-FilePathWatcherImpl::FilePathWatcherImpl() : weak_factory_(this) {
+FilePathWatcherImpl::FilePathWatcherImpl() {
weak_ptr_ = weak_factory_.GetWeakPtr();
}
@@ -496,7 +496,7 @@ void FilePathWatcherImpl::OnFilePathChangedOnOriginSequence(
}
}
- if (ContainsKey(recursive_paths_by_watch_, fired_watch)) {
+ if (Contains(recursive_paths_by_watch_, fired_watch)) {
if (!did_update)
UpdateRecursiveWatches(fired_watch, is_dir);
callback_.Run(target_, false /* error */);
@@ -607,7 +607,7 @@ void FilePathWatcherImpl::UpdateRecursiveWatches(
// Check to see if this is a forced update or if some component of |target_|
// has changed. For these cases, redo the watches for |target_| and below.
- if (!ContainsKey(recursive_paths_by_watch_, fired_watch) &&
+ if (!Contains(recursive_paths_by_watch_, fired_watch) &&
fired_watch != watches_.back().watch) {
UpdateRecursiveWatchesForPath(target_);
return;
@@ -617,10 +617,9 @@ void FilePathWatcherImpl::UpdateRecursiveWatches(
if (!is_dir)
return;
- const FilePath& changed_dir =
- ContainsKey(recursive_paths_by_watch_, fired_watch) ?
- recursive_paths_by_watch_[fired_watch] :
- target_;
+ const FilePath& changed_dir = Contains(recursive_paths_by_watch_, fired_watch)
+ ? recursive_paths_by_watch_[fired_watch]
+ : target_;
auto start_it = recursive_watches_by_path_.lower_bound(changed_dir);
auto end_it = start_it;
@@ -652,7 +651,7 @@ void FilePathWatcherImpl::UpdateRecursiveWatchesForPath(const FilePath& path) {
current = enumerator.Next()) {
DCHECK(enumerator.GetInfo().IsDirectory());
- if (!ContainsKey(recursive_watches_by_path_, current)) {
+ if (!Contains(recursive_watches_by_path_, current)) {
// Add new watches.
InotifyReader::Watch watch =
g_inotify_reader.Get().AddWatch(current, this);
@@ -686,8 +685,8 @@ void FilePathWatcherImpl::TrackWatchForRecursion(InotifyReader::Watch watch,
if (watch == InotifyReader::kInvalidWatch)
return;
- DCHECK(!ContainsKey(recursive_paths_by_watch_, watch));
- DCHECK(!ContainsKey(recursive_watches_by_path_, path));
+ DCHECK(!Contains(recursive_paths_by_watch_, watch));
+ DCHECK(!Contains(recursive_watches_by_path_, path));
recursive_paths_by_watch_[watch] = path;
recursive_watches_by_path_[path] = watch;
}
diff --git a/chromium/base/files/file_proxy_unittest.cc b/chromium/base/files/file_proxy_unittest.cc
index 7139bac19a5..0ebaa0e120c 100644
--- a/chromium/base/files/file_proxy_unittest.cc
+++ b/chromium/base/files/file_proxy_unittest.cc
@@ -31,8 +31,7 @@ class FileProxyTest : public testing::Test {
test::ScopedTaskEnvironment::MainThreadType::IO),
file_thread_("FileProxyTestFileThread"),
error_(File::FILE_OK),
- bytes_written_(-1),
- weak_factory_(this) {}
+ bytes_written_(-1) {}
void SetUp() override {
ASSERT_TRUE(dir_.CreateUniqueTempDir());
@@ -103,7 +102,7 @@ class FileProxyTest : public testing::Test {
File::Info file_info_;
std::vector<char> buffer_;
int bytes_written_;
- WeakPtrFactory<FileProxyTest> weak_factory_;
+ WeakPtrFactory<FileProxyTest> weak_factory_{this};
};
TEST_F(FileProxyTest, CreateOrOpen_Create) {
diff --git a/chromium/base/files/file_util.h b/chromium/base/files/file_util.h
index 543a6e65c4e..a40289e33a5 100644
--- a/chromium/base/files/file_util.h
+++ b/chromium/base/files/file_util.h
@@ -315,8 +315,7 @@ BASE_EXPORT bool GetFileSize(const FilePath& file_path, int64_t* file_size);
// On windows, make sure the path starts with a lettered drive.
// |path| must reference a file. Function will fail if |path| points to
// a directory or to a nonexistent path. On windows, this function will
-// fail if |path| is a junction or symlink that points to an empty file,
-// or if |real_path| would be longer than MAX_PATH characters.
+// fail if |real_path| would be longer than MAX_PATH characters.
BASE_EXPORT bool NormalizeFilePath(const FilePath& path, FilePath* real_path);
#if defined(OS_WIN)
@@ -327,13 +326,6 @@ BASE_EXPORT bool NormalizeFilePath(const FilePath& path, FilePath* real_path);
BASE_EXPORT bool DevicePathToDriveLetterPath(const FilePath& device_path,
FilePath* drive_letter_path);
-// Given an existing file in |path|, set |real_path| to the path
-// in native NT format, of the form "\Device\HarddiskVolumeXX\..".
-// Returns false if the path can not be found. Empty files cannot
-// be resolved with this function.
-BASE_EXPORT bool NormalizeToNativeFilePath(const FilePath& path,
- FilePath* nt_path);
-
// Method that wraps the win32 GetLongPathName API, normalizing the specified
// path to its long form. An example where this is needed is when comparing
// temp file paths. If a username isn't a valid 8.3 short file name (even just a
diff --git a/chromium/base/files/file_util_posix.cc b/chromium/base/files/file_util_posix.cc
index c05a09bde29..e3579d7a7f1 100644
--- a/chromium/base/files/file_util_posix.cc
+++ b/chromium/base/files/file_util_posix.cc
@@ -114,7 +114,7 @@ bool VerifySpecificPathControlledByUser(const FilePath& path,
}
if ((stat_info.st_mode & S_IWGRP) &&
- !ContainsKey(group_gids, stat_info.st_gid)) {
+ !Contains(group_gids, stat_info.st_gid)) {
DLOG(ERROR) << "Path " << path.value()
<< " is writable by an unprivileged group.";
return false;
@@ -350,6 +350,12 @@ FilePath MakeAbsoluteFilePath(const FilePath& input) {
// here.
bool DeleteFile(const FilePath& path, bool recursive) {
ScopedBlockingCall scoped_blocking_call(FROM_HERE, BlockingType::MAY_BLOCK);
+
+#if defined(OS_ANDROID)
+ if (path.IsContentUri())
+ return DeleteContentUri(path);
+#endif // defined(OS_ANDROID)
+
const char* path_str = path.value().c_str();
stat_wrapper_t file_info;
if (CallLstat(path_str, &file_info) != 0) {
diff --git a/chromium/base/files/file_util_unittest.cc b/chromium/base/files/file_util_unittest.cc
index 05ddf73cbec..21cefcc4e13 100644
--- a/chromium/base/files/file_util_unittest.cc
+++ b/chromium/base/files/file_util_unittest.cc
@@ -410,7 +410,7 @@ TEST_F(FileUtilTest, NormalizeFilePathBasic) {
ASSERT_TRUE(PathExists(file_b_path));
ASSERT_TRUE(NormalizeFilePath(file_b_path, &normalized_file_b_path));
- // Beacuse this test created |dir_path|, we know it is not a link
+ // Because this test created |dir_path|, we know it is not a link
// or junction. So, the real path of the directory holding file a
// must be the parent of the path holding file b.
ASSERT_TRUE(normalized_file_a_path.DirName()
@@ -419,6 +419,36 @@ TEST_F(FileUtilTest, NormalizeFilePathBasic) {
#if defined(OS_WIN)
+TEST_F(FileUtilTest, NormalizeFileEmptyFile) {
+ // Create a directory under the test dir. Because we create it,
+ // we know it is not a link.
+ const wchar_t empty_content[] = L"";
+
+ FilePath file_a_path = temp_dir_.GetPath().Append(FPL("file_empty_a"));
+ FilePath dir_path = temp_dir_.GetPath().Append(FPL("dir"));
+ FilePath file_b_path = dir_path.Append(FPL("file_empty_b"));
+ ASSERT_TRUE(CreateDirectory(dir_path));
+
+ FilePath normalized_file_a_path, normalized_file_b_path;
+ ASSERT_FALSE(PathExists(file_a_path));
+ EXPECT_FALSE(NormalizeFilePath(file_a_path, &normalized_file_a_path))
+ << "NormalizeFilePath() should fail on nonexistent paths.";
+
+ CreateTextFile(file_a_path, empty_content);
+ ASSERT_TRUE(PathExists(file_a_path));
+ EXPECT_TRUE(NormalizeFilePath(file_a_path, &normalized_file_a_path));
+
+ CreateTextFile(file_b_path, empty_content);
+ ASSERT_TRUE(PathExists(file_b_path));
+ EXPECT_TRUE(NormalizeFilePath(file_b_path, &normalized_file_b_path));
+
+ // Because this test created |dir_path|, we know it is not a link
+ // or junction. So, the real path of the directory holding file a
+ // must be the parent of the path holding file b.
+ EXPECT_TRUE(normalized_file_a_path.DirName().IsParent(
+ normalized_file_b_path.DirName()));
+}
+
TEST_F(FileUtilTest, NormalizeFilePathReparsePoints) {
// Build the following directory structure:
//
@@ -443,6 +473,10 @@ TEST_F(FileUtilTest, NormalizeFilePathReparsePoints) {
base_a = FilePath(temp_base_a);
#endif
ASSERT_TRUE(CreateDirectory(base_a));
+#if defined(OS_WIN)
+ // TEMP might be a short name which is not normalized.
+ base_a = MakeLongFilePath(base_a);
+#endif
FilePath sub_a = base_a.Append(FPL("sub_a"));
ASSERT_TRUE(CreateDirectory(sub_a));
@@ -479,6 +513,10 @@ TEST_F(FileUtilTest, NormalizeFilePathReparsePoints) {
FilePath base_b = temp_dir_.GetPath().Append(FPL("base_b"));
ASSERT_TRUE(CreateDirectory(base_b));
+#if defined(OS_WIN)
+ // TEMP might be a short name which is not normalized.
+ base_b = MakeLongFilePath(base_b);
+#endif
FilePath to_sub_a = base_b.Append(FPL("to_sub_a"));
ASSERT_TRUE(CreateDirectory(to_sub_a));
@@ -1387,6 +1425,33 @@ TEST_F(FileUtilTest, DeleteFile) {
EXPECT_FALSE(PathExists(file_name));
}
+#if defined(OS_ANDROID)
+TEST_F(FileUtilTest, DeleteContentUri) {
+ // Get the path to the test file.
+ FilePath data_dir;
+ ASSERT_TRUE(PathService::Get(DIR_TEST_DATA, &data_dir));
+ data_dir = data_dir.Append(FPL("file_util"));
+ ASSERT_TRUE(PathExists(data_dir));
+ FilePath image_file = data_dir.Append(FPL("red.png"));
+ ASSERT_TRUE(PathExists(image_file));
+
+ // Make a copy (we don't want to delete the original red.png when deleting the
+ // content URI).
+ FilePath image_copy = data_dir.Append(FPL("redcopy.png"));
+ ASSERT_TRUE(CopyFile(image_file, image_copy));
+
+ // Insert the image into MediaStore and get a content URI.
+ FilePath uri_path = InsertImageIntoMediaStore(image_copy);
+ ASSERT_TRUE(uri_path.IsContentUri());
+ ASSERT_TRUE(PathExists(uri_path));
+
+ // Try deleting the content URI.
+ EXPECT_TRUE(DeleteFile(uri_path, false));
+ EXPECT_FALSE(PathExists(image_copy));
+ EXPECT_FALSE(PathExists(uri_path));
+}
+#endif // defined(OS_ANDROID)
+
#if defined(OS_WIN)
// Tests that the Delete function works for wild cards, especially
// with the recursion flag. Also coincidentally tests PathExists.
diff --git a/chromium/base/files/file_util_win.cc b/chromium/base/files/file_util_win.cc
index f77f12840a7..4fb999a5e2e 100644
--- a/chromium/base/files/file_util_win.cc
+++ b/chromium/base/files/file_util_win.cc
@@ -31,6 +31,7 @@
#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
#include "base/threading/scoped_blocking_call.h"
+#include "base/threading/scoped_thread_priority.h"
#include "base/time/time.h"
#include "base/win/scoped_handle.h"
#include "base/win/windows_version.h"
@@ -179,6 +180,10 @@ bool DoCopyFile(const FilePath& from_path,
return false;
}
+ // Mitigate the issues caused by loading DLLs on a background thread
+ // (http://crbug/973868).
+ base::ScopedThreadMayLoadLibraryOnBackgroundThread priority_boost(FROM_HERE);
+
// Unlike the posix implementation that copies the file manually and discards
// the ACL bits, CopyFile() copies the complete SECURITY_DESCRIPTOR and access
// bits, which is usually not what we want. We can't do much about the
@@ -654,14 +659,28 @@ bool CreateDirectoryAndGetError(const FilePath& full_path,
bool NormalizeFilePath(const FilePath& path, FilePath* real_path) {
ScopedBlockingCall scoped_blocking_call(FROM_HERE, BlockingType::MAY_BLOCK);
- FilePath mapped_file;
- if (!NormalizeToNativeFilePath(path, &mapped_file))
+ File file(path, File::FLAG_OPEN | File::FLAG_READ | File::FLAG_SHARE_DELETE);
+ if (!file.IsValid())
+ return false;
+
+ // The expansion of |path| into a full path may make it longer.
+ constexpr int kMaxPathLength = MAX_PATH + 10;
+ char16 native_file_path[kMaxPathLength];
+ // kMaxPathLength includes space for trailing '\0' so we subtract 1.
+ // Returned length, used_wchars, does not include trailing '\0'.
+ // Failure is indicated by returning 0 or >= kMaxPathLength.
+ DWORD used_wchars = ::GetFinalPathNameByHandle(
+ file.GetPlatformFile(), as_writable_wcstr(native_file_path),
+ kMaxPathLength - 1, FILE_NAME_NORMALIZED | VOLUME_NAME_NT);
+
+ if (used_wchars >= kMaxPathLength || used_wchars == 0)
return false;
- // NormalizeToNativeFilePath() will return a path that starts with
- // "\Device\Harddisk...". Helper DevicePathToDriveLetterPath()
- // will find a drive letter which maps to the path's device, so
- // that we return a path starting with a drive letter.
- return DevicePathToDriveLetterPath(mapped_file, real_path);
+
+ // GetFinalPathNameByHandle() returns the \\?\ syntax for file names and
+ // existing code expects we return a path starting 'X:\' so we call
+ // DevicePathToDriveLetterPath rather than using VOLUME_NAME_DOS above.
+ return DevicePathToDriveLetterPath(
+ FilePath(StringPiece16(native_file_path, used_wchars)), real_path);
}
bool DevicePathToDriveLetterPath(const FilePath& nt_device_path,
@@ -710,57 +729,6 @@ bool DevicePathToDriveLetterPath(const FilePath& nt_device_path,
return false;
}
-bool NormalizeToNativeFilePath(const FilePath& path, FilePath* nt_path) {
- ScopedBlockingCall scoped_blocking_call(FROM_HERE, BlockingType::MAY_BLOCK);
- // In Vista, GetFinalPathNameByHandle() would give us the real path
- // from a file handle. If we ever deprecate XP, consider changing the
- // code below to a call to GetFinalPathNameByHandle(). The method this
- // function uses is explained in the following msdn article:
- // http://msdn.microsoft.com/en-us/library/aa366789(VS.85).aspx
- win::ScopedHandle file_handle(
- ::CreateFile(as_wcstr(path.value()), GENERIC_READ, kFileShareAll, NULL,
- OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL));
- if (!file_handle.IsValid())
- return false;
-
- // Create a file mapping object. Can't easily use MemoryMappedFile, because
- // we only map the first byte, and need direct access to the handle. You can
- // not map an empty file, this call fails in that case.
- win::ScopedHandle file_map_handle(
- ::CreateFileMapping(file_handle.Get(),
- NULL,
- PAGE_READONLY,
- 0,
- 1, // Just one byte. No need to look at the data.
- NULL));
- if (!file_map_handle.IsValid())
- return false;
-
- // Use a view of the file to get the path to the file.
- void* file_view = MapViewOfFile(file_map_handle.Get(),
- FILE_MAP_READ, 0, 0, 1);
- if (!file_view)
- return false;
-
- // The expansion of |path| into a full path may make it longer.
- // GetMappedFileName() will fail if the result is longer than MAX_PATH.
- // Pad a bit to be safe. If kMaxPathLength is ever changed to be less
- // than MAX_PATH, it would be nessisary to test that GetMappedFileName()
- // not return kMaxPathLength. This would mean that only part of the
- // path fit in |mapped_file_path|.
- const int kMaxPathLength = MAX_PATH + 10;
- char16 mapped_file_path[kMaxPathLength];
- bool success = false;
- HANDLE cp = GetCurrentProcess();
- if (::GetMappedFileNameW(cp, file_view, as_writable_wcstr(mapped_file_path),
- kMaxPathLength)) {
- *nt_path = FilePath(mapped_file_path);
- success = true;
- }
- ::UnmapViewOfFile(file_view);
- return success;
-}
-
FilePath MakeLongFilePath(const FilePath& input) {
ScopedBlockingCall scoped_blocking_call(FROM_HERE, BlockingType::MAY_BLOCK);
diff --git a/chromium/base/files/important_file_writer.cc b/chromium/base/files/important_file_writer.cc
index d751f654a0d..b37ce2f4f36 100644
--- a/chromium/base/files/important_file_writer.cc
+++ b/chromium/base/files/important_file_writer.cc
@@ -230,8 +230,7 @@ ImportantFileWriter::ImportantFileWriter(
task_runner_(std::move(task_runner)),
serializer_(nullptr),
commit_interval_(interval),
- histogram_suffix_(histogram_suffix ? histogram_suffix : ""),
- weak_factory_(this) {
+ histogram_suffix_(histogram_suffix ? histogram_suffix : "") {
DCHECK(task_runner_);
}
diff --git a/chromium/base/files/important_file_writer.h b/chromium/base/files/important_file_writer.h
index af919090743..d3a42eff790 100644
--- a/chromium/base/files/important_file_writer.h
+++ b/chromium/base/files/important_file_writer.h
@@ -151,7 +151,7 @@ class BASE_EXPORT ImportantFileWriter {
SEQUENCE_CHECKER(sequence_checker_);
- WeakPtrFactory<ImportantFileWriter> weak_factory_;
+ WeakPtrFactory<ImportantFileWriter> weak_factory_{this};
DISALLOW_COPY_AND_ASSIGN(ImportantFileWriter);
};
diff --git a/chromium/base/fuchsia/default_context.cc b/chromium/base/fuchsia/default_context.cc
new file mode 100644
index 00000000000..96d6a0c9f64
--- /dev/null
+++ b/chromium/base/fuchsia/default_context.cc
@@ -0,0 +1,24 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/fuchsia/default_context.h"
+
+#include <lib/sys/cpp/component_context.h>
+
+#include "base/fuchsia/file_utils.h"
+#include "base/logging.h"
+#include "base/no_destructor.h"
+
+namespace base {
+namespace fuchsia {
+
+// Returns default sys::ComponentContext for the current process.
+sys::ComponentContext* ComponentContextForCurrentProcess() {
+ static base::NoDestructor<std::unique_ptr<sys::ComponentContext>> value(
+ sys::ComponentContext::Create());
+ return value.get()->get();
+}
+
+} // namespace fuchsia
+} // namespace base
diff --git a/chromium/base/fuchsia/default_context.h b/chromium/base/fuchsia/default_context.h
new file mode 100644
index 00000000000..042e7330e06
--- /dev/null
+++ b/chromium/base/fuchsia/default_context.h
@@ -0,0 +1,25 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FUCHSIA_DEFAULT_CONTEXT_H_
+#define BASE_FUCHSIA_DEFAULT_CONTEXT_H_
+
+#include <memory>
+
+#include "base/base_export.h"
+
+namespace sys {
+class ComponentContext;
+} // namespace sys
+
+namespace base {
+namespace fuchsia {
+
+// Returns default sys::ComponentContext for the current process.
+BASE_EXPORT sys::ComponentContext* ComponentContextForCurrentProcess();
+
+} // namespace fuchsia
+} // namespace base
+
+#endif // BASE_FUCHSIA_DEFAULT_CONTEXT_H_ \ No newline at end of file
diff --git a/chromium/base/fuchsia/filtered_service_directory.cc b/chromium/base/fuchsia/filtered_service_directory.cc
index 02cf09a51cd..77c07c37ed5 100644
--- a/chromium/base/fuchsia/filtered_service_directory.cc
+++ b/chromium/base/fuchsia/filtered_service_directory.cc
@@ -4,51 +4,42 @@
#include "base/fuchsia/filtered_service_directory.h"
-#include <lib/fdio/directory.h>
+#include <lib/async/default.h>
#include <utility>
#include "base/bind.h"
#include "base/fuchsia/fuchsia_logging.h"
-#include "base/fuchsia/service_directory_client.h"
namespace base {
namespace fuchsia {
FilteredServiceDirectory::FilteredServiceDirectory(
- const ServiceDirectoryClient* directory)
- : directory_(directory) {
- outgoing_directory_ = std::make_unique<ServiceDirectory>(
- outgoing_directory_client_.NewRequest());
+ sys::ServiceDirectory* directory)
+ : directory_(std::move(directory)) {
+ outgoing_directory_.Serve(
+ outgoing_directory_client_.NewRequest().TakeChannel());
}
-FilteredServiceDirectory::~FilteredServiceDirectory() {
- outgoing_directory_->RemoveAllServices();
-}
+FilteredServiceDirectory::~FilteredServiceDirectory() {}
void FilteredServiceDirectory::AddService(const char* service_name) {
- outgoing_directory_->AddServiceUnsafe(
- service_name,
- base::BindRepeating(&FilteredServiceDirectory::HandleRequest,
- base::Unretained(this), service_name));
+ outgoing_directory_.AddPublicService(
+ std::make_unique<vfs::Service>(
+ [this, service_name](zx::channel channel,
+ async_dispatcher_t* dispatcher) {
+ DCHECK_EQ(dispatcher, async_get_default_dispatcher());
+ directory_->Connect(service_name, std::move(channel));
+ }),
+ service_name);
}
-fidl::InterfaceHandle<::fuchsia::io::Directory>
-FilteredServiceDirectory::ConnectClient() {
- fidl::InterfaceHandle<::fuchsia::io::Directory> client;
-
- // ServiceDirectory puts public services under ./svc . Connect to that
+void FilteredServiceDirectory::ConnectClient(
+ fidl::InterfaceRequest<::fuchsia::io::Directory> dir_request) {
+ // sys::OutgoingDirectory puts public services under ./svc . Connect to that
// directory and return client handle for the connection,
- zx_status_t status =
- fdio_service_connect_at(outgoing_directory_client_.channel().get(), "svc",
- client.NewRequest().TakeChannel().release());
- ZX_CHECK(status == ZX_OK, status) << "fdio_service_connect_at()";
-
- return client;
-}
-
-void FilteredServiceDirectory::HandleRequest(const char* service_name,
- zx::channel channel) {
- directory_->ConnectToServiceUnsafe(service_name, std::move(channel));
+ outgoing_directory_.GetOrCreateDirectory("svc")->Serve(
+ ::fuchsia::io::OPEN_RIGHT_READABLE | ::fuchsia::io::OPEN_RIGHT_WRITABLE,
+ dir_request.TakeChannel());
}
} // namespace fuchsia
diff --git a/chromium/base/fuchsia/filtered_service_directory.h b/chromium/base/fuchsia/filtered_service_directory.h
index e484ec11289..8b4bce395b6 100644
--- a/chromium/base/fuchsia/filtered_service_directory.h
+++ b/chromium/base/fuchsia/filtered_service_directory.h
@@ -5,41 +5,39 @@
#ifndef BASE_FUCHSIA_FILTERED_SERVICE_DIRECTORY_H_
#define BASE_FUCHSIA_FILTERED_SERVICE_DIRECTORY_H_
-#include "base/fuchsia/service_directory.h"
-
#include <fuchsia/io/cpp/fidl.h>
#include <lib/fidl/cpp/interface_handle.h>
+#include <lib/sys/cpp/outgoing_directory.h>
+#include <lib/sys/cpp/service_directory.h>
#include <lib/zx/channel.h>
#include <memory>
+#include "base/base_export.h"
#include "base/macros.h"
namespace base {
namespace fuchsia {
-class ServiceDirectoryClient;
-
// ServiceDirectory that uses the supplied ServiceDirectoryClient to satisfy
// requests for only a restricted set of services.
class BASE_EXPORT FilteredServiceDirectory {
public:
- // Creates proxy that proxies requests to the specified service |directory|,
- // which must outlive the proxy.
- explicit FilteredServiceDirectory(const ServiceDirectoryClient* directory);
+ // Creates a directory that proxies requests to the specified service
+ // |directory|.
+ explicit FilteredServiceDirectory(sys::ServiceDirectory* directory);
~FilteredServiceDirectory();
// Adds the specified service to the list of whitelisted services.
void AddService(const char* service_name);
- // Returns a client channel connected to the directory. The returned channel
- // can be passed to a sandboxed process to be used for /svc namespace.
- fidl::InterfaceHandle<::fuchsia::io::Directory> ConnectClient();
+ // Connects a directory client. The directory can be passed to a sandboxed
+ // process to be used for /svc namespace.
+ void ConnectClient(
+ fidl::InterfaceRequest<::fuchsia::io::Directory> dir_request);
private:
- void HandleRequest(const char* service_name, zx::channel channel);
-
- const ServiceDirectoryClient* const directory_;
- std::unique_ptr<ServiceDirectory> outgoing_directory_;
+ const sys::ServiceDirectory* const directory_;
+ sys::OutgoingDirectory outgoing_directory_;
// Client side of the channel used by |outgoing_directory_|.
fidl::InterfaceHandle<::fuchsia::io::Directory> outgoing_directory_client_;
diff --git a/chromium/base/fuchsia/filtered_service_directory_unittest.cc b/chromium/base/fuchsia/filtered_service_directory_unittest.cc
index 6d5b27cd12b..e58ef115446 100644
--- a/chromium/base/fuchsia/filtered_service_directory_unittest.cc
+++ b/chromium/base/fuchsia/filtered_service_directory_unittest.cc
@@ -6,7 +6,6 @@
#include <utility>
-#include "base/fuchsia/service_directory_client.h"
#include "base/fuchsia/service_directory_test_base.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -17,21 +16,23 @@ class FilteredServiceDirectoryTest : public ServiceDirectoryTestBase {
public:
FilteredServiceDirectoryTest() {
filtered_service_directory_ = std::make_unique<FilteredServiceDirectory>(
- public_service_directory_client_.get());
- filtered_client_ = std::make_unique<ServiceDirectoryClient>(
- filtered_service_directory_->ConnectClient());
+ public_service_directory_.get());
+ fidl::InterfaceHandle<::fuchsia::io::Directory> directory;
+ filtered_service_directory_->ConnectClient(directory.NewRequest());
+ filtered_client_ =
+ std::make_unique<sys::ServiceDirectory>(std::move(directory));
}
protected:
std::unique_ptr<FilteredServiceDirectory> filtered_service_directory_;
- std::unique_ptr<ServiceDirectoryClient> filtered_client_;
+ std::unique_ptr<sys::ServiceDirectory> filtered_client_;
};
// Verify that we can connect to a whitelisted service.
TEST_F(FilteredServiceDirectoryTest, Connect) {
filtered_service_directory_->AddService(testfidl::TestInterface::Name_);
- auto stub = filtered_client_->ConnectToService<testfidl::TestInterface>();
+ auto stub = filtered_client_->Connect<testfidl::TestInterface>();
VerifyTestInterface(&stub, ZX_OK);
}
@@ -39,15 +40,15 @@ TEST_F(FilteredServiceDirectoryTest, Connect) {
TEST_F(FilteredServiceDirectoryTest, ConnectMultiple) {
filtered_service_directory_->AddService(testfidl::TestInterface::Name_);
- auto stub1 = filtered_client_->ConnectToService<testfidl::TestInterface>();
- auto stub2 = filtered_client_->ConnectToService<testfidl::TestInterface>();
+ auto stub1 = filtered_client_->Connect<testfidl::TestInterface>();
+ auto stub2 = filtered_client_->Connect<testfidl::TestInterface>();
VerifyTestInterface(&stub1, ZX_OK);
VerifyTestInterface(&stub2, ZX_OK);
}
// Verify that non-whitelisted services are blocked.
TEST_F(FilteredServiceDirectoryTest, ServiceBlocked) {
- auto stub = filtered_client_->ConnectToService<testfidl::TestInterface>();
+ auto stub = filtered_client_->Connect<testfidl::TestInterface>();
VerifyTestInterface(&stub, ZX_ERR_PEER_CLOSED);
}
@@ -58,7 +59,7 @@ TEST_F(FilteredServiceDirectoryTest, NoService) {
service_binding_.reset();
- auto stub = filtered_client_->ConnectToService<testfidl::TestInterface>();
+ auto stub = filtered_client_->Connect<testfidl::TestInterface>();
VerifyTestInterface(&stub, ZX_ERR_PEER_CLOSED);
}
@@ -68,9 +69,9 @@ TEST_F(FilteredServiceDirectoryTest, NoServiceDir) {
filtered_service_directory_->AddService(testfidl::TestInterface::Name_);
service_binding_.reset();
- service_directory_.reset();
+ outgoing_directory_.reset();
- auto stub = filtered_client_->ConnectToService<testfidl::TestInterface>();
+ auto stub = filtered_client_->Connect<testfidl::TestInterface>();
VerifyTestInterface(&stub, ZX_ERR_PEER_CLOSED);
}
diff --git a/chromium/base/fuchsia/scoped_service_binding.h b/chromium/base/fuchsia/scoped_service_binding.h
index efdc30d86ea..8306565813d 100644
--- a/chromium/base/fuchsia/scoped_service_binding.h
+++ b/chromium/base/fuchsia/scoped_service_binding.h
@@ -6,8 +6,10 @@
#define BASE_FUCHSIA_SCOPED_SERVICE_BINDING_H_
#include <lib/fidl/cpp/binding_set.h>
+#include <lib/sys/cpp/outgoing_directory.h>
#include "base/bind.h"
+#include "base/callback.h"
#include "base/fuchsia/service_directory.h"
namespace base {
@@ -16,14 +18,41 @@ namespace fuchsia {
template <typename Interface>
class ScopedServiceBinding {
public:
- // |service_directory| and |impl| must outlive the binding.
- ScopedServiceBinding(ServiceDirectory* service_directory, Interface* impl)
- : directory_(service_directory), impl_(impl) {
- directory_->AddService(
- BindRepeating(&ScopedServiceBinding::BindClient, Unretained(this)));
+ // Publishes a public service in the specified |outgoing_directory|.
+ // |outgoing_directory| and |impl| must outlive the binding.
+ ScopedServiceBinding(sys::OutgoingDirectory* outgoing_directory,
+ Interface* impl)
+ : directory_(outgoing_directory), impl_(impl) {
+ directory_->AddPublicService<Interface>(
+ [this](fidl::InterfaceRequest<Interface> request) {
+ BindClient(std::move(request));
+ });
+ }
+
+ // Publishes a service in the specified |pseudo_dir|. |pseudo_dir| and |impl|
+ // must outlive the binding.
+ ScopedServiceBinding(vfs::PseudoDir* pseudo_dir, Interface* impl)
+ : pseudo_dir_(pseudo_dir), impl_(impl) {
+ pseudo_dir_->AddEntry(
+ Interface::Name_,
+ std::make_unique<vfs::Service>(fidl::InterfaceRequestHandler<Interface>(
+ [this](fidl::InterfaceRequest<Interface> request) {
+ BindClient(std::move(request));
+ })));
}
- ~ScopedServiceBinding() { directory_->RemoveService(Interface::Name_); }
+ // TODO(crbug.com/974072): Remove this constructor once all code has been
+ // migrated from base::fuchsia::ServiceDirectory to sys::OutgoingDirectory.
+ ScopedServiceBinding(ServiceDirectory* service_directory, Interface* impl)
+ : ScopedServiceBinding(service_directory->outgoing_directory(), impl) {}
+
+ ~ScopedServiceBinding() {
+ if (directory_) {
+ directory_->RemovePublicService<Interface>();
+ } else {
+ pseudo_dir_->RemoveEntry(Interface::Name_);
+ }
+ }
void SetOnLastClientCallback(base::OnceClosure on_last_client_callback) {
on_last_client_callback_ = std::move(on_last_client_callback);
@@ -43,7 +72,8 @@ class ScopedServiceBinding {
std::move(on_last_client_callback_).Run();
}
- ServiceDirectory* const directory_;
+ sys::OutgoingDirectory* const directory_ = nullptr;
+ vfs::PseudoDir* const pseudo_dir_ = nullptr;
Interface* const impl_;
fidl::BindingSet<Interface> bindings_;
base::OnceClosure on_last_client_callback_;
@@ -51,6 +81,68 @@ class ScopedServiceBinding {
DISALLOW_COPY_AND_ASSIGN(ScopedServiceBinding);
};
+// Scoped service binding which allows only a single client to be connected
+// at any time. By default a new connection will disconnect an existing client.
+enum class ScopedServiceBindingPolicy { kPreferNew, kPreferExisting };
+
+template <typename Interface,
+ ScopedServiceBindingPolicy Policy =
+ ScopedServiceBindingPolicy::kPreferNew>
+class ScopedSingleClientServiceBinding {
+ public:
+ // |outgoing_directory| and |impl| must outlive the binding.
+ ScopedSingleClientServiceBinding(sys::OutgoingDirectory* outgoing_directory,
+ Interface* impl)
+ : directory_(std::move(outgoing_directory)), binding_(impl) {
+ directory_->AddPublicService<Interface>(
+ [this](fidl::InterfaceRequest<Interface> request) {
+ BindClient(std::move(request));
+ });
+ }
+
+ // TODO(crbug.com/974072): Remove this constructor once all code has been
+ // migrated from base::fuchsia::ServiceDirectory to sys::OutgoingDirectory.
+ ScopedSingleClientServiceBinding(ServiceDirectory* service_directory,
+ Interface* impl)
+ : ScopedSingleClientServiceBinding(
+ service_directory->outgoing_directory(),
+ impl) {}
+
+ ~ScopedSingleClientServiceBinding() {
+ directory_->RemovePublicService<Interface>();
+ }
+
+ typename Interface::EventSender_& events() { return binding_.events(); }
+
+ void SetOnLastClientCallback(base::OnceClosure on_last_client_callback) {
+ on_last_client_callback_ = std::move(on_last_client_callback);
+ binding_.set_error_handler(fit::bind_member(
+ this, &ScopedSingleClientServiceBinding::OnBindingEmpty));
+ }
+
+ bool has_clients() const { return binding_.is_bound(); }
+
+ private:
+ void BindClient(fidl::InterfaceRequest<Interface> request) {
+ if (Policy == ScopedServiceBindingPolicy::kPreferExisting &&
+ binding_.is_bound()) {
+ return;
+ }
+ binding_.Bind(std::move(request));
+ }
+
+ void OnBindingEmpty() {
+ binding_.set_error_handler(nullptr);
+ std::move(on_last_client_callback_).Run();
+ }
+
+ sys::OutgoingDirectory* const directory_;
+ fidl::Binding<Interface> binding_;
+ base::OnceClosure on_last_client_callback_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedSingleClientServiceBinding);
+};
+
} // namespace fuchsia
} // namespace base
diff --git a/chromium/base/fuchsia/scoped_service_binding_unittest.cc b/chromium/base/fuchsia/scoped_service_binding_unittest.cc
new file mode 100644
index 00000000000..ddbea039e2a
--- /dev/null
+++ b/chromium/base/fuchsia/scoped_service_binding_unittest.cc
@@ -0,0 +1,110 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/fuchsia/scoped_service_binding.h"
+
+#include "base/fuchsia/service_directory_test_base.h"
+#include "base/run_loop.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace fuchsia {
+
+class ScopedServiceBindingTest : public ServiceDirectoryTestBase {};
+
+// Verifies that ScopedServiceBinding allows connection more than once.
+TEST_F(ScopedServiceBindingTest, ConnectTwice) {
+ auto stub = public_service_directory_->Connect<testfidl::TestInterface>();
+ auto stub2 = public_service_directory_->Connect<testfidl::TestInterface>();
+ VerifyTestInterface(&stub, ZX_OK);
+ VerifyTestInterface(&stub2, ZX_OK);
+}
+
+// Verify that if we connect twice to a prefer-new bound service, the existing
+// connection gets closed.
+TEST_F(ScopedServiceBindingTest, SingleClientPreferNew) {
+ // Teardown the default multi-client binding and create a prefer-new one.
+ service_binding_ = nullptr;
+ ScopedSingleClientServiceBinding<testfidl::TestInterface,
+ ScopedServiceBindingPolicy::kPreferNew>
+ binding(outgoing_directory_.get(), &test_service_);
+
+ // Connect the first client, and verify that it is functional.
+ auto existing_client =
+ public_service_directory_->Connect<testfidl::TestInterface>();
+ VerifyTestInterface(&existing_client, ZX_OK);
+
+ // Connect the second client, so the existing one should be disconnected and
+ // the new should be functional.
+ auto new_client =
+ public_service_directory_->Connect<testfidl::TestInterface>();
+ RunLoop().RunUntilIdle();
+ EXPECT_FALSE(existing_client);
+ VerifyTestInterface(&new_client, ZX_OK);
+}
+
+// Verify that if we connect twice to a prefer-existing bound service, the new
+// connection gets closed.
+TEST_F(ScopedServiceBindingTest, SingleClientPreferExisting) {
+ // Teardown the default multi-client binding and create a prefer-existing one.
+ service_binding_ = nullptr;
+ ScopedSingleClientServiceBinding<testfidl::TestInterface,
+ ScopedServiceBindingPolicy::kPreferExisting>
+ binding(outgoing_directory_.get(), &test_service_);
+
+ // Connect the first client, and verify that it is functional.
+ auto existing_client =
+ public_service_directory_->Connect<testfidl::TestInterface>();
+ VerifyTestInterface(&existing_client, ZX_OK);
+
+ // Connect the second client, then verify that it gets closed and the
+ // existing one remains functional.
+ auto new_client =
+ public_service_directory_->Connect<testfidl::TestInterface>();
+ RunLoop().RunUntilIdle();
+ EXPECT_FALSE(new_client);
+ VerifyTestInterface(&existing_client, ZX_OK);
+}
+
+// Verify that the default single-client binding policy is prefer-new.
+TEST_F(ScopedServiceBindingTest, SingleClientDefaultIsPreferNew) {
+ // Teardown the default multi-client binding and create a prefer-new one.
+ service_binding_ = nullptr;
+ ScopedSingleClientServiceBinding<testfidl::TestInterface> binding(
+ outgoing_directory_.get(), &test_service_);
+
+ // Connect the first client, and verify that it is functional.
+ auto existing_client =
+ public_service_directory_->Connect<testfidl::TestInterface>();
+ VerifyTestInterface(&existing_client, ZX_OK);
+
+ // Connect the second client, so the existing one should be disconnected and
+ // the new should be functional.
+ auto new_client =
+ public_service_directory_->Connect<testfidl::TestInterface>();
+ RunLoop().RunUntilIdle();
+ EXPECT_FALSE(existing_client);
+ VerifyTestInterface(&new_client, ZX_OK);
+}
+
+// Verify that we can publish a debug service.
+TEST_F(ScopedServiceBindingTest, ConnectDebugService) {
+ // Remove the public service binding.
+ service_binding_.reset();
+
+ // Publish the test service to the "debug" directory.
+ ScopedServiceBinding<testfidl::TestInterface> debug_service_binding(
+ outgoing_directory_->debug_dir(), &test_service_);
+
+ auto debug_stub =
+ debug_service_directory_->Connect<testfidl::TestInterface>();
+ VerifyTestInterface(&debug_stub, ZX_OK);
+
+ auto release_stub =
+ public_service_directory_->Connect<testfidl::TestInterface>();
+ VerifyTestInterface(&release_stub, ZX_ERR_PEER_CLOSED);
+}
+
+} // namespace fuchsia
+} // namespace base
diff --git a/chromium/base/fuchsia/service_directory.cc b/chromium/base/fuchsia/service_directory.cc
index ae21911b83e..0b84904c67e 100644
--- a/chromium/base/fuchsia/service_directory.cc
+++ b/chromium/base/fuchsia/service_directory.cc
@@ -4,11 +4,10 @@
#include "base/fuchsia/service_directory.h"
-#include <lib/async/default.h>
-#include <lib/svc/dir.h>
-#include <zircon/process.h>
-#include <zircon/processargs.h>
+#include <lib/sys/cpp/component_context.h>
+#include <lib/sys/cpp/outgoing_directory.h>
+#include "base/fuchsia/default_context.h"
#include "base/fuchsia/fuchsia_logging.h"
#include "base/memory/ptr_util.h"
#include "base/message_loop/message_loop_current.h"
@@ -22,128 +21,27 @@ ServiceDirectory::ServiceDirectory(
Initialize(std::move(request));
}
-ServiceDirectory::ServiceDirectory() = default;
-
-ServiceDirectory::~ServiceDirectory() {
- DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
- DCHECK(services_.empty());
+ServiceDirectory::ServiceDirectory(sys::OutgoingDirectory* directory)
+ : directory_(directory) {}
- // Only the root ServiceDirectory "owns" svc_dir_.
- if (!sub_directory_) {
- zx_status_t status = svc_dir_destroy(svc_dir_);
- ZX_DCHECK(status == ZX_OK, status);
- }
-}
+ServiceDirectory::ServiceDirectory() = default;
+ServiceDirectory::~ServiceDirectory() = default;
// static
ServiceDirectory* ServiceDirectory::GetDefault() {
static NoDestructor<ServiceDirectory> directory(
- fidl::InterfaceRequest<::fuchsia::io::Directory>(
- zx::channel(zx_take_startup_handle(PA_DIRECTORY_REQUEST))));
+ ComponentContextForCurrentProcess()->outgoing().get());
return directory.get();
}
void ServiceDirectory::Initialize(
fidl::InterfaceRequest<::fuchsia::io::Directory> request) {
- DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
- DCHECK(!svc_dir_);
-
- zx_status_t status =
- svc_dir_create(async_get_default_dispatcher(),
- request.TakeChannel().release(), &svc_dir_);
- ZX_CHECK(status == ZX_OK, status);
-
- debug_ = WrapUnique(new ServiceDirectory(svc_dir_, "debug"));
-}
-
-void ServiceDirectory::AddServiceUnsafe(
- StringPiece name,
- RepeatingCallback<void(zx::channel)> connect_callback) {
- DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
- DCHECK(svc_dir_);
- DCHECK(services_.find(name) == services_.end());
-
- std::string name_str = name.as_string();
- services_[name_str] = connect_callback;
-
- if (sub_directory_) {
- zx_status_t status =
- svc_dir_add_service(svc_dir_, sub_directory_, name_str.c_str(), this,
- &ServiceDirectory::HandleConnectRequest);
- ZX_DCHECK(status == ZX_OK, status);
- } else {
- // Publish to "svc".
- zx_status_t status =
- svc_dir_add_service(svc_dir_, "svc", name_str.c_str(), this,
- &ServiceDirectory::HandleConnectRequest);
- ZX_DCHECK(status == ZX_OK, status);
-
- // Publish to "public" for compatibility.
- status = svc_dir_add_service(svc_dir_, "public", name_str.c_str(), this,
- &ServiceDirectory::HandleConnectRequest);
- ZX_DCHECK(status == ZX_OK, status);
-
- // Publish to the legacy "flat" namespace, which is required by some
- // clients.
- status = svc_dir_add_service(svc_dir_, nullptr, name_str.c_str(), this,
- &ServiceDirectory::HandleConnectRequest);
- ZX_DCHECK(status == ZX_OK, status);
- }
-}
-
-void ServiceDirectory::RemoveService(StringPiece name) {
- DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
- DCHECK(svc_dir_);
-
- std::string name_str = name.as_string();
-
- auto it = services_.find(name_str);
- DCHECK(it != services_.end());
- services_.erase(it);
-
- if (sub_directory_) {
- zx_status_t status =
- svc_dir_remove_service(svc_dir_, sub_directory_, name_str.c_str());
- ZX_DCHECK(status == ZX_OK, status);
- } else {
- // Unregister from "svc", "public", and flat namespace.
- zx_status_t status =
- svc_dir_remove_service(svc_dir_, "svc", name_str.c_str());
- ZX_DCHECK(status == ZX_OK, status);
- status = svc_dir_remove_service(svc_dir_, "public", name_str.c_str());
- ZX_DCHECK(status == ZX_OK, status);
- status = svc_dir_remove_service(svc_dir_, nullptr, name_str.c_str());
- ZX_DCHECK(status == ZX_OK, status);
- }
-}
-
-void ServiceDirectory::RemoveAllServices() {
- while (!services_.empty()) {
- RemoveService(services_.begin()->first);
- }
-}
-
-// static
-void ServiceDirectory::HandleConnectRequest(void* context,
- const char* service_name,
- zx_handle_t service_request) {
- auto* directory = reinterpret_cast<ServiceDirectory*>(context);
- DCHECK_CALLED_ON_VALID_THREAD(directory->thread_checker_);
-
- auto it = directory->services_.find(service_name);
-
- // HandleConnectRequest() is expected to be called only for registered
- // services.
- DCHECK(it != directory->services_.end());
-
- it->second.Run(zx::channel(service_request));
-}
-
-ServiceDirectory::ServiceDirectory(svc_dir_t* svc_dir, const char* name) {
- DCHECK(svc_dir);
-
- svc_dir_ = svc_dir;
- sub_directory_ = name;
+ DCHECK(!owned_directory_);
+ owned_directory_ = std::make_unique<sys::OutgoingDirectory>();
+ directory_ = owned_directory_.get();
+ directory_->GetOrCreateDirectory("svc")->Serve(
+ ::fuchsia::io::OPEN_RIGHT_READABLE | ::fuchsia::io::OPEN_RIGHT_WRITABLE,
+ request.TakeChannel());
}
} // namespace fuchsia
diff --git a/chromium/base/fuchsia/service_directory.h b/chromium/base/fuchsia/service_directory.h
index 293efdcfe69..e5f9568e307 100644
--- a/chromium/base/fuchsia/service_directory.h
+++ b/chromium/base/fuchsia/service_directory.h
@@ -17,9 +17,10 @@
#include "base/containers/flat_map.h"
#include "base/macros.h"
#include "base/strings/string_piece.h"
-#include "base/threading/thread_checker.h"
-typedef struct svc_dir svc_dir_t;
+namespace sys {
+class OutgoingDirectory;
+} // namespace sys
namespace base {
namespace fuchsia {
@@ -34,14 +35,19 @@ namespace fuchsia {
// Debug services are published to a "debug" sub-directory only accessible by
// other services via the Hub.
//
-// Not thread-safe. All methods must be called on the thread that created the
-// object.
+// TODO(crbug.com/974072): Currently this class is just a wrapper around
+// sys::OutgoingDirectory. Migrate all code to use sys::OutgoingDirectory and
+// remove this class.
class BASE_EXPORT ServiceDirectory {
public:
// Responds to service requests over the supplied |request| channel.
explicit ServiceDirectory(
fidl::InterfaceRequest<::fuchsia::io::Directory> request);
+ // Wraps a sys::OutgoingDirectory. The |directory| must outlive
+ // the ServiceDirectory object.
+ explicit ServiceDirectory(sys::OutgoingDirectory* directory);
+
// Creates an uninitialized ServiceDirectory instance. Initialize must be
// called on the instance before any services can be registered. Unless you
// need separate construction & initialization for a ServiceDirectory member,
@@ -58,51 +64,11 @@ class BASE_EXPORT ServiceDirectory {
// supplied |directory_request| channel.
void Initialize(fidl::InterfaceRequest<::fuchsia::io::Directory> request);
- template <typename Interface>
- void AddService(RepeatingCallback<void(fidl::InterfaceRequest<Interface>)>
- connect_callback) {
- AddServiceUnsafe(
- Interface::Name_,
- BindRepeating(
- [](decltype(connect_callback) callback, zx::channel request) {
- callback.Run(
- fidl::InterfaceRequest<Interface>(std::move(request)));
- },
- connect_callback));
- }
- void RemoveService(StringPiece name);
- void RemoveAllServices();
-
- // Returns the debug ServiceDirectory.
- ServiceDirectory* debug() const { return debug_.get(); }
-
- // Passes requests for |name| through to a generic |connect_callback|.
- // This is used only when proxying requests for interfaces not known at
- // compile-time. Use the type-safe APIs above whenever possible.
- void AddServiceUnsafe(StringPiece name,
- RepeatingCallback<void(zx::channel)> connect_callback);
+ sys::OutgoingDirectory* outgoing_directory() { return directory_; }
private:
- // Sub-directory constructor.
- ServiceDirectory(svc_dir_t* svc_dir, const char* name);
-
- // Called by |svc_dir_| to handle service requests.
- static void HandleConnectRequest(void* context,
- const char* service_name,
- zx_handle_t service_request);
-
- THREAD_CHECKER(thread_checker_);
-
- // Owned by the root directory.
- svc_dir_t* svc_dir_ = nullptr;
- flat_map<std::string, RepeatingCallback<void(zx::channel)>> services_;
-
- // The debug sub-directory. Empty if this is a sub-directory.
- std::unique_ptr<ServiceDirectory> debug_;
-
- // If mon-null, this directory represents a sub-directory of the root
- // ServiceDirectory.
- const char* sub_directory_ = nullptr;
+ std::unique_ptr<sys::OutgoingDirectory> owned_directory_;
+ sys::OutgoingDirectory* directory_;
DISALLOW_COPY_AND_ASSIGN(ServiceDirectory);
};
diff --git a/chromium/base/fuchsia/service_directory_test_base.cc b/chromium/base/fuchsia/service_directory_test_base.cc
index 35fc351afef..5668728e44b 100644
--- a/chromium/base/fuchsia/service_directory_test_base.cc
+++ b/chromium/base/fuchsia/service_directory_test_base.cc
@@ -7,52 +7,48 @@
#include <lib/fdio/directory.h>
#include <utility>
+#include "base/bind.h"
+#include "base/fuchsia/fuchsia_logging.h"
+#include "base/test/test_timeouts.h"
+
namespace base {
namespace fuchsia {
-ServiceDirectoryTestBase::ServiceDirectoryTestBase() {
- // TODO(https://crbug.com/920920): Remove the ServiceDirectory's implicit
- // "public" sub-directory and update this setup logic.
-
+ServiceDirectoryTestBase::ServiceDirectoryTestBase()
+ : run_timeout_(TestTimeouts::action_timeout(), BindRepeating([]() {
+ ADD_FAILURE() << "Run() timed out.";
+ })) {
// Mount service dir and publish the service.
+ outgoing_directory_ = std::make_unique<sys::OutgoingDirectory>();
fidl::InterfaceHandle<::fuchsia::io::Directory> directory;
- service_directory_ =
- std::make_unique<ServiceDirectory>(directory.NewRequest());
+ zx_status_t status =
+ outgoing_directory_->Serve(directory.NewRequest().TakeChannel());
+ ZX_CHECK(status == ZX_OK, status);
service_binding_ =
std::make_unique<ScopedServiceBinding<testfidl::TestInterface>>(
- service_directory_.get(), &test_service_);
+ outgoing_directory_.get(), &test_service_);
- // Create the ServiceDirectoryClient, connected to the "svc" sub-directory.
+ // Create the sys::ServiceDirectory, connected to the "svc" sub-directory.
fidl::InterfaceHandle<::fuchsia::io::Directory> svc_directory;
CHECK_EQ(fdio_service_connect_at(
- directory.channel().get(), "/svc/.",
+ directory.channel().get(), "svc",
svc_directory.NewRequest().TakeChannel().release()),
ZX_OK);
- public_service_directory_client_ =
- std::make_unique<ServiceDirectoryClient>(std::move(svc_directory));
+ public_service_directory_ =
+ std::make_unique<sys::ServiceDirectory>(std::move(svc_directory));
- // Create the ServiceDirectoryClient, connected to the "debug" sub-directory.
+ // Create the sys::ServiceDirectory, connected to the "debug" sub-directory.
fidl::InterfaceHandle<::fuchsia::io::Directory> debug_directory;
CHECK_EQ(fdio_service_connect_at(
- directory.channel().get(), "/debug/.",
+ directory.channel().get(), "debug",
debug_directory.NewRequest().TakeChannel().release()),
ZX_OK);
- debug_service_directory_client_ =
- std::make_unique<ServiceDirectoryClient>(std::move(debug_directory));
-
- // Create the ServiceDirectoryClient, connected to the "public" sub-directory
- // (same contents as "svc", provided for compatibility).
- fidl::InterfaceHandle<::fuchsia::io::Directory> public_directory;
- CHECK_EQ(fdio_service_connect_at(
- directory.channel().get(), "/public/.",
- public_directory.NewRequest().TakeChannel().release()),
- ZX_OK);
- legacy_public_service_directory_client_ =
- std::make_unique<ServiceDirectoryClient>(std::move(public_directory));
+ debug_service_directory_ =
+ std::make_unique<sys::ServiceDirectory>(std::move(debug_directory));
- // Create a ServiceDirectoryClient for the "private" part of the directory.
- root_service_directory_client_ =
- std::make_unique<ServiceDirectoryClient>(std::move(directory));
+ // Create a sys::ServiceDirectory for the "private" part of the directory.
+ root_service_directory_ =
+ std::make_unique<sys::ServiceDirectory>(std::move(directory));
}
ServiceDirectoryTestBase::~ServiceDirectoryTestBase() = default;
@@ -61,7 +57,7 @@ void ServiceDirectoryTestBase::VerifyTestInterface(
fidl::InterfacePtr<testfidl::TestInterface>* stub,
zx_status_t expected_error) {
// Call the service and wait for response.
- base::RunLoop run_loop;
+ RunLoop run_loop;
zx_status_t actual_error = ZX_OK;
stub->set_error_handler([&run_loop, &actual_error](zx_status_t status) {
diff --git a/chromium/base/fuchsia/service_directory_test_base.h b/chromium/base/fuchsia/service_directory_test_base.h
index 8099b830720..34a7ccb8dfd 100644
--- a/chromium/base/fuchsia/service_directory_test_base.h
+++ b/chromium/base/fuchsia/service_directory_test_base.h
@@ -5,14 +5,16 @@
#ifndef BASE_FUCHSIA_SERVICE_DIRECTORY_TEST_BASE_H_
#define BASE_FUCHSIA_SERVICE_DIRECTORY_TEST_BASE_H_
-#include <lib/zx/channel.h>
+#include <lib/sys/cpp/outgoing_directory.h>
+#include <lib/sys/cpp/service_directory.h>
+#include <zircon/types.h>
#include <memory>
#include "base/fuchsia/scoped_service_binding.h"
-#include "base/fuchsia/service_directory_client.h"
#include "base/fuchsia/test_interface_impl.h"
#include "base/fuchsia/testfidl/cpp/fidl.h"
#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
@@ -27,18 +29,18 @@ class ServiceDirectoryTestBase : public testing::Test {
zx_status_t expected_error);
protected:
+ const RunLoop::ScopedRunTimeoutForTest run_timeout_;
+
MessageLoopForIO message_loop_;
- std::unique_ptr<ServiceDirectory> service_directory_;
+ std::unique_ptr<sys::OutgoingDirectory> outgoing_directory_;
TestInterfaceImpl test_service_;
std::unique_ptr<ScopedServiceBinding<testfidl::TestInterface>>
service_binding_;
- std::unique_ptr<ServiceDirectoryClient> public_service_directory_client_;
- std::unique_ptr<ServiceDirectoryClient> debug_service_directory_client_;
- std::unique_ptr<ServiceDirectoryClient>
- legacy_public_service_directory_client_;
- std::unique_ptr<ServiceDirectoryClient> root_service_directory_client_;
+ std::unique_ptr<sys::ServiceDirectory> public_service_directory_;
+ std::unique_ptr<sys::ServiceDirectory> debug_service_directory_;
+ std::unique_ptr<sys::ServiceDirectory> root_service_directory_;
DISALLOW_COPY_AND_ASSIGN(ServiceDirectoryTestBase);
};
diff --git a/chromium/base/fuchsia/service_directory_unittest.cc b/chromium/base/fuchsia/service_directory_unittest.cc
deleted file mode 100644
index 8bf9688e51c..00000000000
--- a/chromium/base/fuchsia/service_directory_unittest.cc
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/fuchsia/service_directory.h"
-
-#include <lib/fdio/fdio.h>
-#include <lib/zx/channel.h>
-#include <utility>
-
-#include "base/bind.h"
-#include "base/fuchsia/service_directory_test_base.h"
-#include "base/location.h"
-#include "base/run_loop.h"
-#include "base/task_runner.h"
-#include "base/test/test_timeouts.h"
-#include "base/threading/thread_task_runner_handle.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace base {
-namespace fuchsia {
-
-class ServiceDirectoryTest : public ServiceDirectoryTestBase {};
-
-// Verifies that ServiceDirectoryClient can consume a public service in
-// ServiceDirectory and that connection is disconnected when the client stub is
-// destroyed.
-TEST_F(ServiceDirectoryTest, ConnectDisconnect) {
- auto stub = public_service_directory_client_
- ->ConnectToService<testfidl::TestInterface>();
- VerifyTestInterface(&stub, ZX_OK);
-
- base::RunLoop run_loop;
- service_binding_->SetOnLastClientCallback(run_loop.QuitClosure());
-
- base::ThreadTaskRunnerHandle::Get()->PostDelayedTask(
- FROM_HERE,
- base::BindOnce(
- [](base::RunLoop* run_loop) {
- ADD_FAILURE();
- run_loop->Quit();
- },
- &run_loop),
- TestTimeouts::action_timeout());
-
- stub.Unbind();
- run_loop.Run();
-}
-
-// Verify that we can connect to a service through both "public" and "svc".
-TEST_F(ServiceDirectoryTest, ConnectNewAndLegacyServices) {
- auto stub = public_service_directory_client_
- ->ConnectToService<testfidl::TestInterface>();
- auto stub2 = legacy_public_service_directory_client_
- ->ConnectToService<testfidl::TestInterface>();
- VerifyTestInterface(&stub, ZX_OK);
- VerifyTestInterface(&stub2, ZX_OK);
-}
-
-// Verify that we can connect to the same service more than once.
-TEST_F(ServiceDirectoryTest, ConnectMulti) {
- auto stub = public_service_directory_client_
- ->ConnectToService<testfidl::TestInterface>();
- auto stub2 = public_service_directory_client_
- ->ConnectToService<testfidl::TestInterface>();
- VerifyTestInterface(&stub, ZX_OK);
- VerifyTestInterface(&stub2, ZX_OK);
-}
-
-// Verify that services are also exported to the legacy flat service namespace.
-TEST_F(ServiceDirectoryTest, ConnectLegacy) {
- auto stub = root_service_directory_client_
- ->ConnectToService<testfidl::TestInterface>();
- VerifyTestInterface(&stub, ZX_OK);
-}
-
-// Verify that ServiceDirectoryClient can handle the case when the service
-// directory connection is disconnected.
-TEST_F(ServiceDirectoryTest, DirectoryGone) {
- service_binding_.reset();
- service_directory_.reset();
-
- fidl::InterfacePtr<testfidl::TestInterface> stub;
- zx_status_t status =
- public_service_directory_client_->ConnectToService(stub.NewRequest());
- EXPECT_EQ(status, ZX_ERR_PEER_CLOSED);
-
- VerifyTestInterface(&stub, ZX_ERR_PEER_CLOSED);
-}
-
-// Verify that the case when the service doesn't exist is handled properly.
-TEST_F(ServiceDirectoryTest, NoService) {
- service_binding_.reset();
- auto stub = public_service_directory_client_
- ->ConnectToService<testfidl::TestInterface>();
- VerifyTestInterface(&stub, ZX_ERR_PEER_CLOSED);
-}
-
-// Verify that we can connect to a debug service.
-TEST_F(ServiceDirectoryTest, ConnectDebugService) {
- // Remove the public service binding.
- service_binding_.reset();
-
- // Publish the test service to the "debug" directory.
- ScopedServiceBinding<testfidl::TestInterface> debug_service_binding(
- service_directory_->debug(), &test_service_);
-
- auto debug_stub = debug_service_directory_client_
- ->ConnectToService<testfidl::TestInterface>();
- VerifyTestInterface(&debug_stub, ZX_OK);
-
- auto release_stub = public_service_directory_client_
- ->ConnectToService<testfidl::TestInterface>();
- VerifyTestInterface(&release_stub, ZX_ERR_PEER_CLOSED);
-}
-
-} // namespace fuchsia
-} // namespace base
diff --git a/chromium/base/fuchsia/service_provider_impl.cc b/chromium/base/fuchsia/service_provider_impl.cc
index c21ce6c1998..936ff20b587 100644
--- a/chromium/base/fuchsia/service_provider_impl.cc
+++ b/chromium/base/fuchsia/service_provider_impl.cc
@@ -4,11 +4,23 @@
#include "base/fuchsia/service_provider_impl.h"
+#include <lib/sys/cpp/outgoing_directory.h>
#include <utility>
namespace base {
namespace fuchsia {
+// static
+std::unique_ptr<ServiceProviderImpl>
+ServiceProviderImpl::CreateForOutgoingDirectory(
+ sys::OutgoingDirectory* outgoing_directory) {
+ fidl::InterfaceHandle<::fuchsia::io::Directory> service_directory;
+ outgoing_directory->GetOrCreateDirectory("svc")->Serve(
+ ::fuchsia::io::OPEN_RIGHT_READABLE | ::fuchsia::io::OPEN_RIGHT_WRITABLE,
+ service_directory.NewRequest().TakeChannel());
+ return std::make_unique<ServiceProviderImpl>(std::move(service_directory));
+}
+
ServiceProviderImpl::ServiceProviderImpl(
fidl::InterfaceHandle<::fuchsia::io::Directory> service_directory)
: directory_(std::move(service_directory)) {}
diff --git a/chromium/base/fuchsia/service_provider_impl.h b/chromium/base/fuchsia/service_provider_impl.h
index 186b5d1dddf..99a70aec881 100644
--- a/chromium/base/fuchsia/service_provider_impl.h
+++ b/chromium/base/fuchsia/service_provider_impl.h
@@ -17,6 +17,10 @@
#include "base/fuchsia/service_directory_client.h"
#include "base/macros.h"
+namespace sys {
+class OutgoingDirectory;
+} // namespace sys
+
namespace base {
namespace fuchsia {
@@ -25,6 +29,11 @@ namespace fuchsia {
// TODO(https://crbug.com/920920): Remove this when ServiceProvider is gone.
class BASE_EXPORT ServiceProviderImpl : public ::fuchsia::sys::ServiceProvider {
public:
+ // Creates a ServiceProvider serving the public services published to the
+ // specified OutgoingDirectory.
+ static std::unique_ptr<ServiceProviderImpl> CreateForOutgoingDirectory(
+ sys::OutgoingDirectory* outgoing_directory);
+
explicit ServiceProviderImpl(
fidl::InterfaceHandle<::fuchsia::io::Directory> service_directory);
~ServiceProviderImpl() override;
diff --git a/chromium/base/fuchsia/service_provider_impl_unittest.cc b/chromium/base/fuchsia/service_provider_impl_unittest.cc
index 4a36b301df0..116c6389b95 100644
--- a/chromium/base/fuchsia/service_provider_impl_unittest.cc
+++ b/chromium/base/fuchsia/service_provider_impl_unittest.cc
@@ -4,11 +4,11 @@
#include "base/fuchsia/service_provider_impl.h"
+#include <lib/sys/cpp/outgoing_directory.h>
#include <lib/zx/channel.h>
#include <utility>
#include "base/fuchsia/scoped_service_binding.h"
-#include "base/fuchsia/service_directory.h"
#include "base/fuchsia/test_interface_impl.h"
#include "base/fuchsia/testfidl/cpp/fidl.h"
#include "base/message_loop/message_loop.h"
@@ -20,7 +20,12 @@ namespace fuchsia {
class ServiceProviderImplTest : public testing::Test {
public:
- ServiceProviderImplTest() = default;
+ ServiceProviderImplTest() {
+ provider_impl_ =
+ ServiceProviderImpl::CreateForOutgoingDirectory(&service_directory_);
+ provider_impl_->AddBinding(provider_client_.NewRequest());
+ }
+
~ServiceProviderImplTest() override = default;
void VerifyTestInterface(fidl::InterfacePtr<testfidl::TestInterface>* stub,
@@ -52,27 +57,25 @@ class ServiceProviderImplTest : public testing::Test {
MessageLoopForIO message_loop_;
TestInterfaceImpl test_service_;
+ sys::OutgoingDirectory service_directory_;
+ std::unique_ptr<ServiceProviderImpl> provider_impl_;
+ ::fuchsia::sys::ServiceProviderPtr provider_client_;
+
DISALLOW_COPY_AND_ASSIGN(ServiceProviderImplTest);
};
-// Verifies that we can connect to the service service more than once.
+// Verifies that we can connect to the service more than once.
TEST_F(ServiceProviderImplTest, ConnectMulti) {
- fidl::InterfaceHandle<::fuchsia::io::Directory> directory_channel;
- ServiceDirectory service_directory(directory_channel.NewRequest());
- ServiceProviderImpl provider_impl(std::move(directory_channel));
ScopedServiceBinding<testfidl::TestInterface> service_binding(
- &service_directory, &test_service_);
-
- ::fuchsia::sys::ServiceProviderPtr provider_client;
- provider_impl.AddBinding(provider_client.NewRequest());
+ &service_directory_, &test_service_);
testfidl::TestInterfacePtr stub;
- provider_client->ConnectToService(testfidl::TestInterface::Name_,
- stub.NewRequest().TakeChannel());
+ provider_client_->ConnectToService(testfidl::TestInterface::Name_,
+ stub.NewRequest().TakeChannel());
testfidl::TestInterfacePtr stub2;
- provider_client->ConnectToService(testfidl::TestInterface::Name_,
- stub2.NewRequest().TakeChannel());
+ provider_client_->ConnectToService(testfidl::TestInterface::Name_,
+ stub2.NewRequest().TakeChannel());
VerifyTestInterface(&stub, ZX_OK);
VerifyTestInterface(&stub2, ZX_OK);
@@ -80,16 +83,9 @@ TEST_F(ServiceProviderImplTest, ConnectMulti) {
// Verify that the case when the service doesn't exist is handled properly.
TEST_F(ServiceProviderImplTest, NoService) {
- fidl::InterfaceHandle<::fuchsia::io::Directory> directory_channel;
- ServiceDirectory service_directory(directory_channel.NewRequest());
- ServiceProviderImpl provider_impl(std::move(directory_channel));
-
- ::fuchsia::sys::ServiceProviderPtr provider_client;
- provider_impl.AddBinding(provider_client.NewRequest());
-
testfidl::TestInterfacePtr stub;
- provider_client->ConnectToService(testfidl::TestInterface::Name_,
- stub.NewRequest().TakeChannel());
+ provider_client_->ConnectToService(testfidl::TestInterface::Name_,
+ stub.NewRequest().TakeChannel());
VerifyTestInterface(&stub, ZX_ERR_PEER_CLOSED);
}
diff --git a/chromium/base/fuchsia/startup_context.cc b/chromium/base/fuchsia/startup_context.cc
index 080a6c73386..b1b0ebc1d12 100644
--- a/chromium/base/fuchsia/startup_context.cc
+++ b/chromium/base/fuchsia/startup_context.cc
@@ -5,77 +5,95 @@
#include "base/fuchsia/startup_context.h"
#include <fuchsia/io/cpp/fidl.h>
+#include <lib/sys/cpp/outgoing_directory.h>
+#include <lib/sys/cpp/service_directory.h>
#include "base/fuchsia/file_utils.h"
namespace base {
namespace fuchsia {
-StartupContext::StartupContext(::fuchsia::sys::StartupInfo startup_info)
- : startup_info_(std::move(startup_info)) {
+StartupContext::StartupContext(::fuchsia::sys::StartupInfo startup_info) {
+ std::unique_ptr<sys::ServiceDirectory> incoming_services;
+
// Component manager generates |flat_namespace|, so things are horribly broken
// if |flat_namespace| is malformed.
- CHECK_EQ(startup_info_.flat_namespace.directories.size(),
- startup_info_.flat_namespace.paths.size());
-
- // Find the /svc directory and wrap it into a ServiceDirectoryClient.
- for (size_t i = 0; i < startup_info_.flat_namespace.paths.size(); ++i) {
- if (startup_info_.flat_namespace.paths[i] == kServiceDirectoryPath) {
- incoming_services_ = std::make_unique<ServiceDirectoryClient>(
- fidl::InterfaceHandle<::fuchsia::io::Directory>(
- std::move(startup_info_.flat_namespace.directories[i])));
+ CHECK_EQ(startup_info.flat_namespace.directories.size(),
+ startup_info.flat_namespace.paths.size());
+
+ // Find the /svc directory and wrap it into a sys::ServiceDirectory.
+ for (size_t i = 0; i < startup_info.flat_namespace.paths.size(); ++i) {
+ if (startup_info.flat_namespace.paths[i] == kServiceDirectoryPath) {
+ incoming_services = std::make_unique<sys::ServiceDirectory>(
+ std::move(startup_info.flat_namespace.directories[i]));
break;
}
}
// TODO(https://crbug.com/933834): Remove these workarounds when we migrate to
// the new component manager.
- if (!incoming_services_ && startup_info_.launch_info.flat_namespace) {
+ if (!incoming_services && startup_info.launch_info.flat_namespace) {
LOG(WARNING) << "Falling back to LaunchInfo namespace";
for (size_t i = 0;
- i < startup_info_.launch_info.flat_namespace->paths.size(); ++i) {
- if (startup_info_.launch_info.flat_namespace->paths[i] ==
+ i < startup_info.launch_info.flat_namespace->paths.size(); ++i) {
+ if (startup_info.launch_info.flat_namespace->paths[i] ==
kServiceDirectoryPath) {
- incoming_services_ = std::make_unique<ServiceDirectoryClient>(
- fidl::InterfaceHandle<::fuchsia::io::Directory>(std::move(
- startup_info_.launch_info.flat_namespace->directories[i])));
+ incoming_services = std::make_unique<sys::ServiceDirectory>(
+ std::move(startup_info.launch_info.flat_namespace->directories[i]));
break;
}
}
}
- if (!incoming_services_ && startup_info_.launch_info.additional_services) {
+
+ if (!incoming_services && startup_info.launch_info.additional_services) {
LOG(WARNING) << "Falling back to additional ServiceList services";
- // Construct a ServiceDirectory and publish the additional services into it.
- fidl::InterfaceHandle<::fuchsia::io::Directory> incoming_directory;
+ // Construct an OutgoingDirectory and publish the additional services into
+ // it.
additional_services_.Bind(
- std::move(startup_info_.launch_info.additional_services->provider));
- additional_services_directory_ =
- std::make_unique<ServiceDirectory>(incoming_directory.NewRequest());
- for (auto& name : startup_info_.launch_info.additional_services->names) {
- additional_services_directory_->AddServiceUnsafe(
- name, base::BindRepeating(
- &::fuchsia::sys::ServiceProvider::ConnectToService,
- base::Unretained(additional_services_.get()), name));
+ std::move(startup_info.launch_info.additional_services->provider));
+ additional_services_directory_ = std::make_unique<sys::OutgoingDirectory>();
+ for (auto& name : startup_info.launch_info.additional_services->names) {
+ additional_services_directory_->AddPublicService(
+ std::make_unique<vfs::Service>([this, name](
+ zx::channel channel,
+ async_dispatcher_t* dispatcher) {
+ additional_services_->ConnectToService(name, std::move(channel));
+ }),
+ name);
}
- // Publish those services to the caller as |incoming_services_|.
- incoming_services_ = std::make_unique<ServiceDirectoryClient>(
- fidl::InterfaceHandle<::fuchsia::io::Directory>(
- std::move(incoming_directory)));
+ // Publish those services to the caller as |incoming_services|.
+ fidl::InterfaceHandle<::fuchsia::io::Directory> incoming_directory;
+ additional_services_directory_->GetOrCreateDirectory("svc")->Serve(
+ ::fuchsia::io::OPEN_RIGHT_READABLE | ::fuchsia::io::OPEN_RIGHT_WRITABLE,
+ incoming_directory.NewRequest().TakeChannel());
+ incoming_services =
+ std::make_unique<sys::ServiceDirectory>(std::move(incoming_directory));
}
-}
-StartupContext::~StartupContext() = default;
+ if (!incoming_services) {
+ LOG(WARNING) << "Component started without a service directory";
-ServiceDirectory* StartupContext::public_services() {
- if (!public_services_ && startup_info_.launch_info.directory_request) {
- public_services_ = std::make_unique<ServiceDirectory>(
- fidl::InterfaceRequest<::fuchsia::io::Directory>(
- std::move(startup_info_.launch_info.directory_request)));
+ // Create a dummy sys::ServiceDirectory with a channel that's not
+ // connected on the other end.
+ fidl::InterfaceHandle<::fuchsia::io::Directory> dummy_directory;
+ ignore_result(dummy_directory.NewRequest());
+ incoming_services =
+ std::make_unique<sys::ServiceDirectory>(std::move(dummy_directory));
}
- return public_services_.get();
+
+ component_context_ = std::make_unique<sys::ComponentContext>(
+ std::move(incoming_services),
+ std::move(startup_info.launch_info.directory_request));
+
+ service_directory_ =
+ std::make_unique<ServiceDirectory>(component_context_->outgoing().get());
+ service_directory_client_ = std::make_unique<ServiceDirectoryClient>(
+ component_context_->svc()->CloneChannel());
}
+StartupContext::~StartupContext() = default;
+
} // namespace fuchsia
} // namespace base
diff --git a/chromium/base/fuchsia/startup_context.h b/chromium/base/fuchsia/startup_context.h
index fa4e330a407..aa879fa100e 100644
--- a/chromium/base/fuchsia/startup_context.h
+++ b/chromium/base/fuchsia/startup_context.h
@@ -6,6 +6,7 @@
#define BASE_FUCHSIA_STARTUP_CONTEXT_H_
#include <fuchsia/sys/cpp/fidl.h>
+#include <lib/sys/cpp/component_context.h>
#include <memory>
#include "base/base_export.h"
@@ -26,28 +27,32 @@ class BASE_EXPORT StartupContext {
explicit StartupContext(::fuchsia::sys::StartupInfo startup_info);
virtual ~StartupContext();
- // Returns the namespace of services published for use by the component.
- const ServiceDirectoryClient* incoming_services() const {
- DCHECK(incoming_services_);
- return incoming_services_.get();
+ // Returns the ComponentContext for the current component. Note that all
+ // outgoing services should be bound immediately after the first call to this
+ // API, before returning control to the message loop, at which point we will
+ // start processing service connection requests.
+ sys::ComponentContext* component_context() const {
+ return component_context_.get();
}
- // Returns the outgoing directory into which this component binds services.
- // Note that all services should be bound immediately after the first call to
- // this API, before returning control to the message loop, at which point we
- // will start processing service connection requests.
- ServiceDirectory* public_services();
+ // TODO(crbug.com/974072): These are legacy ServiceDirectory and
+ // ServiceDirectoryClient. Remove once all clients have been migrated to
+ // sys::OutgoingDirectory and sys::ServiceDirectory.
+ ServiceDirectoryClient* incoming_services() const {
+ return service_directory_client_.get();
+ }
+ ServiceDirectory* public_services() { return service_directory_.get(); }
private:
- ::fuchsia::sys::StartupInfo startup_info_;
-
- std::unique_ptr<ServiceDirectoryClient> incoming_services_;
- std::unique_ptr<ServiceDirectory> public_services_;
-
// TODO(https://crbug.com/933834): Remove these when we migrate to the new
// component manager APIs.
::fuchsia::sys::ServiceProviderPtr additional_services_;
- std::unique_ptr<ServiceDirectory> additional_services_directory_;
+ std::unique_ptr<sys::OutgoingDirectory> additional_services_directory_;
+
+ std::unique_ptr<sys::ComponentContext> component_context_;
+
+ std::unique_ptr<ServiceDirectory> service_directory_;
+ std::unique_ptr<ServiceDirectoryClient> service_directory_client_;
DISALLOW_COPY_AND_ASSIGN(StartupContext);
};
diff --git a/chromium/base/hash/sha1_boringssl.cc b/chromium/base/hash/sha1_boringssl.cc
new file mode 100644
index 00000000000..53eafbc84db
--- /dev/null
+++ b/chromium/base/hash/sha1_boringssl.cc
@@ -0,0 +1,28 @@
+// Copyright (c) 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/hash/sha1.h"
+
+#include <stdint.h>
+
+#include "base/strings/string_util.h"
+#include "third_party/boringssl/src/include/openssl/crypto.h"
+#include "third_party/boringssl/src/include/openssl/sha.h"
+
+namespace base {
+
+void SHA1HashBytes(const unsigned char* data, size_t len, unsigned char* hash) {
+ CRYPTO_library_init();
+ SHA1(data, len, hash);
+}
+
+std::string SHA1HashString(const std::string& str) {
+ CRYPTO_library_init();
+ std::string digest;
+ SHA1(reinterpret_cast<const uint8_t*>(str.data()), str.size(),
+ reinterpret_cast<uint8_t*>(base::WriteInto(&digest, kSHA1Length + 1)));
+ return digest;
+}
+
+} // namespace base
diff --git a/chromium/base/hash/sha1_perftest.cc b/chromium/base/hash/sha1_perftest.cc
new file mode 100644
index 00000000000..fe75d1585ab
--- /dev/null
+++ b/chromium/base/hash/sha1_perftest.cc
@@ -0,0 +1,60 @@
+// Copyright (c) 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/hash/sha1.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <algorithm>
+#include <string>
+#include <vector>
+
+#include "base/rand_util.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/perf/perf_test.h"
+
+static void Timing(const size_t len) {
+ std::vector<uint8_t> buf(len);
+ base::RandBytes(buf.data(), len);
+
+ const int runs = 111;
+ std::vector<base::TimeDelta> utime(runs);
+ unsigned char digest[base::kSHA1Length];
+ memset(digest, 0, base::kSHA1Length);
+
+ double total_test_time = 0.0;
+ for (int i = 0; i < runs; ++i) {
+ auto start = base::TimeTicks::Now();
+ base::SHA1HashBytes(buf.data(), len, digest);
+ auto end = base::TimeTicks::Now();
+ utime[i] = end - start;
+ total_test_time += utime[i].InMicroseconds();
+ }
+
+ std::sort(utime.begin(), utime.end());
+ const int med = runs / 2;
+ const int min = 0;
+
+ // No need for conversions as length is in bytes and time in usecs:
+ // MB/s = (len / (bytes/megabytes)) / (usecs / usecs/sec)
+ // MB/s = (len / 1,000,000)/(usecs / 1,000,000)
+ // MB/s = (len * 1,000,000)/(usecs * 1,000,000)
+ // MB/s = len/utime
+ double median_rate = len / utime[med].InMicroseconds();
+ double max_rate = len / utime[min].InMicroseconds();
+
+ perf_test::PrintResult("len=", base::NumberToString(len), "median",
+ median_rate, "MB/s", true);
+ perf_test::PrintResult("usecs=", base::NumberToString(total_test_time), "max",
+ max_rate, "MB/s", true);
+}
+
+TEST(SHA1PerfTest, Speed) {
+ Timing(1024 * 1024U >> 1);
+ Timing(1024 * 1024U >> 5);
+ Timing(1024 * 1024U >> 6);
+ Timing(1024 * 1024U >> 7);
+}
diff --git a/chromium/base/i18n/streaming_utf8_validator_unittest.cc b/chromium/base/i18n/streaming_utf8_validator_unittest.cc
index 37969689333..18cbab64186 100644
--- a/chromium/base/i18n/streaming_utf8_validator_unittest.cc
+++ b/chromium/base/i18n/streaming_utf8_validator_unittest.cc
@@ -116,7 +116,7 @@ TEST_F(StreamingUtf8ValidatorThoroughTest, TestEverything) {
base::AutoLock al(lock_);
uint32_t begin = 0;
do {
- base::PostTaskWithTraits(
+ base::PostTask(
FROM_HERE, {base::TaskShutdownBehavior::BLOCK_SHUTDOWN},
base::BindOnce(&StreamingUtf8ValidatorThoroughTest::TestRange,
base::Unretained(this), begin,
diff --git a/chromium/base/i18n/time_formatting.h b/chromium/base/i18n/time_formatting.h
index dfbfeea6c14..64b09964466 100644
--- a/chromium/base/i18n/time_formatting.h
+++ b/chromium/base/i18n/time_formatting.h
@@ -47,10 +47,6 @@ enum DateFormat {
DATE_FORMAT_MONTH_WEEKDAY_DAY,
};
-// TODO(derat@chromium.org): Update all of these functions to return boolean
-// "success" values and use out-params for formatted strings:
-// http://crbug.com/698802
-
// Returns the time of day, e.g., "3:07 PM".
BASE_I18N_EXPORT string16 TimeFormatTimeOfDay(const Time& time);
diff --git a/chromium/base/immediate_crash.h b/chromium/base/immediate_crash.h
index b948c7aeb92..94ee14f1289 100644
--- a/chromium/base/immediate_crash.h
+++ b/chromium/base/immediate_crash.h
@@ -8,80 +8,128 @@
#include "build/build_config.h"
// Crashes in the fastest possible way with no attempt at logging.
-// There are different constraints to satisfy here, see http://crbug.com/664209
-// for more context:
-// - The trap instructions, and hence the PC value at crash time, have to be
-// distinct and not get folded into the same opcode by the compiler.
-// On Linux/Android this is tricky because GCC still folds identical
-// asm volatile blocks. The workaround is generating distinct opcodes for
-// each CHECK using the __COUNTER__ macro.
-// - The debug info for the trap instruction has to be attributed to the source
-// line that has the CHECK(), to make crash reports actionable. This rules
-// out the ability of using a inline function, at least as long as clang
-// doesn't support attribute(artificial).
-// - Failed CHECKs should produce a signal that is distinguishable from an
-// invalid memory access, to improve the actionability of crash reports.
-// - The compiler should treat the CHECK as no-return instructions, so that the
-// trap code can be efficiently packed in the prologue of the function and
-// doesn't interfere with the main execution flow.
-// - When debugging, developers shouldn't be able to accidentally step over a
-// CHECK. This is achieved by putting opcodes that will cause a non
-// continuable exception after the actual trap instruction.
-// - Don't cause too much binary bloat.
+// There are several constraints; see http://crbug.com/664209 for more context.
+//
+// - TRAP_SEQUENCE_() must be fatal. It should not be possible to ignore the
+// resulting exception or simply hit 'continue' to skip over it in a debugger.
+// - Different instances of TRAP_SEQUENCE_() must not be folded together, to
+// ensure crash reports are debuggable. Unlike __builtin_trap(), asm volatile
+// blocks will not be folded together.
+// Note: TRAP_SEQUENCE_() previously required an instruction with a unique
+// nonce since unlike clang, GCC folds together identical asm volatile
+// blocks.
+// - TRAP_SEQUENCE_() must produce a signal that is distinct from an invalid
+// memory access.
+// - TRAP_SEQUENCE_() must be treated as a set of noreturn instructions.
+// __builtin_unreachable() is used to provide that hint here. clang also uses
+// this as a heuristic to pack the instructions in the function epilogue to
+// improve code density.
+//
+// Additional properties that are nice to have:
+// - TRAP_SEQUENCE_() should be as compact as possible.
+// - The first instruction of TRAP_SEQUENCE_() should not change, to avoid
+// shifting crash reporting clusters. As a consequence of this, explicit
+// assembly is preferred over intrinsics.
+// Note: this last bullet point may no longer be true, and may be removed in
+// the future.
+
+// Note: TRAP_SEQUENCE_ is currently split into two macro helpers due to the fact
+// that clang emits an actual instruction for __builtin_unreachable() on certain
+// platforms (see https://crbug.com/958675). In addition, the int3/bkpt/brk will
+// be removed in followups, so splitting it up like this now makes it easy to
+// land the followups.
+
#if defined(COMPILER_GCC)
-#if defined(ARCH_CPU_X86_FAMILY) && !defined(OS_NACL)
-// int 3 will generate a SIGTRAP.
-#define TRAP_SEQUENCE() \
- asm volatile( \
- "int3; ud2; push %0;" ::"i"(static_cast<unsigned char>(__COUNTER__)))
+#if defined(OS_NACL)
+
+// Crash report accuracy is not guaranteed on NaCl.
+#define TRAP_SEQUENCE1_() __builtin_trap()
+#define TRAP_SEQUENCE2_() asm volatile("")
+
+#elif defined(ARCH_CPU_X86_FAMILY)
+
+// TODO(https://crbug.com/958675): In theory, it should be possible to use just
+// int3. However, there are a number of crashes with SIGILL as the exception
+// code, so it seems likely that there's a signal handler that allows execution
+// to continue after SIGTRAP.
+#define TRAP_SEQUENCE1_() asm volatile("int3")
+
+#if defined(OS_MACOSX)
+// Intentionally empty: __builtin_unreachable() is always part of the sequence
+// (see IMMEDIATE_CRASH below) and already emits a ud2 on Mac.
+#define TRAP_SEQUENCE2_() asm volatile("")
+#else
+#define TRAP_SEQUENCE2_() asm volatile("ud2")
+#endif // defined(OS_MACOSX)
+
+#elif defined(ARCH_CPU_ARMEL)
-#elif defined(ARCH_CPU_ARMEL) && !defined(OS_NACL)
// bkpt will generate a SIGBUS when running on armv7 and a SIGTRAP when running
// as a 32 bit userspace app on arm64. There doesn't seem to be any way to
// cause a SIGTRAP from userspace without using a syscall (which would be a
// problem for sandboxing).
-#define TRAP_SEQUENCE() \
- asm volatile("bkpt #0; udf %0;" ::"i"(__COUNTER__ % 256))
+// TODO(https://crbug.com/958675): Remove bkpt from this sequence.
+#define TRAP_SEQUENCE1_() asm volatile("bkpt #0")
+#define TRAP_SEQUENCE2_() asm volatile("udf #0")
+
+#elif defined(ARCH_CPU_ARM64)
-#elif defined(ARCH_CPU_ARM64) && !defined(OS_NACL)
// This will always generate a SIGTRAP on arm64.
-#define TRAP_SEQUENCE() \
- asm volatile("brk #0; hlt %0;" ::"i"(__COUNTER__ % 65536))
+// TODO(https://crbug.com/958675): Remove brk from this sequence.
+#define TRAP_SEQUENCE1_() asm volatile("brk #0")
+#define TRAP_SEQUENCE2_() asm volatile("hlt #0")
#else
+
// Crash report accuracy will not be guaranteed on other architectures, but at
// least this will crash as expected.
-#define TRAP_SEQUENCE() __builtin_trap()
+#define TRAP_SEQUENCE1_() __builtin_trap()
+#define TRAP_SEQUENCE2_() asm volatile("")
+
#endif // ARCH_CPU_*
#elif defined(COMPILER_MSVC)
-// Clang is cleverer about coalescing int3s, so we need to add a unique-ish
-// instruction following the __debugbreak() to have it emit distinct locations
-// for CHECKs rather than collapsing them all together. It would be nice to use
-// a short intrinsic to do this (and perhaps have only one implementation for
-// both clang and MSVC), however clang-cl currently does not support intrinsics.
-// On the flip side, MSVC x64 doesn't support inline asm. So, we have to have
-// two implementations. Normally clang-cl's version will be 5 bytes (1 for
-// `int3`, 2 for `ud2`, 2 for `push byte imm`, however, TODO(scottmg):
-// https://crbug.com/694670 clang-cl doesn't currently support %'ing
-// __COUNTER__, so eventually it will emit the dword form of push.
-// TODO(scottmg): Reinvestigate a short sequence that will work on both
-// compilers once clang supports more intrinsics. See https://crbug.com/693713.
#if !defined(__clang__)
-#define TRAP_SEQUENCE() __debugbreak()
+
+// MSVC x64 doesn't support inline asm, so use the MSVC intrinsic.
+#define TRAP_SEQUENCE1_() __debugbreak()
+#define TRAP_SEQUENCE2_()
+
#elif defined(ARCH_CPU_ARM64)
-#define TRAP_SEQUENCE() \
- __asm volatile("brk #0\n hlt %0\n" ::"i"(__COUNTER__ % 65536));
+
+#define TRAP_SEQUENCE1_() __asm volatile("brk #0\n")
+// Intentionally empty: __builtin_unreachable() is always part of the sequence
+// (see IMMEDIATE_CRASH below) and already emits a ud2 on Win64
+#define TRAP_SEQUENCE2_() __asm volatile("")
+
#else
-#define TRAP_SEQUENCE() ({ {__asm int 3 __asm ud2 __asm push __COUNTER__}; })
+
+#define TRAP_SEQUENCE1_() asm volatile("int3")
+
+#if defined(ARCH_CPU_64_BITS)
+// Intentionally empty: __builtin_unreachable() is always part of the sequence
+// (see IMMEDIATE_CRASH below) and already emits a ud2 on Win64
+#define TRAP_SEQUENCE2_() asm volatile("")
+#else
+#define TRAP_SEQUENCE2_() asm volatile("ud2")
+#endif  // defined(ARCH_CPU_64_BITS)
+
#endif // __clang__
#else
-#error Port
+
+#error No supported trap sequence!
+
#endif // COMPILER_GCC
+#define TRAP_SEQUENCE_() \
+ do { \
+ TRAP_SEQUENCE1_(); \
+ TRAP_SEQUENCE2_(); \
+ } while (false)
+
// CHECK() and the trap sequence can be invoked from a constexpr function.
// This could make compilation fail on GCC, as it forbids directly using inline
// asm inside a constexpr function. However, it allows calling a lambda
@@ -91,24 +139,34 @@
// full name of the lambda will typically include the name of the function that
// calls CHECK() and the debugger will still break at the right line of code.
#if !defined(COMPILER_GCC)
-#define WRAPPED_TRAP_SEQUENCE() TRAP_SEQUENCE()
+
+#define WRAPPED_TRAP_SEQUENCE_() TRAP_SEQUENCE_()
+
#else
-#define WRAPPED_TRAP_SEQUENCE() \
- do { \
- [] { TRAP_SEQUENCE(); }(); \
+
+#define WRAPPED_TRAP_SEQUENCE_() \
+ do { \
+ [] { TRAP_SEQUENCE_(); }(); \
} while (false)
-#endif
+
+#endif // !defined(COMPILER_GCC)
#if defined(__clang__) || defined(COMPILER_GCC)
-#define IMMEDIATE_CRASH() \
- ({ \
- WRAPPED_TRAP_SEQUENCE(); \
- __builtin_unreachable(); \
+
+// __builtin_unreachable() hints to the compiler that this is noreturn and can
+// be packed in the function epilogue.
+#define IMMEDIATE_CRASH() \
+ ({ \
+ WRAPPED_TRAP_SEQUENCE_(); \
+ __builtin_unreachable(); \
})
+
#else
+
// This is supporting non-chromium user of logging.h to build with MSVC, like
// pdfium. On MSVC there is no __builtin_unreachable().
-#define IMMEDIATE_CRASH() WRAPPED_TRAP_SEQUENCE()
-#endif
+#define IMMEDIATE_CRASH() WRAPPED_TRAP_SEQUENCE_()
+
+#endif // defined(__clang__) || defined(COMPILER_GCC)
#endif // BASE_IMMEDIATE_CRASH_H_
diff --git a/chromium/base/immediate_crash_unittest.cc b/chromium/base/immediate_crash_unittest.cc
index 8507bbc1ad3..8fc9d38392c 100644
--- a/chromium/base/immediate_crash_unittest.cc
+++ b/chromium/base/immediate_crash_unittest.cc
@@ -20,6 +20,11 @@
namespace base {
+// Compile test.
+int TestImmediateCrashTreatedAsNoReturn() {
+ IMMEDIATE_CRASH();
+}
+
// iOS is excluded, since it doesn't support loading shared libraries.
#if defined(OS_WIN) || (defined(OS_MACOSX) && !defined(OS_IOS)) || \
defined(OS_LINUX) || defined(OS_ANDROID) || defined(OS_CHROMEOS) || \
@@ -80,27 +85,12 @@ TEST(ImmediateCrashTest, ExpectedOpcodeSequence) {
ASSERT_NE(function_body.end(), it) << "Failed to find return! ";
// Look for two IMMEDIATE_CRASH() opcode sequences.
- base::Optional<uint8_t> nonce;
for (int i = 0; i < 2; ++i) {
// INT 3
EXPECT_EQ(0xCC, *++it);
// UD2
EXPECT_EQ(0x0F, *++it);
EXPECT_EQ(0x0B, *++it);
- // PUSH
- EXPECT_EQ(0x6A, *++it);
- // Immediate nonce argument to PUSH
- if (!nonce) {
- nonce = *++it;
- } else {
- EXPECT_NE(*nonce, *++it);
- }
-#if (defined(OS_WIN) && defined(ARCH_CPU_64_BITS)) || defined(OS_MACOSX)
- // On Windows x64 and Mac, __builtin_unreachable() generates UD2. See
- // https://crbug.com/958373.
- EXPECT_EQ(0x0F, *++it);
- EXPECT_EQ(0x0B, *++it);
-#endif // defined(OS_WIN) || defined(OS_MACOSX)
}
#elif defined(ARCH_CPU_ARMEL)
@@ -129,17 +119,11 @@ TEST(ImmediateCrashTest, ExpectedOpcodeSequence) {
ASSERT_NE(function_body.end(), it) << "Failed to find return! ";
// Look for two IMMEDIATE_CRASH() opcode sequences.
- base::Optional<uint8_t> nonce;
for (int i = 0; i < 2; ++i) {
// BKPT #0
EXPECT_EQ(0xBE00, *++it);
- // UDF #<nonce>
- EXPECT_EQ(0xDE00, *++it & 0xFF00);
- if (!nonce) {
- nonce = *it & 0x00FF;
- } else {
- EXPECT_NE(*nonce, *it & 0x00FF);
- }
+ // UDF #0
+ EXPECT_EQ(0xDE00, *++it);
}
#elif defined(ARCH_CPU_ARM64)
@@ -158,17 +142,11 @@ TEST(ImmediateCrashTest, ExpectedOpcodeSequence) {
ASSERT_NE(function_body.end(), it) << "Failed to find return! ";
// Look for two IMMEDIATE_CRASH() opcode sequences.
- base::Optional<uint16_t> nonce;
for (int i = 0; i < 2; ++i) {
// BRK #0
EXPECT_EQ(0XD4200000, *++it);
- // HLT #<nonce>
- EXPECT_EQ(0xD4400000, *++it & 0xFFE00000);
- if (!nonce) {
- nonce = (*it >> 5) & 0xFFFF;
- } else {
- EXPECT_NE(*nonce, (*it >> 5) & 0xFFFF);
- }
+ // HLT #0
+ EXPECT_EQ(0xD4400000, *++it);
}
#endif // defined(ARCH_CPU_X86_FAMILY)
diff --git a/chromium/base/ios/crb_protocol_observers.mm b/chromium/base/ios/crb_protocol_observers.mm
index 1a3b9f73d22..86a081e8fec 100644
--- a/chromium/base/ios/crb_protocol_observers.mm
+++ b/chromium/base/ios/crb_protocol_observers.mm
@@ -104,7 +104,7 @@ id Iterator::GetNext() {
DCHECK(observer);
DCHECK([observer conformsToProtocol:self.protocol]);
- if (base::ContainsValue(_observers, observer))
+ if (base::Contains(_observers, observer))
return;
_observers.push_back(observer);
diff --git a/chromium/base/ios/ios_util.h b/chromium/base/ios/ios_util.h
index 91b045afb33..0a512301abe 100644
--- a/chromium/base/ios/ios_util.h
+++ b/chromium/base/ios/ios_util.h
@@ -22,6 +22,9 @@ BASE_EXPORT bool IsRunningOnIOS11OrLater();
// Returns whether the operating system is iOS 12 or later.
BASE_EXPORT bool IsRunningOnIOS12OrLater();
+// Returns whether the operating system is iOS 13 or later.
+BASE_EXPORT bool IsRunningOnIOS13OrLater();
+
// Returns whether the operating system is at the given version or later.
BASE_EXPORT bool IsRunningOnOrLater(int32_t major,
int32_t minor,
diff --git a/chromium/base/ios/ios_util.mm b/chromium/base/ios/ios_util.mm
index 608a883034e..a17d19b6826 100644
--- a/chromium/base/ios/ios_util.mm
+++ b/chromium/base/ios/ios_util.mm
@@ -43,6 +43,11 @@ bool IsRunningOnIOS12OrLater() {
return is_running_on_or_later;
}
+bool IsRunningOnIOS13OrLater() {
+ static const bool is_running_on_or_later = IsRunningOnOrLater(13, 0, 0);
+ return is_running_on_or_later;
+}
+
bool IsRunningOnOrLater(int32_t major, int32_t minor, int32_t bug_fix) {
static const int32_t* current_version = OSVersionAsArray();
int32_t version[] = {major, minor, bug_fix};
diff --git a/chromium/base/location.cc b/chromium/base/location.cc
index ed55f09cafe..cf189341c73 100644
--- a/chromium/base/location.cc
+++ b/chromium/base/location.cc
@@ -69,6 +69,25 @@ NOINLINE Location Location::CreateFromHere(const char* function_name,
return Location(function_name, file_name, line_number, RETURN_ADDRESS());
}
+#if SUPPORTS_LOCATION_BUILTINS && BUILDFLAG(ENABLE_LOCATION_SOURCE)
+// static
+NOINLINE Location Location::Current(const char* function_name,
+ const char* file_name,
+ int line_number) {
+ return Location(function_name, file_name, line_number, RETURN_ADDRESS());
+}
+#elif SUPPORTS_LOCATION_BUILTINS
+// static
+NOINLINE Location Location::Current(const char* file_name) {
+ return Location(file_name, RETURN_ADDRESS());
+}
+#else
+// static
+NOINLINE Location Location::Current() {
+ return Location(nullptr, RETURN_ADDRESS());
+}
+#endif
+
//------------------------------------------------------------------------------
NOINLINE const void* GetProgramCounter() {
return RETURN_ADDRESS();
diff --git a/chromium/base/location.h b/chromium/base/location.h
index 491cddae8eb..c07e747fe8f 100644
--- a/chromium/base/location.h
+++ b/chromium/base/location.h
@@ -14,9 +14,23 @@
#include "base/base_export.h"
#include "base/debug/debugging_buildflags.h"
#include "base/hash/hash.h"
+#include "build/build_config.h"
namespace base {
+#if defined(__has_builtin)
+// Clang allows detection of these builtins.
+#define SUPPORTS_LOCATION_BUILTINS \
+ (__has_builtin(__builtin_FUNCTION) && __has_builtin(__builtin_FILE) && \
+ __has_builtin(__builtin_LINE))
+#elif defined(COMPILER_GCC) && __GNUC__ >= 7
+// GCC has supported these for a long time, but they point at the function
+// declaration in the case of default arguments, rather than at the call site.
+#define SUPPORTS_LOCATION_BUILTINS 1
+#else
+#define SUPPORTS_LOCATION_BUILTINS 0
+#endif
+
// Location provides basic info where of an object was constructed, or was
// significantly brought to life.
class BASE_EXPORT Location {
@@ -74,6 +88,16 @@ class BASE_EXPORT Location {
const char* file_name,
int line_number);
+#if SUPPORTS_LOCATION_BUILTINS && BUILDFLAG(ENABLE_LOCATION_SOURCE)
+ static Location Current(const char* function_name = __builtin_FUNCTION(),
+ const char* file_name = __builtin_FILE(),
+ int line_number = __builtin_LINE());
+#elif SUPPORTS_LOCATION_BUILTINS
+ static Location Current(const char* file_name = __builtin_FILE());
+#else
+ static Location Current();
+#endif
+
private:
const char* function_name_ = nullptr;
const char* file_name_ = nullptr;
diff --git a/chromium/base/location_unittest.cc b/chromium/base/location_unittest.cc
new file mode 100644
index 00000000000..154920b231d
--- /dev/null
+++ b/chromium/base/location_unittest.cc
@@ -0,0 +1,39 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/location.h"
+
+#include "base/compiler_specific.h"
+#include "base/debug/debugging_buildflags.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+// This is a typical use: taking Location::Current as a default parameter.
+// So even though this looks contrived, it confirms that such usage works as
+// expected.
+Location WhereAmI(const Location& location = Location::Current()) {
+ return location;
+}
+
+} // namespace
+
+TEST(LocationTest, CurrentYieldsCorrectValue) {
+ int previous_line = __LINE__;
+ Location here = WhereAmI();
+ EXPECT_NE(here.program_counter(), WhereAmI().program_counter());
+#if SUPPORTS_LOCATION_BUILTINS
+ EXPECT_THAT(here.file_name(), ::testing::EndsWith("location_unittest.cc"));
+#if BUILDFLAG(ENABLE_LOCATION_SOURCE)
+ EXPECT_EQ(here.line_number(), previous_line + 1);
+ EXPECT_STREQ("TestBody", here.function_name());
+#endif
+#endif
+ ALLOW_UNUSED_LOCAL(previous_line);
+}
+
+} // namespace base
diff --git a/chromium/base/logging.cc b/chromium/base/logging.cc
index 36b8bfcd922..4c4bfa6af52 100644
--- a/chromium/base/logging.cc
+++ b/chromium/base/logging.cc
@@ -97,7 +97,7 @@ typedef pthread_mutex_t* MutexHandle;
#include "base/debug/debugger.h"
#include "base/debug/stack_trace.h"
#include "base/debug/task_trace.h"
-#include "base/lazy_instance.h"
+#include "base/no_destructor.h"
#include "base/path_service.h"
#include "base/posix/eintr_wrapper.h"
#include "base/strings/string_piece.h"
@@ -166,8 +166,10 @@ bool show_error_dialogs = false;
// An assert handler override specified by the client to be called instead of
// the debug message dialog and process termination. Assert handlers are stored
// in stack to allow overriding and restoring.
-base::LazyInstance<base::stack<LogAssertHandlerFunction>>::Leaky
- log_assert_handler_stack = LAZY_INSTANCE_INITIALIZER;
+base::stack<LogAssertHandlerFunction>& GetLogAssertHandlerStack() {
+ static base::NoDestructor<base::stack<LogAssertHandlerFunction>> instance;
+ return *instance;
+}
// A log message handler that gets notified of every log message we process.
LogMessageHandlerFunction log_message_handler = nullptr;
@@ -480,13 +482,23 @@ bool ShouldCreateLogMessage(int severity) {
if (severity < g_min_log_level)
return false;
- // Return true here unless we know ~LogMessage won't do anything. Note that
- // ~LogMessage writes to stderr if severity_ >= kAlwaysPrintErrorLevel, even
- // when g_logging_destination is LOG_NONE.
+ // Return true here unless we know ~LogMessage won't do anything.
return g_logging_destination != LOG_NONE || log_message_handler ||
severity >= kAlwaysPrintErrorLevel;
}
+// Returns true when LOG_TO_STDERR flag is set, or |severity| is high.
+// If |severity| is high then true will be returned when no log destinations are
+// set, or only LOG_TO_FILE is set, since that is useful for local development
+// and debugging.
+bool ShouldLogToStderr(int severity) {
+ if (g_logging_destination & LOG_TO_STDERR)
+ return true;
+ if (severity >= kAlwaysPrintErrorLevel)
+ return (g_logging_destination & ~LOG_TO_FILE) == LOG_NONE;
+ return false;
+}
+
int GetVlogVerbosity() {
return std::max(-1, LOG_INFO - GetMinLogLevel());
}
@@ -521,11 +533,11 @@ void SetShowErrorDialogs(bool enable_dialogs) {
ScopedLogAssertHandler::ScopedLogAssertHandler(
LogAssertHandlerFunction handler) {
- log_assert_handler_stack.Get().push(std::move(handler));
+ GetLogAssertHandlerStack().push(std::move(handler));
}
ScopedLogAssertHandler::~ScopedLogAssertHandler() {
- log_assert_handler_stack.Get().pop();
+ GetLogAssertHandlerStack().pop();
}
void SetLogMessageHandler(LogMessageHandlerFunction handler) {
@@ -848,11 +860,7 @@ LogMessage::~LogMessage() {
#endif // OS_FUCHSIA
}
- if ((g_logging_destination & LOG_TO_STDERR) != 0 ||
- severity_ >= kAlwaysPrintErrorLevel) {
- // Write logs with destination LOG_TO_STDERR to stderr. Also output to
- // stderr for logs above a certain log level to better detect and diagnose
- // problems with unit tests, especially on the buildbots.
+ if (ShouldLogToStderr(severity_)) {
ignore_result(fwrite(str_newline.data(), str_newline.size(), 1, stderr));
fflush(stderr);
}
@@ -907,10 +915,9 @@ LogMessage::~LogMessage() {
base::size(str_stack.data));
base::debug::Alias(&str_stack);
- if (log_assert_handler_stack.IsCreated() &&
- !log_assert_handler_stack.Get().empty()) {
+ if (!GetLogAssertHandlerStack().empty()) {
LogAssertHandlerFunction log_assert_handler =
- log_assert_handler_stack.Get().top();
+ GetLogAssertHandlerStack().top();
if (log_assert_handler) {
log_assert_handler.Run(
diff --git a/chromium/base/logging.h b/chromium/base/logging.h
index bab26ef57b9..c29357952fd 100644
--- a/chromium/base/logging.h
+++ b/chromium/base/logging.h
@@ -597,6 +597,16 @@ MakeCheckOpValueString(std::ostream* os, const T& v) {
(*os) << v;
}
+// Overload for types that have no operator<< but do have .ToString() defined.
+template <typename T>
+inline typename std::enable_if<
+ !base::internal::SupportsOstreamOperator<const T&>::value &&
+ base::internal::SupportsToString<const T&>::value,
+ void>::type
+MakeCheckOpValueString(std::ostream* os, const T& v) {
+ (*os) << v.ToString();
+}
+
// Provide an overload for functions and function pointers. Function pointers
// don't implicitly convert to void* but do implicitly convert to bool, so
// without this function pointers are always printed as 1 or 0. (MSVC isn't
diff --git a/chromium/base/logging_unittest.cc b/chromium/base/logging_unittest.cc
index b316176c21c..265ad280bdf 100644
--- a/chromium/base/logging_unittest.cc
+++ b/chromium/base/logging_unittest.cc
@@ -2,10 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "base/logging.h"
+#include <sstream>
+
#include "base/bind.h"
#include "base/callback.h"
#include "base/compiler_specific.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/logging.h"
#include "base/macros.h"
#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
@@ -36,15 +40,17 @@
#include <fuchsia/logger/cpp/fidl.h>
#include <fuchsia/logger/cpp/fidl_test_base.h>
#include <lib/fidl/cpp/binding.h>
+#include <lib/zx/channel.h>
#include <lib/zx/event.h>
-#include <lib/zx/port.h>
+#include <lib/zx/exception.h>
#include <lib/zx/process.h>
#include <lib/zx/thread.h>
#include <lib/zx/time.h>
#include <zircon/process.h>
#include <zircon/syscalls/debug.h>
-#include <zircon/syscalls/port.h>
+#include <zircon/syscalls/exception.h>
#include <zircon/types.h>
+
#include "base/fuchsia/fuchsia_logging.h"
#include "base/fuchsia/service_directory_client.h"
#endif // OS_FUCHSIA
@@ -234,6 +240,81 @@ TEST_F(LoggingTest, LogToStdErrFlag) {
LOG(INFO) << mock_log_source_stderr.Log();
}
+// Check that messages with severity ERROR or higher are always logged to
+// stderr if no log-destinations are set, other than LOG_TO_FILE.
+// This test is currently only POSIX-compatible.
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
+namespace {
+void TestForLogToStderr(int log_destinations,
+ bool* did_log_info,
+ bool* did_log_error) {
+ const char kInfoLogMessage[] = "This is an INFO level message";
+ const char kErrorLogMessage[] = "Here we have a message of level ERROR";
+ base::ScopedTempDir temp_dir;
+ ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+
+ // Set up logging.
+ LoggingSettings settings;
+ settings.logging_dest = log_destinations;
+ base::FilePath file_logs_path;
+ if (log_destinations & LOG_TO_FILE) {
+ file_logs_path = temp_dir.GetPath().Append("file.log");
+ settings.log_file = file_logs_path.value().c_str();
+ }
+ InitLogging(settings);
+
+ // Create a file and change stderr to write to that file, to easily check
+ // contents.
+ base::FilePath stderr_logs_path = temp_dir.GetPath().Append("stderr.log");
+ base::File stderr_logs = base::File(
+ stderr_logs_path,
+ base::File::FLAG_CREATE | base::File::FLAG_WRITE | base::File::FLAG_READ);
+ base::ScopedFD stderr_backup = base::ScopedFD(dup(STDERR_FILENO));
+ int dup_result = dup2(stderr_logs.GetPlatformFile(), STDERR_FILENO);
+ ASSERT_EQ(dup_result, STDERR_FILENO);
+
+ LOG(INFO) << kInfoLogMessage;
+ LOG(ERROR) << kErrorLogMessage;
+
+ // Restore the original stderr logging destination.
+ dup_result = dup2(stderr_backup.get(), STDERR_FILENO);
+ ASSERT_EQ(dup_result, STDERR_FILENO);
+
+ // Check which of the messages were written to stderr.
+ std::string written_logs;
+ ASSERT_TRUE(base::ReadFileToString(stderr_logs_path, &written_logs));
+ *did_log_info = written_logs.find(kInfoLogMessage) != std::string::npos;
+ *did_log_error = written_logs.find(kErrorLogMessage) != std::string::npos;
+}
+} // namespace
+
+TEST_F(LoggingTest, AlwaysLogErrorsToStderr) {
+ bool did_log_info = false;
+ bool did_log_error = false;
+
+ // When no destinations are specified, ERRORs should still log to stderr.
+ TestForLogToStderr(LOG_NONE, &did_log_info, &did_log_error);
+ EXPECT_FALSE(did_log_info);
+ EXPECT_TRUE(did_log_error);
+
+ // Logging only to a file should also log ERRORs to stderr as well.
+ TestForLogToStderr(LOG_TO_FILE, &did_log_info, &did_log_error);
+ EXPECT_FALSE(did_log_info);
+ EXPECT_TRUE(did_log_error);
+
+ // ERRORs should not be logged to stderr if any destination besides FILE is
+ // set.
+ TestForLogToStderr(LOG_TO_SYSTEM_DEBUG_LOG, &did_log_info, &did_log_error);
+ EXPECT_FALSE(did_log_info);
+ EXPECT_FALSE(did_log_error);
+
+ // Both ERRORs and INFO should be logged if LOG_TO_STDERR is set.
+ TestForLogToStderr(LOG_TO_STDERR, &did_log_info, &did_log_error);
+ EXPECT_TRUE(did_log_info);
+ EXPECT_TRUE(did_log_error);
+}
+#endif
+
// Official builds have CHECKs directly call BreakDebugger.
#if !defined(OFFICIAL_BUILD)
@@ -330,31 +411,33 @@ TEST_F(LoggingTest, CheckCausesDistinctBreakpoints) {
}
#endif
-static const unsigned int kExceptionPortKey = 1u;
-static const unsigned int kThreadEndedPortKey = 2u;
-
struct thread_data_t {
// For signaling the thread ended properly.
- zx::unowned_event event;
- // For registering thread termination.
- zx::unowned_port port;
+ zx::event event;
+ // For catching thread exceptions. Created by the crashing thread.
+ zx::channel channel;
// Location where the thread is expected to crash.
int death_location;
};
-void* CrashThread(void* arg) {
- zx_status_t status;
+// Indicates the exception channel has been created successfully.
+constexpr zx_signals_t kChannelReadySignal = ZX_USER_SIGNAL_0;
+
+// Indicates an error setting up the crash thread.
+constexpr zx_signals_t kCrashThreadErrorSignal = ZX_USER_SIGNAL_1;
+void* CrashThread(void* arg) {
thread_data_t* data = (thread_data_t*)arg;
int death_location = data->death_location;
- // Register the exception handler on the port.
- status = zx::thread::self()->bind_exception_port(*data->port,
- kExceptionPortKey, 0);
+ // Register the exception handler.
+ zx_status_t status =
+ zx::thread::self()->create_exception_channel(0, &data->channel);
if (status != ZX_OK) {
- data->event->signal(0, ZX_USER_SIGNAL_0);
+ data->event.signal(0, kCrashThreadErrorSignal);
return nullptr;
}
+ data->event.signal(0, kChannelReadySignal);
DO_CHECK(death_location != 1);
DO_CHECK(death_location != 2);
@@ -362,44 +445,48 @@ void* CrashThread(void* arg) {
// We should never reach this point, signal the thread incorrectly ended
// properly.
- data->event->signal(0, ZX_USER_SIGNAL_0);
+ data->event.signal(0, kCrashThreadErrorSignal);
return nullptr;
}
// Runs the CrashThread function in a separate thread.
void SpawnCrashThread(int death_location, uintptr_t* child_crash_addr) {
- zx::port port;
zx::event event;
- zx_status_t status;
-
- status = zx::port::create(0, &port);
- ASSERT_EQ(status, ZX_OK);
- status = zx::event::create(0, &event);
- ASSERT_EQ(status, ZX_OK);
-
- // Register the thread ended event on the port.
- status = event.wait_async(port, kThreadEndedPortKey, ZX_USER_SIGNAL_0,
- ZX_WAIT_ASYNC_ONCE);
+ zx_status_t status = zx::event::create(0, &event);
ASSERT_EQ(status, ZX_OK);
// Run the thread.
- thread_data_t thread_data = {zx::unowned_event(event), zx::unowned_port(port),
- death_location};
+ thread_data_t thread_data = {std::move(event), zx::channel(), death_location};
pthread_t thread;
int ret = pthread_create(&thread, nullptr, CrashThread, &thread_data);
ASSERT_EQ(ret, 0);
- // Wait on the port.
- zx_port_packet_t packet;
- status = port.wait(zx::time::infinite(), &packet);
+ // Wait for the thread to set up its exception channel.
+ zx_signals_t signals = 0;
+ status =
+ thread_data.event.wait_one(kChannelReadySignal | kCrashThreadErrorSignal,
+ zx::time::infinite(), &signals);
+ ASSERT_EQ(status, ZX_OK);
+ ASSERT_EQ(signals, kChannelReadySignal);
+
+ // Wait for the exception and read it out of the channel.
+ status =
+ thread_data.channel.wait_one(ZX_CHANNEL_READABLE | ZX_CHANNEL_PEER_CLOSED,
+ zx::time::infinite(), &signals);
ASSERT_EQ(status, ZX_OK);
// Check the thread did crash and not terminate.
- ASSERT_EQ(packet.key, kExceptionPortKey);
+ ASSERT_FALSE(signals & ZX_CHANNEL_PEER_CLOSED);
+
+ zx_exception_info_t exception_info;
+ zx::exception exception;
+ status = thread_data.channel.read(
+ 0, &exception_info, exception.reset_and_get_address(),
+ sizeof(exception_info), 1, nullptr, nullptr);
+ ASSERT_EQ(status, ZX_OK);
// Get the crash address.
zx::thread zircon_thread;
- status = zx::process::self()->get_child(packet.exception.tid,
- ZX_RIGHT_SAME_RIGHTS, &zircon_thread);
+ status = exception.get_thread(&zircon_thread);
ASSERT_EQ(status, ZX_OK);
zx_thread_state_general_regs_t buffer;
status = zircon_thread.read_state(ZX_THREAD_STATE_GENERAL_REGS, &buffer,
@@ -977,6 +1064,46 @@ TEST_F(LoggingTest, LogMessageMarkersOnStack) {
}
#endif // !defined(ADDRESS_SANITIZER)
+const char* kToStringResult = "to_string";
+const char* kOstreamResult = "ostream";
+
+struct StructWithOstream {};
+
+std::ostream& operator<<(std::ostream& out, const StructWithOstream&) {
+ return out << kOstreamResult;
+}
+
+TEST(MakeCheckOpValueStringTest, HasOnlyOstream) {
+ std::ostringstream oss;
+ logging::MakeCheckOpValueString(&oss, StructWithOstream());
+ EXPECT_EQ(kOstreamResult, oss.str());
+}
+
+struct StructWithToString {
+ std::string ToString() const { return kToStringResult; }
+};
+
+TEST(MakeCheckOpValueStringTest, HasOnlyToString) {
+ std::ostringstream oss;
+ logging::MakeCheckOpValueString(&oss, StructWithToString());
+ EXPECT_EQ(kToStringResult, oss.str());
+}
+
+struct StructWithToStringAndOstream {
+ std::string ToString() const { return kToStringResult; }
+};
+
+std::ostream& operator<<(std::ostream& out,
+ const StructWithToStringAndOstream&) {
+ return out << kOstreamResult;
+}
+
+TEST(MakeCheckOpValueStringTest, HasOstreamAndToString) {
+ std::ostringstream oss;
+ logging::MakeCheckOpValueString(&oss, StructWithToStringAndOstream());
+ EXPECT_EQ(kOstreamResult, oss.str());
+}
+
} // namespace
} // namespace logging
diff --git a/chromium/base/mac/foundation_util.h b/chromium/base/mac/foundation_util.h
index 7ddecaa33c0..19dc0892b5b 100644
--- a/chromium/base/mac/foundation_util.h
+++ b/chromium/base/mac/foundation_util.h
@@ -5,6 +5,7 @@
#ifndef BASE_MAC_FOUNDATION_UTIL_H_
#define BASE_MAC_FOUNDATION_UTIL_H_
+#include <AvailabilityMacros.h>
#include <CoreFoundation/CoreFoundation.h>
#include <string>
@@ -52,7 +53,7 @@ typedef CR_FORWARD_ENUM(unsigned int, NSSearchPathDirectory);
typedef unsigned int NSSearchPathDomainMask;
#endif
-#if defined(OS_IOS)
+#if defined(OS_IOS) || defined(MAC_OS_X_VERSION_10_15)
typedef struct CF_BRIDGED_TYPE(id) __SecCertificate* SecCertificateRef;
typedef struct CF_BRIDGED_TYPE(id) __SecKey* SecKeyRef;
typedef struct CF_BRIDGED_TYPE(id) __SecPolicy* SecPolicyRef;
diff --git a/chromium/base/mac/mac_util.h b/chromium/base/mac/mac_util.h
index 2ff62ec582d..3b575288f8e 100644
--- a/chromium/base/mac/mac_util.h
+++ b/chromium/base/mac/mac_util.h
@@ -19,7 +19,7 @@ class FilePath;
namespace mac {
// Full screen modes, in increasing order of priority. More permissive modes
-// take predecence.
+// take precedence.
enum FullScreenMode {
kFullScreenModeHideAll = 0,
kFullScreenModeHideDock = 1,
@@ -112,23 +112,27 @@ BASE_EXPORT int MacOSXMinorVersion();
} // namespace internal
-// Run-time OS version checks. Use these instead of
+// Run-time OS version checks. Prefer @available in Objective-C files. If that
+// is not possible, use these functions instead of
// base::SysInfo::OperatingSystemVersionNumbers. Prefer the "AtLeast" and
-// "AtMost" variants to those that check for a specific version, unless you
-// know for sure that you need to check for a specific version.
-
-#define DEFINE_IS_OS_FUNCS(V, TEST_DEPLOYMENT_TARGET) \
- inline bool IsOS10_##V() { \
- TEST_DEPLOYMENT_TARGET(>, V, false) \
- return internal::MacOSXMinorVersion() == V; \
- } \
- inline bool IsAtLeastOS10_##V() { \
- TEST_DEPLOYMENT_TARGET(>=, V, true) \
- return internal::MacOSXMinorVersion() >= V; \
- } \
- inline bool IsAtMostOS10_##V() { \
- TEST_DEPLOYMENT_TARGET(>, V, false) \
- return internal::MacOSXMinorVersion() <= V; \
+// "AtMost" variants to those that check for a specific version, unless you know
+// for sure that you need to check for a specific version.
+
+#define DEFINE_IS_OS_FUNCS_CR_MIN_REQUIRED(V, TEST_DEPLOYMENT_TARGET) \
+ inline bool IsOS10_##V() { \
+ TEST_DEPLOYMENT_TARGET(>, V, false) \
+ return internal::MacOSXMinorVersion() == V; \
+ } \
+ inline bool IsAtMostOS10_##V() { \
+ TEST_DEPLOYMENT_TARGET(>, V, false) \
+ return internal::MacOSXMinorVersion() <= V; \
+ }
+
+#define DEFINE_IS_OS_FUNCS(V, TEST_DEPLOYMENT_TARGET) \
+ DEFINE_IS_OS_FUNCS_CR_MIN_REQUIRED(V, TEST_DEPLOYMENT_TARGET) \
+ inline bool IsAtLeastOS10_##V() { \
+ TEST_DEPLOYMENT_TARGET(>=, V, true) \
+ return internal::MacOSXMinorVersion() >= V; \
}
#define TEST_DEPLOYMENT_TARGET(OP, V, RET) \
@@ -136,26 +140,19 @@ BASE_EXPORT int MacOSXMinorVersion();
return RET;
#define IGNORE_DEPLOYMENT_TARGET(OP, V, RET)
-DEFINE_IS_OS_FUNCS(9, TEST_DEPLOYMENT_TARGET)
-DEFINE_IS_OS_FUNCS(10, TEST_DEPLOYMENT_TARGET)
+// Notes:
+// - When bumping the minimum version of the macOS required by Chromium, remove
+// lines from below corresponding to versions of the macOS no longer
+// supported. Ensure that the minimum supported version uses the
+// DEFINE_IS_OS_FUNCS_CR_MIN_REQUIRED macro.
+// - When bumping the minimum version of the macOS SDK required to build
+// Chromium, remove the #ifdef that switches between TEST_DEPLOYMENT_TARGET
+// and IGNORE_DEPLOYMENT_TARGET.
-#ifdef MAC_OS_X_VERSION_10_11
+DEFINE_IS_OS_FUNCS_CR_MIN_REQUIRED(10, TEST_DEPLOYMENT_TARGET)
DEFINE_IS_OS_FUNCS(11, TEST_DEPLOYMENT_TARGET)
-#else
-DEFINE_IS_OS_FUNCS(11, IGNORE_DEPLOYMENT_TARGET)
-#endif
-
-#ifdef MAC_OS_X_VERSION_10_12
DEFINE_IS_OS_FUNCS(12, TEST_DEPLOYMENT_TARGET)
-#else
-DEFINE_IS_OS_FUNCS(12, IGNORE_DEPLOYMENT_TARGET)
-#endif
-
-#ifdef MAC_OS_X_VERSION_10_13
DEFINE_IS_OS_FUNCS(13, TEST_DEPLOYMENT_TARGET)
-#else
-DEFINE_IS_OS_FUNCS(13, IGNORE_DEPLOYMENT_TARGET)
-#endif
#ifdef MAC_OS_X_VERSION_10_14
DEFINE_IS_OS_FUNCS(14, TEST_DEPLOYMENT_TARGET)
@@ -163,15 +160,22 @@ DEFINE_IS_OS_FUNCS(14, TEST_DEPLOYMENT_TARGET)
DEFINE_IS_OS_FUNCS(14, IGNORE_DEPLOYMENT_TARGET)
#endif
+#ifdef MAC_OS_X_VERSION_10_15
+DEFINE_IS_OS_FUNCS(15, TEST_DEPLOYMENT_TARGET)
+#else
+DEFINE_IS_OS_FUNCS(15, IGNORE_DEPLOYMENT_TARGET)
+#endif
+
#undef IGNORE_DEPLOYMENT_TARGET
#undef TEST_DEPLOYMENT_TARGET
+#undef DEFINE_IS_OS_FUNCS_CR_MIN_REQUIRED
#undef DEFINE_IS_OS_FUNCS
// This should be infrequently used. It only makes sense to use this to avoid
// codepaths that are very likely to break on future (unreleased, untested,
// unborn) OS releases, or to log when the OS is newer than any known version.
-inline bool IsOSLaterThan10_14_DontCallThis() {
- return !IsAtMostOS10_14();
+inline bool IsOSLaterThan10_15_DontCallThis() {
+ return !IsAtMostOS10_15();
}
// Retrieve the system's model identifier string from the IOKit registry:
@@ -186,6 +190,14 @@ BASE_EXPORT bool ParseModelIdentifier(const std::string& ident,
int32_t* major,
int32_t* minor);
+// Returns an OS name + version string. e.g.:
+//
+// "macOS Version 10.14.3 (Build 18D109)"
+//
+// Parts of this string change based on OS locale, so it's only useful for
+// displaying to the user.
+BASE_EXPORT std::string GetOSDisplayName();
+
} // namespace mac
} // namespace base
diff --git a/chromium/base/mac/mac_util.mm b/chromium/base/mac/mac_util.mm
index 9a6b8fdb5da..ccd4bc47f2c 100644
--- a/chromium/base/mac/mac_util.mm
+++ b/chromium/base/mac/mac_util.mm
@@ -420,11 +420,10 @@ int MacOSXMinorVersionInternal() {
// version for Darwin versions beginning with 6, corresponding to Mac OS X
// 10.2. Since this correspondence may change in the future, warn when
// encountering a version higher than anything seen before. Older Darwin
- // versions, or versions that can't be determined, result in
- // immediate death.
+ // versions, or versions that can't be determined, result in immediate death.
CHECK(darwin_major_version >= 6);
int mac_os_x_minor_version = darwin_major_version - 4;
- DLOG_IF(WARNING, darwin_major_version > 18)
+ DLOG_IF(WARNING, darwin_major_version > 19)
<< "Assuming Darwin " << base::NumberToString(darwin_major_version)
<< " is macOS 10." << base::NumberToString(mac_os_x_minor_version);
@@ -483,5 +482,16 @@ bool ParseModelIdentifier(const std::string& ident,
return true;
}
+std::string GetOSDisplayName() {
+ std::string os_name;
+ if (IsAtMostOS10_11())
+ os_name = "OS X";
+ else
+ os_name = "macOS";
+ std::string version_string = base::SysNSStringToUTF8(
+ [[NSProcessInfo processInfo] operatingSystemVersionString]);
+ return os_name + " " + version_string;
+}
+
} // namespace mac
} // namespace base
diff --git a/chromium/base/mac/mac_util_unittest.mm b/chromium/base/mac/mac_util_unittest.mm
index c1585be252f..645dc34d6cd 100644
--- a/chromium/base/mac/mac_util_unittest.mm
+++ b/chromium/base/mac/mac_util_unittest.mm
@@ -143,168 +143,90 @@ TEST_F(MacUtilTest, IsOSEllipsis) {
// - FALSE/TRUE/FALSE (it is not the later version, it is "at most" the later
// version, it is not "at least" the later version)
- // TODO(avi): Is there a better way to test this? Maybe with macros? Are
- // macros a better way to test this?
+#define TEST_FOR_PAST_OS(V) \
+ EXPECT_FALSE(IsOS10_##V()); \
+ EXPECT_FALSE(IsAtMostOS10_##V()); \
+ EXPECT_TRUE(IsAtLeastOS10_##V());
- if (major == 10) {
- if (minor == 9) {
- EXPECT_TRUE(IsOS10_9());
- EXPECT_TRUE(IsAtMostOS10_9());
- EXPECT_TRUE(IsAtLeastOS10_9());
-
- EXPECT_FALSE(IsOS10_10());
- EXPECT_TRUE(IsAtMostOS10_10());
- EXPECT_FALSE(IsAtLeastOS10_10());
-
- EXPECT_FALSE(IsOS10_11());
- EXPECT_TRUE(IsAtMostOS10_11());
- EXPECT_FALSE(IsAtLeastOS10_11());
-
- EXPECT_FALSE(IsOS10_12());
- EXPECT_TRUE(IsAtMostOS10_12());
- EXPECT_FALSE(IsAtLeastOS10_12());
-
- EXPECT_FALSE(IsOS10_13());
- EXPECT_TRUE(IsAtMostOS10_13());
- EXPECT_FALSE(IsAtLeastOS10_13());
-
- EXPECT_FALSE(IsOS10_14());
- EXPECT_TRUE(IsAtMostOS10_14());
- EXPECT_FALSE(IsAtLeastOS10_14());
+#define TEST_FOR_SAME_OS(V) \
+ EXPECT_TRUE(IsOS10_##V()); \
+ EXPECT_TRUE(IsAtMostOS10_##V()); \
+ EXPECT_TRUE(IsAtLeastOS10_##V());
- EXPECT_FALSE(IsOSLaterThan10_14_DontCallThis());
- } else if (minor == 10) {
- EXPECT_FALSE(IsOS10_9());
- EXPECT_FALSE(IsAtMostOS10_9());
- EXPECT_TRUE(IsAtLeastOS10_9());
+#define TEST_FOR_FUTURE_OS(V) \
+ EXPECT_FALSE(IsOS10_##V()); \
+ EXPECT_TRUE(IsAtMostOS10_##V()); \
+ EXPECT_FALSE(IsAtLeastOS10_##V());
+ if (major == 10) {
+ if (minor == 10) {
EXPECT_TRUE(IsOS10_10());
EXPECT_TRUE(IsAtMostOS10_10());
- EXPECT_TRUE(IsAtLeastOS10_10());
-
- EXPECT_FALSE(IsOS10_11());
- EXPECT_TRUE(IsAtMostOS10_11());
- EXPECT_FALSE(IsAtLeastOS10_11());
-
- EXPECT_FALSE(IsOS10_12());
- EXPECT_TRUE(IsAtMostOS10_12());
- EXPECT_FALSE(IsAtLeastOS10_12());
-
- EXPECT_FALSE(IsOS10_13());
- EXPECT_TRUE(IsAtMostOS10_13());
- EXPECT_FALSE(IsAtLeastOS10_13());
- EXPECT_FALSE(IsOS10_14());
- EXPECT_TRUE(IsAtMostOS10_14());
- EXPECT_FALSE(IsAtLeastOS10_14());
+ TEST_FOR_FUTURE_OS(11);
+ TEST_FOR_FUTURE_OS(12);
+ TEST_FOR_FUTURE_OS(13);
+ TEST_FOR_FUTURE_OS(14);
+ TEST_FOR_FUTURE_OS(15);
- EXPECT_FALSE(IsOSLaterThan10_14_DontCallThis());
+ EXPECT_FALSE(IsOSLaterThan10_15_DontCallThis());
} else if (minor == 11) {
- EXPECT_FALSE(IsOS10_9());
- EXPECT_FALSE(IsAtMostOS10_9());
- EXPECT_TRUE(IsAtLeastOS10_9());
-
EXPECT_FALSE(IsOS10_10());
EXPECT_FALSE(IsAtMostOS10_10());
- EXPECT_TRUE(IsAtLeastOS10_10());
-
- EXPECT_TRUE(IsOS10_11());
- EXPECT_TRUE(IsAtMostOS10_11());
- EXPECT_TRUE(IsAtLeastOS10_11());
-
- EXPECT_FALSE(IsOS10_12());
- EXPECT_TRUE(IsAtMostOS10_12());
- EXPECT_FALSE(IsAtLeastOS10_12());
- EXPECT_FALSE(IsOS10_13());
- EXPECT_TRUE(IsAtMostOS10_13());
- EXPECT_FALSE(IsAtLeastOS10_13());
+ TEST_FOR_SAME_OS(11);
+ TEST_FOR_FUTURE_OS(12);
+ TEST_FOR_FUTURE_OS(13);
+ TEST_FOR_FUTURE_OS(14);
+ TEST_FOR_FUTURE_OS(15);
- EXPECT_FALSE(IsOS10_14());
- EXPECT_TRUE(IsAtMostOS10_14());
- EXPECT_FALSE(IsAtLeastOS10_14());
-
- EXPECT_FALSE(IsOSLaterThan10_14_DontCallThis());
+ EXPECT_FALSE(IsOSLaterThan10_15_DontCallThis());
} else if (minor == 12) {
- EXPECT_FALSE(IsOS10_9());
- EXPECT_FALSE(IsAtMostOS10_9());
- EXPECT_TRUE(IsAtLeastOS10_9());
-
EXPECT_FALSE(IsOS10_10());
EXPECT_FALSE(IsAtMostOS10_10());
- EXPECT_TRUE(IsAtLeastOS10_10());
-
- EXPECT_FALSE(IsOS10_11());
- EXPECT_FALSE(IsAtMostOS10_11());
- EXPECT_TRUE(IsAtLeastOS10_11());
-
- EXPECT_TRUE(IsOS10_12());
- EXPECT_TRUE(IsAtMostOS10_12());
- EXPECT_TRUE(IsAtLeastOS10_12());
- EXPECT_FALSE(IsOS10_13());
- EXPECT_TRUE(IsAtMostOS10_13());
- EXPECT_FALSE(IsAtLeastOS10_13());
+ TEST_FOR_PAST_OS(11);
+ TEST_FOR_SAME_OS(12);
+ TEST_FOR_FUTURE_OS(13);
+ TEST_FOR_FUTURE_OS(14);
+ TEST_FOR_FUTURE_OS(15);
- EXPECT_FALSE(IsOS10_14());
- EXPECT_TRUE(IsAtMostOS10_14());
- EXPECT_FALSE(IsAtLeastOS10_14());
-
- EXPECT_FALSE(IsOSLaterThan10_14_DontCallThis());
+ EXPECT_FALSE(IsOSLaterThan10_15_DontCallThis());
} else if (minor == 13) {
- EXPECT_FALSE(IsOS10_9());
- EXPECT_FALSE(IsAtMostOS10_9());
- EXPECT_TRUE(IsAtLeastOS10_9());
-
EXPECT_FALSE(IsOS10_10());
EXPECT_FALSE(IsAtMostOS10_10());
- EXPECT_TRUE(IsAtLeastOS10_10());
-
- EXPECT_FALSE(IsOS10_11());
- EXPECT_FALSE(IsAtMostOS10_11());
- EXPECT_TRUE(IsAtLeastOS10_11());
- EXPECT_FALSE(IsOS10_12());
- EXPECT_FALSE(IsAtMostOS10_12());
- EXPECT_TRUE(IsAtLeastOS10_12());
+ TEST_FOR_PAST_OS(11);
+ TEST_FOR_PAST_OS(12);
+ TEST_FOR_SAME_OS(13);
+ TEST_FOR_FUTURE_OS(14);
+ TEST_FOR_FUTURE_OS(15);
- EXPECT_TRUE(IsOS10_13());
- EXPECT_TRUE(IsAtMostOS10_13());
- EXPECT_TRUE(IsAtLeastOS10_13());
-
- EXPECT_FALSE(IsOS10_14());
- EXPECT_TRUE(IsAtMostOS10_14());
- EXPECT_FALSE(IsAtLeastOS10_14());
-
- EXPECT_FALSE(IsOSLaterThan10_14_DontCallThis());
+ EXPECT_FALSE(IsOSLaterThan10_15_DontCallThis());
} else if (minor == 14) {
- EXPECT_FALSE(IsOS10_9());
- EXPECT_FALSE(IsAtMostOS10_9());
- EXPECT_TRUE(IsAtLeastOS10_9());
-
EXPECT_FALSE(IsOS10_10());
EXPECT_FALSE(IsAtMostOS10_10());
- EXPECT_TRUE(IsAtLeastOS10_10());
-
- EXPECT_FALSE(IsOS10_11());
- EXPECT_FALSE(IsAtMostOS10_11());
- EXPECT_TRUE(IsAtLeastOS10_11());
- EXPECT_FALSE(IsOS10_12());
- EXPECT_FALSE(IsAtMostOS10_12());
- EXPECT_TRUE(IsAtLeastOS10_12());
+ TEST_FOR_PAST_OS(11);
+ TEST_FOR_PAST_OS(12);
+ TEST_FOR_PAST_OS(13);
+ TEST_FOR_SAME_OS(14);
+ TEST_FOR_FUTURE_OS(15);
- EXPECT_FALSE(IsOS10_13());
- EXPECT_FALSE(IsAtMostOS10_13());
- EXPECT_TRUE(IsAtLeastOS10_13());
+ EXPECT_FALSE(IsOSLaterThan10_15_DontCallThis());
+ } else if (minor == 15) {
+ EXPECT_FALSE(IsOS10_10());
+ EXPECT_FALSE(IsAtMostOS10_10());
- EXPECT_TRUE(IsOS10_14());
- EXPECT_TRUE(IsAtMostOS10_14());
- EXPECT_TRUE(IsAtLeastOS10_14());
+ TEST_FOR_PAST_OS(11);
+ TEST_FOR_PAST_OS(12);
+ TEST_FOR_PAST_OS(13);
+ TEST_FOR_PAST_OS(14);
+ TEST_FOR_SAME_OS(15);
- EXPECT_FALSE(IsOSLaterThan10_14_DontCallThis());
+ EXPECT_FALSE(IsOSLaterThan10_15_DontCallThis());
} else {
- // Not nine, ten, eleven, twelve, thirteen, or fourteen. Ah, ah, ah.
+ // Not ten, eleven, twelve, thirteen, fourteen, or fifteen. Ah, ah, ah.
EXPECT_TRUE(false);
}
} else {
@@ -313,6 +235,10 @@ TEST_F(MacUtilTest, IsOSEllipsis) {
}
}
+#undef TEST_FOR_PAST_OS
+#undef TEST_FOR_SAME_OS
+#undef TEST_FOR_FUTURE_OS
+
TEST_F(MacUtilTest, ParseModelIdentifier) {
std::string model;
int32_t major = 1, minor = 2;
diff --git a/chromium/base/mac/mach_port_broker.h b/chromium/base/mac/mach_port_broker.h
deleted file mode 100644
index 41e58200c3c..00000000000
--- a/chromium/base/mac/mach_port_broker.h
+++ /dev/null
@@ -1,110 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MAC_MACH_PORT_BROKER_H_
-#define BASE_MAC_MACH_PORT_BROKER_H_
-
-#include <mach/mach.h>
-
-#include <map>
-#include <memory>
-#include <string>
-
-#include "base/base_export.h"
-#include "base/mac/dispatch_source_mach.h"
-#include "base/mac/scoped_mach_port.h"
-#include "base/macros.h"
-#include "base/process/port_provider_mac.h"
-#include "base/process/process_handle.h"
-#include "base/synchronization/lock.h"
-
-namespace base {
-
-// On OS X, the task port of a process is required to collect metrics about the
-// process, and to insert Mach ports into the process. Running |task_for_pid()|
-// is only allowed for privileged code. However, a process has port rights to
-// all its subprocesses, so let the child processes send their Mach port to the
-// parent over IPC.
-//
-// Mach ports can only be sent over Mach IPC, not over the |socketpair()| that
-// the regular IPC system uses. Hence, the child processes opens a Mach
-// connection shortly after launching and ipc their mach data to the parent
-// process. A single |MachPortBroker| with a given name is expected to exist in
-// the parent process.
-//
-// Since this data arrives over a separate channel, it is not available
-// immediately after a child process has been started.
-class BASE_EXPORT MachPortBroker : public base::PortProvider {
- public:
- // For use in child processes. This will send the task port of the current
- // process over Mach IPC to the port registered by name (via this class) in
- // the parent process. Returns true if the message was sent successfully
- // and false if otherwise.
- static bool ChildSendTaskPortToParent(const std::string& name);
-
- // Returns the Mach port name to use when sending or receiving messages.
- // Does the Right Thing in the browser and in child processes.
- static std::string GetMachPortName(const std::string& name, bool is_child);
-
- MachPortBroker(const std::string& name);
- ~MachPortBroker() override;
-
- // Performs any initialization work.
- bool Init();
-
- // Adds a placeholder to the map for the given pid with MACH_PORT_NULL.
- // Callers are expected to later update the port with FinalizePid(). Callers
- // MUST acquire the lock given by GetLock() before calling this method (and
- // release the lock afterwards).
- void AddPlaceholderForPid(base::ProcessHandle pid);
-
- // Removes |pid| from the task port map. Callers MUST acquire the lock given
- // by GetLock() before calling this method (and release the lock afterwards).
- void InvalidatePid(base::ProcessHandle pid);
-
- // The lock that protects this MachPortBroker object. Callers MUST acquire
- // and release this lock around calls to AddPlaceholderForPid(),
- // InvalidatePid(), and FinalizePid();
- base::Lock& GetLock() { return lock_; }
-
- // Implement |base::PortProvider|.
- mach_port_t TaskForPid(base::ProcessHandle process) const override;
-
- private:
- friend class MachPortBrokerTest;
-
- // Message handler that is invoked on |dispatch_source_| when an
- // incoming message needs to be received.
- void HandleRequest();
-
- // Updates the mapping for |pid| to include the given |mach_info|. Does
- // nothing if PlaceholderForPid() has not already been called for the given
- // |pid|. Callers MUST acquire the lock given by GetLock() before calling
- // this method (and release the lock afterwards). Returns true if the port
- // was accepeted for the PID, or false if it was rejected (e.g. due to an
- // unknown sender).
- bool FinalizePid(base::ProcessHandle pid, mach_port_t task_port);
-
- // Name used to identify a particular port broker.
- const std::string name_;
-
- // The Mach port on which the server listens.
- base::mac::ScopedMachReceiveRight server_port_;
-
- // The dispatch source and queue on which Mach messages will be received.
- std::unique_ptr<base::DispatchSourceMach> dispatch_source_;
-
- // Stores mach info for every process in the broker.
- typedef std::map<base::ProcessHandle, mach_port_t> MachMap;
- MachMap mach_map_;
-
- // Mutex that guards |mach_map_|.
- mutable base::Lock lock_;
-
- DISALLOW_COPY_AND_ASSIGN(MachPortBroker);
-};
-
-} // namespace base
-
-#endif // BASE_MAC_MACH_PORT_BROKER_H_
diff --git a/chromium/base/mac/mach_port_broker.mm b/chromium/base/mac/mach_port_broker.mm
deleted file mode 100644
index 2e0bcb71019..00000000000
--- a/chromium/base/mac/mach_port_broker.mm
+++ /dev/null
@@ -1,206 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/mac/mach_port_broker.h"
-
-#include <bsm/libbsm.h>
-#include <servers/bootstrap.h>
-
-#include "base/logging.h"
-#include "base/mac/foundation_util.h"
-#include "base/mac/mach_logging.h"
-#include "base/mac/scoped_mach_msg_destroy.h"
-#include "base/strings/string_util.h"
-#include "base/strings/stringprintf.h"
-
-namespace base {
-
-namespace {
-
-constexpr mach_msg_id_t kTaskPortMessageId = 'tskp';
-
-// Mach message structure used in the child as a sending message.
-struct MachPortBroker_ChildSendMsg {
- mach_msg_header_t header;
- mach_msg_body_t body;
- mach_msg_port_descriptor_t child_task_port;
-};
-
-// Complement to the ChildSendMsg, this is used in the parent for receiving
-// a message. Contains a message trailer with audit information.
-struct MachPortBroker_ParentRecvMsg : public MachPortBroker_ChildSendMsg {
- mach_msg_audit_trailer_t trailer;
-};
-
-} // namespace
-
-// static
-bool MachPortBroker::ChildSendTaskPortToParent(const std::string& name) {
- // Look up the named MachPortBroker port that's been registered with the
- // bootstrap server.
- mac::ScopedMachSendRight parent_port;
- std::string bootstrap_name = GetMachPortName(name, true);
- kern_return_t kr = bootstrap_look_up(
- bootstrap_port, const_cast<char*>(bootstrap_name.c_str()),
- mac::ScopedMachSendRight::Receiver(parent_port).get());
- if (kr != KERN_SUCCESS) {
- BOOTSTRAP_LOG(ERROR, kr) << "bootstrap_look_up " << bootstrap_name;
- return false;
- }
-
- // Create the check in message. This will copy a send right on this process'
- // (the child's) task port and send it to the parent.
- MachPortBroker_ChildSendMsg msg{};
- msg.header.msgh_bits = MACH_MSGH_BITS_REMOTE(MACH_MSG_TYPE_COPY_SEND) |
- MACH_MSGH_BITS_COMPLEX;
- msg.header.msgh_size = sizeof(msg);
- msg.header.msgh_remote_port = parent_port.get();
- msg.header.msgh_id = kTaskPortMessageId;
- msg.body.msgh_descriptor_count = 1;
- msg.child_task_port.name = mach_task_self();
- msg.child_task_port.disposition = MACH_MSG_TYPE_PORT_SEND;
- msg.child_task_port.type = MACH_MSG_PORT_DESCRIPTOR;
-
- kr = mach_msg(&msg.header, MACH_SEND_MSG | MACH_SEND_TIMEOUT, sizeof(msg),
- 0, MACH_PORT_NULL, 100 /*milliseconds*/, MACH_PORT_NULL);
- if (kr != KERN_SUCCESS) {
- MACH_LOG(ERROR, kr) << "mach_msg";
- return false;
- }
-
- return true;
-}
-
-// static
-std::string MachPortBroker::GetMachPortName(const std::string& name,
- bool is_child) {
- // In child processes, use the parent's pid.
- const pid_t pid = is_child ? getppid() : getpid();
- return base::StringPrintf(
- "%s.%s.%d", base::mac::BaseBundleID(), name.c_str(), pid);
-}
-
-mach_port_t MachPortBroker::TaskForPid(base::ProcessHandle pid) const {
- base::AutoLock lock(lock_);
- MachPortBroker::MachMap::const_iterator it = mach_map_.find(pid);
- if (it == mach_map_.end())
- return MACH_PORT_NULL;
- return it->second;
-}
-
-MachPortBroker::MachPortBroker(const std::string& name) : name_(name) {}
-
-MachPortBroker::~MachPortBroker() {}
-
-bool MachPortBroker::Init() {
- DCHECK(server_port_.get() == MACH_PORT_NULL);
-
- // Check in with launchd and publish the service name.
- std::string bootstrap_name = GetMachPortName(name_, false);
- kern_return_t kr = bootstrap_check_in(
- bootstrap_port, bootstrap_name.c_str(),
- mac::ScopedMachReceiveRight::Receiver(server_port_).get());
- if (kr != KERN_SUCCESS) {
- BOOTSTRAP_LOG(ERROR, kr) << "bootstrap_check_in " << bootstrap_name;
- return false;
- }
-
- // Start the dispatch source.
- std::string queue_name =
- base::StringPrintf("%s.MachPortBroker", base::mac::BaseBundleID());
- dispatch_source_.reset(new base::DispatchSourceMach(
- queue_name.c_str(), server_port_.get(), ^{ HandleRequest(); }));
- dispatch_source_->Resume();
-
- return true;
-}
-
-void MachPortBroker::AddPlaceholderForPid(base::ProcessHandle pid) {
- lock_.AssertAcquired();
- DCHECK_EQ(0u, mach_map_.count(pid));
- mach_map_[pid] = MACH_PORT_NULL;
-}
-
-void MachPortBroker::InvalidatePid(base::ProcessHandle pid) {
- lock_.AssertAcquired();
-
- MachMap::iterator mach_it = mach_map_.find(pid);
- if (mach_it != mach_map_.end()) {
- kern_return_t kr = mach_port_deallocate(mach_task_self(), mach_it->second);
- MACH_LOG_IF(WARNING, kr != KERN_SUCCESS, kr) << "mach_port_deallocate";
- mach_map_.erase(mach_it);
- }
-}
-
-void MachPortBroker::HandleRequest() {
- MachPortBroker_ParentRecvMsg msg{};
- msg.header.msgh_size = sizeof(msg);
- msg.header.msgh_local_port = server_port_.get();
-
- const mach_msg_option_t options = MACH_RCV_MSG |
- MACH_RCV_TRAILER_TYPE(MACH_RCV_TRAILER_AUDIT) |
- MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_AUDIT);
-
- kern_return_t kr = mach_msg(&msg.header,
- options,
- 0,
- sizeof(msg),
- server_port_.get(),
- MACH_MSG_TIMEOUT_NONE,
- MACH_PORT_NULL);
- if (kr != KERN_SUCCESS) {
- MACH_LOG(ERROR, kr) << "mach_msg";
- return;
- }
-
- // Destroy any rights that this class does not take ownership of.
- ScopedMachMsgDestroy scoped_msg(&msg.header);
-
- // Validate that the received message is what is expected.
- if ((msg.header.msgh_bits & MACH_MSGH_BITS_COMPLEX) == 0 ||
- msg.header.msgh_id != kTaskPortMessageId ||
- msg.header.msgh_size != sizeof(MachPortBroker_ChildSendMsg) ||
- msg.child_task_port.disposition != MACH_MSG_TYPE_PORT_SEND ||
- msg.child_task_port.type != MACH_MSG_PORT_DESCRIPTOR) {
- LOG(ERROR) << "Received unexpected message";
- return;
- }
-
- // Use the kernel audit information to make sure this message is from
- // a task that this process spawned. The kernel audit token contains the
- // unspoofable pid of the task that sent the message.
- pid_t child_pid = audit_token_to_pid(msg.trailer.msgh_audit);
- mach_port_t child_task_port = msg.child_task_port.name;
-
- // Take the lock and update the broker information.
- {
- base::AutoLock lock(lock_);
- if (FinalizePid(child_pid, child_task_port)) {
- scoped_msg.Disarm();
- }
- }
- NotifyObservers(child_pid);
-}
-
-bool MachPortBroker::FinalizePid(base::ProcessHandle pid,
- mach_port_t task_port) {
- lock_.AssertAcquired();
-
- MachMap::iterator it = mach_map_.find(pid);
- if (it == mach_map_.end()) {
- // Do nothing for unknown pids.
- LOG(ERROR) << "Unknown process " << pid << " is sending Mach IPC messages!";
- return false;
- }
-
- if (it->second != MACH_PORT_NULL) {
- NOTREACHED();
- return false;
- }
-
- it->second = task_port;
- return true;
-}
-
-} // namespace base
diff --git a/chromium/base/mac/mach_port_broker_unittest.cc b/chromium/base/mac/mach_port_broker_unittest.cc
deleted file mode 100644
index bff8eb6a9bc..00000000000
--- a/chromium/base/mac/mach_port_broker_unittest.cc
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/mac/mach_port_broker.h"
-
-#include "base/command_line.h"
-#include "base/synchronization/lock.h"
-#include "base/synchronization/waitable_event.h"
-#include "base/test/multiprocess_test.h"
-#include "base/test/test_timeouts.h"
-#include "testing/gtest/include/gtest/gtest.h"
-#include "testing/multiprocess_func_list.h"
-
-namespace base {
-
-namespace {
-const char kBootstrapPortName[] = "thisisatest";
-}
-
-class MachPortBrokerTest : public testing::Test,
- public base::PortProvider::Observer {
- public:
- MachPortBrokerTest()
- : broker_(kBootstrapPortName),
- event_(base::WaitableEvent::ResetPolicy::MANUAL,
- base::WaitableEvent::InitialState::NOT_SIGNALED),
- received_process_(kNullProcessHandle) {
- broker_.AddObserver(this);
- }
- ~MachPortBrokerTest() override {
- broker_.RemoveObserver(this);
- }
-
- // Helper function to acquire/release locks and call |PlaceholderForPid()|.
- void AddPlaceholderForPid(base::ProcessHandle pid) {
- base::AutoLock lock(broker_.GetLock());
- broker_.AddPlaceholderForPid(pid);
- }
-
- // Helper function to acquire/release locks and call |FinalizePid()|.
- void FinalizePid(base::ProcessHandle pid,
- mach_port_t task_port) {
- base::AutoLock lock(broker_.GetLock());
- broker_.FinalizePid(pid, task_port);
- }
-
- void WaitForTaskPort() {
- event_.Wait();
- }
-
- // base::PortProvider::Observer:
- void OnReceivedTaskPort(ProcessHandle process) override {
- received_process_ = process;
- event_.Signal();
- }
-
- protected:
- MachPortBroker broker_;
- WaitableEvent event_;
- ProcessHandle received_process_;
-};
-
-TEST_F(MachPortBrokerTest, Locks) {
- // Acquire and release the locks. Nothing bad should happen.
- base::AutoLock lock(broker_.GetLock());
-}
-
-TEST_F(MachPortBrokerTest, AddPlaceholderAndFinalize) {
- // Add a placeholder for PID 1.
- AddPlaceholderForPid(1);
- EXPECT_EQ(0u, broker_.TaskForPid(1));
-
- // Finalize PID 1.
- FinalizePid(1, 100u);
- EXPECT_EQ(100u, broker_.TaskForPid(1));
-
- // Should be no entry for PID 2.
- EXPECT_EQ(0u, broker_.TaskForPid(2));
-}
-
-TEST_F(MachPortBrokerTest, FinalizeUnknownPid) {
- // Finalizing an entry for an unknown pid should not add it to the map.
- FinalizePid(1u, 100u);
- EXPECT_EQ(0u, broker_.TaskForPid(1u));
-}
-
-MULTIPROCESS_TEST_MAIN(MachPortBrokerTestChild) {
- CHECK(base::MachPortBroker::ChildSendTaskPortToParent(kBootstrapPortName));
- return 0;
-}
-
-TEST_F(MachPortBrokerTest, ReceivePortFromChild) {
- ASSERT_TRUE(broker_.Init());
- CommandLine command_line(
- base::GetMultiProcessTestChildBaseCommandLine());
- broker_.GetLock().Acquire();
- base::Process test_child_process = base::SpawnMultiProcessTestChild(
- "MachPortBrokerTestChild", command_line, LaunchOptions());
- broker_.AddPlaceholderForPid(test_child_process.Handle());
- broker_.GetLock().Release();
-
- WaitForTaskPort();
- EXPECT_EQ(test_child_process.Handle(), received_process_);
-
- int rv = -1;
- ASSERT_TRUE(test_child_process.WaitForExitWithTimeout(
- TestTimeouts::action_timeout(), &rv));
- EXPECT_EQ(0, rv);
-
- EXPECT_NE(static_cast<mach_port_t>(MACH_PORT_NULL),
- broker_.TaskForPid(test_child_process.Handle()));
-}
-
-TEST_F(MachPortBrokerTest, ReceivePortFromChildWithoutAdding) {
- ASSERT_TRUE(broker_.Init());
- CommandLine command_line(
- base::GetMultiProcessTestChildBaseCommandLine());
- broker_.GetLock().Acquire();
- base::Process test_child_process = base::SpawnMultiProcessTestChild(
- "MachPortBrokerTestChild", command_line, LaunchOptions());
- broker_.GetLock().Release();
-
- int rv = -1;
- ASSERT_TRUE(test_child_process.WaitForExitWithTimeout(
- TestTimeouts::action_timeout(), &rv));
- EXPECT_EQ(0, rv);
-
- EXPECT_EQ(static_cast<mach_port_t>(MACH_PORT_NULL),
- broker_.TaskForPid(test_child_process.Handle()));
-}
-
-} // namespace base
diff --git a/chromium/base/mac/mach_port_rendezvous.cc b/chromium/base/mac/mach_port_rendezvous.cc
index 5e71440a38e..43e5806bec6 100644
--- a/chromium/base/mac/mach_port_rendezvous.cc
+++ b/chromium/base/mac/mach_port_rendezvous.cc
@@ -258,11 +258,15 @@ void MachPortRendezvousServer::OnClientExited(pid_t pid) {
// static
MachPortRendezvousClient* MachPortRendezvousClient::GetInstance() {
- static MachPortRendezvousClient* client = new MachPortRendezvousClient();
- if (!client->did_acquire_ports()) {
- bool ok = client->AcquirePorts();
- DCHECK(ok);
+ static MachPortRendezvousClient* client = []() -> auto* {
+ auto* client = new MachPortRendezvousClient();
+ if (!client->AcquirePorts()) {
+ delete client;
+ client = nullptr;
+ }
+ return client;
}
+ ();
return client;
}
@@ -296,8 +300,6 @@ std::string MachPortRendezvousClient::GetBootstrapName() {
bool MachPortRendezvousClient::AcquirePorts() {
AutoLock lock(lock_);
- did_acquire_ports_ = true;
-
mac::ScopedMachSendRight server_port;
std::string bootstrap_name = GetBootstrapName();
kern_return_t kr = bootstrap_look_up(
diff --git a/chromium/base/mac/mach_port_rendezvous.h b/chromium/base/mac/mach_port_rendezvous.h
index 422b2df30e8..177a5c3d4f3 100644
--- a/chromium/base/mac/mach_port_rendezvous.h
+++ b/chromium/base/mac/mach_port_rendezvous.h
@@ -162,7 +162,9 @@ class BASE_EXPORT MachPortRendezvousClient {
public:
// Connects to the MachPortRendezvousServer and requests any registered Mach
// ports. This only performs the rendezvous once. Subsequent calls to this
- // method return the same instance.
+ // method return the same instance. If the rendezvous fails, which can happen
+ // if the server is not available, this returns null. Acquiring zero ports
+ // from the exchange is not considered a failure.
static MachPortRendezvousClient* GetInstance();
// Returns the Mach send right that was registered with |key|. If no such
@@ -202,13 +204,8 @@ class BASE_EXPORT MachPortRendezvousClient {
// MachRendezvousPort with MACH_PORT_NULL is returned.
MachRendezvousPort PortForKey(MachPortsForRendezvous::key_type key);
- bool did_acquire_ports() { return did_acquire_ports_; }
-
// Lock for the below data members.
Lock lock_;
- // Flag for if the client has attempted to acquire ports. If the client
- // experienced an error in doing so, this will still be true.
- bool did_acquire_ports_ = false;
// The collection of ports that was acquired.
MachPortsForRendezvous ports_;
diff --git a/chromium/base/mac/mach_port_rendezvous_unittest.cc b/chromium/base/mac/mach_port_rendezvous_unittest.cc
index 5af2a864c3f..a66c1cefe86 100644
--- a/chromium/base/mac/mach_port_rendezvous_unittest.cc
+++ b/chromium/base/mac/mach_port_rendezvous_unittest.cc
@@ -9,6 +9,7 @@
#include <utility>
#include "base/at_exit.h"
+#include "base/mac/foundation_util.h"
#include "base/mac/mach_logging.h"
#include "base/strings/stringprintf.h"
#include "base/test/multiprocess_test.h"
@@ -206,4 +207,25 @@ TEST_F(MachPortRendezvousServerTest, DestroyRight) {
}
}
+MULTIPROCESS_TEST_MAIN(FailToRendezvous) {
+ // The rendezvous system uses the BaseBundleID to construct the bootstrap
+ // server name, so changing it will result in a failure to look it up.
+ base::mac::SetBaseBundleID("org.chromium.totallyfake");
+ CHECK_EQ(nullptr, base::MachPortRendezvousClient::GetInstance());
+ return 0;
+}
+
+TEST_F(MachPortRendezvousServerTest, FailToRendezvous) {
+ auto* server = MachPortRendezvousServer::GetInstance();
+ ASSERT_TRUE(server);
+
+ Process child = SpawnChild("FailToRendezvous");
+
+ int exit_code;
+ ASSERT_TRUE(WaitForMultiprocessTestChildExit(
+ child, TestTimeouts::action_timeout(), &exit_code));
+
+ EXPECT_EQ(0, exit_code);
+}
+
} // namespace base
diff --git a/chromium/base/mac/mach_port_util.cc b/chromium/base/mac/mach_port_util.cc
deleted file mode 100644
index 0eee21085d8..00000000000
--- a/chromium/base/mac/mach_port_util.cc
+++ /dev/null
@@ -1,136 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/mac/mach_port_util.h"
-
-#include "base/logging.h"
-
-namespace base {
-
-namespace {
-
-// Struct for sending a complex Mach message.
-struct MachSendComplexMessage {
- mach_msg_header_t header;
- mach_msg_body_t body;
- mach_msg_port_descriptor_t data;
-};
-
-// Struct for receiving a complex message.
-struct MachReceiveComplexMessage {
- mach_msg_header_t header;
- mach_msg_body_t body;
- mach_msg_port_descriptor_t data;
- mach_msg_trailer_t trailer;
-};
-
-} // namespace
-
-kern_return_t SendMachPort(mach_port_t endpoint,
- mach_port_t port_to_send,
- int disposition) {
- MachSendComplexMessage send_msg;
- send_msg.header.msgh_bits =
- MACH_MSGH_BITS(MACH_MSG_TYPE_MOVE_SEND_ONCE, 0) | MACH_MSGH_BITS_COMPLEX;
- send_msg.header.msgh_size = sizeof(send_msg);
- send_msg.header.msgh_remote_port = endpoint;
- send_msg.header.msgh_local_port = MACH_PORT_NULL;
- send_msg.header.msgh_reserved = 0;
- send_msg.header.msgh_id = 0;
- send_msg.body.msgh_descriptor_count = 1;
- send_msg.data.name = port_to_send;
- send_msg.data.disposition = disposition;
- send_msg.data.type = MACH_MSG_PORT_DESCRIPTOR;
-
- kern_return_t kr =
- mach_msg(&send_msg.header, MACH_SEND_MSG | MACH_SEND_TIMEOUT,
- send_msg.header.msgh_size,
- 0, // receive limit
- MACH_PORT_NULL, // receive name
- 0, // timeout
- MACH_PORT_NULL); // notification port
-
- if (kr != KERN_SUCCESS)
- mach_port_deallocate(mach_task_self(), endpoint);
-
- return kr;
-}
-
-base::mac::ScopedMachSendRight ReceiveMachPort(mach_port_t port_to_listen_on) {
- MachReceiveComplexMessage recv_msg;
- mach_msg_header_t* recv_hdr = &recv_msg.header;
- recv_hdr->msgh_local_port = port_to_listen_on;
- recv_hdr->msgh_size = sizeof(recv_msg);
-
- kern_return_t kr =
- mach_msg(recv_hdr, MACH_RCV_MSG | MACH_RCV_TIMEOUT, 0,
- recv_hdr->msgh_size, port_to_listen_on, 0, MACH_PORT_NULL);
- if (kr != KERN_SUCCESS)
- return base::mac::ScopedMachSendRight(MACH_PORT_NULL);
- if (recv_msg.header.msgh_id != 0)
- return base::mac::ScopedMachSendRight(MACH_PORT_NULL);
- return base::mac::ScopedMachSendRight(recv_msg.data.name);
-}
-
-mach_port_name_t CreateIntermediateMachPort(
- mach_port_t task_port,
- base::mac::ScopedMachSendRight port_to_insert,
- MachCreateError* error_code) {
- DCHECK_NE(mach_task_self(), task_port);
- DCHECK_NE(static_cast<mach_port_name_t>(MACH_PORT_NULL), task_port);
-
- // Make a port with receive rights in the destination task.
- mach_port_name_t endpoint;
- kern_return_t kr =
- mach_port_allocate(task_port, MACH_PORT_RIGHT_RECEIVE, &endpoint);
- if (kr != KERN_SUCCESS) {
- if (error_code)
- *error_code = MachCreateError::ERROR_MAKE_RECEIVE_PORT;
- return MACH_PORT_NULL;
- }
-
- // Change its message queue limit so that it accepts one message.
- mach_port_limits limits = {};
- limits.mpl_qlimit = 1;
- kr = mach_port_set_attributes(task_port, endpoint, MACH_PORT_LIMITS_INFO,
- reinterpret_cast<mach_port_info_t>(&limits),
- MACH_PORT_LIMITS_INFO_COUNT);
- if (kr != KERN_SUCCESS) {
- if (error_code)
- *error_code = MachCreateError::ERROR_SET_ATTRIBUTES;
- mach_port_deallocate(task_port, endpoint);
- return MACH_PORT_NULL;
- }
-
- // Get a send right.
- mach_port_t send_once_right;
- mach_msg_type_name_t send_right_type;
- kr =
- mach_port_extract_right(task_port, endpoint, MACH_MSG_TYPE_MAKE_SEND_ONCE,
- &send_once_right, &send_right_type);
- if (kr != KERN_SUCCESS) {
- if (error_code)
- *error_code = MachCreateError::ERROR_EXTRACT_DEST_RIGHT;
- mach_port_deallocate(task_port, endpoint);
- return MACH_PORT_NULL;
- }
- DCHECK_EQ(static_cast<mach_msg_type_name_t>(MACH_MSG_TYPE_PORT_SEND_ONCE),
- send_right_type);
-
- // This call takes ownership of |send_once_right|.
- kr = base::SendMachPort(
- send_once_right, port_to_insert.get(), MACH_MSG_TYPE_COPY_SEND);
- if (kr != KERN_SUCCESS) {
- if (error_code)
- *error_code = MachCreateError::ERROR_SEND_MACH_PORT;
- mach_port_deallocate(task_port, endpoint);
- return MACH_PORT_NULL;
- }
-
- // Endpoint is intentionally leaked into the destination task. An IPC must be
- // sent to the destination task so that it can clean up this port.
- return endpoint;
-}
-
-} // namespace base
diff --git a/chromium/base/mac/mach_port_util.h b/chromium/base/mac/mach_port_util.h
deleted file mode 100644
index f7a7f325f1a..00000000000
--- a/chromium/base/mac/mach_port_util.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MAC_MACH_PORT_UTIL_H_
-#define BASE_MAC_MACH_PORT_UTIL_H_
-
-#include <mach/mach.h>
-
-#include "base/base_export.h"
-#include "base/mac/scoped_mach_port.h"
-
-namespace base {
-
-enum class MachCreateError {
- ERROR_MAKE_RECEIVE_PORT,
- ERROR_SET_ATTRIBUTES,
- ERROR_EXTRACT_DEST_RIGHT,
- ERROR_SEND_MACH_PORT,
-};
-
-// Sends a Mach port to |dest_port|. Assumes that |dest_port| is a send once
-// right. Takes ownership of |dest_port|.
-BASE_EXPORT kern_return_t SendMachPort(mach_port_t dest_port,
- mach_port_t port_to_send,
- int disposition);
-
-// Receives a Mach port from |port_to_listen_on|, which should have exactly one
-// queued message. Returns |MACH_PORT_NULL| on any error.
-BASE_EXPORT base::mac::ScopedMachSendRight ReceiveMachPort(
- mach_port_t port_to_listen_on);
-
-// Creates an intermediate Mach port in |task_port| and sends |port_to_insert|
-// as a mach_msg to the intermediate Mach port.
-// |task_port| is the task port of another process.
-// |port_to_insert| must be a send right in the current task's name space.
-// Returns the intermediate port on success, and MACH_PORT_NULL on failure.
-// On failure, |error_code| is set if not null.
-// This method takes ownership of |port_to_insert|. On success, ownership is
-// passed to the intermediate Mach port.
-BASE_EXPORT mach_port_name_t CreateIntermediateMachPort(
- mach_port_t task_port,
- base::mac::ScopedMachSendRight port_to_insert,
- MachCreateError* error_code);
-
-} // namespace base
-
-#endif // BASE_MAC_MACH_PORT_UTIL_H_
diff --git a/chromium/base/mac/objc_release_properties_unittest.mm b/chromium/base/mac/objc_release_properties_unittest.mm
index 5f1565052fe..4b51e42b2ec 100644
--- a/chromium/base/mac/objc_release_properties_unittest.mm
+++ b/chromium/base/mac/objc_release_properties_unittest.mm
@@ -217,7 +217,7 @@ struct NumberHolder {
@selector(baseCvcDynamic), @selector(derivedCvcDynamic),
@selector(protoCvcDynamic),
};
- if (!base::ContainsValue(dynamicMethods, sel)) {
+ if (!base::Contains(dynamicMethods, sel)) {
return NO;
}
id (*imp)() = []() -> id { return nil; };
diff --git a/chromium/base/memory/discardable_shared_memory.cc b/chromium/base/memory/discardable_shared_memory.cc
index 57494b592bf..f8e6c53d2a6 100644
--- a/chromium/base/memory/discardable_shared_memory.cc
+++ b/chromium/base/memory/discardable_shared_memory.cc
@@ -32,6 +32,12 @@
#include "base/win/windows_version.h"
#endif
+#if defined(OS_FUCHSIA)
+#include <lib/zx/vmo.h>
+#include <zircon/types.h>
+#include "base/fuchsia/fuchsia_logging.h"
+#endif
+
namespace base {
namespace {
@@ -416,7 +422,13 @@ bool DiscardableSharedMemory::Purge(Time current_time) {
void* ptr = VirtualAlloc(address, length, MEM_RESET, PAGE_READWRITE);
CHECK(ptr);
}
-#endif
+#elif defined(OS_FUCHSIA)
+ zx::unowned_vmo vmo = shared_memory_region_.GetPlatformHandle();
+ zx_status_t status =
+ vmo->op_range(ZX_VMO_OP_DECOMMIT, AlignToPageSize(sizeof(SharedState)),
+ AlignToPageSize(mapped_size_), nullptr, 0);
+ ZX_DCHECK(status == ZX_OK, status) << "zx_vmo_op_range(ZX_VMO_OP_DECOMMIT)";
+#endif // defined(OS_FUCHSIA)
last_known_usage_ = Time();
return true;
diff --git a/chromium/base/memory/fake_memory_pressure_monitor.cc b/chromium/base/memory/fake_memory_pressure_monitor.cc
index 713b1618ac7..59fd3ef66eb 100644
--- a/chromium/base/memory/fake_memory_pressure_monitor.cc
+++ b/chromium/base/memory/fake_memory_pressure_monitor.cc
@@ -20,7 +20,7 @@ void FakeMemoryPressureMonitor::SetAndNotifyMemoryPressure(
}
base::MemoryPressureMonitor::MemoryPressureLevel
-FakeMemoryPressureMonitor::GetCurrentPressureLevel() {
+FakeMemoryPressureMonitor::GetCurrentPressureLevel() const {
return memory_pressure_level_;
}
diff --git a/chromium/base/memory/fake_memory_pressure_monitor.h b/chromium/base/memory/fake_memory_pressure_monitor.h
index 2194b5f845e..d012876d54e 100644
--- a/chromium/base/memory/fake_memory_pressure_monitor.h
+++ b/chromium/base/memory/fake_memory_pressure_monitor.h
@@ -19,7 +19,7 @@ class FakeMemoryPressureMonitor : public base::MemoryPressureMonitor {
void SetAndNotifyMemoryPressure(MemoryPressureLevel level);
// base::MemoryPressureMonitor overrides:
- MemoryPressureLevel GetCurrentPressureLevel() override;
+ MemoryPressureLevel GetCurrentPressureLevel() const override;
void SetDispatchCallback(const DispatchCallback& callback) override;
private:
diff --git a/chromium/base/memory/memory_pressure_monitor.h b/chromium/base/memory/memory_pressure_monitor.h
index 13bc4839e34..40d3415d753 100644
--- a/chromium/base/memory/memory_pressure_monitor.h
+++ b/chromium/base/memory/memory_pressure_monitor.h
@@ -40,7 +40,7 @@ class BASE_EXPORT MemoryPressureMonitor {
static const base::TimeDelta kUMAMemoryPressureLevelPeriod;
// Returns the currently observed memory pressure.
- virtual MemoryPressureLevel GetCurrentPressureLevel() = 0;
+ virtual MemoryPressureLevel GetCurrentPressureLevel() const = 0;
// Sets a notification callback. The default callback invokes
// base::MemoryPressureListener::NotifyMemoryPressure.
diff --git a/chromium/base/memory/memory_pressure_monitor_chromeos.cc b/chromium/base/memory/memory_pressure_monitor_chromeos.cc
index 2b4ff01cb9c..c85ea9ac0f6 100644
--- a/chromium/base/memory/memory_pressure_monitor_chromeos.cc
+++ b/chromium/base/memory/memory_pressure_monitor_chromeos.cc
@@ -1,18 +1,26 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
+// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
#include "base/memory/memory_pressure_monitor_chromeos.h"
#include <fcntl.h>
-#include <sys/select.h>
+#include <sys/poll.h>
+#include <string>
+#include <vector>
#include "base/bind.h"
+#include "base/files/file_util.h"
#include "base/metrics/histogram_macros.h"
+#include "base/no_destructor.h"
#include "base/posix/eintr_wrapper.h"
#include "base/process/process_metrics.h"
#include "base/single_thread_task_runner.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_util.h"
#include "base/system/sys_info.h"
+#include "base/task/post_task.h"
+#include "base/threading/scoped_blocking_call.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
@@ -20,282 +28,275 @@ namespace base {
namespace chromeos {
namespace {
-// Type-safe version of |g_monitor| from base/memory/memory_pressure_monitor.cc,
-// this was originally added because TabManagerDelegate for chromeos needs to
-// call into ScheduleEarlyCheck which isn't a public API in the base
-// MemoryPressureMonitor. This matters because ChromeOS may create a
-// FakeMemoryPressureMonitor for browser tests and that's why this type-specific
-// version was added.
-MemoryPressureMonitor* g_monitor = nullptr;
-
-// The time between memory pressure checks. While under critical pressure, this
-// is also the timer to repeat cleanup attempts. Note: this is only for the UMA
-// ChromeOS.MemoryPressureLevel.
-const int kMemoryPressureIntervalMs = 1000;
-
-// The time which should pass between two moderate memory pressure calls.
-const int kModerateMemoryPressureCooldownMs = 10000;
-
-// Number of event polls before the next moderate pressure event can be sent.
-const int kModerateMemoryPressureCooldown =
- kModerateMemoryPressureCooldownMs / kMemoryPressureIntervalMs;
-
-// Threshold constants to emit pressure events.
-const int kNormalMemoryPressureModerateThresholdPercent = 60;
-const int kNormalMemoryPressureCriticalThresholdPercent = 95;
-const int kAggressiveMemoryPressureModerateThresholdPercent = 35;
-const int kAggressiveMemoryPressureCriticalThresholdPercent = 70;
-
-// The possible state for memory pressure level. The values should be in line
-// with values in MemoryPressureListener::MemoryPressureLevel and should be
-// updated if more memory pressure levels are introduced.
-enum MemoryPressureLevelUMA {
- MEMORY_PRESSURE_LEVEL_NONE = 0,
- MEMORY_PRESSURE_LEVEL_MODERATE,
- MEMORY_PRESSURE_LEVEL_CRITICAL,
- NUM_MEMORY_PRESSURE_LEVELS
-};
-
-// This is the file that will exist if low memory notification is available
-// on the device. Whenever it becomes readable, it signals a low memory
-// condition.
-const char kLowMemFile[] = "/dev/chromeos-low-mem";
-
-// Converts a |MemoryPressureThreshold| value into a used memory percentage for
-// the moderate pressure event.
-int GetModerateMemoryThresholdInPercent(
- MemoryPressureMonitor::MemoryPressureThresholds thresholds) {
- return thresholds == MemoryPressureMonitor::
- THRESHOLD_AGGRESSIVE_CACHE_DISCARD ||
- thresholds == MemoryPressureMonitor::THRESHOLD_AGGRESSIVE
- ? kAggressiveMemoryPressureModerateThresholdPercent
- : kNormalMemoryPressureModerateThresholdPercent;
+// Type-safe version of |g_monitor_notifying| from
+// base/memory/memory_pressure_monitor.cc, this was originally added because
+// TabManagerDelegate for chromeos needs to call into ScheduleEarlyCheck which
+// isn't a public API in the base MemoryPressureMonitor. This matters because
+// ChromeOS may create a FakeMemoryPressureMonitor for browser tests and that's
+// why this type-specific version was added.
+MemoryPressureMonitor* g_monitor_notifying = nullptr;
+
+// We try not to re-notify on moderate too frequently, this time
+// controls how frequently we will notify after our first notification.
+constexpr base::TimeDelta kModerateMemoryPressureCooldownTime =
+ base::TimeDelta::FromSeconds(10);
+
+// The margin mem file contains the two memory levels, the first is the
+// critical level and the second is the moderate level. Note, this
+// file may contain more values but only the first two are used for
+// memory pressure notifications in chromeos.
+constexpr char kMarginMemFile[] = "/sys/kernel/mm/chromeos-low_mem/margin";
+
+// The available memory file contains the available memory as determined
+// by the kernel.
+constexpr char kAvailableMemFile[] =
+ "/sys/kernel/mm/chromeos-low_mem/available";
+
+// Converts an available memory value in MB to a memory pressure level.
+MemoryPressureListener::MemoryPressureLevel GetMemoryPressureLevelFromAvailable(
+ int available_mb,
+ int moderate_avail_mb,
+ int critical_avail_mb) {
+ if (available_mb < critical_avail_mb)
+ return MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL;
+ if (available_mb < moderate_avail_mb)
+ return MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE;
+
+ return MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE;
}
-// Converts a |MemoryPressureThreshold| value into a used memory percentage for
-// the critical pressure event.
-int GetCriticalMemoryThresholdInPercent(
- MemoryPressureMonitor::MemoryPressureThresholds thresholds) {
- return thresholds == MemoryPressureMonitor::
- THRESHOLD_AGGRESSIVE_TAB_DISCARD ||
- thresholds == MemoryPressureMonitor::THRESHOLD_AGGRESSIVE
- ? kAggressiveMemoryPressureCriticalThresholdPercent
- : kNormalMemoryPressureCriticalThresholdPercent;
-}
+int64_t ReadAvailableMemoryMB(int available_fd) {
+ // Read the available memory.
+ char buf[32] = {};
-// Converts free percent of memory into a memory pressure value.
-MemoryPressureListener::MemoryPressureLevel GetMemoryPressureLevelFromFillLevel(
- int actual_fill_level,
- int moderate_threshold,
- int critical_threshold) {
- if (actual_fill_level < moderate_threshold)
- return MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE;
- return actual_fill_level < critical_threshold
- ? MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE
- : MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL;
-}
+ // kernfs/file.c:
+ // "Once poll/select indicates that the value has changed, you
+  // need to close and re-open the file, or seek to 0 and read again."
+ ssize_t bytes_read = HANDLE_EINTR(pread(available_fd, buf, sizeof(buf), 0));
+ PCHECK(bytes_read != -1);
-// This function will be called less than once a second. It will check if
-// the kernel has detected a low memory situation.
-bool IsLowMemoryCondition(int file_descriptor) {
- fd_set fds;
- struct timeval tv;
+ std::string mem_str(buf, bytes_read);
+ int64_t available = -1;
+ CHECK(base::StringToInt64(
+ base::TrimWhitespaceASCII(mem_str, base::TrimPositions::TRIM_ALL),
+ &available));
- FD_ZERO(&fds);
- FD_SET(file_descriptor, &fds);
+ return available;
+}
- tv.tv_sec = 0;
- tv.tv_usec = 0;
+// This function will wait until the /sys/kernel/mm/chromeos-low_mem/available
+// file becomes readable and then read the latest value. This file will only
+// become readable once the available memory cross through one of the margin
+// values specified in /sys/kernel/mm/chromeos-low_mem/margin, for more
+// details see https://crrev.com/c/536336.
+bool WaitForMemoryPressureChanges(int available_fd) {
+ base::ScopedBlockingCall scoped_blocking_call(FROM_HERE,
+ base::BlockingType::WILL_BLOCK);
+
+ pollfd pfd = {available_fd, POLLPRI | POLLERR, 0};
+ int res = HANDLE_EINTR(poll(&pfd, 1, -1)); // Wait indefinitely.
+ PCHECK(res != -1);
+
+ if (pfd.revents != (POLLPRI | POLLERR)) {
+ // If we didn't receive POLLPRI | POLLERR it means we likely received
+    // POLLNVAL because the fd has been closed; we will only log an error in
+ // other situations.
+ LOG_IF(ERROR, pfd.revents != POLLNVAL)
+ << "WaitForMemoryPressureChanges received unexpected revents: "
+ << pfd.revents;
+
+ // We no longer want to wait for a kernel notification if the fd has been
+ // closed.
+ return false;
+ }
- return HANDLE_EINTR(select(file_descriptor + 1, &fds, NULL, NULL, &tv)) > 0;
+ return true;
}
} // namespace
+MemoryPressureMonitor::MemoryPressureMonitor()
+ : MemoryPressureMonitor(kMarginMemFile,
+ kAvailableMemFile,
+ base::BindRepeating(&WaitForMemoryPressureChanges),
+ /*enable_metrics=*/true) {}
+
MemoryPressureMonitor::MemoryPressureMonitor(
- MemoryPressureThresholds thresholds)
- : current_memory_pressure_level_(
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE),
- moderate_pressure_repeat_count_(0),
- seconds_since_reporting_(0),
- moderate_pressure_threshold_percent_(
- GetModerateMemoryThresholdInPercent(thresholds)),
- critical_pressure_threshold_percent_(
- GetCriticalMemoryThresholdInPercent(thresholds)),
- low_mem_file_(HANDLE_EINTR(::open(kLowMemFile, O_RDONLY))),
+ const std::string& margin_file,
+ const std::string& available_file,
+ base::RepeatingCallback<bool(int)> kernel_waiting_callback,
+ bool enable_metrics)
+ : available_mem_file_(HANDLE_EINTR(open(available_file.c_str(), O_RDONLY))),
dispatch_callback_(
base::BindRepeating(&MemoryPressureListener::NotifyMemoryPressure)),
+ kernel_waiting_callback_(
+ base::BindRepeating(std::move(kernel_waiting_callback),
+ available_mem_file_.get())),
weak_ptr_factory_(this) {
- DCHECK(!g_monitor);
- g_monitor = this;
+ DCHECK(g_monitor_notifying == nullptr);
+ g_monitor_notifying = this;
+
+ CHECK(available_mem_file_.is_valid());
+ std::vector<int> margin_parts =
+ MemoryPressureMonitor::GetMarginFileParts(margin_file);
+
+ // This class SHOULD have verified kernel support by calling
+ // SupportsKernelNotifications() before creating a new instance of this.
+  // Therefore we will CHECK-fail if we don't have multiple margin values.
+ CHECK_LE(2u, margin_parts.size());
+ critical_pressure_threshold_mb_ = margin_parts[0];
+ moderate_pressure_threshold_mb_ = margin_parts[1];
+
+ if (enable_metrics) {
+ // We will report the current memory pressure at some periodic interval,
+    // the metric ChromeOS.MemoryPressureLevel is currently reported every 1s.
+ reporting_timer_.Start(
+ FROM_HERE, base::TimeDelta::FromSeconds(1),
+ base::BindRepeating(
+ &MemoryPressureMonitor::CheckMemoryPressureAndRecordStatistics,
+ weak_ptr_factory_.GetWeakPtr()));
+ }
- StartObserving();
- LOG_IF(ERROR,
- base::SysInfo::IsRunningOnChromeOS() && !low_mem_file_.is_valid())
- << "Cannot open kernel listener";
+ ScheduleWaitForKernelNotification();
}
MemoryPressureMonitor::~MemoryPressureMonitor() {
- DCHECK(g_monitor);
- g_monitor = nullptr;
+ DCHECK(g_monitor_notifying);
+ g_monitor_notifying = nullptr;
+}
- StopObserving();
+std::vector<int> MemoryPressureMonitor::GetMarginFileParts() {
+ static const base::NoDestructor<std::vector<int>> margin_file_parts(
+ GetMarginFileParts(kMarginMemFile));
+ return *margin_file_parts;
}
-void MemoryPressureMonitor::ScheduleEarlyCheck() {
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, BindOnce(&MemoryPressureMonitor::CheckMemoryPressure,
- weak_ptr_factory_.GetWeakPtr()));
+std::vector<int> MemoryPressureMonitor::GetMarginFileParts(
+ const std::string& file) {
+ std::vector<int> margin_values;
+ std::string margin_contents;
+ if (base::ReadFileToString(base::FilePath(file), &margin_contents)) {
+ std::vector<std::string> margins =
+ base::SplitString(margin_contents, base::kWhitespaceASCII,
+ base::TRIM_WHITESPACE, base::SPLIT_WANT_NONEMPTY);
+ for (const auto& v : margins) {
+ int value = -1;
+ if (!base::StringToInt(v, &value)) {
+ // If any of the values weren't parseable as an int we return
+ // nothing as the file format is unexpected.
+ LOG(ERROR) << "Unable to parse margin file contents as integer: " << v;
+ return std::vector<int>();
+ }
+ margin_values.push_back(value);
+ }
+ } else {
+ LOG(ERROR) << "Unable to read margin file: " << kMarginMemFile;
+ }
+ return margin_values;
+}
+
+bool MemoryPressureMonitor::SupportsKernelNotifications() {
+ // Unfortunately at the moment the only way to determine if the chromeos
+ // kernel supports polling on the available file is to observe two values
+ // in the margin file, if the critical and moderate levels are specified
+ // there then we know the kernel must support polling on available.
+ return MemoryPressureMonitor::GetMarginFileParts().size() >= 2;
}
MemoryPressureListener::MemoryPressureLevel
-MemoryPressureMonitor::GetCurrentPressureLevel() {
+MemoryPressureMonitor::GetCurrentPressureLevel() const {
return current_memory_pressure_level_;
}
-// static
-MemoryPressureMonitor* MemoryPressureMonitor::Get() {
- return g_monitor;
-}
+// CheckMemoryPressure will get the current memory pressure level by reading
+// the available file.
+void MemoryPressureMonitor::CheckMemoryPressure() {
+ auto previous_memory_pressure = current_memory_pressure_level_;
+ int64_t mem_avail = ReadAvailableMemoryMB(available_mem_file_.get());
+ current_memory_pressure_level_ = GetMemoryPressureLevelFromAvailable(
+ mem_avail, moderate_pressure_threshold_mb_,
+ critical_pressure_threshold_mb_);
+ if (current_memory_pressure_level_ ==
+ MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE) {
+ last_moderate_notification_ = base::TimeTicks();
+ return;
+ }
+
+ // In the case of MODERATE memory pressure we may be in this state for quite
+ // some time so we limit the rate at which we dispatch notifications.
+ if (current_memory_pressure_level_ ==
+ MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE) {
+ if (previous_memory_pressure == current_memory_pressure_level_) {
+ if (base::TimeTicks::Now() - last_moderate_notification_ <
+ kModerateMemoryPressureCooldownTime) {
+ return;
+ } else if (previous_memory_pressure ==
+ MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL) {
+ // Reset the moderate notification time if we just crossed back.
+ last_moderate_notification_ = base::TimeTicks::Now();
+ return;
+ }
+ }
+
+ last_moderate_notification_ = base::TimeTicks::Now();
+ }
-void MemoryPressureMonitor::StartObserving() {
- timer_.Start(
- FROM_HERE, TimeDelta::FromMilliseconds(kMemoryPressureIntervalMs),
- BindRepeating(
- &MemoryPressureMonitor::CheckMemoryPressureAndRecordStatistics,
- weak_ptr_factory_.GetWeakPtr()));
+ VLOG(1) << "MemoryPressureMonitor::CheckMemoryPressure dispatching "
+ "at level: "
+ << current_memory_pressure_level_;
+ dispatch_callback_.Run(current_memory_pressure_level_);
}
-void MemoryPressureMonitor::StopObserving() {
- // If StartObserving failed, StopObserving will still get called.
- timer_.Stop();
+void MemoryPressureMonitor::HandleKernelNotification(bool result) {
+ // If WaitForKernelNotification returned false then the FD has been closed and
+ // we just exit without waiting again.
+ if (!result) {
+ return;
+ }
+
+ CheckMemoryPressure();
+
+ // Now we need to schedule back our blocking task to wait for more
+ // kernel notifications.
+ ScheduleWaitForKernelNotification();
}
void MemoryPressureMonitor::CheckMemoryPressureAndRecordStatistics() {
+ // Note: If we support notifications of memory pressure changes in both
+ // directions we will not have to update the cached value as it will always
+ // be correct.
CheckMemoryPressure();
- // We report the platform independent Memory.PressureLevel after
- // kUMAMemoryPressureLevelPeriod which is 5s.
- if (seconds_since_reporting_++ ==
- base::MemoryPressureMonitor::kUMAMemoryPressureLevelPeriod.InSeconds()) {
- seconds_since_reporting_ = 0;
+
+  // We only report Memory.PressureLevel every 5 seconds while
+ // we report ChromeOS.MemoryPressureLevel every 1s.
+ if (base::TimeTicks::Now() - last_pressure_level_report_ >
+ base::MemoryPressureMonitor::kUMAMemoryPressureLevelPeriod) {
+    // Record to UMA "Memory.PressureLevel"; a tick is 5 seconds.
RecordMemoryPressure(current_memory_pressure_level_, 1);
- }
- // Record UMA histogram statistics for the current memory pressure level.
- // TODO(lgrey): Remove this once there's a usable history for the
- // "Memory.PressureLevel" statistic
- MemoryPressureLevelUMA memory_pressure_level_uma(MEMORY_PRESSURE_LEVEL_NONE);
- switch (current_memory_pressure_level_) {
- case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE:
- memory_pressure_level_uma = MEMORY_PRESSURE_LEVEL_NONE;
- break;
- case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE:
- memory_pressure_level_uma = MEMORY_PRESSURE_LEVEL_MODERATE;
- break;
- case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL:
- memory_pressure_level_uma = MEMORY_PRESSURE_LEVEL_CRITICAL;
- break;
+ last_pressure_level_report_ = base::TimeTicks::Now();
}
- // TODO(bgeffon): Remove this platform specific metric once all work has
- // been completed to deal with the 5s Memory.PressureLevel metric.
+ // Record UMA histogram statistics for the current memory pressure level, it
+ // would seem that only Memory.PressureLevel would be necessary.
+ constexpr int kNumberPressureLevels = 3;
UMA_HISTOGRAM_ENUMERATION("ChromeOS.MemoryPressureLevel",
- memory_pressure_level_uma,
- NUM_MEMORY_PRESSURE_LEVELS);
+ current_memory_pressure_level_,
+ kNumberPressureLevels);
}
-void MemoryPressureMonitor::CheckMemoryPressure() {
- MemoryPressureListener::MemoryPressureLevel old_pressure =
- current_memory_pressure_level_;
-
- // If we have the kernel low memory observer, we use it's flag instead of our
- // own computation (for now). Note that in "simulation mode" it can be null.
- // TODO(skuhne): We need to add code which makes sure that the kernel and this
- // computation come to similar results and then remove this override again.
- // TODO(skuhne): Add some testing framework here to see how close the kernel
- // and the internal functions are.
- if (low_mem_file_.is_valid() && IsLowMemoryCondition(low_mem_file_.get())) {
- current_memory_pressure_level_ =
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL;
- } else {
- current_memory_pressure_level_ = GetMemoryPressureLevelFromFillLevel(
- GetUsedMemoryInPercent(),
- moderate_pressure_threshold_percent_,
- critical_pressure_threshold_percent_);
-
- // When listening to the kernel, we ignore the reported memory pressure
- // level from our own computation and reduce critical to moderate.
- if (low_mem_file_.is_valid() &&
- current_memory_pressure_level_ ==
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL) {
- current_memory_pressure_level_ =
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE;
- }
- }
-
- // In case there is no memory pressure we do not notify.
- if (current_memory_pressure_level_ ==
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE) {
- return;
- }
- if (old_pressure == current_memory_pressure_level_) {
- // If the memory pressure is still at the same level, we notify again for a
- // critical level. In case of a moderate level repeat however, we only send
- // a notification after a certain time has passed.
- if (current_memory_pressure_level_ ==
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE &&
- ++moderate_pressure_repeat_count_ <
- kModerateMemoryPressureCooldown) {
- return;
- }
- } else if (current_memory_pressure_level_ ==
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE &&
- old_pressure ==
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL) {
- // When we reducing the pressure level from critical to moderate, we
- // restart the timeout and do not send another notification.
- moderate_pressure_repeat_count_ = 0;
- return;
- }
- moderate_pressure_repeat_count_ = 0;
- dispatch_callback_.Run(current_memory_pressure_level_);
+void MemoryPressureMonitor::ScheduleEarlyCheck() {
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, base::BindOnce(&MemoryPressureMonitor::CheckMemoryPressure,
+ weak_ptr_factory_.GetWeakPtr()));
}
-// Gets the used ChromeOS memory in percent.
-int MemoryPressureMonitor::GetUsedMemoryInPercent() {
- base::SystemMemoryInfoKB info;
- if (!base::GetSystemMemoryInfo(&info)) {
- VLOG(1) << "Cannot determine the free memory of the system.";
- return 0;
- }
- // TODO(skuhne): Instead of adding the kernel memory pressure calculation
- // logic here, we should have a kernel mechanism similar to the low memory
- // notifier in ChromeOS which offers multiple pressure states.
- // To track this, we have crbug.com/381196.
-
- // The available memory consists of "real" and virtual (z)ram memory.
- // Since swappable memory uses a non pre-deterministic compression and
- // the compression creates its own "dynamic" in the system, it gets
- // de-emphasized by the |kSwapWeight| factor.
- const int kSwapWeight = 4;
-
- // The total memory we have is the "real memory" plus the virtual (z)ram.
- int total_memory = info.total + info.swap_total / kSwapWeight;
-
- // The kernel internally uses 50MB.
- const int kMinFileMemory = 50 * 1024;
-
- // Most file memory can be easily reclaimed.
- int file_memory = info.active_file + info.inactive_file;
- // unless it is dirty or it's a minimal portion which is required.
- file_memory -= info.dirty + kMinFileMemory;
-
- // Available memory is the sum of free, swap and easy reclaimable memory.
- int available_memory =
- info.free + info.swap_free / kSwapWeight + file_memory;
-
- DCHECK(available_memory < total_memory);
- int percentage = ((total_memory - available_memory) * 100) / total_memory;
- return percentage;
+void MemoryPressureMonitor::ScheduleWaitForKernelNotification() {
+ base::PostTaskAndReplyWithResult(
+ FROM_HERE,
+ {base::MayBlock(), base::TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN},
+ kernel_waiting_callback_,
+ base::BindRepeating(&MemoryPressureMonitor::HandleKernelNotification,
+ weak_ptr_factory_.GetWeakPtr()));
}
void MemoryPressureMonitor::SetDispatchCallback(
@@ -303,5 +304,10 @@ void MemoryPressureMonitor::SetDispatchCallback(
dispatch_callback_ = callback;
}
+// static
+MemoryPressureMonitor* MemoryPressureMonitor::Get() {
+ return g_monitor_notifying;
+}
+
} // namespace chromeos
} // namespace base
diff --git a/chromium/base/memory/memory_pressure_monitor_chromeos.h b/chromium/base/memory/memory_pressure_monitor_chromeos.h
index 563ba85081f..02fe0381bef 100644
--- a/chromium/base/memory/memory_pressure_monitor_chromeos.h
+++ b/chromium/base/memory/memory_pressure_monitor_chromeos.h
@@ -1,121 +1,119 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
+// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
#ifndef BASE_MEMORY_MEMORY_PRESSURE_MONITOR_CHROMEOS_H_
#define BASE_MEMORY_MEMORY_PRESSURE_MONITOR_CHROMEOS_H_
+#include <vector>
+
#include "base/base_export.h"
#include "base/files/scoped_file.h"
#include "base/macros.h"
#include "base/memory/memory_pressure_listener.h"
#include "base/memory/memory_pressure_monitor.h"
#include "base/memory/weak_ptr.h"
+#include "base/time/time.h"
#include "base/timer/timer.h"
namespace base {
namespace chromeos {
-class TestMemoryPressureMonitor;
-
////////////////////////////////////////////////////////////////////////////////
// MemoryPressureMonitor
//
// A class to handle the observation of our free memory. It notifies the
// MemoryPressureListener of memory fill level changes, so that it can take
// action to reduce memory resources accordingly.
-//
class BASE_EXPORT MemoryPressureMonitor : public base::MemoryPressureMonitor {
public:
- using GetUsedMemoryInPercentCallback = int (*)();
-
- // There are two memory pressure events:
- // MODERATE - which will mainly release caches.
- // CRITICAL - which will discard tabs.
- // The |MemoryPressureThresholds| enum selects the strategy of firing these
- // events: A conservative strategy will keep as much content in memory as
- // possible (causing the system to swap to zram) and an aggressive strategy
- // will release memory earlier to avoid swapping.
- enum MemoryPressureThresholds {
- // Use the system default.
- THRESHOLD_DEFAULT = 0,
- // Try to keep as much content in memory as possible.
- THRESHOLD_CONSERVATIVE = 1,
- // Discard caches earlier, allowing to keep more tabs in memory.
- THRESHOLD_AGGRESSIVE_CACHE_DISCARD = 2,
- // Discard tabs earlier, allowing the system to get faster.
- THRESHOLD_AGGRESSIVE_TAB_DISCARD = 3,
- // Discard caches and tabs earlier to allow the system to be faster.
- THRESHOLD_AGGRESSIVE = 4
- };
-
- explicit MemoryPressureMonitor(MemoryPressureThresholds thresholds);
+ // The MemoryPressureMonitor reads the pressure levels from the
+ // /sys/kernel/mm/chromeos-low_mem/margin and does not need to be configured.
+ //
+ // NOTE: You should check that the kernel supports notifications by calling
+ // SupportsKernelNotifications() before constructing a new instance of this
+ // class.
+ MemoryPressureMonitor();
~MemoryPressureMonitor() override;
- // Redo the memory pressure calculation soon and call again if a critical
- // memory pressure prevails. Note that this call will trigger an asynchronous
- // action which gives the system time to release memory back into the pool.
- void ScheduleEarlyCheck();
-
// Get the current memory pressure level.
MemoryPressureListener::MemoryPressureLevel GetCurrentPressureLevel()
- override;
+ const override;
void SetDispatchCallback(const DispatchCallback& callback) override;
+ // GetMarginFileParts returns a vector of the configured margin file values.
+ // The margin file contains two or more values, but we're only concerned with
+ // the first two. The first represents critical memory pressure, the second
+ // is moderate memory pressure level.
+ static std::vector<int> GetMarginFileParts();
+
+ // SupportsKernelNotifications will return true if the kernel supports and is
+ // configured for notifications on memory availability changes.
+ static bool SupportsKernelNotifications();
+
+ // ScheduleEarlyCheck is used by the ChromeOS tab manager delegate to force it
+ // to quickly recheck pressure levels after a tab discard or some other
+ // action.
+ void ScheduleEarlyCheck();
+
+ // Returns the moderate pressure threshold as read from the margin file.
+ int ModeratePressureThresholdMBForTesting() const {
+ return moderate_pressure_threshold_mb_;
+ }
+
+ // Returns the critical pressure threshold as read from the margin file.
+ int CriticalPressureThresholdMBForTesting() const {
+ return critical_pressure_threshold_mb_;
+ }
+
// Returns a type-casted version of the current memory pressure monitor. A
// simple wrapper to base::MemoryPressureMonitor::Get.
static MemoryPressureMonitor* Get();
- private:
- friend TestMemoryPressureMonitor;
- // Starts observing the memory fill level.
- // Calls to StartObserving should always be matched with calls to
- // StopObserving.
- void StartObserving();
-
- // Stop observing the memory fill level.
- // May be safely called if StartObserving has not been called.
- void StopObserving();
-
- // The function which gets periodically called to check any changes in the
- // memory pressure. It will report pressure changes as well as continuous
- // critical pressure levels.
+ protected:
+ // This constructor is only used for testing.
+ MemoryPressureMonitor(
+ const std::string& margin_file,
+ const std::string& available_file,
+ base::RepeatingCallback<bool(int)> kernel_waiting_callback,
+ bool enable_metrics);
+
+ static std::vector<int> GetMarginFileParts(const std::string& margin_file);
void CheckMemoryPressure();
- // The function periodically checks the memory pressure changes and records
- // the UMA histogram statistics for the current memory pressure level.
+ private:
+ void HandleKernelNotification(bool result);
+ void ScheduleWaitForKernelNotification();
void CheckMemoryPressureAndRecordStatistics();
- // Get the memory pressure in percent (virtual for testing).
- virtual int GetUsedMemoryInPercent();
+ int moderate_pressure_threshold_mb_ = 0;
+ int critical_pressure_threshold_mb_ = 0;
- // The current memory pressure.
- base::MemoryPressureListener::MemoryPressureLevel
- current_memory_pressure_level_;
+ // We keep track of how long it has been since we last notified at the
+ // moderate level.
+ base::TimeTicks last_moderate_notification_;
- // A periodic timer to check for resource pressure changes. This will get
- // replaced by a kernel triggered event system (see crbug.com/381196).
- base::RepeatingTimer timer_;
+ // We keep track of how long it's been since we notified on the
+ // Memory.PressureLevel metric.
+ base::TimeTicks last_pressure_level_report_;
- // To slow down the amount of moderate pressure event calls, this counter
- // gets used to count the number of events since the last event occured.
- int moderate_pressure_repeat_count_;
+ MemoryPressureListener::MemoryPressureLevel current_memory_pressure_level_ =
+ MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE;
- // The "Memory.PressureLevel" statistic is recorded every
- // 5 seconds, but the timer to report "ChromeOS.MemoryPressureLevel"
- // fires every second. This counter is used to allow reporting
- // "Memory.PressureLevel" correctly without adding another
- // timer.
- int seconds_since_reporting_;
+ // File descriptor used to read and poll(2) available memory from sysfs,
+ // In /sys/kernel/mm/chromeos-low_mem/available.
+ ScopedFD available_mem_file_;
- // The thresholds for moderate and critical pressure.
- const int moderate_pressure_threshold_percent_;
- const int critical_pressure_threshold_percent_;
+ DispatchCallback dispatch_callback_;
- // File descriptor used to detect low memory condition.
- ScopedFD low_mem_file_;
+ // A periodic timer which will be used to report a UMA metric on the current
+ // memory pressure level as theoretically we could go a very long time without
+ // ever receiving a notification.
+ base::RepeatingTimer reporting_timer_;
- DispatchCallback dispatch_callback_;
+ // Kernel waiting callback which is responsible for blocking on the
+ // available file until it receives a kernel notification, this is
+ // configurable to make testing easier.
+ base::RepeatingCallback<bool()> kernel_waiting_callback_;
base::WeakPtrFactory<MemoryPressureMonitor> weak_ptr_factory_;
@@ -124,5 +122,4 @@ class BASE_EXPORT MemoryPressureMonitor : public base::MemoryPressureMonitor {
} // namespace chromeos
} // namespace base
-
#endif // BASE_MEMORY_MEMORY_PRESSURE_MONITOR_CHROMEOS_H_
diff --git a/chromium/base/memory/memory_pressure_monitor_chromeos_unittest.cc b/chromium/base/memory/memory_pressure_monitor_chromeos_unittest.cc
index 0139610a69b..acc255b8ccb 100644
--- a/chromium/base/memory/memory_pressure_monitor_chromeos_unittest.cc
+++ b/chromium/base/memory/memory_pressure_monitor_chromeos_unittest.cc
@@ -1,173 +1,227 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
+// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/memory_pressure_monitor_chromeos.h"
+#include <unistd.h>
+#include <string>
+
#include "base/bind.h"
+#include "base/files/file_util.h"
+#include "base/files/scoped_temp_dir.h"
#include "base/macros.h"
#include "base/memory/memory_pressure_listener.h"
#include "base/run_loop.h"
+#include "base/synchronization/waitable_event.h"
#include "base/system/sys_info.h"
+#include "base/task/post_task.h"
#include "base/test/scoped_task_environment.h"
+#include "base/test/test_timeouts.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/scoped_blocking_call.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
namespace chromeos {
namespace {
+bool SetFileContents(const FilePath& path, const std::string& contents) {
+ return static_cast<std::string::size_type>(base::WriteFile(
+ path, contents.c_str(), contents.size())) == contents.size();
+}
-// True if the memory notifier got called.
-// Do not read/modify value directly.
-bool on_memory_pressure_called = false;
+// Since it would be very hard to mock sysfs instead we will send in our own
+// implementation of WaitForKernelNotification which instead will block on a
+// pipe that we can trigger for the test to cause a mock kernel notification.
+bool WaitForMockKernelNotification(int pipe_read_fd, int available_fd) {
+ base::ScopedBlockingCall scoped_blocking_call(FROM_HERE,
+ base::BlockingType::WILL_BLOCK);
-// If the memory notifier got called, this is the memory pressure reported.
-MemoryPressureListener::MemoryPressureLevel on_memory_pressure_level =
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE;
+ // We just use a pipe to block our kernel notification thread until we have
+ // a fake kernel notification.
+ char buf = 0;
+ int res = HANDLE_EINTR(read(pipe_read_fd, &buf, sizeof(buf)));
-// Processes OnMemoryPressure calls.
-void OnMemoryPressure(MemoryPressureListener::MemoryPressureLevel level) {
- on_memory_pressure_called = true;
- on_memory_pressure_level = level;
+ // Fail if we encounter any error.
+ return res > 0;
}
-// Resets the indicator for memory pressure.
-void ResetOnMemoryPressureCalled() {
- on_memory_pressure_called = false;
+void TriggerKernelNotification(int pipe_write_fd) {
+ char buf = '1';
+ HANDLE_EINTR(write(pipe_write_fd, &buf, sizeof(buf)));
}
-// Returns true when OnMemoryPressure was called (and resets it).
-bool WasOnMemoryPressureCalled() {
- bool b = on_memory_pressure_called;
- ResetOnMemoryPressureCalled();
- return b;
+// Processes OnMemoryPressure calls by just storing the sequence of events so we
+// can validate that we received the expected pressure levels as the test runs.
+void OnMemoryPressure(
+ std::vector<MemoryPressureListener::MemoryPressureLevel>* history,
+ MemoryPressureListener::MemoryPressureLevel level) {
+ history->push_back(level);
}
} // namespace
class TestMemoryPressureMonitor : public MemoryPressureMonitor {
public:
- TestMemoryPressureMonitor()
- : MemoryPressureMonitor(THRESHOLD_DEFAULT),
- memory_in_percent_override_(0) {
- // Disable any timers which are going on and set a special memory reporting
- // function.
- StopObserving();
+ TestMemoryPressureMonitor(
+ const std::string& mock_margin_file,
+ const std::string& mock_available_file,
+ base::RepeatingCallback<bool(int)> kernel_waiting_callback,
+ bool enable_metrics)
+ : MemoryPressureMonitor(mock_margin_file,
+ mock_available_file,
+ std::move(kernel_waiting_callback),
+ enable_metrics) {}
+
+ static std::vector<int> GetMarginFileParts(const std::string& file) {
+ return MemoryPressureMonitor::GetMarginFileParts(file);
}
- ~TestMemoryPressureMonitor() override = default;
- void SetMemoryInPercentOverride(int percent) {
- memory_in_percent_override_ = percent;
- }
-
- void CheckMemoryPressureForTest() {
- CheckMemoryPressure();
- }
+ ~TestMemoryPressureMonitor() override = default;
private:
- int GetUsedMemoryInPercent() override {
- return memory_in_percent_override_;
- }
-
- int memory_in_percent_override_;
DISALLOW_COPY_AND_ASSIGN(TestMemoryPressureMonitor);
};
-// This test tests the various transition states from memory pressure, looking
-// for the correct behavior on event reposting as well as state updates.
+TEST(ChromeOSMemoryPressureMonitorTest, ParseMarginFileGood) {
+ base::ScopedTempDir tmp_dir;
+ ASSERT_TRUE(tmp_dir.CreateUniqueTempDir());
+
+ FilePath margin_file = tmp_dir.GetPath().Append("margin");
+
+ ASSERT_TRUE(SetFileContents(margin_file, "123"));
+ const std::vector<int> parts1 =
+ TestMemoryPressureMonitor::GetMarginFileParts(margin_file.value());
+ ASSERT_EQ(1u, parts1.size());
+ ASSERT_EQ(123, parts1[0]);
+
+ ASSERT_TRUE(SetFileContents(margin_file, "123 456"));
+ const std::vector<int> parts2 =
+ TestMemoryPressureMonitor::GetMarginFileParts(margin_file.value());
+ ASSERT_EQ(2u, parts2.size());
+ ASSERT_EQ(123, parts2[0]);
+ ASSERT_EQ(456, parts2[1]);
+}
+
+TEST(ChromeOSMemoryPressureMonitorTest, ParseMarginFileBad) {
+ base::ScopedTempDir tmp_dir;
+ ASSERT_TRUE(tmp_dir.CreateUniqueTempDir());
+ FilePath margin_file = tmp_dir.GetPath().Append("margin");
+
+ // An empty margin file is bad.
+ ASSERT_TRUE(SetFileContents(margin_file, ""));
+ ASSERT_TRUE(TestMemoryPressureMonitor::GetMarginFileParts(margin_file.value())
+ .empty());
+
+ // The numbers will be in base10, so 4a6 would be invalid.
+ ASSERT_TRUE(SetFileContents(margin_file, "123 4a6"));
+ ASSERT_TRUE(TestMemoryPressureMonitor::GetMarginFileParts(margin_file.value())
+ .empty());
+
+ // The numbers must be integers.
+ ASSERT_TRUE(SetFileContents(margin_file, "123.2 412.3"));
+ ASSERT_TRUE(TestMemoryPressureMonitor::GetMarginFileParts(margin_file.value())
+ .empty());
+}
+
TEST(ChromeOSMemoryPressureMonitorTest, CheckMemoryPressure) {
- // crbug.com/844102:
- if (base::SysInfo::IsRunningOnChromeOS())
- return;
+ // Create a temporary directory for our margin and available files.
+ base::ScopedTempDir tmp_dir;
+ ASSERT_TRUE(tmp_dir.CreateUniqueTempDir());
+
+ FilePath margin_file = tmp_dir.GetPath().Append("margin");
+ FilePath available_file = tmp_dir.GetPath().Append("available");
+
+ // Set the margin values to 500 (critical) and 1000 (moderate).
+ const std::string kMarginContents = "500 1000";
+ ASSERT_TRUE(SetFileContents(margin_file, kMarginContents));
+
+ // Write the initial available contents.
+ const std::string kInitialAvailableContents = "1500";
+ ASSERT_TRUE(SetFileContents(available_file, kInitialAvailableContents));
test::ScopedTaskEnvironment scoped_task_environment(
test::ScopedTaskEnvironment::MainThreadType::UI);
- std::unique_ptr<TestMemoryPressureMonitor> monitor(
- new TestMemoryPressureMonitor);
+
+ // We will use a mock listener to keep track of our kernel notifications which
+ // cause event to be fired. We can just examine the sequence of pressure
+ // events when we're done to validate that the pressure events were as
+ // expected.
+ std::vector<MemoryPressureListener::MemoryPressureLevel> pressure_events;
auto listener = std::make_unique<MemoryPressureListener>(
- base::BindRepeating(&OnMemoryPressure));
- // Checking the memory pressure while 0% are used should not produce any
- // events.
- monitor->SetMemoryInPercentOverride(0);
- ResetOnMemoryPressureCalled();
-
- monitor->CheckMemoryPressureForTest();
- RunLoop().RunUntilIdle();
- EXPECT_FALSE(WasOnMemoryPressureCalled());
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
+ base::BindRepeating(&OnMemoryPressure, &pressure_events));
+
+ // We use a pipe to notify our blocked kernel notification thread that there
+ // is a kernel notification we need to use a simple blocking syscall and
+ // read(2)/write(2) will work.
+ int fds[2] = {};
+ ASSERT_EQ(0, HANDLE_EINTR(pipe(fds)));
+
+ // Make sure the pipe FDs get closed.
+ ScopedFD write_end(fds[1]);
+ ScopedFD read_end(fds[0]);
+
+ auto monitor = std::make_unique<TestMemoryPressureMonitor>(
+ margin_file.value(), available_file.value(),
+ // Bind the read end to WaitForMockKernelNotification.
+ base::BindRepeating(&WaitForMockKernelNotification, read_end.get()),
+ /*enable_metrics=*/false);
+
+ // Validate that our margin levels are as expected after being parsed from our
+ // synthetic margin file.
+ ASSERT_EQ(500, monitor->CriticalPressureThresholdMBForTesting());
+ ASSERT_EQ(1000, monitor->ModeratePressureThresholdMBForTesting());
+
+ // At this point we have no memory pressure.
+ ASSERT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
monitor->GetCurrentPressureLevel());
- // Setting the memory level to 80% should produce a moderate pressure level.
- monitor->SetMemoryInPercentOverride(80);
- monitor->CheckMemoryPressureForTest();
- RunLoop().RunUntilIdle();
- EXPECT_TRUE(WasOnMemoryPressureCalled());
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+ // Moderate Pressure.
+ ASSERT_TRUE(SetFileContents(available_file, "900"));
+ TriggerKernelNotification(write_end.get());
+ RunLoop().RunWithTimeout(base::TimeDelta::FromSeconds(1));
+ ASSERT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
monitor->GetCurrentPressureLevel());
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
- on_memory_pressure_level);
-
- // We need to check that the event gets reposted after a while.
- int i = 0;
- for (; i < 100; i++) {
- monitor->CheckMemoryPressureForTest();
- RunLoop().RunUntilIdle();
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
- monitor->GetCurrentPressureLevel());
- if (WasOnMemoryPressureCalled()) {
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
- on_memory_pressure_level);
- break;
- }
- }
- // Should be more than 5 and less than 100.
- EXPECT_LE(5, i);
- EXPECT_GE(99, i);
-
- // Setting the memory usage to 99% should produce critical levels.
- monitor->SetMemoryInPercentOverride(99);
- monitor->CheckMemoryPressureForTest();
- RunLoop().RunUntilIdle();
- EXPECT_TRUE(WasOnMemoryPressureCalled());
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
- on_memory_pressure_level);
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
+
+ // Critical Pressure.
+ ASSERT_TRUE(SetFileContents(available_file, "450"));
+ TriggerKernelNotification(write_end.get());
+ RunLoop().RunWithTimeout(base::TimeDelta::FromSeconds(1));
+ ASSERT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
monitor->GetCurrentPressureLevel());
- // Calling it again should immediately produce a second call.
- monitor->CheckMemoryPressureForTest();
- RunLoop().RunUntilIdle();
- EXPECT_TRUE(WasOnMemoryPressureCalled());
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
- on_memory_pressure_level);
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
+ // Moderate Pressure.
+ ASSERT_TRUE(SetFileContents(available_file, "550"));
+ TriggerKernelNotification(write_end.get());
+ RunLoop().RunWithTimeout(base::TimeDelta::FromSeconds(1));
+ ASSERT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
monitor->GetCurrentPressureLevel());
- // When lowering the pressure again we should not get an event, but the
- // pressure should go back to moderate.
- monitor->SetMemoryInPercentOverride(80);
- monitor->CheckMemoryPressureForTest();
- RunLoop().RunUntilIdle();
- EXPECT_FALSE(WasOnMemoryPressureCalled());
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+ // No pressure, note: this will not cause any event.
+ ASSERT_TRUE(SetFileContents(available_file, "1150"));
+ TriggerKernelNotification(write_end.get());
+ RunLoop().RunWithTimeout(base::TimeDelta::FromSeconds(1));
+ ASSERT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
monitor->GetCurrentPressureLevel());
- // We should need exactly the same amount of calls as before, before the next
- // call comes in.
- int j = 0;
- for (; j < 100; j++) {
- monitor->CheckMemoryPressureForTest();
- RunLoop().RunUntilIdle();
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
- monitor->GetCurrentPressureLevel());
- if (WasOnMemoryPressureCalled()) {
- EXPECT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
- on_memory_pressure_level);
- break;
- }
- }
- // We should have needed exactly the same amount of checks as before.
- EXPECT_EQ(j, i);
+ // Back into moderate.
+ ASSERT_TRUE(SetFileContents(available_file, "950"));
+ TriggerKernelNotification(write_end.get());
+ RunLoop().RunWithTimeout(base::TimeDelta::FromSeconds(1));
+ ASSERT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+ monitor->GetCurrentPressureLevel());
+
+ // Now our events should be MODERATE, CRITICAL, MODERATE.
+ ASSERT_EQ(4u, pressure_events.size());
+ ASSERT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+ pressure_events[0]);
+ ASSERT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
+ pressure_events[1]);
+ ASSERT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+ pressure_events[2]);
+ ASSERT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
+ pressure_events[3]);
}
} // namespace chromeos
diff --git a/chromium/base/memory/memory_pressure_monitor_mac.cc b/chromium/base/memory/memory_pressure_monitor_mac.cc
index a91e52a4da2..bd829fcf963 100644
--- a/chromium/base/memory/memory_pressure_monitor_mac.cc
+++ b/chromium/base/memory/memory_pressure_monitor_mac.cc
@@ -165,7 +165,7 @@ int MemoryPressureMonitor::GetSecondsPerUMATick() {
}
MemoryPressureListener::MemoryPressureLevel
-MemoryPressureMonitor::GetCurrentPressureLevel() {
+MemoryPressureMonitor::GetCurrentPressureLevel() const {
return last_pressure_level_;
}
diff --git a/chromium/base/memory/memory_pressure_monitor_mac.h b/chromium/base/memory/memory_pressure_monitor_mac.h
index b85b6c9017d..b98a5c3e147 100644
--- a/chromium/base/memory/memory_pressure_monitor_mac.h
+++ b/chromium/base/memory/memory_pressure_monitor_mac.h
@@ -29,7 +29,7 @@ class BASE_EXPORT MemoryPressureMonitor : public base::MemoryPressureMonitor {
~MemoryPressureMonitor() override;
// Returns the currently-observed memory pressure.
- MemoryPressureLevel GetCurrentPressureLevel() override;
+ MemoryPressureLevel GetCurrentPressureLevel() const override;
void SetDispatchCallback(const DispatchCallback& callback) override;
diff --git a/chromium/base/memory/memory_pressure_monitor_notifying_chromeos.cc b/chromium/base/memory/memory_pressure_monitor_notifying_chromeos.cc
deleted file mode 100644
index 1cd8c91da3b..00000000000
--- a/chromium/base/memory/memory_pressure_monitor_notifying_chromeos.cc
+++ /dev/null
@@ -1,312 +0,0 @@
-// Copyright 2019 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-#include "base/memory/memory_pressure_monitor_notifying_chromeos.h"
-
-#include <fcntl.h>
-#include <sys/poll.h>
-#include <string>
-#include <vector>
-
-#include "base/bind.h"
-#include "base/files/file_util.h"
-#include "base/metrics/histogram_macros.h"
-#include "base/no_destructor.h"
-#include "base/posix/eintr_wrapper.h"
-#include "base/process/process_metrics.h"
-#include "base/single_thread_task_runner.h"
-#include "base/strings/string_number_conversions.h"
-#include "base/strings/string_split.h"
-#include "base/strings/string_util.h"
-#include "base/system/sys_info.h"
-#include "base/task/post_task.h"
-#include "base/threading/scoped_blocking_call.h"
-#include "base/threading/thread_task_runner_handle.h"
-#include "base/time/time.h"
-
-namespace base {
-namespace chromeos {
-
-namespace {
-// Type-safe version of |g_monitor_notifying| from
-// base/memory/memory_pressure_monitor.cc, this was originally added because
-// TabManagerDelegate for chromeos needs to call into ScheduleEarlyCheck which
-// isn't a public API in the base MemoryPressureMonitor. This matters because
-// ChromeOS may create a FakeMemoryPressureMonitor for browser tests and that's
-// why this type-specific version was added.
-MemoryPressureMonitorNotifying* g_monitor_notifying = nullptr;
-
-// We try not to re-notify on moderate too frequently, this time
-// controls how frequently we will notify after our first notification.
-constexpr base::TimeDelta kModerateMemoryPressureCooldownTime =
- base::TimeDelta::FromSeconds(10);
-
-// The margin mem file contains the two memory levels, the first is the
-// critical level and the second is the moderate level. Note, this
-// file may contain more values but only the first two are used for
-// memory pressure notifications in chromeos.
-constexpr char kMarginMemFile[] = "/sys/kernel/mm/chromeos-low_mem/margin";
-
-// The available memory file contains the available memory as determined
-// by the kernel.
-constexpr char kAvailableMemFile[] =
- "/sys/kernel/mm/chromeos-low_mem/available";
-
-// Converts an available memory value in MB to a memory pressure level.
-MemoryPressureListener::MemoryPressureLevel GetMemoryPressureLevelFromAvailable(
- int available_mb,
- int moderate_avail_mb,
- int critical_avail_mb) {
- if (available_mb < critical_avail_mb)
- return MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL;
- if (available_mb < moderate_avail_mb)
- return MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE;
-
- return MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE;
-}
-
-int64_t ReadAvailableMemoryMB(int available_fd) {
- // Read the available memory.
- char buf[32] = {};
-
- // kernfs/file.c:
- // "Once poll/select indicates that the value has changed, you
- // need to close and re-open the file, or seek to 0 and read again.
- ssize_t bytes_read = HANDLE_EINTR(pread(available_fd, buf, sizeof(buf), 0));
- PCHECK(bytes_read != -1);
-
- std::string mem_str(buf, bytes_read);
- int64_t available = -1;
- CHECK(base::StringToInt64(
- base::TrimWhitespaceASCII(mem_str, base::TrimPositions::TRIM_ALL),
- &available));
-
- return available;
-}
-
-// This function will wait until the /sys/kernel/mm/chromeos-low_mem/available
-// file becomes readable and then read the latest value. This file will only
-// become readable once the available memory cross through one of the margin
-// values specified in /sys/kernel/mm/chromeos-low_mem/margin, for more
-// details see https://crrev.com/c/536336.
-bool WaitForMemoryPressureChanges(int available_fd) {
- base::ScopedBlockingCall scoped_blocking_call(FROM_HERE,
- base::BlockingType::WILL_BLOCK);
-
- pollfd pfd = {available_fd, POLLPRI | POLLERR, 0};
- int res = HANDLE_EINTR(poll(&pfd, 1, -1)); // Wait indefinitely.
- PCHECK(res != -1);
-
- if (pfd.revents != (POLLPRI | POLLERR)) {
- // If we didn't receive POLLPRI | POLLERR it means we likely received
- // POLLNVAL because the fd has been closed.
- LOG(ERROR) << "WaitForMemoryPressureChanges received unexpected revents: "
- << pfd.revents;
-
- // We no longer want to wait for a kernel notification if the fd has been
- // closed.
- return false;
- }
-
- return true;
-}
-
-} // namespace
-
-MemoryPressureMonitorNotifying::MemoryPressureMonitorNotifying()
- : MemoryPressureMonitorNotifying(
- kMarginMemFile,
- kAvailableMemFile,
- base::BindRepeating(&WaitForMemoryPressureChanges),
- /*enable_metrics=*/true) {}
-
-MemoryPressureMonitorNotifying::MemoryPressureMonitorNotifying(
- const std::string& margin_file,
- const std::string& available_file,
- base::RepeatingCallback<bool(int)> kernel_waiting_callback,
- bool enable_metrics)
- : available_mem_file_(HANDLE_EINTR(open(available_file.c_str(), O_RDONLY))),
- dispatch_callback_(
- base::BindRepeating(&MemoryPressureListener::NotifyMemoryPressure)),
- kernel_waiting_callback_(
- base::BindRepeating(std::move(kernel_waiting_callback),
- available_mem_file_.get())),
- weak_ptr_factory_(this) {
- DCHECK(g_monitor_notifying == nullptr);
- g_monitor_notifying = this;
-
- CHECK(available_mem_file_.is_valid());
- std::vector<int> margin_parts =
- MemoryPressureMonitorNotifying::GetMarginFileParts(margin_file);
-
- // This class SHOULD have verified kernel support by calling
- // SupportsKernelNotifications() before creating a new instance of this.
- // Therefore we will check fail if we don't have multiple margin values.
- CHECK_LE(2u, margin_parts.size());
- critical_pressure_threshold_mb_ = margin_parts[0];
- moderate_pressure_threshold_mb_ = margin_parts[1];
-
- if (enable_metrics) {
- // We will report the current memory pressure at some periodic interval,
- // the metric ChromeOS.MemoryPRessureLevel is currently reported every 1s.
- reporting_timer_.Start(
- FROM_HERE, base::TimeDelta::FromSeconds(1),
- base::BindRepeating(&MemoryPressureMonitorNotifying::
- CheckMemoryPressureAndRecordStatistics,
- weak_ptr_factory_.GetWeakPtr()));
- }
-
- ScheduleWaitForKernelNotification();
-}
-
-MemoryPressureMonitorNotifying::~MemoryPressureMonitorNotifying() {
- DCHECK(g_monitor_notifying);
- g_monitor_notifying = nullptr;
-}
-
-std::vector<int> MemoryPressureMonitorNotifying::GetMarginFileParts() {
- static const base::NoDestructor<std::vector<int>> margin_file_parts(
- GetMarginFileParts(kMarginMemFile));
- return *margin_file_parts;
-}
-
-std::vector<int> MemoryPressureMonitorNotifying::GetMarginFileParts(
- const std::string& file) {
- std::vector<int> margin_values;
- std::string margin_contents;
- if (base::ReadFileToString(base::FilePath(file), &margin_contents)) {
- std::vector<std::string> margins =
- base::SplitString(margin_contents, base::kWhitespaceASCII,
- base::TRIM_WHITESPACE, base::SPLIT_WANT_NONEMPTY);
- for (const auto& v : margins) {
- int value = -1;
- if (!base::StringToInt(v, &value)) {
- // If any of the values weren't parseable as an int we return
- // nothing as the file format is unexpected.
- LOG(ERROR) << "Unable to parse margin file contents as integer: " << v;
- return std::vector<int>();
- }
- margin_values.push_back(value);
- }
- } else {
- LOG(ERROR) << "Unable to read margin file: " << kMarginMemFile;
- }
- return margin_values;
-}
-
-bool MemoryPressureMonitorNotifying::SupportsKernelNotifications() {
- // Unfortunately at the moment the only way to determine if the chromeos
- // kernel supports polling on the available file is to observe two values
- // in the margin file, if the critical and moderate levels are specified
- // there then we know the kernel must support polling on available.
- return MemoryPressureMonitorNotifying::GetMarginFileParts().size() >= 2;
-}
-
-MemoryPressureListener::MemoryPressureLevel
-MemoryPressureMonitorNotifying::GetCurrentPressureLevel() {
- return current_memory_pressure_level_;
-}
-
-// CheckMemoryPressure will get the current memory pressure level by reading
-// the available file.
-void MemoryPressureMonitorNotifying::CheckMemoryPressure() {
- auto previous_memory_pressure = current_memory_pressure_level_;
- int64_t mem_avail = ReadAvailableMemoryMB(available_mem_file_.get());
- current_memory_pressure_level_ = GetMemoryPressureLevelFromAvailable(
- mem_avail, moderate_pressure_threshold_mb_,
- critical_pressure_threshold_mb_);
- if (current_memory_pressure_level_ ==
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE) {
- last_moderate_notification_ = base::TimeTicks();
- return;
- }
-
- // In the case of MODERATE memory pressure we may be in this state for quite
- // some time so we limit the rate at which we dispatch notifications.
- if (current_memory_pressure_level_ ==
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE) {
- if (previous_memory_pressure == current_memory_pressure_level_) {
- if (base::TimeTicks::Now() - last_moderate_notification_ <
- kModerateMemoryPressureCooldownTime) {
- return;
- } else if (previous_memory_pressure ==
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL) {
- // Reset the moderate notification time if we just crossed back.
- last_moderate_notification_ = base::TimeTicks::Now();
- return;
- }
- }
-
- last_moderate_notification_ = base::TimeTicks::Now();
- }
-
- VLOG(1) << "MemoryPressureMonitorNotifying::CheckMemoryPressure dispatching "
- "at level: "
- << current_memory_pressure_level_;
- dispatch_callback_.Run(current_memory_pressure_level_);
-}
-
-void MemoryPressureMonitorNotifying::HandleKernelNotification(bool result) {
- // If WaitForKernelNotification returned false then the FD has been closed and
- // we just exit without waiting again.
- if (!result) {
- return;
- }
-
- CheckMemoryPressure();
-
- // Now we need to schedule back our blocking task to wait for more
- // kernel notifications.
- ScheduleWaitForKernelNotification();
-}
-
-void MemoryPressureMonitorNotifying::CheckMemoryPressureAndRecordStatistics() {
- // Note: If we support notifications of memory pressure changes in both
- // directions we will not have to update the cached value as it will always
- // be correct.
- CheckMemoryPressure();
-
- // We only report Memory.PressureLevel every 5seconds while
- // we report ChromeOS.MemoryPressureLevel every 1s.
- if (base::TimeTicks::Now() - last_pressure_level_report_ >
- base::MemoryPressureMonitor::kUMAMemoryPressureLevelPeriod) {
- // Record to UMA "Memory.PressureLevel" a tick is 5seconds.
- RecordMemoryPressure(current_memory_pressure_level_, 1);
- last_pressure_level_report_ = base::TimeTicks::Now();
- }
-
- // Record UMA histogram statistics for the current memory pressure level, it
- // would seem that only Memory.PressureLevel would be necessary.
- constexpr int kNumberPressureLevels = 3;
- UMA_HISTOGRAM_ENUMERATION("ChromeOS.MemoryPressureLevel",
- current_memory_pressure_level_,
- kNumberPressureLevels);
-}
-
-void MemoryPressureMonitorNotifying::ScheduleEarlyCheck() {
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE,
- base::BindOnce(&MemoryPressureMonitorNotifying::CheckMemoryPressure,
- weak_ptr_factory_.GetWeakPtr()));
-}
-
-void MemoryPressureMonitorNotifying::ScheduleWaitForKernelNotification() {
- base::PostTaskWithTraitsAndReplyWithResult(
- FROM_HERE, {base::MayBlock()}, kernel_waiting_callback_,
- base::BindRepeating(
- &MemoryPressureMonitorNotifying::HandleKernelNotification,
- weak_ptr_factory_.GetWeakPtr()));
-}
-
-void MemoryPressureMonitorNotifying::SetDispatchCallback(
- const DispatchCallback& callback) {
- dispatch_callback_ = callback;
-}
-
-// static
-MemoryPressureMonitorNotifying* MemoryPressureMonitorNotifying::Get() {
- return g_monitor_notifying;
-}
-
-} // namespace chromeos
-} // namespace base
diff --git a/chromium/base/memory/memory_pressure_monitor_notifying_chromeos.h b/chromium/base/memory/memory_pressure_monitor_notifying_chromeos.h
deleted file mode 100644
index 5af176f531b..00000000000
--- a/chromium/base/memory/memory_pressure_monitor_notifying_chromeos.h
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright 2019 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-#ifndef BASE_MEMORY_MEMORY_PRESSURE_MONITOR_NOTIFYING_CHROMEOS_H_
-#define BASE_MEMORY_MEMORY_PRESSURE_MONITOR_NOTIFYING_CHROMEOS_H_
-
-#include <vector>
-
-#include "base/base_export.h"
-#include "base/files/scoped_file.h"
-#include "base/macros.h"
-#include "base/memory/memory_pressure_listener.h"
-#include "base/memory/memory_pressure_monitor.h"
-#include "base/memory/weak_ptr.h"
-#include "base/time/time.h"
-#include "base/timer/timer.h"
-
-namespace base {
-namespace chromeos {
-
-////////////////////////////////////////////////////////////////////////////////
-// MemoryPressureMonitorNotifying
-//
-// A class to handle the observation of our free memory. It notifies the
-// MemoryPressureListener of memory fill level changes, so that it can take
-// action to reduce memory resources accordingly.
-//
-// TODO(bgeffon): This class should become chromeos::MemoryPressureMonitor
-// once all kernels support notifications.
-//
-class BASE_EXPORT MemoryPressureMonitorNotifying
- : public base::MemoryPressureMonitor {
- public:
- // The MemoryPressureMonitorNotifying reads the pressure levels from the
- // /sys/kernel/mm/chromeos-low_mem/margin and does not need to be configured.
- //
- // NOTE: You should check that the kernel supports notifications by calling
- // SupportsKernelNotifications() before constructing a new instance of this
- // class.
- MemoryPressureMonitorNotifying();
- ~MemoryPressureMonitorNotifying() override;
-
- // Get the current memory pressure level.
- MemoryPressureListener::MemoryPressureLevel GetCurrentPressureLevel()
- override;
- void SetDispatchCallback(const DispatchCallback& callback) override;
-
- // GetMarginFileParts returns a vector of the configured margin file values.
- // The margin file contains two or more values, but we're only concerned with
- // the first two. The first represents critical memory pressure, the second
- // is moderate memory pressure level.
- static std::vector<int> GetMarginFileParts();
-
- // SupportsKernelNotifications will return true if the kernel supports and is
- // configured for notifications on memory availability changes.
- static bool SupportsKernelNotifications();
-
- // ScheduleEarlyCheck is used by the ChromeOS tab manager delegate to force it
- // to quickly recheck pressure levels after a tab discard or some other
- // action.
- void ScheduleEarlyCheck();
-
- // Returns the moderate pressure threshold as read from the margin file.
- int ModeratePressureThresholdMBForTesting() const {
- return moderate_pressure_threshold_mb_;
- }
-
- // Returns the critical pressure threshold as read from the margin file.
- int CriticalPressureThresholdMBForTesting() const {
- return critical_pressure_threshold_mb_;
- }
-
- // Returns a type-casted version of the current memory pressure monitor. A
- // simple wrapper to base::MemoryPressureMonitor::Get.
- static MemoryPressureMonitorNotifying* Get();
-
- protected:
- // This constructor is only used for testing.
- MemoryPressureMonitorNotifying(
- const std::string& margin_file,
- const std::string& available_file,
- base::RepeatingCallback<bool(int)> kernel_waiting_callback,
- bool enable_metrics);
-
- static std::vector<int> GetMarginFileParts(const std::string& margin_file);
- void CheckMemoryPressure();
-
- private:
- void HandleKernelNotification(bool result);
- void ScheduleWaitForKernelNotification();
- void CheckMemoryPressureAndRecordStatistics();
-
- int moderate_pressure_threshold_mb_ = 0;
- int critical_pressure_threshold_mb_ = 0;
-
- // We keep track of how long it has been since we last notified at the
- // moderate level.
- base::TimeTicks last_moderate_notification_;
-
- // We keep track of how long it's been since we notified on the
- // Memory.PressureLevel metric.
- base::TimeTicks last_pressure_level_report_;
-
- MemoryPressureListener::MemoryPressureLevel current_memory_pressure_level_ =
- MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE;
-
- // File descriptor used to read and poll(2) available memory from sysfs,
- // In /sys/kernel/mm/chromeos-low_mem/available.
- ScopedFD available_mem_file_;
-
- DispatchCallback dispatch_callback_;
-
- // A periodic timer which will be used to report a UMA metric on the current
- // memory pressure level as theoretically we could go a very long time without
- // ever receiving a notification.
- base::RepeatingTimer reporting_timer_;
-
- // Kernel waiting callback which is responsible for blocking on the
- // available file until it receives a kernel notification, this is
- // configurable to make testing easier.
- base::RepeatingCallback<bool()> kernel_waiting_callback_;
-
- base::WeakPtrFactory<MemoryPressureMonitorNotifying> weak_ptr_factory_;
-
- DISALLOW_COPY_AND_ASSIGN(MemoryPressureMonitorNotifying);
-};
-
-} // namespace chromeos
-} // namespace base
-#endif // BASE_MEMORY_MEMORY_PRESSURE_MONITOR_NOTIFYING_CHROMEOS_H_
diff --git a/chromium/base/memory/memory_pressure_monitor_notifying_chromeos_unittest.cc b/chromium/base/memory/memory_pressure_monitor_notifying_chromeos_unittest.cc
deleted file mode 100644
index b9dfe6c3c02..00000000000
--- a/chromium/base/memory/memory_pressure_monitor_notifying_chromeos_unittest.cc
+++ /dev/null
@@ -1,234 +0,0 @@
-// Copyright 2019 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/memory_pressure_monitor_notifying_chromeos.h"
-
-#include <unistd.h>
-#include <string>
-
-#include "base/bind.h"
-#include "base/files/file_util.h"
-#include "base/files/scoped_temp_dir.h"
-#include "base/macros.h"
-#include "base/memory/memory_pressure_listener.h"
-#include "base/run_loop.h"
-#include "base/synchronization/waitable_event.h"
-#include "base/system/sys_info.h"
-#include "base/task/post_task.h"
-#include "base/test/scoped_task_environment.h"
-#include "base/test/test_timeouts.h"
-#include "base/threading/platform_thread.h"
-#include "base/threading/scoped_blocking_call.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace base {
-namespace chromeos {
-
-namespace {
-bool SetFileContents(const FilePath& path, const std::string& contents) {
- return static_cast<std::string::size_type>(base::WriteFile(
- path, contents.c_str(), contents.size())) == contents.size();
-}
-
-// Since it would be very hard to mock sysfs instead we will send in our own
-// implementation of WaitForKernelNotification which instead will block on a
-// pipe that we can trigger for the test to cause a mock kernel notification.
-bool WaitForMockKernelNotification(int pipe_read_fd, int available_fd) {
- base::ScopedBlockingCall scoped_blocking_call(FROM_HERE,
- base::BlockingType::WILL_BLOCK);
-
- // We just use a pipe to block our kernel notification thread until we have
- // a fake kernel notification.
- char buf = 0;
- int res = HANDLE_EINTR(read(pipe_read_fd, &buf, sizeof(buf)));
-
- // Fail if we encounter any error.
- return res > 0;
-}
-
-void TriggerKernelNotification(int pipe_write_fd) {
- char buf = '1';
- HANDLE_EINTR(write(pipe_write_fd, &buf, sizeof(buf)));
-}
-
-// Processes OnMemoryPressure calls by just storing the sequence of events so we
-// can validate that we received the expected pressure levels as the test runs.
-void OnMemoryPressure(
- std::vector<MemoryPressureListener::MemoryPressureLevel>* history,
- MemoryPressureListener::MemoryPressureLevel level) {
- history->push_back(level);
-}
-
-} // namespace
-
-class TestMemoryPressureMonitorNotifying
- : public MemoryPressureMonitorNotifying {
- public:
- TestMemoryPressureMonitorNotifying(
- const std::string& mock_margin_file,
- const std::string& mock_available_file,
- base::RepeatingCallback<bool(int)> kernel_waiting_callback,
- bool enable_metrics)
- : MemoryPressureMonitorNotifying(mock_margin_file,
- mock_available_file,
- std::move(kernel_waiting_callback),
- enable_metrics) {}
-
- static std::vector<int> GetMarginFileParts(const std::string& file) {
- return MemoryPressureMonitorNotifying::GetMarginFileParts(file);
- }
-
- ~TestMemoryPressureMonitorNotifying() override = default;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(TestMemoryPressureMonitorNotifying);
-};
-
-TEST(ChromeOSMemoryPressureMonitorNotifyingTest, ParseMarginFileGood) {
- base::ScopedTempDir tmp_dir;
- ASSERT_TRUE(tmp_dir.CreateUniqueTempDir());
-
- FilePath margin_file = tmp_dir.GetPath().Append("margin");
-
- ASSERT_TRUE(SetFileContents(margin_file, "123"));
- const std::vector<int> parts1 =
- TestMemoryPressureMonitorNotifying::GetMarginFileParts(
- margin_file.value());
- ASSERT_EQ(1u, parts1.size());
- ASSERT_EQ(123, parts1[0]);
-
- ASSERT_TRUE(SetFileContents(margin_file, "123 456"));
- const std::vector<int> parts2 =
- TestMemoryPressureMonitorNotifying::GetMarginFileParts(
- margin_file.value());
- ASSERT_EQ(2u, parts2.size());
- ASSERT_EQ(123, parts2[0]);
- ASSERT_EQ(456, parts2[1]);
-}
-
-TEST(ChromeOSMemoryPressureMonitorNotifyingTest, ParseMarginFileBad) {
- base::ScopedTempDir tmp_dir;
- ASSERT_TRUE(tmp_dir.CreateUniqueTempDir());
- FilePath margin_file = tmp_dir.GetPath().Append("margin");
-
- // An empty margin file is bad.
- ASSERT_TRUE(SetFileContents(margin_file, ""));
- ASSERT_TRUE(TestMemoryPressureMonitorNotifying::GetMarginFileParts(
- margin_file.value())
- .empty());
-
- // The numbers will be in base10, so 4a6 would be invalid.
- ASSERT_TRUE(SetFileContents(margin_file, "123 4a6"));
- ASSERT_TRUE(TestMemoryPressureMonitorNotifying::GetMarginFileParts(
- margin_file.value())
- .empty());
-
- // The numbers must be integers.
- ASSERT_TRUE(SetFileContents(margin_file, "123.2 412.3"));
- ASSERT_TRUE(TestMemoryPressureMonitorNotifying::GetMarginFileParts(
- margin_file.value())
- .empty());
-}
-
-TEST(ChromeOSMemoryPressureMonitorNotifyingTest, CheckMemoryPressure) {
- // Create a temporary directory for our margin and available files.
- base::ScopedTempDir tmp_dir;
- ASSERT_TRUE(tmp_dir.CreateUniqueTempDir());
-
- FilePath margin_file = tmp_dir.GetPath().Append("margin");
- FilePath available_file = tmp_dir.GetPath().Append("available");
-
- // Set the margin values to 500 (critical) and 1000 (moderate).
- const std::string kMarginContents = "500 1000";
- ASSERT_TRUE(SetFileContents(margin_file, kMarginContents));
-
- // Write the initial available contents.
- const std::string kInitialAvailableContents = "1500";
- ASSERT_TRUE(SetFileContents(available_file, kInitialAvailableContents));
-
- test::ScopedTaskEnvironment scoped_task_environment(
- test::ScopedTaskEnvironment::MainThreadType::UI);
-
- // We will use a mock listener to keep track of our kernel notifications which
- // cause event to be fired. We can just examine the sequence of pressure
- // events when we're done to validate that the pressure events were as
- // expected.
- std::vector<MemoryPressureListener::MemoryPressureLevel> pressure_events;
- auto listener = std::make_unique<MemoryPressureListener>(
- base::BindRepeating(&OnMemoryPressure, &pressure_events));
-
- // We use a pipe to notify our blocked kernel notification thread that there
- // is a kernel notification we need to use a simple blocking syscall and
- // read(2)/write(2) will work.
- int fds[2] = {};
- ASSERT_EQ(0, HANDLE_EINTR(pipe(fds)));
-
- // Make sure the pipe FDs get closed.
- ScopedFD write_end(fds[1]);
- ScopedFD read_end(fds[0]);
-
- auto monitor = std::make_unique<TestMemoryPressureMonitorNotifying>(
- margin_file.value(), available_file.value(),
- // Bind the read end to WaitForMockKernelNotification.
- base::BindRepeating(&WaitForMockKernelNotification, read_end.get()),
- /*enable_metrics=*/false);
-
- // Validate that our margin levels are as expected after being parsed from our
- // synthetic margin file.
- ASSERT_EQ(500, monitor->CriticalPressureThresholdMBForTesting());
- ASSERT_EQ(1000, monitor->ModeratePressureThresholdMBForTesting());
-
- // At this point we have no memory pressure.
- ASSERT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
- monitor->GetCurrentPressureLevel());
-
- // Moderate Pressure.
- ASSERT_TRUE(SetFileContents(available_file, "900"));
- TriggerKernelNotification(write_end.get());
- RunLoop().RunWithTimeout(base::TimeDelta::FromSeconds(1));
- ASSERT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
- monitor->GetCurrentPressureLevel());
-
- // Critical Pressure.
- ASSERT_TRUE(SetFileContents(available_file, "450"));
- TriggerKernelNotification(write_end.get());
- RunLoop().RunWithTimeout(base::TimeDelta::FromSeconds(1));
- ASSERT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
- monitor->GetCurrentPressureLevel());
-
- // Moderate Pressure.
- ASSERT_TRUE(SetFileContents(available_file, "550"));
- TriggerKernelNotification(write_end.get());
- RunLoop().RunWithTimeout(base::TimeDelta::FromSeconds(1));
- ASSERT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
- monitor->GetCurrentPressureLevel());
-
- // No pressure, note: this will not cause any event.
- ASSERT_TRUE(SetFileContents(available_file, "1150"));
- TriggerKernelNotification(write_end.get());
- RunLoop().RunWithTimeout(base::TimeDelta::FromSeconds(1));
- ASSERT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE,
- monitor->GetCurrentPressureLevel());
-
- // Back into moderate.
- ASSERT_TRUE(SetFileContents(available_file, "950"));
- TriggerKernelNotification(write_end.get());
- RunLoop().RunWithTimeout(base::TimeDelta::FromSeconds(1));
- ASSERT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
- monitor->GetCurrentPressureLevel());
-
- // Now our events should be MODERATE, CRITICAL, MODERATE.
- ASSERT_EQ(4u, pressure_events.size());
- ASSERT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
- pressure_events[0]);
- ASSERT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL,
- pressure_events[1]);
- ASSERT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
- pressure_events[2]);
- ASSERT_EQ(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE,
- pressure_events[3]);
-}
-
-} // namespace chromeos
-} // namespace base
diff --git a/chromium/base/memory/memory_pressure_monitor_win.cc b/chromium/base/memory/memory_pressure_monitor_win.cc
index 34a3d715f3e..f2ccb7d7fe6 100644
--- a/chromium/base/memory/memory_pressure_monitor_win.cc
+++ b/chromium/base/memory/memory_pressure_monitor_win.cc
@@ -88,7 +88,7 @@ void MemoryPressureMonitor::CheckMemoryPressureSoon() {
}
MemoryPressureListener::MemoryPressureLevel
-MemoryPressureMonitor::GetCurrentPressureLevel() {
+MemoryPressureMonitor::GetCurrentPressureLevel() const {
return current_memory_pressure_level_;
}
diff --git a/chromium/base/memory/memory_pressure_monitor_win.h b/chromium/base/memory/memory_pressure_monitor_win.h
index 6a7d0bb8300..6d9df609740 100644
--- a/chromium/base/memory/memory_pressure_monitor_win.h
+++ b/chromium/base/memory/memory_pressure_monitor_win.h
@@ -55,7 +55,7 @@ class BASE_EXPORT MemoryPressureMonitor : public base::MemoryPressureMonitor {
void CheckMemoryPressureSoon();
// Get the current memory pressure level. This can be called from any thread.
- MemoryPressureLevel GetCurrentPressureLevel() override;
+ MemoryPressureLevel GetCurrentPressureLevel() const override;
void SetDispatchCallback(const DispatchCallback& callback) override;
// Returns the moderate pressure level free memory threshold, in MB.
diff --git a/chromium/base/memory/platform_shared_memory_region.h b/chromium/base/memory/platform_shared_memory_region.h
index 85d3a833f19..a04b44ab0cd 100644
--- a/chromium/base/memory/platform_shared_memory_region.h
+++ b/chromium/base/memory/platform_shared_memory_region.h
@@ -28,6 +28,12 @@
#include "base/files/scoped_file.h"
#endif
+#if defined(OS_LINUX)
+namespace content {
+class SandboxIPCHandler;
+}
+#endif
+
namespace base {
namespace subtle {
@@ -109,8 +115,32 @@ class BASE_EXPORT PlatformSharedMemoryRegion {
CREATE_FILE_MAPPING_FAILURE = 6,
REDUCE_PERMISSIONS_FAILURE = 7,
ALREADY_EXISTS = 8,
- kMaxValue = ALREADY_EXISTS
+ ALLOCATE_FILE_REGION_FAILURE = 9,
+ FSTAT_FAILURE = 10,
+ INODES_MISMATCH = 11,
+ GET_SHMEM_TEMP_DIR_FAILURE = 12,
+ kMaxValue = GET_SHMEM_TEMP_DIR_FAILURE
+ };
+
+#if defined(OS_LINUX)
+ // Structure to limit access to executable region creation.
+ struct ExecutableRegion {
+ private:
+ // Creates a new shared memory region the unsafe mode (writable and not and
+ // convertible to read-only), and in addition marked executable. A ScopedFD
+ // to this region is returned. Any any mapping will have to be done
+ // manually, including setting executable permissions if necessary
+ //
+ // This is only used to support sandbox_ipc_linux.cc, and should not be used
+ // anywhere else in chrome. This is restricted via AllowCreateExecutable.
+ // TODO(crbug.com/982879): remove this when NaCl is unshipped.
+ //
+ // Returns an invalid ScopedFD if the call fails.
+ static ScopedFD CreateFD(size_t size);
+
+ friend class content::SandboxIPCHandler;
};
+#endif
// Platform-specific shared memory type used by this class.
#if defined(OS_MACOSX) && !defined(OS_IOS)
@@ -182,7 +212,9 @@ class BASE_EXPORT PlatformSharedMemoryRegion {
~PlatformSharedMemoryRegion();
// Passes ownership of the platform handle to the caller. The current instance
- // becomes invalid. It's the responsibility of the caller to close the handle.
+ // becomes invalid. It's the responsibility of the caller to close the
+ // handle. If the current instance is invalid, ScopedPlatformHandle will also
+ // be invalid.
ScopedPlatformHandle PassPlatformHandle() WARN_UNUSED_RESULT;
// Returns the platform handle. The current instance keeps ownership of this
@@ -242,7 +274,13 @@ class BASE_EXPORT PlatformSharedMemoryRegion {
CreateReadOnlyRegionDeathTest);
FRIEND_TEST_ALL_PREFIXES(PlatformSharedMemoryRegionTest,
CheckPlatformHandlePermissionsCorrespondToMode);
- static PlatformSharedMemoryRegion Create(Mode mode, size_t size);
+ static PlatformSharedMemoryRegion Create(Mode mode,
+ size_t size
+#if defined(OS_LINUX)
+ ,
+ bool executable = false
+#endif
+ );
static bool CheckPlatformHandlePermissionsCorrespondToMode(
PlatformHandle handle,
diff --git a/chromium/base/memory/platform_shared_memory_region_android.cc b/chromium/base/memory/platform_shared_memory_region_android.cc
index 88698477d62..538b18b0c45 100644
--- a/chromium/base/memory/platform_shared_memory_region_android.cc
+++ b/chromium/base/memory/platform_shared_memory_region_android.cc
@@ -8,6 +8,7 @@
#include "base/bits.h"
#include "base/memory/shared_memory_tracker.h"
+#include "base/metrics/histogram_macros.h"
#include "base/posix/eintr_wrapper.h"
#include "base/process/process_metrics.h"
#include "third_party/ashmem/ashmem.h"
@@ -22,7 +23,12 @@ namespace subtle {
namespace {
-static int GetAshmemRegionProtectionMask(int fd) {
+// Emits UMA metrics about encountered errors.
+void LogCreateError(PlatformSharedMemoryRegion::CreateError error) {
+ UMA_HISTOGRAM_ENUMERATION("SharedMemory.CreateError", error);
+}
+
+int GetAshmemRegionProtectionMask(int fd) {
int prot = ashmem_get_prot_region(fd);
if (prot < 0) {
// TODO(crbug.com/838365): convert to DLOG when bug fixed.
@@ -149,13 +155,17 @@ bool PlatformSharedMemoryRegion::MapAtInternal(off_t offset,
// static
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
size_t size) {
- if (size == 0)
+ if (size == 0) {
+ LogCreateError(PlatformSharedMemoryRegion::CreateError::SIZE_ZERO);
return {};
+ }
// Align size as required by ashmem_create_region() API documentation.
size_t rounded_size = bits::Align(size, GetPageSize());
- if (rounded_size > static_cast<size_t>(std::numeric_limits<int>::max()))
+ if (rounded_size > static_cast<size_t>(std::numeric_limits<int>::max())) {
+ LogCreateError(PlatformSharedMemoryRegion::CreateError::SIZE_TOO_LARGE);
return {};
+ }
CHECK_NE(mode, Mode::kReadOnly) << "Creating a region in read-only mode will "
"lead to this region being non-modifiable";
@@ -165,16 +175,21 @@ PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
ScopedFD fd(ashmem_create_region(
SharedMemoryTracker::GetDumpNameForTracing(guid).c_str(), rounded_size));
if (!fd.is_valid()) {
+ LogCreateError(
+ PlatformSharedMemoryRegion::CreateError::CREATE_FILE_MAPPING_FAILURE);
DPLOG(ERROR) << "ashmem_create_region failed";
return {};
}
int err = ashmem_set_prot_region(fd.get(), PROT_READ | PROT_WRITE);
if (err < 0) {
+ LogCreateError(
+ PlatformSharedMemoryRegion::CreateError::REDUCE_PERMISSIONS_FAILURE);
DPLOG(ERROR) << "ashmem_set_prot_region failed";
return {};
}
+ LogCreateError(PlatformSharedMemoryRegion::CreateError::SUCCESS);
return PlatformSharedMemoryRegion(std::move(fd), mode, size, guid);
}
diff --git a/chromium/base/memory/platform_shared_memory_region_fuchsia.cc b/chromium/base/memory/platform_shared_memory_region_fuchsia.cc
index 4ca16e3d653..ee4087b11a9 100644
--- a/chromium/base/memory/platform_shared_memory_region_fuchsia.cc
+++ b/chromium/base/memory/platform_shared_memory_region_fuchsia.cc
@@ -141,8 +141,7 @@ PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
"lead to this region being non-modifiable";
zx::vmo vmo;
- zx_status_t status =
- zx::vmo::create(rounded_size, ZX_VMO_NON_RESIZABLE, &vmo);
+ zx_status_t status = zx::vmo::create(rounded_size, 0, &vmo);
if (status != ZX_OK) {
ZX_DLOG(ERROR, status) << "zx_vmo_create";
return {};
diff --git a/chromium/base/memory/platform_shared_memory_region_posix.cc b/chromium/base/memory/platform_shared_memory_region_posix.cc
index 0b9c36a688b..1fe6f81eea6 100644
--- a/chromium/base/memory/platform_shared_memory_region_posix.cc
+++ b/chromium/base/memory/platform_shared_memory_region_posix.cc
@@ -10,6 +10,7 @@
#include "base/files/file.h"
#include "base/files/file_util.h"
+#include "base/metrics/histogram_macros.h"
#include "base/threading/thread_restrictions.h"
#include "build/build_config.h"
@@ -18,6 +19,12 @@ namespace subtle {
namespace {
+#if !defined(OS_NACL)
+void LogCreateError(PlatformSharedMemoryRegion::CreateError error) {
+ UMA_HISTOGRAM_ENUMERATION("SharedMemory.CreateError", error);
+}
+#endif
+
struct ScopedPathUnlinkerTraits {
static const FilePath* InvalidValue() { return nullptr; }
@@ -69,6 +76,17 @@ FDPair ScopedFDPair::get() const {
return {fd.get(), readonly_fd.get()};
}
+#if defined(OS_LINUX)
+// static
+ScopedFD PlatformSharedMemoryRegion::ExecutableRegion::CreateFD(size_t size) {
+ PlatformSharedMemoryRegion region =
+ Create(Mode::kUnsafe, size, true /* executable */);
+ if (region.IsValid())
+ return region.PassPlatformHandle().fd;
+ return ScopedFD();
+}
+#endif // defined(OS_LINUX)
+
// static
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Take(
ScopedFDPair handle,
@@ -205,16 +223,25 @@ bool PlatformSharedMemoryRegion::MapAtInternal(off_t offset,
// static
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
- size_t size) {
+ size_t size
+#if defined(OS_LINUX)
+ ,
+ bool executable
+#endif
+) {
#if defined(OS_NACL)
// Untrusted code can't create descriptors or handles.
return {};
#else
- if (size == 0)
+ if (size == 0) {
+ LogCreateError(PlatformSharedMemoryRegion::CreateError::SIZE_ZERO);
return {};
+ }
- if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
+ if (size > static_cast<size_t>(std::numeric_limits<int>::max())) {
+ LogCreateError(PlatformSharedMemoryRegion::CreateError::SIZE_TOO_LARGE);
return {};
+ }
CHECK_NE(mode, Mode::kReadOnly) << "Creating a region in read-only mode will "
"lead to this region being non-modifiable";
@@ -227,13 +254,24 @@ PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
// We don't use shm_open() API in order to support the --disable-dev-shm-usage
// flag.
FilePath directory;
- if (!GetShmemTempDir(false /* executable */, &directory))
+ if (!GetShmemTempDir(
+#if defined(OS_LINUX)
+ executable,
+#else
+ false /* executable */,
+#endif
+ &directory)) {
+ LogCreateError(
+ PlatformSharedMemoryRegion::CreateError::GET_SHMEM_TEMP_DIR_FAILURE);
return {};
+ }
FilePath path;
File shm_file(CreateAndOpenFdForTemporaryFileInDir(directory, &path));
if (!shm_file.IsValid()) {
+ LogCreateError(
+ PlatformSharedMemoryRegion::CreateError::CREATE_FILE_MAPPING_FAILURE);
PLOG(ERROR) << "Creating shared memory in " << path.value() << " failed";
FilePath dir = path.DirName();
if (access(dir.value().c_str(), W_OK | X_OK) < 0) {
@@ -256,34 +294,43 @@ PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
// Also open as readonly so that we can ConvertToReadOnly().
readonly_fd.reset(HANDLE_EINTR(open(path.value().c_str(), O_RDONLY)));
if (!readonly_fd.is_valid()) {
+ LogCreateError(
+ PlatformSharedMemoryRegion::CreateError::REDUCE_PERMISSIONS_FAILURE);
DPLOG(ERROR) << "open(\"" << path.value() << "\", O_RDONLY) failed";
return {};
}
}
- if (!AllocateFileRegion(&shm_file, 0, size))
+ if (!AllocateFileRegion(&shm_file, 0, size)) {
+ LogCreateError(
+ PlatformSharedMemoryRegion::CreateError::ALLOCATE_FILE_REGION_FAILURE);
return {};
+ }
if (readonly_fd.is_valid()) {
struct stat stat = {};
if (fstat(shm_file.GetPlatformFile(), &stat) != 0) {
+ LogCreateError(PlatformSharedMemoryRegion::CreateError::FSTAT_FAILURE);
DPLOG(ERROR) << "fstat(fd) failed";
return {};
}
struct stat readonly_stat = {};
if (fstat(readonly_fd.get(), &readonly_stat) != 0) {
+ LogCreateError(PlatformSharedMemoryRegion::CreateError::FSTAT_FAILURE);
DPLOG(ERROR) << "fstat(readonly_fd) failed";
return {};
}
if (stat.st_dev != readonly_stat.st_dev ||
stat.st_ino != readonly_stat.st_ino) {
+ LogCreateError(PlatformSharedMemoryRegion::CreateError::INODES_MISMATCH);
LOG(ERROR) << "Writable and read-only inodes don't match; bailing";
return {};
}
}
+ LogCreateError(PlatformSharedMemoryRegion::CreateError::SUCCESS);
return PlatformSharedMemoryRegion(
{ScopedFD(shm_file.TakePlatformFile()), std::move(readonly_fd)}, mode,
size, UnguessableToken::Create());
diff --git a/chromium/base/memory/read_only_shared_memory_region.h b/chromium/base/memory/read_only_shared_memory_region.h
index 08d0c1e5f40..6f0075848a4 100644
--- a/chromium/base/memory/read_only_shared_memory_region.h
+++ b/chromium/base/memory/read_only_shared_memory_region.h
@@ -131,7 +131,7 @@ struct MappedReadOnlyRegion {
// Helper function to check return value of
// ReadOnlySharedMemoryRegion::Create(). |region| and |mapping| either both
// valid or invalid.
- bool IsValid() {
+ bool IsValid() const {
DCHECK_EQ(region.IsValid(), mapping.IsValid());
return region.IsValid() && mapping.IsValid();
}
diff --git a/chromium/base/memory/shared_memory.h b/chromium/base/memory/shared_memory.h
index bd97f11b3f9..a01c4b5adcf 100644
--- a/chromium/base/memory/shared_memory.h
+++ b/chromium/base/memory/shared_memory.h
@@ -36,20 +36,6 @@ class FilePath;
// Options for creating a shared memory object.
struct BASE_EXPORT SharedMemoryCreateOptions {
-#if !defined(OS_FUCHSIA)
- // DEPRECATED (crbug.com/345734):
- // If NULL, the object is anonymous. This pointer is owned by the caller
- // and must live through the call to Create().
- const std::string* name_deprecated = nullptr;
-
- // DEPRECATED (crbug.com/345734):
- // If true, and the shared memory already exists, Create() will open the
- // existing shared memory and ignore the size parameter. If false,
- // shared memory must not exist. This flag is meaningless unless
- // name_deprecated is non-NULL.
- bool open_existing_deprecated = false;
-#endif
-
// Size of the shared memory object to be created.
// When opening an existing object, this has no effect.
size_t size = 0;
@@ -98,9 +84,6 @@ class BASE_EXPORT SharedMemory {
// Closes a shared memory handle.
static void CloseHandle(const SharedMemoryHandle& handle);
- // Returns the maximum number of handles that can be open at once per process.
- static size_t GetHandleLimit();
-
// Duplicates The underlying OS primitive. Returns an invalid handle on
// failure. The caller is responsible for destroying the duplicated OS
// primitive.
@@ -127,33 +110,6 @@ class BASE_EXPORT SharedMemory {
return Create(options);
}
-#if (!defined(OS_MACOSX) || defined(OS_IOS)) && !defined(OS_FUCHSIA)
- // DEPRECATED (crbug.com/345734):
- // Creates or opens a shared memory segment based on a name.
- // If open_existing is true, and the shared memory already exists,
- // opens the existing shared memory and ignores the size parameter.
- // If open_existing is false, shared memory must not exist.
- // size is the size of the block to be created.
- // Returns true on success, false on failure.
- bool CreateNamedDeprecated(
- const std::string& name, bool open_existing, size_t size) {
- SharedMemoryCreateOptions options;
- options.name_deprecated = &name;
- options.open_existing_deprecated = open_existing;
- options.size = size;
- return Create(options);
- }
-
- // Deletes resources associated with a shared memory segment based on name.
- // Not all platforms require this call.
- bool Delete(const std::string& name);
-
- // Opens a shared memory segment based on a name.
- // If read_only is true, opens for read-only access.
- // Returns true on success, false on failure.
- bool Open(const std::string& name, bool read_only);
-#endif // !defined(OS_MACOSX) || defined(OS_IOS)
-
// Maps the shared memory into the caller's address space.
// Returns true on success, false otherwise. The memory address
// is accessed via the memory() accessor. The mapped address is guaranteed to
@@ -224,7 +180,6 @@ class BASE_EXPORT SharedMemory {
// If true indicates this came from an external source so needs extra checks
// before being mapped.
bool external_section_ = false;
- string16 name_;
#elif !defined(OS_ANDROID) && !defined(OS_FUCHSIA)
// If valid, points to the same memory region as shm_, but with readonly
// permissions.
diff --git a/chromium/base/memory/shared_memory_android.cc b/chromium/base/memory/shared_memory_android.cc
index e54e61afa74..04e4bc2dfdc 100644
--- a/chromium/base/memory/shared_memory_android.cc
+++ b/chromium/base/memory/shared_memory_android.cc
@@ -29,9 +29,7 @@ bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
return false;
// "name" is just a label in ashmem. It is visible in /proc/pid/maps.
- int fd = ashmem_create_region(
- options.name_deprecated ? options.name_deprecated->c_str() : "",
- rounded_size);
+ int fd = ashmem_create_region("", rounded_size);
shm_ = SharedMemoryHandle::ImportHandle(fd, options.size);
if (!shm_.IsValid()) {
DLOG(ERROR) << "Shared memory creation failed";
@@ -50,18 +48,6 @@ bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
return true;
}
-bool SharedMemory::Delete(const std::string& name) {
- // Like on Windows, this is intentionally returning true as ashmem will
- // automatically releases the resource when all FDs on it are closed.
- return true;
-}
-
-bool SharedMemory::Open(const std::string& name, bool read_only) {
- // ashmem doesn't support name mapping
- NOTIMPLEMENTED();
- return false;
-}
-
void SharedMemory::Close() {
if (shm_.IsValid()) {
shm_.Close();
diff --git a/chromium/base/memory/shared_memory_fuchsia.cc b/chromium/base/memory/shared_memory_fuchsia.cc
index 9cef989d18d..878906a6833 100644
--- a/chromium/base/memory/shared_memory_fuchsia.cc
+++ b/chromium/base/memory/shared_memory_fuchsia.cc
@@ -38,13 +38,6 @@ void SharedMemory::CloseHandle(const SharedMemoryHandle& handle) {
handle.Close();
}
-// static
-size_t SharedMemory::GetHandleLimit() {
- // Duplicated from the internal Magenta kernel constant kMaxHandleCount
- // (kernel/lib/zircon/zircon.cpp).
- return 256 * 1024u;
-}
-
bool SharedMemory::CreateAndMapAnonymous(size_t size) {
return CreateAnonymous(size) && Map(size);
}
@@ -53,8 +46,7 @@ bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
requested_size_ = options.size;
mapped_size_ = bits::Align(requested_size_, GetPageSize());
zx::vmo vmo;
- zx_status_t status =
- zx::vmo::create(mapped_size_, ZX_VMO_NON_RESIZABLE, &vmo);
+ zx_status_t status = zx::vmo::create(mapped_size_, 0, &vmo);
if (status != ZX_OK) {
ZX_DLOG(ERROR, status) << "zx_vmo_create";
return false;
diff --git a/chromium/base/memory/shared_memory_helper.cc b/chromium/base/memory/shared_memory_helper.cc
index 47dfeb006a6..1df529fb39c 100644
--- a/chromium/base/memory/shared_memory_helper.cc
+++ b/chromium/base/memory/shared_memory_helper.cc
@@ -34,10 +34,6 @@ bool CreateAnonymousSharedMemory(const SharedMemoryCreateOptions& options,
ScopedFD* fd,
ScopedFD* readonly_fd,
FilePath* path) {
-#if defined(OS_LINUX)
- // It doesn't make sense to have a open-existing private piece of shmem
- DCHECK(!options.open_existing_deprecated);
-#endif // defined(OS_LINUX)
// Q: Why not use the shm_open() etc. APIs?
// A: Because they're limited to 4mb on OS X. FFFFFFFUUUUUUUUUUU
FilePath directory;
diff --git a/chromium/base/memory/shared_memory_mac.cc b/chromium/base/memory/shared_memory_mac.cc
index fc1af80001f..82dfeae04bb 100644
--- a/chromium/base/memory/shared_memory_mac.cc
+++ b/chromium/base/memory/shared_memory_mac.cc
@@ -99,11 +99,6 @@ void SharedMemory::CloseHandle(const SharedMemoryHandle& handle) {
}
// static
-size_t SharedMemory::GetHandleLimit() {
- return GetMaxFds();
-}
-
-// static
SharedMemoryHandle SharedMemory::DuplicateHandle(
const SharedMemoryHandle& handle) {
return handle.Duplicate();
diff --git a/chromium/base/memory/shared_memory_mapping.cc b/chromium/base/memory/shared_memory_mapping.cc
index 2be2570004b..8426fa8c214 100644
--- a/chromium/base/memory/shared_memory_mapping.cc
+++ b/chromium/base/memory/shared_memory_mapping.cc
@@ -33,7 +33,7 @@ namespace base {
SharedMemoryMapping::SharedMemoryMapping() = default;
-SharedMemoryMapping::SharedMemoryMapping(SharedMemoryMapping&& mapping)
+SharedMemoryMapping::SharedMemoryMapping(SharedMemoryMapping&& mapping) noexcept
: memory_(mapping.memory_),
size_(mapping.size_),
mapped_size_(mapping.mapped_size_),
@@ -42,7 +42,7 @@ SharedMemoryMapping::SharedMemoryMapping(SharedMemoryMapping&& mapping)
}
SharedMemoryMapping& SharedMemoryMapping::operator=(
- SharedMemoryMapping&& mapping) {
+ SharedMemoryMapping&& mapping) noexcept {
Unmap();
memory_ = mapping.memory_;
size_ = mapping.size_;
@@ -90,9 +90,9 @@ void SharedMemoryMapping::Unmap() {
ReadOnlySharedMemoryMapping::ReadOnlySharedMemoryMapping() = default;
ReadOnlySharedMemoryMapping::ReadOnlySharedMemoryMapping(
- ReadOnlySharedMemoryMapping&&) = default;
+ ReadOnlySharedMemoryMapping&&) noexcept = default;
ReadOnlySharedMemoryMapping& ReadOnlySharedMemoryMapping::operator=(
- ReadOnlySharedMemoryMapping&&) = default;
+ ReadOnlySharedMemoryMapping&&) noexcept = default;
ReadOnlySharedMemoryMapping::ReadOnlySharedMemoryMapping(
void* address,
size_t size,
@@ -102,9 +102,9 @@ ReadOnlySharedMemoryMapping::ReadOnlySharedMemoryMapping(
WritableSharedMemoryMapping::WritableSharedMemoryMapping() = default;
WritableSharedMemoryMapping::WritableSharedMemoryMapping(
- WritableSharedMemoryMapping&&) = default;
+ WritableSharedMemoryMapping&&) noexcept = default;
WritableSharedMemoryMapping& WritableSharedMemoryMapping::operator=(
- WritableSharedMemoryMapping&&) = default;
+ WritableSharedMemoryMapping&&) noexcept = default;
WritableSharedMemoryMapping::WritableSharedMemoryMapping(
void* address,
size_t size,
diff --git a/chromium/base/memory/shared_memory_mapping.h b/chromium/base/memory/shared_memory_mapping.h
index d9569af8ee3..2b8858e1662 100644
--- a/chromium/base/memory/shared_memory_mapping.h
+++ b/chromium/base/memory/shared_memory_mapping.h
@@ -32,8 +32,8 @@ class BASE_EXPORT SharedMemoryMapping {
SharedMemoryMapping();
// Move operations are allowed.
- SharedMemoryMapping(SharedMemoryMapping&& mapping);
- SharedMemoryMapping& operator=(SharedMemoryMapping&& mapping);
+ SharedMemoryMapping(SharedMemoryMapping&& mapping) noexcept;
+ SharedMemoryMapping& operator=(SharedMemoryMapping&& mapping) noexcept;
// Unmaps the region if the mapping is valid.
virtual ~SharedMemoryMapping();
@@ -93,8 +93,9 @@ class BASE_EXPORT ReadOnlySharedMemoryMapping : public SharedMemoryMapping {
ReadOnlySharedMemoryMapping();
// Move operations are allowed.
- ReadOnlySharedMemoryMapping(ReadOnlySharedMemoryMapping&&);
- ReadOnlySharedMemoryMapping& operator=(ReadOnlySharedMemoryMapping&&);
+ ReadOnlySharedMemoryMapping(ReadOnlySharedMemoryMapping&&) noexcept;
+ ReadOnlySharedMemoryMapping& operator=(
+ ReadOnlySharedMemoryMapping&&) noexcept;
// Returns the base address of the mapping. This is read-only memory. This is
// page-aligned. This is nullptr for invalid instances.
@@ -171,8 +172,9 @@ class BASE_EXPORT WritableSharedMemoryMapping : public SharedMemoryMapping {
WritableSharedMemoryMapping();
// Move operations are allowed.
- WritableSharedMemoryMapping(WritableSharedMemoryMapping&&);
- WritableSharedMemoryMapping& operator=(WritableSharedMemoryMapping&&);
+ WritableSharedMemoryMapping(WritableSharedMemoryMapping&&) noexcept;
+ WritableSharedMemoryMapping& operator=(
+ WritableSharedMemoryMapping&&) noexcept;
// Returns the base address of the mapping. This is writable memory. This is
// page-aligned. This is nullptr for invalid instances.
diff --git a/chromium/base/memory/shared_memory_nacl.cc b/chromium/base/memory/shared_memory_nacl.cc
index 4bcbb547d38..57845b07783 100644
--- a/chromium/base/memory/shared_memory_nacl.cc
+++ b/chromium/base/memory/shared_memory_nacl.cc
@@ -60,14 +60,6 @@ bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
return false;
}
-bool SharedMemory::Delete(const std::string& name) {
- return false;
-}
-
-bool SharedMemory::Open(const std::string& name, bool read_only) {
- return false;
-}
-
bool SharedMemory::MapAt(off_t offset, size_t bytes) {
if (!shm_.IsValid())
return false;
diff --git a/chromium/base/memory/shared_memory_posix.cc b/chromium/base/memory/shared_memory_posix.cc
index 0988b5ad856..ac16df2a95d 100644
--- a/chromium/base/memory/shared_memory_posix.cc
+++ b/chromium/base/memory/shared_memory_posix.cc
@@ -11,7 +11,6 @@
#include <sys/stat.h>
#include <unistd.h>
-#include "base/files/file_util.h"
#include "base/files/scoped_file.h"
#include "base/logging.h"
#include "base/macros.h"
@@ -60,11 +59,6 @@ void SharedMemory::CloseHandle(const SharedMemoryHandle& handle) {
}
// static
-size_t SharedMemory::GetHandleLimit() {
- return GetMaxFds();
-}
-
-// static
SharedMemoryHandle SharedMemory::DuplicateHandle(
const SharedMemoryHandle& handle) {
return handle.Duplicate();
@@ -82,12 +76,7 @@ bool SharedMemory::CreateAndMapAnonymous(size_t size) {
#if !defined(OS_ANDROID)
-// Chromium mostly only uses the unique/private shmem as specified by
-// "name == L"". The exception is in the StatsTable.
-// TODO(jrg): there is no way to "clean up" all unused named shmem if
-// we restart from a crash. (That isn't a new problem, but it is a problem.)
-// In case we want to delete it later, it may be useful to save the value
-// of mem_filename after FilePathForMemoryName().
+// This SharedMemory API uses only the unique/private shmem.
bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
DCHECK(!shm_.IsValid());
if (options.size == 0) return false;
@@ -100,70 +89,13 @@ bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
// and be deleted before they ever make it out to disk.
ThreadRestrictions::ScopedAllowIO allow_io;
- bool fix_size = true;
ScopedFD fd;
ScopedFD readonly_fd;
FilePath path;
- if (!options.name_deprecated || options.name_deprecated->empty()) {
- bool result =
- CreateAnonymousSharedMemory(options, &fd, &readonly_fd, &path);
- if (!result)
- return false;
- } else {
- if (!FilePathForMemoryName(*options.name_deprecated, &path))
- return false;
-
- // Make sure that the file is opened without any permission
- // to other users on the system.
- const mode_t kOwnerOnly = S_IRUSR | S_IWUSR;
-
- // First, try to create the file.
- fd.reset(HANDLE_EINTR(
- open(path.value().c_str(), O_RDWR | O_CREAT | O_EXCL, kOwnerOnly)));
- if (!fd.is_valid() && options.open_existing_deprecated) {
- // If this doesn't work, try and open an existing file in append mode.
- // Opening an existing file in a world writable directory has two main
- // security implications:
- // - Attackers could plant a file under their control, so ownership of
- // the file is checked below.
- // - Attackers could plant a symbolic link so that an unexpected file
- // is opened, so O_NOFOLLOW is passed to open().
-#if !defined(OS_AIX)
- fd.reset(HANDLE_EINTR(
- open(path.value().c_str(), O_RDWR | O_APPEND | O_NOFOLLOW)));
-#else
- // AIX has no 64-bit support for open flags such as -
- // O_CLOEXEC, O_NOFOLLOW and O_TTY_INIT.
- fd.reset(HANDLE_EINTR(open(path.value().c_str(), O_RDWR | O_APPEND)));
-#endif
- // Check that the current user owns the file.
- // If uid != euid, then a more complex permission model is used and this
- // API is not appropriate.
- const uid_t real_uid = getuid();
- const uid_t effective_uid = geteuid();
- struct stat sb;
- if (fd.is_valid() &&
- (fstat(fd.get(), &sb) != 0 || sb.st_uid != real_uid ||
- sb.st_uid != effective_uid)) {
- LOG(ERROR) <<
- "Invalid owner when opening existing shared memory file.";
- return false;
- }
-
- // An existing file was opened, so its size should not be fixed.
- fix_size = false;
- }
+ if (!CreateAnonymousSharedMemory(options, &fd, &readonly_fd, &path))
+ return false;
- if (options.share_read_only) {
- // Also open as readonly so that we can GetReadOnlyHandle.
- readonly_fd.reset(HANDLE_EINTR(open(path.value().c_str(), O_RDONLY)));
- if (!readonly_fd.is_valid()) {
- DPLOG(ERROR) << "open(\"" << path.value() << "\", O_RDONLY) failed";
- return false;
- }
- }
- }
- if (fd.is_valid() && fix_size) {
+ if (fd.is_valid()) {
// Get current size.
struct stat stat;
if (fstat(fd.get(), &stat) != 0)
@@ -174,8 +106,7 @@ bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
return false;
}
requested_size_ = options.size;
- }
- if (!fd.is_valid()) {
+ } else {
PLOG(ERROR) << "Creating shared memory in " << path.value() << " failed";
FilePath dir = path.DirName();
if (access(dir.value().c_str(), W_OK | X_OK) < 0) {
@@ -200,56 +131,6 @@ bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
options.size, shm_.GetGUID());
return result;
}
-
-// Our current implementation of shmem is with mmap()ing of files.
-// These files need to be deleted explicitly.
-// In practice this call is only needed for unit tests.
-bool SharedMemory::Delete(const std::string& name) {
- FilePath path;
- if (!FilePathForMemoryName(name, &path))
- return false;
-
- if (PathExists(path))
- return DeleteFile(path, false);
-
- // Doesn't exist, so success.
- return true;
-}
-
-bool SharedMemory::Open(const std::string& name, bool read_only) {
- FilePath path;
- if (!FilePathForMemoryName(name, &path))
- return false;
-
- read_only_ = read_only;
-
- int mode = read_only ? O_RDONLY : O_RDWR;
- ScopedFD fd(HANDLE_EINTR(open(path.value().c_str(), mode)));
- ScopedFD readonly_fd(HANDLE_EINTR(open(path.value().c_str(), O_RDONLY)));
- if (!readonly_fd.is_valid()) {
- DPLOG(ERROR) << "open(\"" << path.value() << "\", O_RDONLY) failed";
- return false;
- }
- int mapped_file = -1;
- int readonly_mapped_file = -1;
- bool result = PrepareMapFile(std::move(fd), std::move(readonly_fd),
- &mapped_file, &readonly_mapped_file);
- // This form of sharing shared memory is deprecated. https://crbug.com/345734.
- // However, we can't get rid of it without a significant refactor because its
- // used to communicate between two versions of the same service process, very
- // early in the life cycle.
- // Technically, we should also pass the GUID from the original shared memory
- // region. We don't do that - this means that we will overcount this memory,
- // which thankfully isn't relevant since Chrome only communicates with a
- // single version of the service process.
- // We pass the size |0|, which is a dummy size and wrong, but otherwise
- // harmless.
- shm_ = SharedMemoryHandle(FileDescriptor(mapped_file, false), 0u,
- UnguessableToken::Create());
- readonly_shm_ = SharedMemoryHandle(
- FileDescriptor(readonly_mapped_file, false), 0, shm_.GetGUID());
- return result;
-}
#endif // !defined(OS_ANDROID)
bool SharedMemory::MapAt(off_t offset, size_t bytes) {
@@ -344,29 +225,6 @@ void SharedMemory::Close() {
}
}
-// For the given shmem named |mem_name|, return a filename to mmap()
-// (and possibly create). Modifies |filename|. Return false on
-// error, or true of we are happy.
-bool SharedMemory::FilePathForMemoryName(const std::string& mem_name,
- FilePath* path) {
- // mem_name will be used for a filename; make sure it doesn't
- // contain anything which will confuse us.
- DCHECK_EQ(std::string::npos, mem_name.find('/'));
- DCHECK_EQ(std::string::npos, mem_name.find('\0'));
-
- FilePath temp_dir;
- if (!GetShmemTempDir(false, &temp_dir))
- return false;
-
-#if defined(GOOGLE_CHROME_BUILD)
- static const char kShmem[] = "com.google.Chrome.shmem.";
-#else
- static const char kShmem[] = "org.chromium.Chromium.shmem.";
-#endif
- *path = temp_dir.AppendASCII(kShmem + mem_name);
- return true;
-}
-
SharedMemoryHandle SharedMemory::GetReadOnlyHandle() const {
CHECK(readonly_shm_.IsValid());
return readonly_shm_.Duplicate();
diff --git a/chromium/base/memory/shared_memory_unittest.cc b/chromium/base/memory/shared_memory_unittest.cc
index f958c264252..86c98c07e45 100644
--- a/chromium/base/memory/shared_memory_unittest.cc
+++ b/chromium/base/memory/shared_memory_unittest.cc
@@ -60,29 +60,21 @@ namespace base {
namespace {
-#if !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
// Each thread will open the shared memory. Each thread will take a different 4
// byte int pointer, and keep changing it, with some small pauses in between.
// Verify that each thread's value in the shared memory is always correct.
class MultipleThreadMain : public PlatformThread::Delegate {
public:
- explicit MultipleThreadMain(int16_t id) : id_(id) {}
- ~MultipleThreadMain() override = default;
+ static const uint32_t kDataSize = 1024;
- static void CleanUp() {
- SharedMemory memory;
- memory.Delete(s_test_name_);
- }
+ MultipleThreadMain(int16_t id, SharedMemoryHandle handle)
+ : id_(id), shm_(handle, false) {}
+ ~MultipleThreadMain() override = default;
// PlatformThread::Delegate interface.
void ThreadMain() override {
- const uint32_t kDataSize = 1024;
- SharedMemory memory;
- bool rv = memory.CreateNamedDeprecated(s_test_name_, true, kDataSize);
- EXPECT_TRUE(rv);
- rv = memory.Map(kDataSize);
- EXPECT_TRUE(rv);
- int* ptr = static_cast<int*>(memory.memory()) + id_;
+ EXPECT_TRUE(shm_.Map(kDataSize));
+ int* ptr = static_cast<int*>(shm_.memory()) + id_;
EXPECT_EQ(0, *ptr);
for (int idx = 0; idx < 100; idx++) {
@@ -93,21 +85,16 @@ class MultipleThreadMain : public PlatformThread::Delegate {
// Reset back to 0 for the next test that uses the same name.
*ptr = 0;
- memory.Close();
+ shm_.Unmap();
}
private:
int16_t id_;
-
- static const char s_test_name_[];
+ SharedMemory shm_;
DISALLOW_COPY_AND_ASSIGN(MultipleThreadMain);
};
-const char MultipleThreadMain::s_test_name_[] =
- "SharedMemoryOpenThreadTest";
-#endif // !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
-
enum class Mode {
Default,
#if defined(OS_LINUX) && !defined(OS_CHROMEOS)
@@ -133,125 +120,6 @@ class SharedMemoryTest : public ::testing::TestWithParam<Mode> {
} // namespace
-// Android/Mac/Fuchsia doesn't support SharedMemory::Open/Delete/
-// CreateNamedDeprecated(openExisting=true)
-#if !defined(OS_ANDROID) && !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
-
-TEST_P(SharedMemoryTest, OpenClose) {
- const uint32_t kDataSize = 1024;
- std::string test_name = "SharedMemoryOpenCloseTest";
-
- // Open two handles to a memory segment, confirm that they are mapped
- // separately yet point to the same space.
- SharedMemory memory1;
- bool rv = memory1.Delete(test_name);
- EXPECT_TRUE(rv);
- rv = memory1.Delete(test_name);
- EXPECT_TRUE(rv);
- rv = memory1.Open(test_name, false);
- EXPECT_FALSE(rv);
- rv = memory1.CreateNamedDeprecated(test_name, false, kDataSize);
- EXPECT_TRUE(rv);
- rv = memory1.Map(kDataSize);
- EXPECT_TRUE(rv);
- SharedMemory memory2;
- rv = memory2.Open(test_name, false);
- EXPECT_TRUE(rv);
- rv = memory2.Map(kDataSize);
- EXPECT_TRUE(rv);
- EXPECT_NE(memory1.memory(), memory2.memory()); // Compare the pointers.
-
- // Make sure we don't segfault. (it actually happened!)
- ASSERT_NE(memory1.memory(), static_cast<void*>(nullptr));
- ASSERT_NE(memory2.memory(), static_cast<void*>(nullptr));
-
- // Write data to the first memory segment, verify contents of second.
- memset(memory1.memory(), '1', kDataSize);
- EXPECT_EQ(memcmp(memory1.memory(), memory2.memory(), kDataSize), 0);
-
- // Close the first memory segment, and verify the second has the right data.
- memory1.Close();
- char* start_ptr = static_cast<char*>(memory2.memory());
- char* end_ptr = start_ptr + kDataSize;
- for (char* ptr = start_ptr; ptr < end_ptr; ptr++)
- EXPECT_EQ(*ptr, '1');
-
- // Close the second memory segment.
- memory2.Close();
-
- rv = memory1.Delete(test_name);
- EXPECT_TRUE(rv);
- rv = memory2.Delete(test_name);
- EXPECT_TRUE(rv);
-}
-
-TEST_P(SharedMemoryTest, OpenExclusive) {
- const uint32_t kDataSize = 1024;
- const uint32_t kDataSize2 = 2048;
- std::ostringstream test_name_stream;
- test_name_stream << "SharedMemoryOpenExclusiveTest."
- << Time::Now().ToDoubleT();
- std::string test_name = test_name_stream.str();
-
- // Open two handles to a memory segment and check that
- // open_existing_deprecated works as expected.
- SharedMemory memory1;
- bool rv = memory1.CreateNamedDeprecated(test_name, false, kDataSize);
- EXPECT_TRUE(rv);
-
- // Memory1 knows it's size because it created it.
- EXPECT_EQ(memory1.requested_size(), kDataSize);
-
- rv = memory1.Map(kDataSize);
- EXPECT_TRUE(rv);
-
- // The mapped memory1 must be at least the size we asked for.
- EXPECT_GE(memory1.mapped_size(), kDataSize);
-
- // The mapped memory1 shouldn't exceed rounding for allocation granularity.
- EXPECT_LT(memory1.mapped_size(),
- kDataSize + SysInfo::VMAllocationGranularity());
-
- memset(memory1.memory(), 'G', kDataSize);
-
- SharedMemory memory2;
- // Should not be able to create if openExisting is false.
- rv = memory2.CreateNamedDeprecated(test_name, false, kDataSize2);
- EXPECT_FALSE(rv);
-
- // Should be able to create with openExisting true.
- rv = memory2.CreateNamedDeprecated(test_name, true, kDataSize2);
- EXPECT_TRUE(rv);
-
- // Memory2 shouldn't know the size because we didn't create it.
- EXPECT_EQ(memory2.requested_size(), 0U);
-
- // We should be able to map the original size.
- rv = memory2.Map(kDataSize);
- EXPECT_TRUE(rv);
-
- // The mapped memory2 must be at least the size of the original.
- EXPECT_GE(memory2.mapped_size(), kDataSize);
-
- // The mapped memory2 shouldn't exceed rounding for allocation granularity.
- EXPECT_LT(memory2.mapped_size(),
- kDataSize2 + SysInfo::VMAllocationGranularity());
-
- // Verify that opening memory2 didn't truncate or delete memory 1.
- char* start_ptr = static_cast<char*>(memory2.memory());
- char* end_ptr = start_ptr + kDataSize;
- for (char* ptr = start_ptr; ptr < end_ptr; ptr++) {
- EXPECT_EQ(*ptr, 'G');
- }
-
- memory1.Close();
- memory2.Close();
-
- rv = memory1.Delete(test_name);
- EXPECT_TRUE(rv);
-}
-#endif // !defined(OS_ANDROID) && !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
-
// Check that memory is still mapped after its closed.
TEST_P(SharedMemoryTest, CloseNoUnmap) {
const size_t kDataSize = 4096;
@@ -275,13 +143,11 @@ TEST_P(SharedMemoryTest, CloseNoUnmap) {
EXPECT_EQ(nullptr, memory.memory());
}
-#if !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
// Create a set of N threads to each open a shared memory segment and write to
// it. Verify that they are always reading/writing consistent data.
TEST_P(SharedMemoryTest, MultipleThreads) {
const int kNumThreads = 5;
- MultipleThreadMain::CleanUp();
// On POSIX we have a problem when 2 threads try to create the shmem
// (a file) at exactly the same time, since create both creates the
// file and zerofills it. We solve the problem for this unit test
@@ -289,6 +155,11 @@ TEST_P(SharedMemoryTest, MultipleThreads) {
// intentionally don't clean up its shmem before running with
// kNumThreads.
+ SharedMemoryCreateOptions options;
+ options.size = MultipleThreadMain::kDataSize;
+ SharedMemory shm;
+ EXPECT_TRUE(shm.Create(options));
+
int threadcounts[] = { 1, kNumThreads };
for (auto numthreads : threadcounts) {
std::unique_ptr<PlatformThreadHandle[]> thread_handles;
@@ -300,7 +171,8 @@ TEST_P(SharedMemoryTest, MultipleThreads) {
// Spawn the threads.
for (int16_t index = 0; index < numthreads; index++) {
PlatformThreadHandle pth;
- thread_delegates[index] = new MultipleThreadMain(index);
+ thread_delegates[index] =
+ new MultipleThreadMain(index, shm.handle().Duplicate());
EXPECT_TRUE(PlatformThread::Create(0, thread_delegates[index], &pth));
thread_handles[index] = pth;
}
@@ -311,9 +183,7 @@ TEST_P(SharedMemoryTest, MultipleThreads) {
delete thread_delegates[index];
}
}
- MultipleThreadMain::CleanUp();
}
-#endif
// Allocate private (unique) shared memory with an empty string for a
// name. Make sure several of them don't point to the same thing as
@@ -734,12 +604,7 @@ TEST_P(SharedMemoryTest, UnsafeImageSection) {
PAGE_READONLY | SEC_IMAGE, 0, 0, kTestSectionName));
EXPECT_TRUE(section_handle.IsValid());
- // Check direct opening by name, from handle and duplicated from handle.
- SharedMemory shared_memory_open;
- EXPECT_TRUE(shared_memory_open.Open(kTestSectionName, true));
- EXPECT_FALSE(shared_memory_open.Map(1));
- EXPECT_EQ(nullptr, shared_memory_open.memory());
-
+ // Check opening from handle and duplicated from handle.
SharedMemory shared_memory_handle_local(
SharedMemoryHandle(section_handle.Take(), 1, UnguessableToken::Create()),
true);
@@ -765,89 +630,6 @@ TEST_P(SharedMemoryTest, UnsafeImageSection) {
}
#endif // defined(OS_WIN)
-// iOS does not allow multiple processes.
-// Android ashmem does not support named shared memory.
-// Fuchsia SharedMemory does not support named shared memory.
-// Mac SharedMemory does not support named shared memory. crbug.com/345734
-#if !defined(OS_IOS) && !defined(OS_ANDROID) && !defined(OS_MACOSX) && \
- !defined(OS_FUCHSIA)
-// On POSIX it is especially important we test shmem across processes,
-// not just across threads. But the test is enabled on all platforms.
-class SharedMemoryProcessTest : public MultiProcessTest {
- public:
- static void CleanUp() {
- SharedMemory memory;
- memory.Delete(s_test_name_);
- }
-
- static int TaskTestMain() {
- int errors = 0;
- SharedMemory memory;
- bool rv = memory.CreateNamedDeprecated(s_test_name_, true, s_data_size_);
- EXPECT_TRUE(rv);
- if (rv != true)
- errors++;
- rv = memory.Map(s_data_size_);
- EXPECT_TRUE(rv);
- if (rv != true)
- errors++;
- int* ptr = static_cast<int*>(memory.memory());
-
- // This runs concurrently in multiple processes. Writes need to be atomic.
- subtle::Barrier_AtomicIncrement(ptr, 1);
- memory.Close();
- return errors;
- }
-
- static const char s_test_name_[];
- static const uint32_t s_data_size_;
-};
-
-const char SharedMemoryProcessTest::s_test_name_[] = "MPMem";
-const uint32_t SharedMemoryProcessTest::s_data_size_ = 1024;
-
-TEST_F(SharedMemoryProcessTest, SharedMemoryAcrossProcesses) {
- const int kNumTasks = 5;
-
- SharedMemoryProcessTest::CleanUp();
-
- // Create a shared memory region. Set the first word to 0.
- SharedMemory memory;
- bool rv = memory.CreateNamedDeprecated(s_test_name_, true, s_data_size_);
- ASSERT_TRUE(rv);
- rv = memory.Map(s_data_size_);
- ASSERT_TRUE(rv);
- int* ptr = static_cast<int*>(memory.memory());
- *ptr = 0;
-
- // Start |kNumTasks| processes, each of which atomically increments the first
- // word by 1.
- Process processes[kNumTasks];
- for (auto& index : processes) {
- index = SpawnChild("SharedMemoryTestMain");
- ASSERT_TRUE(index.IsValid());
- }
-
- // Check that each process exited correctly.
- int exit_code = 0;
- for (const auto& index : processes) {
- EXPECT_TRUE(index.WaitForExit(&exit_code));
- EXPECT_EQ(0, exit_code);
- }
-
- // Check that the shared memory region reflects |kNumTasks| increments.
- ASSERT_EQ(kNumTasks, *ptr);
-
- memory.Close();
- SharedMemoryProcessTest::CleanUp();
-}
-
-MULTIPROCESS_TEST_MAIN(SharedMemoryTestMain) {
- return SharedMemoryProcessTest::TaskTestMain();
-}
-#endif // !defined(OS_IOS) && !defined(OS_ANDROID) && !defined(OS_MACOSX) &&
- // !defined(OS_FUCHSIA)
-
#if !(defined(OS_MACOSX) && !defined(OS_IOS))
// The Mach functionality is tested in shared_memory_mac_unittest.cc.
TEST_P(SharedMemoryTest, MappedId) {
diff --git a/chromium/base/memory/shared_memory_win.cc b/chromium/base/memory/shared_memory_win.cc
index 88e3cbbc4ea..1ac9e77b653 100644
--- a/chromium/base/memory/shared_memory_win.cc
+++ b/chromium/base/memory/shared_memory_win.cc
@@ -142,8 +142,6 @@ HANDLE CreateFileMappingWithReducedPermissions(SECURITY_ATTRIBUTES* sa,
SharedMemory::SharedMemory() {}
-SharedMemory::SharedMemory(const string16& name) : name_(name) {}
-
SharedMemory::SharedMemory(const SharedMemoryHandle& handle, bool read_only)
: external_section_(true), shm_(handle), read_only_(read_only) {}
@@ -163,13 +161,6 @@ void SharedMemory::CloseHandle(const SharedMemoryHandle& handle) {
}
// static
-size_t SharedMemory::GetHandleLimit() {
- // Rounded down from value reported here:
- // http://blogs.technet.com/b/markrussinovich/archive/2009/09/29/3283844.aspx
- return static_cast<size_t>(1 << 23);
-}
-
-// static
SharedMemoryHandle SharedMemory::DuplicateHandle(
const SharedMemoryHandle& handle) {
return handle.Duplicate();
@@ -198,44 +189,41 @@ bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
}
size_t rounded_size = (options.size + kSectionMask) & ~kSectionMask;
- name_ = options.name_deprecated ? ASCIIToUTF16(*options.name_deprecated)
- : string16();
SECURITY_ATTRIBUTES sa = {sizeof(sa), nullptr, FALSE};
SECURITY_DESCRIPTOR sd;
ACL dacl;
- if (name_.empty()) {
- // Add an empty DACL to enforce anonymous read-only sections.
- sa.lpSecurityDescriptor = &sd;
- if (!InitializeAcl(&dacl, sizeof(dacl), ACL_REVISION)) {
- LogError(INITIALIZE_ACL_FAILURE, GetLastError());
- return false;
- }
- if (!InitializeSecurityDescriptor(&sd, SECURITY_DESCRIPTOR_REVISION)) {
- LogError(INITIALIZE_SECURITY_DESC_FAILURE, GetLastError());
- return false;
- }
- if (!SetSecurityDescriptorDacl(&sd, TRUE, &dacl, FALSE)) {
- LogError(SET_SECURITY_DESC_FAILURE, GetLastError());
- return false;
- }
-
- if (win::GetVersion() < win::Version::WIN8_1) {
- // Windows < 8.1 ignores DACLs on certain unnamed objects (like shared
- // sections). So, we generate a random name when we need to enforce
- // read-only.
- uint64_t rand_values[4];
- RandBytes(&rand_values, sizeof(rand_values));
- name_ = ASCIIToUTF16(StringPrintf(
- "CrSharedMem_%016llx%016llx%016llx%016llx", rand_values[0],
- rand_values[1], rand_values[2], rand_values[3]));
- DCHECK(!name_.empty());
- }
+ // Add an empty DACL to enforce anonymous read-only sections.
+ sa.lpSecurityDescriptor = &sd;
+ if (!InitializeAcl(&dacl, sizeof(dacl), ACL_REVISION)) {
+ LogError(INITIALIZE_ACL_FAILURE, GetLastError());
+ return false;
+ }
+ if (!InitializeSecurityDescriptor(&sd, SECURITY_DESCRIPTOR_REVISION)) {
+ LogError(INITIALIZE_SECURITY_DESC_FAILURE, GetLastError());
+ return false;
+ }
+ if (!SetSecurityDescriptorDacl(&sd, TRUE, &dacl, FALSE)) {
+ LogError(SET_SECURITY_DESC_FAILURE, GetLastError());
+ return false;
+ }
+
+ string16 name;
+ if (win::GetVersion() < win::Version::WIN8_1) {
+ // Windows < 8.1 ignores DACLs on certain unnamed objects (like shared
+ // sections). So, we generate a random name when we need to enforce
+ // read-only.
+ uint64_t rand_values[4];
+ RandBytes(&rand_values, sizeof(rand_values));
+ name = ASCIIToUTF16(StringPrintf("CrSharedMem_%016llx%016llx%016llx%016llx",
+ rand_values[0], rand_values[1],
+ rand_values[2], rand_values[3]));
+ DCHECK(!name.empty());
}
shm_ = SharedMemoryHandle(
CreateFileMappingWithReducedPermissions(
- &sa, rounded_size, name_.empty() ? nullptr : as_wcstr(name_)),
+ &sa, rounded_size, name.empty() ? nullptr : as_wcstr(name)),
rounded_size, UnguessableToken::Create());
if (!shm_.IsValid()) {
// The error is logged within CreateFileMappingWithReducedPermissions().
@@ -244,59 +232,18 @@ bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
requested_size_ = options.size;
- // Check if the shared memory pre-exists.
+ // If the shared memory already exists, something has gone wrong.
if (GetLastError() == ERROR_ALREADY_EXISTS) {
- // If the file already existed, set requested_size_ to 0 to show that
- // we don't know the size.
- requested_size_ = 0;
- external_section_ = true;
- if (!options.open_existing_deprecated) {
- Close();
- // From "if" above: GetLastError() == ERROR_ALREADY_EXISTS.
- LogError(ALREADY_EXISTS, ERROR_ALREADY_EXISTS);
- return false;
- }
+ Close();
+ // From "if" above: GetLastError() == ERROR_ALREADY_EXISTS.
+ LogError(ALREADY_EXISTS, ERROR_ALREADY_EXISTS);
+ return false;
}
LogError(SUCCESS, ERROR_SUCCESS);
return true;
}
-bool SharedMemory::Delete(const std::string& name) {
- // intentionally empty -- there is nothing for us to do on Windows.
- return true;
-}
-
-bool SharedMemory::Open(const std::string& name, bool read_only) {
- DCHECK(!shm_.IsValid());
- DWORD access = FILE_MAP_READ | SECTION_QUERY;
- if (!read_only)
- access |= FILE_MAP_WRITE;
- name_ = ASCIIToUTF16(name);
- read_only_ = read_only;
-
- // This form of sharing shared memory is deprecated. https://crbug.com/345734.
- // However, we can't get rid of it without a significant refactor because its
- // used to communicate between two versions of the same service process, very
- // early in the life cycle.
- // Technically, we should also pass the GUID from the original shared memory
- // region. We don't do that - this means that we will overcount this memory,
- // which thankfully isn't relevant since Chrome only communicates with a
- // single version of the service process.
- // We pass the size |0|, which is a dummy size and wrong, but otherwise
- // harmless.
- shm_ = SharedMemoryHandle(
- OpenFileMapping(access, false, name_.empty() ? nullptr : as_wcstr(name_)),
- 0u, UnguessableToken::Create());
- if (!shm_.IsValid())
- return false;
- // If a name specified assume it's an external section.
- if (!name_.empty())
- external_section_ = true;
- // Note: size_ is not set in this case.
- return true;
-}
-
bool SharedMemory::MapAt(off_t offset, size_t bytes) {
if (!shm_.IsValid()) {
DLOG(ERROR) << "Invalid SharedMemoryHandle.";
diff --git a/chromium/base/memory/weak_ptr.cc b/chromium/base/memory/weak_ptr.cc
index 64fd4993b47..0efcc44e5df 100644
--- a/chromium/base/memory/weak_ptr.cc
+++ b/chromium/base/memory/weak_ptr.cc
@@ -46,7 +46,7 @@ WeakReference::WeakReference(const scoped_refptr<Flag>& flag) : flag_(flag) {}
WeakReference::~WeakReference() = default;
-WeakReference::WeakReference(WeakReference&& other) = default;
+WeakReference::WeakReference(WeakReference&& other) noexcept = default;
WeakReference::WeakReference(const WeakReference& other) = default;
diff --git a/chromium/base/memory/weak_ptr.h b/chromium/base/memory/weak_ptr.h
index af2bd3851a8..ccd22fd13dc 100644
--- a/chromium/base/memory/weak_ptr.h
+++ b/chromium/base/memory/weak_ptr.h
@@ -16,14 +16,13 @@
//
// class Controller {
// public:
-// Controller() : weak_factory_(this) {}
// void SpawnWorker() { Worker::StartNew(weak_factory_.GetWeakPtr()); }
// void WorkComplete(const Result& result) { ... }
// private:
// // Member variables should appear before the WeakPtrFactory, to ensure
// // that any WeakPtrs to Controller are invalidated before its members
// // variable's destructors are executed, rendering them invalid.
-// WeakPtrFactory<Controller> weak_factory_;
+// WeakPtrFactory<Controller> weak_factory_{this};
// };
//
// class Worker {
@@ -117,9 +116,9 @@ class BASE_EXPORT WeakReference {
explicit WeakReference(const scoped_refptr<Flag>& flag);
~WeakReference();
- WeakReference(WeakReference&& other);
+ WeakReference(WeakReference&& other) noexcept;
WeakReference(const WeakReference& other);
- WeakReference& operator=(WeakReference&& other) = default;
+ WeakReference& operator=(WeakReference&& other) noexcept = default;
WeakReference& operator=(const WeakReference& other) = default;
bool IsValid() const;
@@ -154,9 +153,9 @@ class BASE_EXPORT WeakPtrBase {
~WeakPtrBase();
WeakPtrBase(const WeakPtrBase& other) = default;
- WeakPtrBase(WeakPtrBase&& other) = default;
+ WeakPtrBase(WeakPtrBase&& other) noexcept = default;
WeakPtrBase& operator=(const WeakPtrBase& other) = default;
- WeakPtrBase& operator=(WeakPtrBase&& other) = default;
+ WeakPtrBase& operator=(WeakPtrBase&& other) noexcept = default;
void reset() {
ref_ = internal::WeakReference();
@@ -237,7 +236,7 @@ class WeakPtr : public internal::WeakPtrBase {
ptr_ = reinterpret_cast<uintptr_t>(t);
}
template <typename U>
- WeakPtr(WeakPtr<U>&& other) : WeakPtrBase(std::move(other)) {
+ WeakPtr(WeakPtr<U>&& other) noexcept : WeakPtrBase(std::move(other)) {
// Need to cast from U* to T* to do pointer adjustment in case of multiple
// inheritance. This also enforces the "U is a T" rule.
T* t = reinterpret_cast<U*>(other.ptr_);
diff --git a/chromium/base/memory/weak_ptr_unittest.cc b/chromium/base/memory/weak_ptr_unittest.cc
index a4629df9ba4..d6ab286644d 100644
--- a/chromium/base/memory/weak_ptr_unittest.cc
+++ b/chromium/base/memory/weak_ptr_unittest.cc
@@ -79,8 +79,8 @@ struct Arrow {
WeakPtr<Target> target;
};
struct TargetWithFactory : public Target {
- TargetWithFactory() : factory(this) {}
- WeakPtrFactory<Target> factory;
+ TargetWithFactory() {}
+ WeakPtrFactory<Target> factory{this};
};
// Helper class to create and destroy weak pointer copies
diff --git a/chromium/base/message_loop/message_loop_current.cc b/chromium/base/message_loop/message_loop_current.cc
index 71a45d077a6..54e2d2b41db 100644
--- a/chromium/base/message_loop/message_loop_current.cc
+++ b/chromium/base/message_loop/message_loop_current.cc
@@ -51,11 +51,6 @@ void MessageLoopCurrent::RemoveDestructionObserver(
current_->RemoveDestructionObserver(destruction_observer);
}
-scoped_refptr<SingleThreadTaskRunner> MessageLoopCurrent::task_runner() const {
- DCHECK(current_->IsBoundToCurrentThread());
- return current_->GetTaskRunner();
-}
-
void MessageLoopCurrent::SetTaskRunner(
scoped_refptr<SingleThreadTaskRunner> task_runner) {
DCHECK(current_->IsBoundToCurrentThread());
diff --git a/chromium/base/message_loop/message_loop_current.h b/chromium/base/message_loop/message_loop_current.h
index f259d89d429..08a1c5e3673 100644
--- a/chromium/base/message_loop/message_loop_current.h
+++ b/chromium/base/message_loop/message_loop_current.h
@@ -105,11 +105,6 @@ class BASE_EXPORT MessageLoopCurrent {
// DestructionObserver is receiving a notification callback.
void RemoveDestructionObserver(DestructionObserver* destruction_observer);
- // Forwards to MessageLoop::task_runner().
- // DEPRECATED(https://crbug.com/616447): Use ThreadTaskRunnerHandle::Get()
- // instead of MessageLoopCurrent::Get()->task_runner().
- scoped_refptr<SingleThreadTaskRunner> task_runner() const;
-
// Forwards to MessageLoop::SetTaskRunner().
// DEPRECATED(https://crbug.com/825327): only owners of the MessageLoop
// instance should replace its TaskRunner.
diff --git a/chromium/base/message_loop/message_loop_unittest.cc b/chromium/base/message_loop/message_loop_unittest.cc
index edc72dc0782..3ada124892f 100644
--- a/chromium/base/message_loop/message_loop_unittest.cc
+++ b/chromium/base/message_loop/message_loop_unittest.cc
@@ -267,89 +267,6 @@ void PostNTasks(int posts_remaining) {
class MessageLoopTest : public ::testing::Test {};
-#if defined(OS_ANDROID)
-void DoNotRun() {
- ASSERT_TRUE(false);
-}
-
-void RunTest_AbortDontRunMoreTasks(bool delayed, bool init_java_first) {
- WaitableEvent test_done_event(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
- std::unique_ptr<android::JavaHandlerThread> java_thread;
- if (init_java_first) {
- java_thread = android::JavaHandlerThreadHelpers::CreateJavaFirst();
- } else {
- java_thread = std::make_unique<android::JavaHandlerThread>(
- "JavaHandlerThreadForTesting from AbortDontRunMoreTasks");
- }
- java_thread->Start();
- java_thread->ListenForUncaughtExceptionsForTesting();
-
- auto target =
- BindOnce(&android::JavaHandlerThreadHelpers::ThrowExceptionAndAbort,
- &test_done_event);
- if (delayed) {
- java_thread->message_loop()->task_runner()->PostDelayedTask(
- FROM_HERE, std::move(target), TimeDelta::FromMilliseconds(10));
- } else {
- java_thread->message_loop()->task_runner()->PostTask(FROM_HERE,
- std::move(target));
- java_thread->message_loop()->task_runner()->PostTask(FROM_HERE,
- BindOnce(&DoNotRun));
- }
- test_done_event.Wait();
- java_thread->Stop();
- android::ScopedJavaLocalRef<jthrowable> exception =
- java_thread->GetUncaughtExceptionIfAny();
- ASSERT_TRUE(
- android::JavaHandlerThreadHelpers::IsExceptionTestException(exception));
-}
-
-TEST_F(MessageLoopTest, JavaExceptionAbort) {
- constexpr bool delayed = false;
- constexpr bool init_java_first = false;
- RunTest_AbortDontRunMoreTasks(delayed, init_java_first);
-}
-TEST_F(MessageLoopTest, DelayedJavaExceptionAbort) {
- constexpr bool delayed = true;
- constexpr bool init_java_first = false;
- RunTest_AbortDontRunMoreTasks(delayed, init_java_first);
-}
-TEST_F(MessageLoopTest, JavaExceptionAbortInitJavaFirst) {
- constexpr bool delayed = false;
- constexpr bool init_java_first = true;
- RunTest_AbortDontRunMoreTasks(delayed, init_java_first);
-}
-
-TEST_F(MessageLoopTest, RunTasksWhileShuttingDownJavaThread) {
- const int kNumPosts = 6;
- DummyTaskObserver observer(kNumPosts, 1);
-
- auto java_thread = std::make_unique<android::JavaHandlerThread>("test");
- java_thread->Start();
-
- java_thread->message_loop()->task_runner()->PostTask(
- FROM_HERE,
- BindOnce(
- [](android::JavaHandlerThread* java_thread,
- DummyTaskObserver* observer, int num_posts) {
- java_thread->message_loop()->AddTaskObserver(observer);
- ThreadTaskRunnerHandle::Get()->PostDelayedTask(
- FROM_HERE, BindOnce([]() { ADD_FAILURE(); }),
- TimeDelta::FromDays(1));
- java_thread->StopMessageLoopForTesting();
- PostNTasks(num_posts);
- },
- Unretained(java_thread.get()), Unretained(&observer), kNumPosts));
-
- java_thread->JoinForTesting();
- java_thread.reset();
-
- EXPECT_EQ(kNumPosts, observer.num_tasks_started());
- EXPECT_EQ(kNumPosts, observer.num_tasks_processed());
-}
-#endif // defined(OS_ANDROID)
-
#if defined(OS_WIN)
void SubPumpFunc(OnceClosure on_done) {
@@ -602,13 +519,19 @@ class MessageLoopTypedTest
case MessageLoop::TYPE_UI:
return "UI_pump";
case MessageLoop::TYPE_CUSTOM:
+ break;
#if defined(OS_ANDROID)
case MessageLoop::TYPE_JAVA:
+ break;
#endif // defined(OS_ANDROID)
#if defined(OS_MACOSX)
case MessagePump::Type::NS_RUNLOOP:
+ break;
#endif // defined(OS_MACOSX)
+#if defined(OS_WIN)
+ case MessagePump::Type::UI_WITH_WM_QUIT_SUPPORT:
break;
+#endif // defined(OS_WIN)
}
NOTREACHED();
return "";
diff --git a/chromium/base/message_loop/message_pump.cc b/chromium/base/message_loop/message_pump.cc
index d39afe9821b..a3e119e5e56 100644
--- a/chromium/base/message_loop/message_pump.cc
+++ b/chromium/base/message_loop/message_pump.cc
@@ -68,6 +68,14 @@ std::unique_ptr<MessagePump> MessagePump::Create(Type type) {
return std::make_unique<MessagePumpNSRunLoop>();
#endif
+#if defined(OS_WIN)
+ case Type::UI_WITH_WM_QUIT_SUPPORT: {
+ auto pump = std::make_unique<MessagePumpForUI>();
+ pump->EnableWmQuit();
+ return pump;
+ }
+#endif // defined(OS_WIN)
+
case Type::CUSTOM:
NOTREACHED();
return nullptr;
diff --git a/chromium/base/message_loop/message_pump.h b/chromium/base/message_loop/message_pump.h
index c6ebde1d255..082c75d94f7 100644
--- a/chromium/base/message_loop/message_pump.h
+++ b/chromium/base/message_loop/message_pump.h
@@ -41,6 +41,9 @@ class BASE_EXPORT MessagePump {
// This type of pump is backed by a NSRunLoop. This is only for use on
// OSX and IOS.
//
+ // UI_WITH_WM_QUIT_SUPPORT
+ // This type of pump supports WM_QUIT messages in addition to other native
+ // UI events. This is only for use on Windows.
enum class Type {
DEFAULT,
UI,
@@ -52,6 +55,9 @@ class BASE_EXPORT MessagePump {
#if defined(OS_MACOSX)
NS_RUNLOOP,
#endif // defined(OS_MACOSX)
+#if defined(OS_WIN)
+ UI_WITH_WM_QUIT_SUPPORT,
+#endif // defined(OS_WIN)
};
using MessagePumpFactory = std::unique_ptr<MessagePump>();
diff --git a/chromium/base/message_loop/message_pump_fuchsia.cc b/chromium/base/message_loop/message_pump_fuchsia.cc
index 25b255dd497..948785d7d04 100644
--- a/chromium/base/message_loop/message_pump_fuchsia.cc
+++ b/chromium/base/message_loop/message_pump_fuchsia.cc
@@ -92,14 +92,6 @@ void MessagePumpFuchsia::ZxHandleWatchController::HandleSignal(
controller->handler = nullptr;
- // |signal| can include other spurious things, in particular, that an fd
- // is writable, when we only asked to know when it was readable. In that
- // case, we don't want to call both the CanWrite and CanRead callback,
- // when the caller asked for only, for example, readable callbacks. So,
- // mask with the events that we actually wanted to know about.
- zx_signals_t signals = signal->trigger & signal->observed;
- DCHECK_NE(0u, signals);
-
// In the case of a persistent Watch, the Watch may be stopped and
// potentially deleted by the caller within the callback, in which case
// |controller| should not be accessed again, and we mustn't continue the
@@ -108,7 +100,7 @@ void MessagePumpFuchsia::ZxHandleWatchController::HandleSignal(
bool was_stopped = false;
controller->was_stopped_ = &was_stopped;
- controller->watcher_->OnZxHandleSignalled(wait->object, signals);
+ controller->watcher_->OnZxHandleSignalled(wait->object, signal->observed);
if (was_stopped)
return;
@@ -125,6 +117,14 @@ void MessagePumpFuchsia::FdWatchController::OnZxHandleSignalled(
uint32_t events;
fdio_unsafe_wait_end(io_, signals, &events);
+ // |events| can include other spurious things, in particular, that an fd
+ // is writable, when we only asked to know when it was readable. In that
+ // case, we don't want to call both the CanWrite and CanRead callback,
+ // when the caller asked for only, for example, readable callbacks. So,
+ // mask with the events that we actually wanted to know about.
+ events &= desired_events_;
+ DCHECK_NE(0u, events);
+
// Each |watcher_| callback we invoke may stop or delete |this|. The pump has
// set |was_stopped_| to point to a safe location on the calling stack, so we
// can use that to detect being stopped mid-callback and avoid doing further
diff --git a/chromium/base/message_loop/message_pump_perftest.cc b/chromium/base/message_loop/message_pump_perftest.cc
index c3e85f68fb8..271e9035ddc 100644
--- a/chromium/base/message_loop/message_pump_perftest.cc
+++ b/chromium/base/message_loop/message_pump_perftest.cc
@@ -28,6 +28,20 @@
#endif
namespace base {
+namespace {
+
+#if defined(OS_ANDROID)
+class JavaHandlerThreadForTest : public android::JavaHandlerThread {
+ public:
+ explicit JavaHandlerThreadForTest(const char* name)
+ : android::JavaHandlerThread(name, base::ThreadPriority::NORMAL) {}
+
+ using android::JavaHandlerThread::task_environment;
+ using android::JavaHandlerThread::TaskEnvironment;
+};
+#endif
+
+} // namespace
class ScheduleWorkTest : public testing::Test {
public:
@@ -75,7 +89,7 @@ class ScheduleWorkTest : public testing::Test {
void ScheduleWork(MessageLoop::Type target_type, int num_scheduling_threads) {
#if defined(OS_ANDROID)
if (target_type == MessageLoop::TYPE_JAVA) {
- java_thread_.reset(new android::JavaHandlerThread("target"));
+ java_thread_.reset(new JavaHandlerThreadForTest("target"));
java_thread_->Start();
} else
#endif
@@ -179,8 +193,10 @@ class ScheduleWorkTest : public testing::Test {
sequence_manager::internal::SequenceManagerImpl* target_message_loop_base() {
#if defined(OS_ANDROID)
- if (java_thread_)
- return java_thread_->message_loop()->GetSequenceManagerImpl();
+ if (java_thread_) {
+ return static_cast<sequence_manager::internal::SequenceManagerImpl*>(
+ java_thread_->task_environment()->sequence_manager.get());
+ }
#endif
return MessageLoopCurrent::Get()->GetCurrentSequenceManagerImpl();
}
@@ -189,7 +205,7 @@ class ScheduleWorkTest : public testing::Test {
std::unique_ptr<Thread> target_;
MessageLoop* message_loop_;
#if defined(OS_ANDROID)
- std::unique_ptr<android::JavaHandlerThread> java_thread_;
+ std::unique_ptr<JavaHandlerThreadForTest> java_thread_;
#endif
std::unique_ptr<base::TimeDelta[]> scheduling_times_;
std::unique_ptr<base::TimeDelta[]> scheduling_thread_times_;
diff --git a/chromium/base/message_loop/message_pump_unittest.cc b/chromium/base/message_loop/message_pump_unittest.cc
index 2306f75c999..e7397cd6fba 100644
--- a/chromium/base/message_loop/message_pump_unittest.cc
+++ b/chromium/base/message_loop/message_pump_unittest.cc
@@ -6,10 +6,14 @@
#include <type_traits>
+#include "base/bind.h"
#include "base/message_loop/message_loop.h"
#include "base/message_loop/message_pump_for_io.h"
#include "base/message_loop/message_pump_for_ui.h"
+#include "base/run_loop.h"
+#include "base/task/single_thread_task_executor.h"
#include "base/test/bind_test_util.h"
+#include "base/test/test_timeouts.h"
#include "base/threading/thread.h"
#include "build/build_config.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -76,6 +80,9 @@ bool PumpTypeUsesDoSomeWork(MessageLoop::Type type) {
#if defined(OS_MACOSX)
case MessagePump::Type::NS_RUNLOOP:
#endif // defined(OS_MACOSX)
+#if defined(OS_WIN)
+ case MessagePump::Type::UI_WITH_WM_QUIT_SUPPORT:
+#endif // defined(OS_WIN)
// Not tested in this file.
NOTREACHED();
return false;
@@ -354,4 +361,31 @@ INSTANTIATE_TEST_SUITE_P(,
MessageLoop::TYPE_UI,
MessageLoop::TYPE_IO));
+#if defined(OS_WIN)
+
+TEST(MessagePumpTestWin, WmQuitIsNotIgnoredWithEnableWmQuit) {
+ SingleThreadTaskExecutor task_executor(
+ MessagePump::Type::UI_WITH_WM_QUIT_SUPPORT);
+
+ // Post a WM_QUIT message to the current thread.
+ ::PostQuitMessage(0);
+
+ // Post a task to the current thread, with a small delay to make it less
+ // likely that we process the posted task before looking for WM_* messages.
+ RunLoop run_loop;
+ task_executor.task_runner()->PostDelayedTask(FROM_HERE,
+ BindOnce(
+ [](OnceClosure closure) {
+ ADD_FAILURE();
+ std::move(closure).Run();
+ },
+ run_loop.QuitClosure()),
+ TestTimeouts::tiny_timeout());
+
+ // Run the loop. It should not result in ADD_FAILURE() getting called.
+ run_loop.Run();
+}
+
+#endif // defined(OS_WIN)
+
} // namespace base
diff --git a/chromium/base/metrics/field_trial.cc b/chromium/base/metrics/field_trial.cc
index 94e663f7a77..09eb38fb374 100644
--- a/chromium/base/metrics/field_trial.cc
+++ b/chromium/base/metrics/field_trial.cc
@@ -179,17 +179,15 @@ void OnOutOfMemory(size_t size) {
#if !defined(OS_NACL)
// Returns whether the operation succeeded.
-bool DeserializeGUIDFromStringPieces(base::StringPiece first,
- base::StringPiece second,
- base::UnguessableToken* guid) {
+bool DeserializeGUIDFromStringPieces(StringPiece first,
+ StringPiece second,
+ UnguessableToken* guid) {
uint64_t high = 0;
uint64_t low = 0;
- if (!base::StringToUint64(first, &high) ||
- !base::StringToUint64(second, &low)) {
+ if (!StringToUint64(first, &high) || !StringToUint64(second, &low))
return false;
- }
- *guid = base::UnguessableToken::Deserialize(high, low);
+ *guid = UnguessableToken::Deserialize(high, low);
return true;
}
#endif // !defined(OS_NACL)
@@ -456,6 +454,9 @@ FieldTrialList::~FieldTrialList() {
it->second->Release();
registered_.erase(it->first);
}
+ // Note: If this DCHECK fires in a test that uses ScopedFeatureList, it is
+ // likely caused by nested ScopedFeatureLists being destroyed in a different
+ // order than they are initialized.
DCHECK_EQ(this, global_);
global_ = nullptr;
}
@@ -688,7 +689,7 @@ void FieldTrialList::GetActiveFieldTrialGroupsFromString(
// static
void FieldTrialList::GetInitiallyActiveFieldTrials(
- const base::CommandLine& command_line,
+ const CommandLine& command_line,
FieldTrial::ActiveGroups* active_groups) {
DCHECK(global_);
DCHECK(global_->create_trials_from_command_line_called_);
@@ -733,7 +734,7 @@ bool FieldTrialList::CreateTrialsFromString(
const std::string trial_name = entry.trial_name.as_string();
const std::string group_name = entry.group_name.as_string();
- if (ContainsKey(ignored_trial_names, trial_name)) {
+ if (Contains(ignored_trial_names, trial_name)) {
// This is to warn that the field trial forced through command-line
// input is unforcable.
// Use --enable-logging or --enable-logging=stderr to see this warning.
@@ -796,7 +797,7 @@ void FieldTrialList::CreateTrialsFromCommandLine(
// static
void FieldTrialList::CreateFeaturesFromCommandLine(
- const base::CommandLine& command_line,
+ const CommandLine& command_line,
const char* enable_features_switch,
const char* disable_features_switch,
FeatureList* feature_list) {
@@ -854,10 +855,10 @@ int FieldTrialList::GetFieldTrialDescriptor() {
#endif
// static
-base::ReadOnlySharedMemoryRegion
+ReadOnlySharedMemoryRegion
FieldTrialList::DuplicateFieldTrialSharedMemoryForTesting() {
if (!global_)
- return base::ReadOnlySharedMemoryRegion();
+ return ReadOnlySharedMemoryRegion();
return global_->readonly_allocator_region_.Duplicate();
}
@@ -987,8 +988,7 @@ void FieldTrialList::NotifyFieldTrialGroupSelection(FieldTrial* field_trial) {
// Recording for stability debugging has to be done inline as a task posted
// to an observer may not get executed before a crash.
- base::debug::GlobalActivityTracker* tracker =
- base::debug::GlobalActivityTracker::Get();
+ debug::GlobalActivityTracker* tracker = debug::GlobalActivityTracker::Get();
if (tracker) {
tracker->RecordFieldTrial(field_trial->trial_name(),
field_trial->group_name_internal());
@@ -1140,13 +1140,25 @@ FieldTrialList::GetAllFieldTrialsFromPersistentAllocator(
}
// static
-bool FieldTrialList::IsGlobalSetForTesting() {
- return global_ != nullptr;
+FieldTrialList* FieldTrialList::GetInstance() {
+ return global_;
+}
+
+// static
+FieldTrialList* FieldTrialList::BackupInstanceForTesting() {
+ FieldTrialList* instance = global_;
+ global_ = nullptr;
+ return instance;
+}
+
+// static
+void FieldTrialList::RestoreInstanceForTesting(FieldTrialList* instance) {
+ global_ = instance;
}
// static
std::string FieldTrialList::SerializeSharedMemoryRegionMetadata(
- const base::ReadOnlySharedMemoryRegion& shm) {
+ const ReadOnlySharedMemoryRegion& shm) {
std::stringstream ss;
#if defined(OS_WIN)
// Tell the child process the name of the inherited HANDLE.
@@ -1163,7 +1175,7 @@ std::string FieldTrialList::SerializeSharedMemoryRegionMetadata(
#error Unsupported OS
#endif
- base::UnguessableToken guid = shm.GetGUID();
+ UnguessableToken guid = shm.GetGUID();
ss << guid.GetHighForSerialization() << "," << guid.GetLowForSerialization();
ss << "," << shm.GetSize();
return ss.str();
@@ -1173,27 +1185,27 @@ std::string FieldTrialList::SerializeSharedMemoryRegionMetadata(
(defined(OS_MACOSX) && !defined(OS_IOS))
// static
-base::ReadOnlySharedMemoryRegion
+ReadOnlySharedMemoryRegion
FieldTrialList::DeserializeSharedMemoryRegionMetadata(
const std::string& switch_value) {
- std::vector<base::StringPiece> tokens = base::SplitStringPiece(
- switch_value, ",", base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL);
+ std::vector<StringPiece> tokens =
+ SplitStringPiece(switch_value, ",", KEEP_WHITESPACE, SPLIT_WANT_ALL);
if (tokens.size() != 4)
- return base::ReadOnlySharedMemoryRegion();
+ return ReadOnlySharedMemoryRegion();
int field_trial_handle = 0;
- if (!base::StringToInt(tokens[0], &field_trial_handle))
- return base::ReadOnlySharedMemoryRegion();
+ if (!StringToInt(tokens[0], &field_trial_handle))
+ return ReadOnlySharedMemoryRegion();
#if defined(OS_FUCHSIA)
zx_handle_t handle = static_cast<zx_handle_t>(field_trial_handle);
zx::vmo scoped_handle = zx::vmo(handle);
#elif defined(OS_WIN)
HANDLE handle = reinterpret_cast<HANDLE>(field_trial_handle);
- if (base::IsCurrentProcessElevated()) {
- // base::LaunchElevatedProcess doesn't have a way to duplicate the handle,
+ if (IsCurrentProcessElevated()) {
+ // LaunchElevatedProcess doesn't have a way to duplicate the handle,
// but this process can since by definition it's not sandboxed.
- base::ProcessId parent_pid = base::GetParentProcessId(GetCurrentProcess());
+ ProcessId parent_pid = GetParentProcessId(GetCurrentProcess());
HANDLE parent_handle = OpenProcess(PROCESS_ALL_ACCESS, FALSE, parent_pid);
// TODO(https://crbug.com/916461): Duplicating the handle is known to fail
// with ERROR_ACCESS_DENIED when the parent process is being torn down. This
@@ -1205,55 +1217,54 @@ FieldTrialList::DeserializeSharedMemoryRegionMetadata(
win::ScopedHandle scoped_handle(handle);
#elif defined(OS_MACOSX) && !defined(OS_IOS)
auto* rendezvous = MachPortRendezvousClient::GetInstance();
+ if (!rendezvous)
+ return ReadOnlySharedMemoryRegion();
mac::ScopedMachSendRight scoped_handle =
rendezvous->TakeSendRight(field_trial_handle);
if (!scoped_handle.is_valid())
- return base::ReadOnlySharedMemoryRegion();
+ return ReadOnlySharedMemoryRegion();
#endif
- base::UnguessableToken guid;
+ UnguessableToken guid;
if (!DeserializeGUIDFromStringPieces(tokens[1], tokens[2], &guid))
- return base::ReadOnlySharedMemoryRegion();
+ return ReadOnlySharedMemoryRegion();
int size;
- if (!base::StringToInt(tokens[3], &size))
- return base::ReadOnlySharedMemoryRegion();
+ if (!StringToInt(tokens[3], &size))
+ return ReadOnlySharedMemoryRegion();
- auto platform_handle = base::subtle::PlatformSharedMemoryRegion::Take(
+ auto platform_handle = subtle::PlatformSharedMemoryRegion::Take(
std::move(scoped_handle),
- base::subtle::PlatformSharedMemoryRegion::Mode::kReadOnly,
+ subtle::PlatformSharedMemoryRegion::Mode::kReadOnly,
static_cast<size_t>(size), guid);
- return base::ReadOnlySharedMemoryRegion::Deserialize(
- std::move(platform_handle));
+ return ReadOnlySharedMemoryRegion::Deserialize(std::move(platform_handle));
}
#elif defined(OS_POSIX) && !defined(OS_NACL)
// static
-base::ReadOnlySharedMemoryRegion
+ReadOnlySharedMemoryRegion
FieldTrialList::DeserializeSharedMemoryRegionMetadata(
int fd,
const std::string& switch_value) {
- std::vector<base::StringPiece> tokens = base::SplitStringPiece(
- switch_value, ",", base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL);
+ std::vector<StringPiece> tokens =
+ SplitStringPiece(switch_value, ",", KEEP_WHITESPACE, SPLIT_WANT_ALL);
if (tokens.size() != 3)
return ReadOnlySharedMemoryRegion();
- base::UnguessableToken guid;
+ UnguessableToken guid;
if (!DeserializeGUIDFromStringPieces(tokens[0], tokens[1], &guid))
return ReadOnlySharedMemoryRegion();
int size;
- if (!base::StringToInt(tokens[2], &size))
+ if (!StringToInt(tokens[2], &size))
return ReadOnlySharedMemoryRegion();
- auto platform_region = base::subtle::PlatformSharedMemoryRegion::Take(
- base::ScopedFD(fd),
- base::subtle::PlatformSharedMemoryRegion::Mode::kReadOnly,
+ auto platform_region = subtle::PlatformSharedMemoryRegion::Take(
+ ScopedFD(fd), subtle::PlatformSharedMemoryRegion::Mode::kReadOnly,
static_cast<size_t>(size), guid);
- return base::ReadOnlySharedMemoryRegion::Deserialize(
- std::move(platform_region));
+ return ReadOnlySharedMemoryRegion::Deserialize(std::move(platform_region));
}
#endif
@@ -1263,7 +1274,7 @@ FieldTrialList::DeserializeSharedMemoryRegionMetadata(
// static
bool FieldTrialList::CreateTrialsFromSwitchValue(
const std::string& switch_value) {
- base::ReadOnlySharedMemoryRegion shm =
+ ReadOnlySharedMemoryRegion shm =
DeserializeSharedMemoryRegionMetadata(switch_value);
if (!shm.IsValid())
return false;
@@ -1281,7 +1292,7 @@ bool FieldTrialList::CreateTrialsFromDescriptor(
if (fd == -1)
return false;
- base::ReadOnlySharedMemoryRegion shm =
+ ReadOnlySharedMemoryRegion shm =
DeserializeSharedMemoryRegionMetadata(fd, switch_value);
if (!shm.IsValid())
return false;
@@ -1294,8 +1305,8 @@ bool FieldTrialList::CreateTrialsFromDescriptor(
// static
bool FieldTrialList::CreateTrialsFromSharedMemoryRegion(
- const base::ReadOnlySharedMemoryRegion& shm_region) {
- base::ReadOnlySharedMemoryMapping shm_mapping =
+ const ReadOnlySharedMemoryRegion& shm_region) {
+ ReadOnlySharedMemoryMapping shm_mapping =
shm_region.MapAt(0, kFieldTrialAllocationSize);
if (!shm_mapping.IsValid())
OnOutOfMemory(kFieldTrialAllocationSize);
@@ -1306,7 +1317,7 @@ bool FieldTrialList::CreateTrialsFromSharedMemoryRegion(
// static
bool FieldTrialList::CreateTrialsFromSharedMemoryMapping(
- base::ReadOnlySharedMemoryMapping shm_mapping) {
+ ReadOnlySharedMemoryMapping shm_mapping) {
global_->field_trial_allocator_ =
std::make_unique<ReadOnlySharedPersistentMemoryAllocator>(
std::move(shm_mapping), 0, kAllocatorName);
@@ -1347,8 +1358,8 @@ void FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded() {
if (global_->field_trial_allocator_ != nullptr)
return;
- base::MappedReadOnlyRegion shm =
- base::ReadOnlySharedMemoryRegion::Create(kFieldTrialAllocationSize);
+ MappedReadOnlyRegion shm =
+ ReadOnlySharedMemoryRegion::Create(kFieldTrialAllocationSize);
if (!shm.IsValid())
OnOutOfMemory(kFieldTrialAllocationSize);
diff --git a/chromium/base/metrics/field_trial.h b/chromium/base/metrics/field_trial.h
index 5834115c4cd..ae6068a3d34 100644
--- a/chromium/base/metrics/field_trial.h
+++ b/chromium/base/metrics/field_trial.h
@@ -534,7 +534,7 @@ class BASE_EXPORT FieldTrialList {
// holding field trial information.
// Must be called only after a call to CreateTrialsFromCommandLine().
static void GetInitiallyActiveFieldTrials(
- const base::CommandLine& command_line,
+ const CommandLine& command_line,
FieldTrial::ActiveGroups* active_groups);
// Use a state string (re: StatesToString()) to augment the current list of
@@ -558,24 +558,22 @@ class BASE_EXPORT FieldTrialList {
// contain the shared memory handle that contains the field trial allocator.
// We need the |field_trial_handle_switch| and |fd_key| arguments to be passed
// in since base/ can't depend on content/.
- static void CreateTrialsFromCommandLine(const base::CommandLine& cmd_line,
+ static void CreateTrialsFromCommandLine(const CommandLine& cmd_line,
const char* field_trial_handle_switch,
int fd_key);
// Creates base::Feature overrides from the command line by first trying to
// use shared memory and then falling back to the command line if it fails.
- static void CreateFeaturesFromCommandLine(
- const base::CommandLine& command_line,
- const char* enable_features_switch,
- const char* disable_features_switch,
- FeatureList* feature_list);
+ static void CreateFeaturesFromCommandLine(const CommandLine& command_line,
+ const char* enable_features_switch,
+ const char* disable_features_switch,
+ FeatureList* feature_list);
#if defined(OS_WIN)
// On Windows, we need to explicitly pass down any handles to be inherited.
// This function adds the shared memory handle to field trial state to the
// list of handles to be inherited.
- static void AppendFieldTrialHandleIfNeeded(
- base::HandlesToInheritVector* handles);
+ static void AppendFieldTrialHandleIfNeeded(HandlesToInheritVector* handles);
#elif defined(OS_FUCHSIA)
// TODO(fuchsia): Implement shared-memory configuration (crbug.com/752368).
#elif defined(OS_MACOSX) && !defined(OS_IOS)
@@ -590,8 +588,7 @@ class BASE_EXPORT FieldTrialList {
// descriptor.
static int GetFieldTrialDescriptor();
#endif
- static base::ReadOnlySharedMemoryRegion
- DuplicateFieldTrialSharedMemoryForTesting();
+ static ReadOnlySharedMemoryRegion DuplicateFieldTrialSharedMemoryForTesting();
// Adds a switch to the command line containing the field trial state as a
// string (if not using shared memory to share field trial state), or the
@@ -601,7 +598,7 @@ class BASE_EXPORT FieldTrialList {
static void CopyFieldTrialStateToFlags(const char* field_trial_handle_switch,
const char* enable_features_switch,
const char* disable_features_switch,
- base::CommandLine* cmd_line);
+ CommandLine* cmd_line);
// Create a FieldTrial with the given |name| and using 100% probability for
// the FieldTrial, force FieldTrial to have the same group string as
@@ -667,8 +664,16 @@ class BASE_EXPORT FieldTrialList {
GetAllFieldTrialsFromPersistentAllocator(
PersistentMemoryAllocator const& allocator);
- // Returns true if a global field trial list is set. Only used for testing.
- static bool IsGlobalSetForTesting();
+ // Returns a pointer to the global instance. This is exposed so that it can
+ // be used in a DCHECK in FeatureList and ScopedFeatureList test-only logic
+ // and is not intended to be used widely beyond those cases.
+ static FieldTrialList* GetInstance();
+
+ // For testing, sets the global instance to null and returns the previous one.
+ static FieldTrialList* BackupInstanceForTesting();
+
+ // For testing, sets the global instance to |instance|.
+ static void RestoreInstanceForTesting(FieldTrialList* instance);
private:
// Allow tests to access our innards for testing purposes.
@@ -688,13 +693,13 @@ class BASE_EXPORT FieldTrialList {
// a GUID. Serialization and deserialization doesn't actually transport the
// underlying OS resource - that must be done by the Process launcher.
static std::string SerializeSharedMemoryRegionMetadata(
- const base::ReadOnlySharedMemoryRegion& shm);
+ const ReadOnlySharedMemoryRegion& shm);
#if defined(OS_WIN) || defined(OS_FUCHSIA) || \
(defined(OS_MACOSX) && !defined(OS_IOS))
- static base::ReadOnlySharedMemoryRegion DeserializeSharedMemoryRegionMetadata(
+ static ReadOnlySharedMemoryRegion DeserializeSharedMemoryRegionMetadata(
const std::string& switch_value);
#elif defined(OS_POSIX) && !defined(OS_NACL)
- static base::ReadOnlySharedMemoryRegion DeserializeSharedMemoryRegionMetadata(
+ static ReadOnlySharedMemoryRegion DeserializeSharedMemoryRegionMetadata(
int fd,
const std::string& switch_value);
#endif
@@ -719,7 +724,7 @@ class BASE_EXPORT FieldTrialList {
// and creates field trials via CreateTrialsFromSharedMemoryMapping(). Returns
// true if successful and false otherwise.
static bool CreateTrialsFromSharedMemoryRegion(
- const base::ReadOnlySharedMemoryRegion& shm_region);
+ const ReadOnlySharedMemoryRegion& shm_region);
// Expects a mapped piece of shared memory |shm_mapping| that was created from
// the browser process's field_trial_allocator and shared via the command
@@ -727,7 +732,7 @@ class BASE_EXPORT FieldTrialList {
// trials in it, and creates them via CreateFieldTrial(). Returns true if
// successful and false otherwise.
static bool CreateTrialsFromSharedMemoryMapping(
- base::ReadOnlySharedMemoryMapping shm_mapping);
+ ReadOnlySharedMemoryMapping shm_mapping);
// Instantiate the field trial allocator, add all existing field trials to it,
// and duplicates its handle to a read-only handle, which gets stored in
@@ -794,7 +799,7 @@ class BASE_EXPORT FieldTrialList {
// Readonly copy of the region to the allocator. Needs to be a member variable
// because it's needed from both CopyFieldTrialStateToFlags() and
// AppendFieldTrialHandleIfNeeded().
- base::ReadOnlySharedMemoryRegion readonly_allocator_region_;
+ ReadOnlySharedMemoryRegion readonly_allocator_region_;
// Tracks whether CreateTrialsFromCommandLine() has been called.
bool create_trials_from_command_line_called_ = false;
diff --git a/chromium/base/metrics/field_trial_param_associator.cc b/chromium/base/metrics/field_trial_param_associator.cc
index af76eafaca4..6360b8cc467 100644
--- a/chromium/base/metrics/field_trial_param_associator.cc
+++ b/chromium/base/metrics/field_trial_param_associator.cc
@@ -26,7 +26,7 @@ bool FieldTrialParamAssociator::AssociateFieldTrialParams(
AutoLock scoped_lock(lock_);
const FieldTrialKey key(trial_name, group_name);
- if (ContainsKey(field_trial_params_, key))
+ if (Contains(field_trial_params_, key))
return false;
field_trial_params_[key] = params;
@@ -57,7 +57,7 @@ bool FieldTrialParamAssociator::GetFieldTrialParamsWithoutFallback(
AutoLock scoped_lock(lock_);
const FieldTrialKey key(trial_name, group_name);
- if (!ContainsKey(field_trial_params_, key))
+ if (!Contains(field_trial_params_, key))
return false;
*params = field_trial_params_[key];
diff --git a/chromium/base/metrics/field_trial_params.cc b/chromium/base/metrics/field_trial_params.cc
index 0680d502ccb..e8eb452ca21 100644
--- a/chromium/base/metrics/field_trial_params.cc
+++ b/chromium/base/metrics/field_trial_params.cc
@@ -4,32 +4,91 @@
#include "base/metrics/field_trial_params.h"
+#include <set>
+#include <utility>
+#include <vector>
+
#include "base/feature_list.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/field_trial_param_associator.h"
#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_split.h"
+#include "base/strings/stringprintf.h"
namespace base {
bool AssociateFieldTrialParams(const std::string& trial_name,
const std::string& group_name,
const FieldTrialParams& params) {
- return base::FieldTrialParamAssociator::GetInstance()
- ->AssociateFieldTrialParams(trial_name, group_name, params);
+ return FieldTrialParamAssociator::GetInstance()->AssociateFieldTrialParams(
+ trial_name, group_name, params);
+}
+
+bool AssociateFieldTrialParamsFromString(
+ const std::string& params_string,
+ FieldTrialParamsDecodeStringFunc decode_data_func) {
+ // Format: Trial1.Group1:k1/v1/k2/v2,Trial2.Group2:k1/v1/k2/v2
+ std::set<std::pair<std::string, std::string>> trial_groups;
+ for (StringPiece experiment_group :
+ SplitStringPiece(params_string, ",", TRIM_WHITESPACE, SPLIT_WANT_ALL)) {
+ std::vector<StringPiece> experiment = SplitStringPiece(
+ experiment_group, ":", TRIM_WHITESPACE, SPLIT_WANT_ALL);
+ if (experiment.size() != 2) {
+ DLOG(ERROR) << "Experiment and params should be separated by ':'";
+ return false;
+ }
+
+ std::vector<std::string> group_parts =
+ SplitString(experiment[0], ".", TRIM_WHITESPACE, SPLIT_WANT_ALL);
+ if (group_parts.size() != 2) {
+ DLOG(ERROR) << "Trial and group name should be separated by '.'";
+ return false;
+ }
+
+ std::vector<std::string> key_values =
+ SplitString(experiment[1], "/", TRIM_WHITESPACE, SPLIT_WANT_ALL);
+ if (key_values.size() % 2 != 0) {
+ DLOG(ERROR) << "Param name and param value should be separated by '/'";
+ return false;
+ }
+ std::string trial = decode_data_func(group_parts[0]);
+ std::string group = decode_data_func(group_parts[1]);
+ auto trial_group = std::make_pair(trial, group);
+ if (trial_groups.find(trial_group) != trial_groups.end()) {
+ DLOG(ERROR) << StringPrintf(
+ "A (trial, group) pair listed more than once. (%s, %s)",
+ trial.c_str(), group.c_str());
+ return false;
+ }
+ trial_groups.insert(trial_group);
+ std::map<std::string, std::string> params;
+ for (size_t i = 0; i < key_values.size(); i += 2) {
+ std::string key = decode_data_func(key_values[i]);
+ std::string value = decode_data_func(key_values[i + 1]);
+ params[key] = value;
+ }
+ bool result = AssociateFieldTrialParams(trial, group, params);
+ if (!result) {
+ DLOG(ERROR) << "Failed to associate field trial params for group \""
+ << group << "\" in trial \"" << trial << "\"";
+ return false;
+ }
+ }
+ return true;
}
bool GetFieldTrialParams(const std::string& trial_name,
FieldTrialParams* params) {
- return base::FieldTrialParamAssociator::GetInstance()->GetFieldTrialParams(
+ return FieldTrialParamAssociator::GetInstance()->GetFieldTrialParams(
trial_name, params);
}
-bool GetFieldTrialParamsByFeature(const base::Feature& feature,
+bool GetFieldTrialParamsByFeature(const Feature& feature,
FieldTrialParams* params) {
- if (!base::FeatureList::IsEnabled(feature))
+ if (!FeatureList::IsEnabled(feature))
return false;
- base::FieldTrial* trial = base::FeatureList::GetFieldTrial(feature);
+ FieldTrial* trial = FeatureList::GetFieldTrial(feature);
if (!trial)
return false;
@@ -47,25 +106,25 @@ std::string GetFieldTrialParamValue(const std::string& trial_name,
return std::string();
}
-std::string GetFieldTrialParamValueByFeature(const base::Feature& feature,
+std::string GetFieldTrialParamValueByFeature(const Feature& feature,
const std::string& param_name) {
- if (!base::FeatureList::IsEnabled(feature))
+ if (!FeatureList::IsEnabled(feature))
return std::string();
- base::FieldTrial* trial = base::FeatureList::GetFieldTrial(feature);
+ FieldTrial* trial = FeatureList::GetFieldTrial(feature);
if (!trial)
return std::string();
return GetFieldTrialParamValue(trial->trial_name(), param_name);
}
-int GetFieldTrialParamByFeatureAsInt(const base::Feature& feature,
+int GetFieldTrialParamByFeatureAsInt(const Feature& feature,
const std::string& param_name,
int default_value) {
std::string value_as_string =
GetFieldTrialParamValueByFeature(feature, param_name);
int value_as_int = 0;
- if (!base::StringToInt(value_as_string, &value_as_int)) {
+ if (!StringToInt(value_as_string, &value_as_int)) {
if (!value_as_string.empty()) {
DLOG(WARNING) << "Failed to parse field trial param " << param_name
<< " with string value " << value_as_string
@@ -78,13 +137,13 @@ int GetFieldTrialParamByFeatureAsInt(const base::Feature& feature,
return value_as_int;
}
-double GetFieldTrialParamByFeatureAsDouble(const base::Feature& feature,
+double GetFieldTrialParamByFeatureAsDouble(const Feature& feature,
const std::string& param_name,
double default_value) {
std::string value_as_string =
GetFieldTrialParamValueByFeature(feature, param_name);
double value_as_double = 0;
- if (!base::StringToDouble(value_as_string, &value_as_double)) {
+ if (!StringToDouble(value_as_string, &value_as_double)) {
if (!value_as_string.empty()) {
DLOG(WARNING) << "Failed to parse field trial param " << param_name
<< " with string value " << value_as_string
@@ -97,7 +156,7 @@ double GetFieldTrialParamByFeatureAsDouble(const base::Feature& feature,
return value_as_double;
}
-bool GetFieldTrialParamByFeatureAsBool(const base::Feature& feature,
+bool GetFieldTrialParamByFeatureAsBool(const Feature& feature,
const std::string& param_name,
bool default_value) {
std::string value_as_string =
@@ -134,7 +193,7 @@ bool FeatureParam<bool>::Get() const {
return GetFieldTrialParamByFeatureAsBool(*feature, name, default_value);
}
-void LogInvalidEnumValue(const base::Feature& feature,
+void LogInvalidEnumValue(const Feature& feature,
const std::string& param_name,
const std::string& value_as_string,
int default_value_as_int) {
diff --git a/chromium/base/metrics/field_trial_params.h b/chromium/base/metrics/field_trial_params.h
index b2e838f3879..a9bd0c54c93 100644
--- a/chromium/base/metrics/field_trial_params.h
+++ b/chromium/base/metrics/field_trial_params.h
@@ -17,6 +17,9 @@ struct Feature;
// Key-value mapping type for field trial parameters.
typedef std::map<std::string, std::string> FieldTrialParams;
+// Param string decoding function for AssociateFieldTrialParamsFromString().
+typedef std::string (*FieldTrialParamsDecodeStringFunc)(const std::string& str);
+
// Associates the specified set of key-value |params| with the field trial
// specified by |trial_name| and |group_name|. Fails and returns false if the
// specified field trial already has params associated with it or the trial
@@ -25,6 +28,13 @@ BASE_EXPORT bool AssociateFieldTrialParams(const std::string& trial_name,
const std::string& group_name,
const FieldTrialParams& params);
+// Provides a mechanism to associate multiple sets of params with multiple
+// with a formatted string as returned by FieldTrialList::AllParamsToString().
+// |decode_data_func| allows specifying a custom decoding function.
+BASE_EXPORT bool AssociateFieldTrialParamsFromString(
+ const std::string& params_string,
+ FieldTrialParamsDecodeStringFunc decode_data_func);
+
// Retrieves the set of key-value |params| for the specified field trial, based
// on its selected group. If the field trial does not exist or its selected
// group does not have any parameters associated with it, returns false and
diff --git a/chromium/base/metrics/field_trial_unittest.cc b/chromium/base/metrics/field_trial_unittest.cc
index c542bcf7930..f925fbcced6 100644
--- a/chromium/base/metrics/field_trial_unittest.cc
+++ b/chromium/base/metrics/field_trial_unittest.cc
@@ -1212,10 +1212,8 @@ TEST(FieldTrialListTest, MAYBE_TestCopyFieldTrialStateToFlags) {
}
TEST(FieldTrialListTest, InstantiateAllocator) {
- test::ScopedFeatureList scoped_feature_list;
- scoped_feature_list.Init();
-
FieldTrialList field_trial_list(nullptr);
+
FieldTrialList::CreateFieldTrial("Trial1", "Group1");
FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded();
@@ -1240,7 +1238,6 @@ TEST(FieldTrialListTest, AddTrialsToAllocator) {
test::ScopedFeatureList scoped_feature_list;
scoped_feature_list.Init();
- FieldTrialList field_trial_list(nullptr);
FieldTrialList::CreateFieldTrial("Trial1", "Group1");
FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded();
FieldTrialList::AllStatesToString(&save_string, false);
@@ -1267,7 +1264,6 @@ TEST(FieldTrialListTest, DoNotAddSimulatedFieldTrialsToAllocator) {
// Create a simulated trial and a real trial and call group() on them, which
// should only add the real trial to the field trial allocator.
- FieldTrialList field_trial_list(nullptr);
FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded();
// This shouldn't add to the allocator.
@@ -1304,7 +1300,6 @@ TEST(FieldTrialListTest, AssociateFieldTrialParams) {
std::string group_name("Group1");
// Create a field trial with some params.
- FieldTrialList field_trial_list(nullptr);
FieldTrialList::CreateFieldTrial(trial_name, group_name);
std::map<std::string, std::string> params;
params["key1"] = "value1";
@@ -1346,7 +1341,6 @@ TEST(FieldTrialListTest, MAYBE_ClearParamsFromSharedMemory) {
scoped_feature_list.Init();
// Create a field trial with some params.
- FieldTrialList field_trial_list(nullptr);
FieldTrial* trial =
FieldTrialList::CreateFieldTrial(trial_name, group_name);
std::map<std::string, std::string> params;
diff --git a/chromium/base/metrics/histogram_base.cc b/chromium/base/metrics/histogram_base.cc
index deaa36eb8f2..220578d16fd 100644
--- a/chromium/base/metrics/histogram_base.cc
+++ b/chromium/base/metrics/histogram_base.cc
@@ -11,13 +11,13 @@
#include <utility>
#include "base/json/json_string_value_serializer.h"
-#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/metrics/histogram.h"
#include "base/metrics/histogram_macros.h"
#include "base/metrics/histogram_samples.h"
#include "base/metrics/sparse_histogram.h"
#include "base/metrics/statistics_recorder.h"
+#include "base/no_destructor.h"
#include "base/numerics/safe_conversions.h"
#include "base/pickle.h"
#include "base/process/process_handle.h"
@@ -143,8 +143,8 @@ void HistogramBase::ValidateHistogramContents() const {}
void HistogramBase::WriteJSON(std::string* output,
JSONVerbosityLevel verbosity_level) const {
- Count count;
- int64_t sum;
+ Count count = 0;
+ int64_t sum = 0;
std::unique_ptr<ListValue> buckets(new ListValue());
GetCountAndBucketData(&count, &sum, buckets.get());
std::unique_ptr<DictionaryValue> parameters(new DictionaryValue());
@@ -204,11 +204,11 @@ char const* HistogramBase::GetPermanentName(const std::string& name) {
// A set of histogram names that provides the "permanent" lifetime required
// by histogram objects for those strings that are not already code constants
// or held in persistent memory.
- static LazyInstance<std::set<std::string>>::Leaky permanent_names;
- static LazyInstance<Lock>::Leaky permanent_names_lock;
+ static base::NoDestructor<std::set<std::string>> permanent_names;
+ static base::NoDestructor<Lock> permanent_names_lock;
- AutoLock lock(permanent_names_lock.Get());
- auto result = permanent_names.Get().insert(name);
+ AutoLock lock(*permanent_names_lock);
+ auto result = permanent_names->insert(name);
return result.first->c_str();
}
diff --git a/chromium/base/metrics/histogram_functions.cc b/chromium/base/metrics/histogram_functions.cc
index 687f235301d..cc2828ff8fc 100644
--- a/chromium/base/metrics/histogram_functions.cc
+++ b/chromium/base/metrics/histogram_functions.cc
@@ -170,7 +170,7 @@ void UmaHistogramCustomMicrosecondsTimes(const std::string& name,
TimeDelta min,
TimeDelta max,
int buckets) {
- HistogramBase* histogram = Histogram::FactoryTimeGet(
+ HistogramBase* histogram = Histogram::FactoryMicrosecondsTimeGet(
name, min, max, buckets, HistogramBase::kUmaTargetedHistogramFlag);
histogram->AddTimeMicrosecondsGranularity(sample);
}
@@ -180,7 +180,7 @@ void UmaHistogramCustomMicrosecondsTimes(const char* name,
TimeDelta min,
TimeDelta max,
int buckets) {
- HistogramBase* histogram = Histogram::FactoryTimeGet(
+ HistogramBase* histogram = Histogram::FactoryMicrosecondsTimeGet(
name, min, max, buckets, HistogramBase::kUmaTargetedHistogramFlag);
histogram->AddTimeMicrosecondsGranularity(sample);
}
@@ -197,6 +197,24 @@ void UmaHistogramMicrosecondsTimes(const char* name, TimeDelta sample) {
TimeDelta::FromSeconds(10), 50);
}
+// TODO(crbug.com/983261) Remove this method after moving to
+// UmaHistogramMicrosecondsTimes.
+void UmaHistogramMicrosecondsTimesUnderTenMilliseconds(const std::string& name,
+ TimeDelta sample) {
+ UmaHistogramCustomMicrosecondsTimes(name, sample,
+ TimeDelta::FromMicroseconds(1),
+ TimeDelta::FromMilliseconds(10), 50);
+}
+
+// TODO(crbug.com/983261) Remove this method after moving to
+// UmaHistogramMicrosecondsTimes.
+void UmaHistogramMicrosecondsTimesUnderTenMilliseconds(const char* name,
+ TimeDelta sample) {
+ UmaHistogramCustomMicrosecondsTimes(name, sample,
+ TimeDelta::FromMicroseconds(1),
+ TimeDelta::FromMilliseconds(10), 50);
+}
+
void UmaHistogramMemoryKB(const std::string& name, int sample) {
UmaHistogramCustomCounts(name, sample, 1000, 500000, 50);
}
diff --git a/chromium/base/metrics/histogram_functions.h b/chromium/base/metrics/histogram_functions.h
index 16e1d7e0a81..0ecfae38733 100644
--- a/chromium/base/metrics/histogram_functions.h
+++ b/chromium/base/metrics/histogram_functions.h
@@ -46,38 +46,15 @@ BASE_EXPORT void UmaHistogramExactLinear(const char* name,
// Sample usage:
// // These values are persisted to logs. Entries should not be renumbered and
// // numeric values should never be reused.
-// enum class MyEnum {
-// FIRST_VALUE = 0,
-// SECOND_VALUE = 1,
-// ...
-// FINAL_VALUE = N,
-// COUNT
+// enum class NewTabPageAction {
+// kUseOmnibox = 0,
+// kClickTitle = 1,
+// // kUseSearchbox = 2, // no longer used, combined into omnibox
+// kOpenBookmark = 3,
+// kMaxValue = kOpenBookmark,
// };
// base::UmaHistogramEnumeration("My.Enumeration",
-// MyEnum::SOME_VALUE, MyEnum::COUNT);
-//
-// Note: The value in |sample| must be strictly less than |enum_size|.
-template <typename T>
-void UmaHistogramEnumeration(const std::string& name, T sample, T enum_size) {
- static_assert(std::is_enum<T>::value,
- "Non enum passed to UmaHistogramEnumeration");
- DCHECK_LE(static_cast<uintmax_t>(enum_size), static_cast<uintmax_t>(INT_MAX));
- DCHECK_LT(static_cast<uintmax_t>(sample), static_cast<uintmax_t>(enum_size));
- return UmaHistogramExactLinear(name, static_cast<int>(sample),
- static_cast<int>(enum_size));
-}
-template <typename T>
-void UmaHistogramEnumeration(const char* name, T sample, T enum_size) {
- static_assert(std::is_enum<T>::value,
- "Non enum passed to UmaHistogramEnumeration");
- DCHECK_LE(static_cast<uintmax_t>(enum_size), static_cast<uintmax_t>(INT_MAX));
- DCHECK_LT(static_cast<uintmax_t>(sample), static_cast<uintmax_t>(enum_size));
- return UmaHistogramExactLinear(name, static_cast<int>(sample),
- static_cast<int>(enum_size));
-}
-
-// Same as above, but uses T::kMaxValue as the inclusive maximum value of the
-// enum.
+//                               NewTabPageAction::kClickTitle);
template <typename T>
void UmaHistogramEnumeration(const std::string& name, T sample) {
static_assert(std::is_enum<T>::value,
@@ -101,6 +78,41 @@ void UmaHistogramEnumeration(const char* name, T sample) {
static_cast<int>(T::kMaxValue) + 1);
}
+// Some legacy histograms may manually specify a max value, with a kCount,
+// COUNT, kMaxValue, or MAX_VALUE sentinel like so:
+// // These values are persisted to logs. Entries should not be renumbered and
+// // numeric values should never be reused.
+// enum class NewTabPageAction {
+// kUseOmnibox = 0,
+// kClickTitle = 1,
+// // kUseSearchbox = 2, // no longer used, combined into omnibox
+// kOpenBookmark = 3,
+// kMaxValue,
+// };
+// base::UmaHistogramEnumeration("My.Enumeration",
+//                               NewTabPageAction::kClickTitle,
+//                               NewTabPageAction::kMaxValue);
+// Note: The value in |sample| must be strictly less than |kMaxValue|. This is
+// otherwise functionally equivalent to the above.
+template <typename T>
+void UmaHistogramEnumeration(const std::string& name, T sample, T enum_size) {
+ static_assert(std::is_enum<T>::value,
+ "Non enum passed to UmaHistogramEnumeration");
+ DCHECK_LE(static_cast<uintmax_t>(enum_size), static_cast<uintmax_t>(INT_MAX));
+ DCHECK_LT(static_cast<uintmax_t>(sample), static_cast<uintmax_t>(enum_size));
+ return UmaHistogramExactLinear(name, static_cast<int>(sample),
+ static_cast<int>(enum_size));
+}
+template <typename T>
+void UmaHistogramEnumeration(const char* name, T sample, T enum_size) {
+ static_assert(std::is_enum<T>::value,
+ "Non enum passed to UmaHistogramEnumeration");
+ DCHECK_LE(static_cast<uintmax_t>(enum_size), static_cast<uintmax_t>(INT_MAX));
+ DCHECK_LT(static_cast<uintmax_t>(sample), static_cast<uintmax_t>(enum_size));
+ return UmaHistogramExactLinear(name, static_cast<int>(sample),
+ static_cast<int>(enum_size));
+}
+
// For adding boolean sample to histogram.
// Sample usage:
// base::UmaHistogramBoolean("My.Boolean", true)
@@ -183,6 +195,16 @@ BASE_EXPORT void UmaHistogramMicrosecondsTimes(const std::string& name,
BASE_EXPORT void UmaHistogramMicrosecondsTimes(const char* name,
TimeDelta sample);
+// For microseconds timings from 1 microsecond up to 10 ms (50 buckets).
+// TODO(crbug.com/983261) Remove this method after moving to
+// UmaHistogramMicrosecondsTimes.
+BASE_EXPORT void UmaHistogramMicrosecondsTimesUnderTenMilliseconds(
+ const std::string& name,
+ TimeDelta sample);
+BASE_EXPORT void UmaHistogramMicrosecondsTimesUnderTenMilliseconds(
+ const char* name,
+ TimeDelta sample);
+
// For recording memory related histograms.
// Used to measure common KB-granularity memory stats. Range is up to 500M.
BASE_EXPORT void UmaHistogramMemoryKB(const std::string& name, int sample);
diff --git a/chromium/base/metrics/histogram_macros.h b/chromium/base/metrics/histogram_macros.h
index 892f1a6cba2..7a1f57539b7 100644
--- a/chromium/base/metrics/histogram_macros.h
+++ b/chromium/base/metrics/histogram_macros.h
@@ -310,6 +310,12 @@
// For details on usage, see the documentation on the non-stability equivalents.
+#define UMA_STABILITY_HISTOGRAM_BOOLEAN(name, sample) \
+ STATIC_HISTOGRAM_POINTER_BLOCK( \
+ name, AddBoolean(sample), \
+ base::BooleanHistogram::FactoryGet( \
+ name, base::HistogramBase::kUmaStabilityHistogramFlag))
+
#define UMA_STABILITY_HISTOGRAM_COUNTS_100(name, sample) \
UMA_STABILITY_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 100, 50)
@@ -324,6 +330,19 @@
name, sample, enum_max, \
base::HistogramBase::kUmaStabilityHistogramFlag)
+#define UMA_STABILITY_HISTOGRAM_LONG_TIMES(name, sample) \
+ STATIC_HISTOGRAM_POINTER_BLOCK( \
+ name, AddTimeMillisecondsGranularity(sample), \
+ base::Histogram::FactoryTimeGet( \
+ name, base::TimeDelta::FromMilliseconds(1), \
+ base::TimeDelta::FromHours(1), 50, \
+ base::HistogramBase::kUmaStabilityHistogramFlag))
+
+#define UMA_STABILITY_HISTOGRAM_PERCENTAGE(name, percent_as_int) \
+ INTERNAL_HISTOGRAM_EXACT_LINEAR_WITH_FLAG( \
+ name, percent_as_int, 101, \
+ base::HistogramBase::kUmaStabilityHistogramFlag)
+
//------------------------------------------------------------------------------
// Histogram instantiation helpers.
diff --git a/chromium/base/metrics/histogram_snapshot_manager_unittest.cc b/chromium/base/metrics/histogram_snapshot_manager_unittest.cc
index 1e2c599ec6b..2fbf182dfbd 100644
--- a/chromium/base/metrics/histogram_snapshot_manager_unittest.cc
+++ b/chromium/base/metrics/histogram_snapshot_manager_unittest.cc
@@ -25,8 +25,7 @@ class HistogramFlattenerDeltaRecorder : public HistogramFlattener {
const HistogramSamples& snapshot) override {
recorded_delta_histogram_names_.push_back(histogram.histogram_name());
// Use CHECK instead of ASSERT to get full stack-trace and thus origin.
- CHECK(!ContainsKey(recorded_delta_histogram_sum_,
- histogram.histogram_name()));
+ CHECK(!Contains(recorded_delta_histogram_sum_, histogram.histogram_name()));
// Keep pointer to snapshot for testing. This really isn't ideal but the
// snapshot-manager keeps the snapshot alive until it's "forgotten".
recorded_delta_histogram_sum_[histogram.histogram_name()] = snapshot.sum();
@@ -42,7 +41,7 @@ class HistogramFlattenerDeltaRecorder : public HistogramFlattener {
}
int64_t GetRecordedDeltaHistogramSum(const std::string& name) {
- EXPECT_TRUE(ContainsKey(recorded_delta_histogram_sum_, name));
+ EXPECT_TRUE(Contains(recorded_delta_histogram_sum_, name));
return recorded_delta_histogram_sum_[name];
}
diff --git a/chromium/base/metrics/persistent_sample_map.cc b/chromium/base/metrics/persistent_sample_map.cc
index e07b7167745..ba73128b7d5 100644
--- a/chromium/base/metrics/persistent_sample_map.cc
+++ b/chromium/base/metrics/persistent_sample_map.cc
@@ -276,7 +276,7 @@ Count* PersistentSampleMap::ImportSamples(Sample until_value,
DCHECK_EQ(id(), record->id);
// Check if the record's value is already known.
- if (!ContainsKey(sample_counts_, record->value)) {
+ if (!Contains(sample_counts_, record->value)) {
// No: Add it to map of known values.
sample_counts_[record->value] = &record->count;
} else {
diff --git a/chromium/base/metrics/statistics_recorder_unittest.cc b/chromium/base/metrics/statistics_recorder_unittest.cc
index 9609d9083a9..35bcff5457a 100644
--- a/chromium/base/metrics/statistics_recorder_unittest.cc
+++ b/chromium/base/metrics/statistics_recorder_unittest.cc
@@ -645,7 +645,7 @@ class TestHistogramProvider : public StatisticsRecorder::HistogramProvider {
public:
explicit TestHistogramProvider(
std::unique_ptr<PersistentHistogramAllocator> allocator)
- : allocator_(std::move(allocator)), weak_factory_(this) {
+ : allocator_(std::move(allocator)) {
StatisticsRecorder::RegisterHistogramProvider(weak_factory_.GetWeakPtr());
}
@@ -661,7 +661,7 @@ class TestHistogramProvider : public StatisticsRecorder::HistogramProvider {
private:
std::unique_ptr<PersistentHistogramAllocator> allocator_;
- WeakPtrFactory<TestHistogramProvider> weak_factory_;
+ WeakPtrFactory<TestHistogramProvider> weak_factory_{this};
DISALLOW_COPY_AND_ASSIGN(TestHistogramProvider);
};
diff --git a/chromium/base/metrics/ukm_source_id.h b/chromium/base/metrics/ukm_source_id.h
index 6b259c7a714..51f3e1183db 100644
--- a/chromium/base/metrics/ukm_source_id.h
+++ b/chromium/base/metrics/ukm_source_id.h
@@ -17,11 +17,24 @@ namespace base {
class BASE_EXPORT UkmSourceId {
public:
enum class Type : int64_t {
+ // Source ids of this type are created via ukm::AssignNewSourceId, to denote
+    // a 'custom' source other than the 3 types below. Sources of this type have
+ // additional restrictions with logging, as determined by
+ // IsWhitelistedSourceId.
UKM = 0,
+ // Sources created by navigation. They will be kept in memory as long as
+ // the associated tab is still alive and the number of sources are within
+ // the max threshold.
NAVIGATION_ID = 1,
+ // Source ID used by AppLaunchEventLogger::Log. A new source of this type
+ // and associated events are expected to be recorded within the same report
+ // interval; it will not be kept in memory between different reports.
APP_ID = 2,
// Source ID for background events that don't have an open tab but the
- // associated URL is still present in the browser's history.
+ // associated URL is still present in the browser's history. A new source of
+ // this type and associated events are expected to be recorded within the
+ // same report interval; it will not be kept in memory between different
+ // reports.
HISTORY_ID = 3,
};
diff --git a/chromium/base/numerics/clamped_math.h b/chromium/base/numerics/clamped_math.h
index a3df69f7ab8..37a4cfd22a8 100644
--- a/chromium/base/numerics/clamped_math.h
+++ b/chromium/base/numerics/clamped_math.h
@@ -192,12 +192,14 @@ constexpr ClampedNumeric<typename UnderlyingType<T>::type> MakeClampedNum(
return value;
}
+#if !BASE_NUMERICS_DISABLE_OSTREAM_OPERATORS
// Overload the ostream output operator to make logging work nicely.
template <typename T>
std::ostream& operator<<(std::ostream& os, const ClampedNumeric<T>& value) {
os << static_cast<T>(value);
return os;
}
+#endif
// These implement the variadic wrapper for the math operations.
template <template <typename, typename, typename> class M,
diff --git a/chromium/base/numerics/safe_conversions.h b/chromium/base/numerics/safe_conversions.h
index 9a52a6f5d17..b9636fec428 100644
--- a/chromium/base/numerics/safe_conversions.h
+++ b/chromium/base/numerics/safe_conversions.h
@@ -8,7 +8,6 @@
#include <stddef.h>
#include <limits>
-#include <ostream>
#include <type_traits>
#include "base/numerics/safe_conversions_impl.h"
@@ -20,6 +19,10 @@
#define BASE_HAS_OPTIMIZED_SAFE_CONVERSIONS (0)
#endif
+#if !BASE_NUMERICS_DISABLE_OSTREAM_OPERATORS
+#include <ostream>
+#endif
+
namespace base {
namespace internal {
@@ -308,12 +311,14 @@ constexpr StrictNumeric<typename UnderlyingType<T>::type> MakeStrictNum(
return value;
}
+#if !BASE_NUMERICS_DISABLE_OSTREAM_OPERATORS
// Overload the ostream output operator to make logging work nicely.
template <typename T>
std::ostream& operator<<(std::ostream& os, const StrictNumeric<T>& value) {
os << static_cast<T>(value);
return os;
}
+#endif
#define BASE_NUMERIC_COMPARISON_OPERATORS(CLASS, NAME, OP) \
template <typename L, typename R, \
diff --git a/chromium/base/numerics/safe_math_shared_impl.h b/chromium/base/numerics/safe_math_shared_impl.h
index 583c487a42e..3556b1ea814 100644
--- a/chromium/base/numerics/safe_math_shared_impl.h
+++ b/chromium/base/numerics/safe_math_shared_impl.h
@@ -17,12 +17,15 @@
#include "base/numerics/safe_conversions.h"
+#ifdef __asmjs__
+// Optimized safe math instructions are incompatible with asmjs.
+#define BASE_HAS_OPTIMIZED_SAFE_MATH (0)
// Where available use builtin math overflow support on Clang and GCC.
-#if !defined(__native_client__) && \
- ((defined(__clang__) && \
- ((__clang_major__ > 3) || \
- (__clang_major__ == 3 && __clang_minor__ >= 4))) || \
- (defined(__GNUC__) && __GNUC__ >= 5))
+#elif !defined(__native_client__) && \
+ ((defined(__clang__) && \
+ ((__clang_major__ > 3) || \
+ (__clang_major__ == 3 && __clang_minor__ >= 4))) || \
+ (defined(__GNUC__) && __GNUC__ >= 5))
#include "base/numerics/safe_math_clang_gcc_impl.h"
#define BASE_HAS_OPTIMIZED_SAFE_MATH (1)
#else
diff --git a/chromium/base/observer_list_threadsafe.h b/chromium/base/observer_list_threadsafe.h
index 5036ffca1f4..6388ba4c5e6 100644
--- a/chromium/base/observer_list_threadsafe.h
+++ b/chromium/base/observer_list_threadsafe.h
@@ -107,7 +107,7 @@ class ObserverListThreadSafe : public internal::ObserverListThreadSafeBase {
AutoLock auto_lock(lock_);
// Add |observer| to the list of observers.
- DCHECK(!ContainsKey(observers_, observer));
+ DCHECK(!Contains(observers_, observer));
const scoped_refptr<SequencedTaskRunner> task_runner =
SequencedTaskRunnerHandle::Get();
observers_[observer] = task_runner;
diff --git a/chromium/base/observer_list_threadsafe_unittest.cc b/chromium/base/observer_list_threadsafe_unittest.cc
index 7f3db3e33a3..d2eaf9f2c12 100644
--- a/chromium/base/observer_list_threadsafe_unittest.cc
+++ b/chromium/base/observer_list_threadsafe_unittest.cc
@@ -75,13 +75,12 @@ class AddRemoveThread : public Foo {
public:
AddRemoveThread(ObserverListThreadSafe<Foo>* list, bool notify)
: list_(list),
- task_runner_(CreateSingleThreadTaskRunnerWithTraits(
- TaskTraits(),
+ task_runner_(CreateSingleThreadTaskRunner(
+ TaskTraits(ThreadPool()),
SingleThreadTaskRunnerThreadMode::DEDICATED)),
in_list_(false),
start_(Time::Now()),
- do_notifies_(notify),
- weak_factory_(this) {
+ do_notifies_(notify) {
task_runner_->PostTask(
FROM_HERE,
base::BindOnce(&AddRemoveThread::AddTask, weak_factory_.GetWeakPtr()));
@@ -132,7 +131,7 @@ class AddRemoveThread : public Foo {
bool do_notifies_; // Whether these threads should do notifications.
- base::WeakPtrFactory<AddRemoveThread> weak_factory_;
+ base::WeakPtrFactory<AddRemoveThread> weak_factory_{this};
};
} // namespace
@@ -375,8 +374,8 @@ class SequenceVerificationObserver : public Foo {
TEST(ObserverListThreadSafeTest, NotificationOnValidSequence) {
test::ScopedTaskEnvironment scoped_task_environment;
- auto task_runner_1 = CreateSequencedTaskRunnerWithTraits(TaskTraits());
- auto task_runner_2 = CreateSequencedTaskRunnerWithTraits(TaskTraits());
+ auto task_runner_1 = CreateSequencedTaskRunner(TaskTraits(ThreadPool()));
+ auto task_runner_2 = CreateSequencedTaskRunner(TaskTraits(ThreadPool()));
auto observer_list = MakeRefCounted<ObserverListThreadSafe<Foo>>();
@@ -463,7 +462,7 @@ TEST(ObserverListThreadSafeTest, RemoveWhileNotificationIsRunning) {
// ThreadPool can safely use |barrier|.
test::ScopedTaskEnvironment scoped_task_environment;
- CreateSequencedTaskRunnerWithTraits({MayBlock()})
+ CreateSequencedTaskRunner({ThreadPool(), MayBlock()})
->PostTask(FROM_HERE,
base::BindOnce(&ObserverListThreadSafe<Foo>::AddObserver,
observer_list, Unretained(&observer)));
diff --git a/chromium/base/observer_list_types.cc b/chromium/base/observer_list_types.cc
index a4441c280e7..e09ee2a4f8e 100644
--- a/chromium/base/observer_list_types.cc
+++ b/chromium/base/observer_list_types.cc
@@ -6,7 +6,7 @@
namespace base {
-CheckedObserver::CheckedObserver() : factory_(this) {}
+CheckedObserver::CheckedObserver() {}
CheckedObserver::~CheckedObserver() = default;
bool CheckedObserver::IsInObserverList() const {
diff --git a/chromium/base/observer_list_types.h b/chromium/base/observer_list_types.h
index f2d9e85c95c..c47ca0ad758 100644
--- a/chromium/base/observer_list_types.h
+++ b/chromium/base/observer_list_types.h
@@ -39,7 +39,7 @@ class BASE_EXPORT CheckedObserver {
friend class internal::CheckedObserverAdapter;
// Must be mutable to allow ObserverList<const Foo>.
- mutable WeakPtrFactory<CheckedObserver> factory_;
+ mutable WeakPtrFactory<CheckedObserver> factory_{this};
DISALLOW_COPY_AND_ASSIGN(CheckedObserver);
};
diff --git a/chromium/base/one_shot_event.cc b/chromium/base/one_shot_event.cc
index addd0e188cf..f96f7a13894 100644
--- a/chromium/base/one_shot_event.cc
+++ b/chromium/base/one_shot_event.cc
@@ -7,7 +7,6 @@
#include <stddef.h>
#include "base/callback.h"
-#include "base/lazy_instance.h"
#include "base/location.h"
#include "base/single_thread_task_runner.h"
#include "base/task_runner.h"
diff --git a/chromium/base/one_shot_event_unittest.cc b/chromium/base/one_shot_event_unittest.cc
index f7e54fce53a..cf5a58abb9f 100644
--- a/chromium/base/one_shot_event_unittest.cc
+++ b/chromium/base/one_shot_event_unittest.cc
@@ -5,9 +5,9 @@
#include "base/one_shot_event.h"
#include "base/bind.h"
-#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
#include "base/single_thread_task_runner.h"
+#include "base/test/scoped_task_environment.h"
#include "base/test/test_simple_task_runner.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -98,7 +98,7 @@ TEST(OneShotEventTest, PostDefaultsToCurrentMessageLoop) {
OneShotEvent event;
scoped_refptr<base::TestSimpleTaskRunner> runner(
new base::TestSimpleTaskRunner);
- base::MessageLoop loop;
+ base::test::ScopedTaskEnvironment scoped_task_environment;
int runner_i = 0;
int loop_i = 0;
diff --git a/chromium/base/optional.h b/chromium/base/optional.h
index b54dd8b2709..36ae36fc98b 100644
--- a/chromium/base/optional.h
+++ b/chromium/base/optional.h
@@ -427,6 +427,28 @@ class OPTIONAL_DECLSPEC_EMPTY_BASES Optional
std::is_copy_assignable<T>::value>,
public internal::MoveAssignable<std::is_move_constructible<T>::value &&
std::is_move_assignable<T>::value> {
+ private:
+ // Disable some versions of T that are ill-formed.
+ // See: https://timsong-cpp.github.io/cppwp/n4659/optional#syn-1
+ static_assert(
+ !std::is_same<internal::RemoveCvRefT<T>, in_place_t>::value,
+ "instantiation of base::Optional with in_place_t is ill-formed");
+ static_assert(!std::is_same<internal::RemoveCvRefT<T>, nullopt_t>::value,
+ "instantiation of base::Optional with nullopt_t is ill-formed");
+ static_assert(
+ !std::is_reference<T>::value,
+ "instantiation of base::Optional with a reference type is ill-formed");
+ // See: https://timsong-cpp.github.io/cppwp/n4659/optional#optional-3
+ static_assert(std::is_destructible<T>::value,
+ "instantiation of base::Optional with a non-destructible type "
+ "is ill-formed");
+ // Arrays are explicitly disallowed because for arrays of known bound
+ // is_destructible is of undefined value.
+ // See: https://en.cppreference.com/w/cpp/types/is_destructible
+ static_assert(
+ !std::is_array<T>::value,
+ "instantiation of base::Optional with an array type is ill-formed");
+
public:
#undef OPTIONAL_DECLSPEC_EMPTY_BASES
using value_type = T;
diff --git a/chromium/base/optional_unittest.cc b/chromium/base/optional_unittest.cc
index a06dd43ba32..dbf1ce114a2 100644
--- a/chromium/base/optional_unittest.cc
+++ b/chromium/base/optional_unittest.cc
@@ -102,6 +102,7 @@ void swap(TestObject& lhs, TestObject& rhs) {
}
class NonTriviallyDestructible {
+ public:
~NonTriviallyDestructible() {}
};
diff --git a/chromium/base/optional_unittest.nc b/chromium/base/optional_unittest.nc
index 62c0196765c..40b976eabdf 100644
--- a/chromium/base/optional_unittest.nc
+++ b/chromium/base/optional_unittest.nc
@@ -60,6 +60,107 @@ void WontCompile() {
([](Optional<Test> param) {})(1);
}
+#elif defined(NCTEST_ILL_FORMED_IN_PLACET_T) // [r"instantiation of base::Optional with in_place_t is ill-formed"]
+
+// Optional<T> is ill-formed if T is `in_place_t`.
+void WontCompile() {
+ Optional<base::in_place_t> optional;
+ optional.has_value();
+}
+
+#elif defined(NCTEST_ILL_FORMED_CONST_IN_PLACET_T) // [r"instantiation of base::Optional with in_place_t is ill-formed"]
+
+// Optional<T> is ill-formed if T is `const in_place_t`.
+void WontCompile() {
+ Optional<const base::in_place_t> optional;
+ optional.has_value();
+}
+
+#elif defined(NCTEST_ILL_FORMED_NULLOPT_T) // [r"instantiation of base::Optional with nullopt_t is ill-formed"]
+
+// Optional<T> is ill-formed if T is `nullopt_t`.
+void WontCompile() {
+  Optional<base::nullopt_t> optional;
+ optional.has_value();
+}
+
+#elif defined(NCTEST_ILL_FORMED_CONST_NULLOPT_T) // [r"instantiation of base::Optional with nullopt_t is ill-formed"]
+
+// Optional<T> is ill-formed if T is `const nullopt_t`.
+void WontCompile() {
+ Optional<const base::nullopt_t> optional;
+ optional.has_value();
+}
+
+#elif defined(NCTEST_ILL_FORMED_NON_DESTRUCTIBLE) // [r"instantiation of base::Optional with a non-destructible type is ill-formed"]
+
+// Optional<T> is ill-formed if T is non-destructible.
+void WontCompile() {
+ struct T {
+ private:
+ ~T();
+ };
+
+ static_assert(!std::is_destructible<T>::value, "T is not destructible");
+
+ Optional<T> optional;
+ optional.has_value();
+}
+
+// TODO(crbug.com/967722): the error message should be about the instantiation of an
+// ill-formed base::Optional.
+#elif defined(NCTEST_ILL_FORMED_REFERENCE) // [r"fatal error: union member 'value_' has reference type 'int &'"]
+
+// Optional<T> is ill-formed if T is a reference.
+void WontCompile() {
+ using T = int&;
+
+ static_assert(std::is_reference<T>::value, "T is a reference");
+
+ Optional<T> optional;
+ optional.has_value();
+}
+
+// TODO(crbug.com/967722): the error message should be about the instantiation of an
+// ill-formed base::Optional.
+#elif defined(NCTEST_ILL_FORMED_CONST_REFERENCE) // [r"fatal error: union member 'value_' has reference type 'const int &'"]
+
+// Optional<T> is ill-formed if T is a const reference.
+void WontCompile() {
+ using T = const int&;
+
+ static_assert(std::is_reference<T>::value, "T is a reference");
+
+ Optional<T> optional;
+ optional.has_value();
+}
+
+#elif defined(NCTEST_ILL_FORMED_FIXED_LENGTH_ARRAY) // [r"instantiation of base::Optional with an array type is ill-formed"]
+
+// Optional<T> is ill-formed if T is a fixed length array.
+void WontCompile() {
+ using T = char[4];
+
+ static_assert(std::is_array<T>::value, "T is an array");
+
+ Optional<T> optional;
+ optional.has_value();
+}
+
+// TODO(crbug.com/967722): the error message should be about the instantiation of an
+// ill-formed base::Optional.
+#elif defined(NCTEST_ILL_FORMED_UNDEFINED_LENGTH_ARRAY) // [r"fatal error: base class 'OptionalStorageBase' has a flexible array member"]
+
+// Optional<T> is ill-formed if T is an undefined length array.
+void WontCompile() {
+ using T = char[];
+
+ static_assert(std::is_array<T>::value, "T is an array");
+
+ Optional<T> optional;
+ optional.has_value();
+}
+
#endif
} // namespace base
diff --git a/chromium/base/pickle_fuzzer.cc b/chromium/base/pickle_fuzzer.cc
index 8bd0177b052..76372fea53f 100644
--- a/chromium/base/pickle_fuzzer.cc
+++ b/chromium/base/pickle_fuzzer.cc
@@ -4,7 +4,7 @@
#include "base/macros.h"
#include "base/pickle.h"
-#include "base/test/fuzzed_data_provider.h"
+#include "third_party/libFuzzer/src/utils/FuzzedDataProvider.h"
namespace {
constexpr int kIterations = 16;
@@ -20,7 +20,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
}
// Use the first kReadControlBytes bytes of the fuzzer input to control how
// the pickled data is read.
- base::FuzzedDataProvider data_provider(data, kReadControlBytes);
+ FuzzedDataProvider data_provider(data, kReadControlBytes);
data += kReadControlBytes;
size -= kReadControlBytes;
diff --git a/chromium/base/power_monitor/power_monitor.cc b/chromium/base/power_monitor/power_monitor.cc
index b0b92e6bda7..2d3dd073aef 100644
--- a/chromium/base/power_monitor/power_monitor.cc
+++ b/chromium/base/power_monitor/power_monitor.cc
@@ -11,61 +11,74 @@
namespace base {
-static PowerMonitor* g_power_monitor = nullptr;
-
-PowerMonitor::PowerMonitor(std::unique_ptr<PowerMonitorSource> source)
- : observers_(new ObserverListThreadSafe<PowerObserver>()),
- source_(std::move(source)) {
- DCHECK(!g_power_monitor);
- g_power_monitor = this;
-}
-
-PowerMonitor::~PowerMonitor() {
- source_->Shutdown();
- DCHECK_EQ(this, g_power_monitor);
- g_power_monitor = nullptr;
+void PowerMonitor::Initialize(std::unique_ptr<PowerMonitorSource> source) {
+ DCHECK(!IsInitialized());
+ GetInstance()->source_ = std::move(source);
}
-// static
-PowerMonitor* PowerMonitor::Get() {
- return g_power_monitor;
+bool PowerMonitor::IsInitialized() {
+ return GetInstance()->source_.get() != nullptr;
}
-void PowerMonitor::AddObserver(PowerObserver* obs) {
- observers_->AddObserver(obs);
+bool PowerMonitor::AddObserver(PowerObserver* obs) {
+ PowerMonitor* power_monitor = GetInstance();
+ if (!IsInitialized())
+ return false;
+ power_monitor->observers_->AddObserver(obs);
+ return true;
}
void PowerMonitor::RemoveObserver(PowerObserver* obs) {
- observers_->RemoveObserver(obs);
+ GetInstance()->observers_->RemoveObserver(obs);
}
PowerMonitorSource* PowerMonitor::Source() {
- return source_.get();
+ return GetInstance()->source_.get();
}
bool PowerMonitor::IsOnBatteryPower() {
- return source_->IsOnBatteryPower();
+ DCHECK(IsInitialized());
+ return GetInstance()->source_->IsOnBatteryPower();
+}
+
+void PowerMonitor::ShutdownForTesting() {
+ PowerMonitor::GetInstance()->observers_->AssertEmpty();
+ GetInstance()->source_ = nullptr;
}
void PowerMonitor::NotifyPowerStateChange(bool battery_in_use) {
+ DCHECK(IsInitialized());
DVLOG(1) << "PowerStateChange: " << (battery_in_use ? "On" : "Off")
<< " battery";
- observers_->Notify(FROM_HERE, &PowerObserver::OnPowerStateChange,
- battery_in_use);
+ GetInstance()->observers_->Notify(
+ FROM_HERE, &PowerObserver::OnPowerStateChange, battery_in_use);
}
void PowerMonitor::NotifySuspend() {
+ DCHECK(IsInitialized());
TRACE_EVENT_INSTANT0("base", "PowerMonitor::NotifySuspend",
TRACE_EVENT_SCOPE_GLOBAL);
DVLOG(1) << "Power Suspending";
- observers_->Notify(FROM_HERE, &PowerObserver::OnSuspend);
+ GetInstance()->observers_->Notify(FROM_HERE, &PowerObserver::OnSuspend);
}
void PowerMonitor::NotifyResume() {
+ DCHECK(IsInitialized());
TRACE_EVENT_INSTANT0("base", "PowerMonitor::NotifyResume",
TRACE_EVENT_SCOPE_GLOBAL);
DVLOG(1) << "Power Resuming";
- observers_->Notify(FROM_HERE, &PowerObserver::OnResume);
+ GetInstance()->observers_->Notify(FROM_HERE, &PowerObserver::OnResume);
}
+PowerMonitor* PowerMonitor::GetInstance() {
+ static base::NoDestructor<PowerMonitor> power_monitor;
+ return power_monitor.get();
+}
+
+PowerMonitor::PowerMonitor()
+ : observers_(
+ base::MakeRefCounted<ObserverListThreadSafe<PowerObserver>>()) {}
+
+PowerMonitor::~PowerMonitor() = default;
+
} // namespace base
diff --git a/chromium/base/power_monitor/power_monitor.h b/chromium/base/power_monitor/power_monitor.h
index b8e02e50ead..1a2f01581ee 100644
--- a/chromium/base/power_monitor/power_monitor.h
+++ b/chromium/base/power_monitor/power_monitor.h
@@ -8,6 +8,7 @@
#include "base/base_export.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
+#include "base/no_destructor.h"
#include "base/observer_list_threadsafe.h"
#include "base/power_monitor/power_observer.h"
@@ -16,34 +17,61 @@ namespace base {
class PowerMonitorSource;
// A class used to monitor the power state change and notify the observers about
-// the change event.
+// the change event. The threading model of this class is as follows:
+// Once initialized, it is threadsafe. However, the client must ensure that
+// initialization happens before any other methods are invoked, including
+// IsInitialized(). IsInitialized() exists only as a convenience for detection
+// of test contexts where the PowerMonitor global is never created.
class BASE_EXPORT PowerMonitor {
public:
- // Takes ownership of |source|.
- explicit PowerMonitor(std::unique_ptr<PowerMonitorSource> source);
- ~PowerMonitor();
+ // Initializes global PowerMonitor state. Takes ownership of |source|, which
+ // will be leaked on process teardown. May only be called once. Not threadsafe
+ // - no other PowerMonitor methods may be called on any thread while calling
+ // Initialize(). |source| must not be nullptr.
+ static void Initialize(std::unique_ptr<PowerMonitorSource> source);
- // Get the process-wide PowerMonitor (if not present, returns NULL).
- static PowerMonitor* Get();
+ // Returns true if Initialize() has been called. Safe to call on any thread,
+ // but must not be called while Initialize() or ShutdownForTesting() is being
+ // invoked.
+ static bool IsInitialized();
// Add and remove an observer.
// Can be called from any thread. |observer| is notified on the sequence
// from which it was registered.
// Must not be called from within a notification callback.
- void AddObserver(PowerObserver* observer);
- void RemoveObserver(PowerObserver* observer);
+ //
+ // AddObserver() fails and returns false if PowerMonitor::Initialize() has not
+ // been invoked. Failure should only happen in unit tests, where the
+ // PowerMonitor is generally not initialized. It is safe to call
+ // RemoveObserver with a PowerObserver that was not successfully added as an
+ // observer.
+ static bool AddObserver(PowerObserver* observer);
+ static void RemoveObserver(PowerObserver* observer);
+
+ // Is the computer currently on battery power. May only be called if the
+ // PowerMonitor has been initialized.
+ static bool IsOnBatteryPower();
- // Is the computer currently on battery power.
- bool IsOnBatteryPower();
+ // Uninitializes the PowerMonitor. Should be called at the end of any unit
+ // test that mocks out the PowerMonitor, to avoid affecting subsequent tests.
+ // There must be no live PowerObservers when invoked. Safe to call even if the
+ // PowerMonitor hasn't been initialized.
+ static void ShutdownForTesting();
private:
friend class PowerMonitorSource;
+ friend class base::NoDestructor<PowerMonitor>;
+
+ PowerMonitor();
+ ~PowerMonitor();
+
+ static PowerMonitorSource* Source();
- PowerMonitorSource* Source();
+ static void NotifyPowerStateChange(bool battery_in_use);
+ static void NotifySuspend();
+ static void NotifyResume();
- void NotifyPowerStateChange(bool battery_in_use);
- void NotifySuspend();
- void NotifyResume();
+ static PowerMonitor* GetInstance();
scoped_refptr<ObserverListThreadSafe<PowerObserver>> observers_;
std::unique_ptr<PowerMonitorSource> source_;
diff --git a/chromium/base/power_monitor/power_monitor_device_source.cc b/chromium/base/power_monitor/power_monitor_device_source.cc
index f42065499f2..5df58003375 100644
--- a/chromium/base/power_monitor/power_monitor_device_source.cc
+++ b/chromium/base/power_monitor/power_monitor_device_source.cc
@@ -25,9 +25,4 @@ PowerMonitorDeviceSource::~PowerMonitorDeviceSource() {
#endif
}
-// PowerMonitorDeviceSource does not need to take any special action to ensure
-// that it doesn't callback into PowerMonitor after this phase of shutdown has
-// completed.
-void PowerMonitorDeviceSource::Shutdown() {}
-
} // namespace base
diff --git a/chromium/base/power_monitor/power_monitor_device_source.h b/chromium/base/power_monitor/power_monitor_device_source.h
index fc19b2435f2..1e2c885fa45 100644
--- a/chromium/base/power_monitor/power_monitor_device_source.h
+++ b/chromium/base/power_monitor/power_monitor_device_source.h
@@ -28,8 +28,6 @@ class BASE_EXPORT PowerMonitorDeviceSource : public PowerMonitorSource {
PowerMonitorDeviceSource();
~PowerMonitorDeviceSource() override;
- void Shutdown() override;
-
#if defined(OS_MACOSX)
// Allocate system resources needed by the PowerMonitor class.
//
diff --git a/chromium/base/power_monitor/power_monitor_device_source_android.cc b/chromium/base/power_monitor/power_monitor_device_source_android.cc
index a9898aac3bf..4d9fc81879e 100644
--- a/chromium/base/power_monitor/power_monitor_device_source_android.cc
+++ b/chromium/base/power_monitor/power_monitor_device_source_android.cc
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "base/base_jni_headers/PowerMonitor_jni.h"
#include "base/power_monitor/power_monitor.h"
#include "base/power_monitor/power_monitor_device_source.h"
#include "base/power_monitor/power_monitor_source.h"
-#include "jni/PowerMonitor_jni.h"
namespace base {
diff --git a/chromium/base/power_monitor/power_monitor_source.cc b/chromium/base/power_monitor/power_monitor_source.cc
index d4757b0629f..794eee7b764 100644
--- a/chromium/base/power_monitor/power_monitor_source.cc
+++ b/chromium/base/power_monitor/power_monitor_source.cc
@@ -18,11 +18,10 @@ bool PowerMonitorSource::IsOnBatteryPower() {
}
void PowerMonitorSource::ProcessPowerEvent(PowerEvent event_id) {
- PowerMonitor* monitor = PowerMonitor::Get();
- if (!monitor)
+ if (!PowerMonitor::IsInitialized())
return;
- PowerMonitorSource* source = monitor->Source();
+ PowerMonitorSource* source = PowerMonitor::Source();
// Suppress duplicate notifications. Some platforms may
// send multiple notifications of the same event.
@@ -41,28 +40,29 @@ void PowerMonitorSource::ProcessPowerEvent(PowerEvent event_id) {
}
if (changed)
- monitor->NotifyPowerStateChange(new_on_battery_power);
+ PowerMonitor::NotifyPowerStateChange(new_on_battery_power);
}
break;
case RESUME_EVENT:
if (source->suspended_) {
source->suspended_ = false;
- monitor->NotifyResume();
+ PowerMonitor::NotifyResume();
}
break;
case SUSPEND_EVENT:
if (!source->suspended_) {
source->suspended_ = true;
- monitor->NotifySuspend();
+ PowerMonitor::NotifySuspend();
}
break;
}
}
void PowerMonitorSource::SetInitialOnBatteryPowerState(bool on_battery_power) {
- // Must only be called before a monitor exists, otherwise the caller should
- // have just used a normal ProcessPowerEvent(POWER_STATE_EVENT) call.
- DCHECK(!PowerMonitor::Get());
+ // Must only be called before an initialized PowerMonitor exists, otherwise
+ // the caller should have just used a normal
+ // ProcessPowerEvent(POWER_STATE_EVENT) call.
+ DCHECK(!PowerMonitor::Source());
on_battery_power_ = on_battery_power;
}
diff --git a/chromium/base/power_monitor/power_monitor_source.h b/chromium/base/power_monitor/power_monitor_source.h
index 5fceee918c6..7f59a644026 100644
--- a/chromium/base/power_monitor/power_monitor_source.h
+++ b/chromium/base/power_monitor/power_monitor_source.h
@@ -30,14 +30,6 @@ class BASE_EXPORT PowerMonitorSource {
// Is the computer currently on battery power. Can be called on any thread.
bool IsOnBatteryPower();
- // Called by PowerMonitor just before PowerMonitor destroys both itself and
- // this instance). After return from this call it is no longer safe for
- // subclasses to call into PowerMonitor (e.g., via PowerMonitor::Get(). Hence,
- // subclasses should take any necessary actions here to ensure that after
- // return from this invocation they will no longer make any calls on
- // PowerMonitor.
- virtual void Shutdown() = 0;
-
protected:
friend class PowerMonitorTest;
diff --git a/chromium/base/power_monitor/power_monitor_unittest.cc b/chromium/base/power_monitor/power_monitor_unittest.cc
index 71fb260c4d9..3d81d4f12cf 100644
--- a/chromium/base/power_monitor/power_monitor_unittest.cc
+++ b/chromium/base/power_monitor/power_monitor_unittest.cc
@@ -14,30 +14,28 @@ class PowerMonitorTest : public testing::Test {
protected:
PowerMonitorTest() {
power_monitor_source_ = new PowerMonitorTestSource();
- power_monitor_.reset(new PowerMonitor(
- std::unique_ptr<PowerMonitorSource>(power_monitor_source_)));
+ PowerMonitor::Initialize(
+ std::unique_ptr<PowerMonitorSource>(power_monitor_source_));
}
- ~PowerMonitorTest() override = default;
+ ~PowerMonitorTest() override { PowerMonitor::ShutdownForTesting(); }
PowerMonitorTestSource* source() { return power_monitor_source_; }
- PowerMonitor* monitor() { return power_monitor_.get(); }
private:
test::ScopedTaskEnvironment scoped_task_environment_;
PowerMonitorTestSource* power_monitor_source_;
- std::unique_ptr<PowerMonitor> power_monitor_;
DISALLOW_COPY_AND_ASSIGN(PowerMonitorTest);
};
// PowerMonitorSource is tightly coupled with the PowerMonitor, so this test
-// Will cover both classes
+// covers both classes.
TEST_F(PowerMonitorTest, PowerNotifications) {
const int kObservers = 5;
PowerMonitorTestObserver observers[kObservers];
for (auto& index : observers)
- monitor()->AddObserver(&index);
+ EXPECT_TRUE(PowerMonitor::AddObserver(&index));
// Sending resume when not suspended should have no effect.
source()->GenerateResumeEvent();
@@ -78,6 +76,9 @@ TEST_F(PowerMonitorTest, PowerNotifications) {
// Repeated indications the device is off battery power should be suppressed.
source()->GeneratePowerStateEvent(false);
EXPECT_EQ(observers[0].power_state_changes(), 2);
+
+ for (auto& index : observers)
+ PowerMonitor::RemoveObserver(&index);
}
} // namespace base
diff --git a/chromium/base/process/kill.cc b/chromium/base/process/kill.cc
index f641d3fe03e..54b99beb890 100644
--- a/chromium/base/process/kill.cc
+++ b/chromium/base/process/kill.cc
@@ -40,7 +40,7 @@ void EnsureProcessTerminated(Process process) {
if (process.WaitForExitWithTimeout(TimeDelta(), nullptr))
return;
- PostDelayedTaskWithTraits(
+ PostDelayedTask(
FROM_HERE,
{TaskPriority::BEST_EFFORT, TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN},
BindOnce(
diff --git a/chromium/base/process/launch_win.cc b/chromium/base/process/launch_win.cc
index e2f79b8c9ff..6bf9e188d66 100644
--- a/chromium/base/process/launch_win.cc
+++ b/chromium/base/process/launch_win.cc
@@ -26,6 +26,7 @@
#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
#include "base/system/sys_info.h"
+#include "base/threading/scoped_thread_priority.h"
#include "base/win/scoped_handle.h"
#include "base/win/scoped_process_information.h"
#include "base/win/startup_information.h"
@@ -200,6 +201,10 @@ Process LaunchProcess(const CommandLine& cmdline,
Process LaunchProcess(const string16& cmdline,
const LaunchOptions& options) {
+ // Mitigate the issues caused by loading DLLs on a background thread
+  // (https://crbug.com/973868).
+ base::ScopedThreadMayLoadLibraryOnBackgroundThread priority_boost(FROM_HERE);
+
win::StartupInformation startup_info_wrapper;
STARTUPINFO* startup_info = startup_info_wrapper.startup_info();
diff --git a/chromium/base/process/process_fuchsia.cc b/chromium/base/process/process_fuchsia.cc
index 8ecf9d618dc..f2605754471 100644
--- a/chromium/base/process/process_fuchsia.cc
+++ b/chromium/base/process/process_fuchsia.cc
@@ -13,7 +13,10 @@
#include "base/fuchsia/default_job.h"
#include "base/fuchsia/fuchsia_logging.h"
#include "base/strings/stringprintf.h"
+
+#if BUILDFLAG(CLANG_COVERAGE)
#include "base/test/clang_coverage.h"
+#endif
namespace base {
diff --git a/chromium/base/process/process_metrics.h b/chromium/base/process/process_metrics.h
index 16e97850cbb..259910914a8 100644
--- a/chromium/base/process/process_metrics.h
+++ b/chromium/base/process/process_metrics.h
@@ -98,37 +98,6 @@ class BASE_EXPORT ProcessMetrics {
BASE_EXPORT size_t GetResidentSetSize() const;
#endif
-#if defined(OS_CHROMEOS)
- // /proc/<pid>/totmaps is a syscall that returns memory summary statistics for
- // the process.
- // totmaps is a Linux specific concept, currently only being used on ChromeOS.
- // Do not attempt to extend this to other platforms.
- //
- struct TotalsSummary {
- size_t private_clean_kb;
- size_t private_dirty_kb;
- size_t swap_kb;
- };
- BASE_EXPORT TotalsSummary GetTotalsSummary() const;
-#endif
-
-#if defined(OS_MACOSX)
- struct TaskVMInfo {
- // Only available on macOS 10.12+.
- // Anonymous, non-discardable memory, including non-volatile IOKit.
- // Measured in bytes.
- uint64_t phys_footprint = 0;
-
- // Anonymous, non-discardable, non-compressed memory, excluding IOKit.
- // Measured in bytes.
- uint64_t internal = 0;
-
- // Compressed memory measured in bytes.
- uint64_t compressed = 0;
- };
- TaskVMInfo GetTaskVMInfo() const;
-#endif
-
// Returns the percentage of time spent executing, across all threads of the
// process, in the interval since the last time the method was called. Since
// this considers the total execution time across all threads in a process,
@@ -285,6 +254,9 @@ BASE_EXPORT size_t GetPageSize();
// at once. If the number is unavailable, a conservative best guess is returned.
BASE_EXPORT size_t GetMaxFds();
+// Returns the maximum number of handles that can be open at once per process.
+BASE_EXPORT size_t GetHandleLimit();
+
#if defined(OS_POSIX)
// Increases the file descriptor soft limit to |max_descriptors| or the OS hard
// limit, whichever is lower. If the limit is already higher than
diff --git a/chromium/base/process/process_metrics_fuchsia.cc b/chromium/base/process/process_metrics_fuchsia.cc
index ddfa9c61b52..3c7d14cd84e 100644
--- a/chromium/base/process/process_metrics_fuchsia.cc
+++ b/chromium/base/process/process_metrics_fuchsia.cc
@@ -12,6 +12,12 @@ size_t GetMaxFds() {
return FDIO_MAX_FD;
}
+size_t GetHandleLimit() {
+  // Duplicated from the internal Zircon kernel constant kMaxHandleCount
+ // (zircon/kernel/object/handle.cc).
+ return 256 * 1024u;
+}
+
size_t GetSystemCommitCharge() {
// TODO(https://crbug.com/926581): Fuchsia does not support this.
return 0;
diff --git a/chromium/base/process/process_metrics_linux.cc b/chromium/base/process/process_metrics_linux.cc
index 0c119bd2836..7ffccec2b3e 100644
--- a/chromium/base/process/process_metrics_linux.cc
+++ b/chromium/base/process/process_metrics_linux.cc
@@ -307,66 +307,6 @@ ProcessMetrics::ProcessMetrics(ProcessHandle process)
ProcessMetrics::ProcessMetrics(ProcessHandle process) : process_(process) {}
#endif
-#if defined(OS_CHROMEOS)
-// Private, Shared and Proportional working set sizes are obtained from
-// /proc/<pid>/totmaps
-ProcessMetrics::TotalsSummary ProcessMetrics::GetTotalsSummary() const {
- // The format of /proc/<pid>/totmaps is:
- //
- // Rss: 6120 kB
- // Pss: 3335 kB
- // Shared_Clean: 1008 kB
- // Shared_Dirty: 4012 kB
- // Private_Clean: 4 kB
- // Private_Dirty: 1096 kB
- // Referenced: XXX kB
- // Anonymous: XXX kB
- // AnonHugePages: XXX kB
- // Swap: XXX kB
- // Locked: XXX kB
- ProcessMetrics::TotalsSummary summary = {};
-
- const size_t kPrivate_CleanIndex = (4 * 3) + 1;
- const size_t kPrivate_DirtyIndex = (5 * 3) + 1;
- const size_t kSwapIndex = (9 * 3) + 1;
-
- std::string totmaps_data;
- {
- FilePath totmaps_file = internal::GetProcPidDir(process_).Append("totmaps");
- ThreadRestrictions::ScopedAllowIO allow_io;
- bool ret = ReadFileToString(totmaps_file, &totmaps_data);
- if (!ret || totmaps_data.length() == 0)
- return summary;
- }
-
- std::vector<std::string> totmaps_fields = SplitString(
- totmaps_data, kWhitespaceASCII, KEEP_WHITESPACE, SPLIT_WANT_NONEMPTY);
-
- DCHECK_EQ("Private_Clean:", totmaps_fields[kPrivate_CleanIndex - 1]);
- DCHECK_EQ("Private_Dirty:", totmaps_fields[kPrivate_DirtyIndex - 1]);
- DCHECK_EQ("Swap:", totmaps_fields[kSwapIndex-1]);
-
- int private_clean_kb = 0;
- int private_dirty_kb = 0;
- int swap_kb = 0;
- bool success = true;
- success &=
- StringToInt(totmaps_fields[kPrivate_CleanIndex], &private_clean_kb);
- success &=
- StringToInt(totmaps_fields[kPrivate_DirtyIndex], &private_dirty_kb);
- success &= StringToInt(totmaps_fields[kSwapIndex], &swap_kb);
-
- if (!success)
- return summary;
-
- summary.private_clean_kb = private_clean_kb;
- summary.private_dirty_kb = private_dirty_kb;
- summary.swap_kb = swap_kb;
-
- return summary;
-}
-#endif
-
size_t GetSystemCommitCharge() {
SystemMemoryInfoKB meminfo;
if (!GetSystemMemoryInfo(&meminfo))
diff --git a/chromium/base/process/process_metrics_mac.cc b/chromium/base/process/process_metrics_mac.cc
index cbb5e937a9e..d05e122897d 100644
--- a/chromium/base/process/process_metrics_mac.cc
+++ b/chromium/base/process/process_metrics_mac.cc
@@ -25,37 +25,6 @@ namespace base {
namespace {
-#if !defined(MAC_OS_X_VERSION_10_11) || \
- MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_11
-// The |phys_footprint| field was introduced in 10.11.
-struct ChromeTaskVMInfo {
- mach_vm_size_t virtual_size;
- integer_t region_count;
- integer_t page_size;
- mach_vm_size_t resident_size;
- mach_vm_size_t resident_size_peak;
- mach_vm_size_t device;
- mach_vm_size_t device_peak;
- mach_vm_size_t internal;
- mach_vm_size_t internal_peak;
- mach_vm_size_t external;
- mach_vm_size_t external_peak;
- mach_vm_size_t reusable;
- mach_vm_size_t reusable_peak;
- mach_vm_size_t purgeable_volatile_pmap;
- mach_vm_size_t purgeable_volatile_resident;
- mach_vm_size_t purgeable_volatile_virtual;
- mach_vm_size_t compressed;
- mach_vm_size_t compressed_peak;
- mach_vm_size_t compressed_lifetime;
- mach_vm_size_t phys_footprint;
-};
-#else
-using ChromeTaskVMInfo = task_vm_info;
-#endif // MAC_OS_X_VERSION_10_11
-mach_msg_type_number_t ChromeTaskVMInfoCount =
- sizeof(ChromeTaskVMInfo) / sizeof(natural_t);
-
bool GetTaskInfo(mach_port_t task, task_basic_info_64* task_info_data) {
if (task == MACH_PORT_NULL)
return false;
@@ -105,23 +74,6 @@ std::unique_ptr<ProcessMetrics> ProcessMetrics::CreateProcessMetrics(
return WrapUnique(new ProcessMetrics(process, port_provider));
}
-ProcessMetrics::TaskVMInfo ProcessMetrics::GetTaskVMInfo() const {
- TaskVMInfo info;
- ChromeTaskVMInfo task_vm_info;
- mach_msg_type_number_t count = ChromeTaskVMInfoCount;
- kern_return_t result =
- task_info(TaskForPid(process_), TASK_VM_INFO,
- reinterpret_cast<task_info_t>(&task_vm_info), &count);
- if (result != KERN_SUCCESS)
- return info;
-
- info.internal = task_vm_info.internal;
- info.compressed = task_vm_info.compressed;
- if (count == ChromeTaskVMInfoCount)
- info.phys_footprint = task_vm_info.phys_footprint;
- return info;
-}
-
#define TIME_VALUE_TO_TIMEVAL(a, r) do { \
(r)->tv_sec = (a)->seconds; \
(r)->tv_usec = (a)->microseconds; \
diff --git a/chromium/base/process/process_metrics_posix.cc b/chromium/base/process/process_metrics_posix.cc
index a09bbf2c56e..5763432aa4f 100644
--- a/chromium/base/process/process_metrics_posix.cc
+++ b/chromium/base/process/process_metrics_posix.cc
@@ -71,6 +71,15 @@ size_t GetMaxFds() {
return static_cast<size_t>(max_fds);
}
+size_t GetHandleLimit() {
+#if defined(OS_MACOSX)
+ // Taken from a small test that allocated ports in a loop.
+ return static_cast<size_t>(1 << 18);
+#else
+ return GetMaxFds();
+#endif
+}
+
void IncreaseFdLimitTo(unsigned int max_descriptors) {
struct rlimit limits;
if (getrlimit(RLIMIT_NOFILE, &limits) == 0) {
diff --git a/chromium/base/process/process_metrics_win.cc b/chromium/base/process/process_metrics_win.cc
index 45e27674468..5cf0943c654 100644
--- a/chromium/base/process/process_metrics_win.cc
+++ b/chromium/base/process/process_metrics_win.cc
@@ -129,6 +129,12 @@ size_t GetMaxFds() {
return std::numeric_limits<size_t>::max();
}
+size_t GetHandleLimit() {
+ // Rounded down from value reported here:
+ // http://blogs.technet.com/b/markrussinovich/archive/2009/09/29/3283844.aspx
+ return static_cast<size_t>(1 << 23);
+}
+
// static
std::unique_ptr<ProcessMetrics> ProcessMetrics::CreateProcessMetrics(
ProcessHandle process) {
@@ -141,6 +147,9 @@ TimeDelta ProcessMetrics::GetCumulativeCPUUsage() {
FILETIME kernel_time;
FILETIME user_time;
+ if (!process_.IsValid())
+ return TimeDelta();
+
if (!GetProcessTimes(process_.Get(), &creation_time, &exit_time, &kernel_time,
&user_time)) {
// This should never fail because we duplicate the handle to guarantee it
@@ -154,6 +163,9 @@ TimeDelta ProcessMetrics::GetCumulativeCPUUsage() {
}
bool ProcessMetrics::GetIOCounters(IoCounters* io_counters) const {
+ if (!process_.IsValid())
+ return false;
+
return GetProcessIoCounters(process_.Get(), io_counters) != FALSE;
}
diff --git a/chromium/base/process/process_posix.cc b/chromium/base/process/process_posix.cc
index 38ee542e3a0..9636d44ddc2 100644
--- a/chromium/base/process/process_posix.cc
+++ b/chromium/base/process/process_posix.cc
@@ -16,7 +16,6 @@
#include "base/logging.h"
#include "base/posix/eintr_wrapper.h"
#include "base/process/kill.h"
-#include "base/test/clang_coverage.h"
#include "base/threading/thread_restrictions.h"
#include "build/build_config.h"
@@ -24,6 +23,10 @@
#include <sys/event.h>
#endif
+#if BUILDFLAG(CLANG_COVERAGE)
+#include "base/test/clang_coverage.h"
+#endif
+
namespace {
#if !defined(OS_NACL_NONSFI)
diff --git a/chromium/base/process/process_win.cc b/chromium/base/process/process_win.cc
index ae97a92c8e5..e43ce2d4faa 100644
--- a/chromium/base/process/process_win.cc
+++ b/chromium/base/process/process_win.cc
@@ -9,11 +9,14 @@
#include "base/logging.h"
#include "base/numerics/safe_conversions.h"
#include "base/process/kill.h"
-#include "base/test/clang_coverage.h"
#include "base/threading/thread_restrictions.h"
#include <windows.h>
+#if BUILDFLAG(CLANG_COVERAGE)
+#include "base/test/clang_coverage.h"
+#endif
+
namespace {
DWORD kBasicProcessAccess =
@@ -162,10 +165,14 @@ bool Process::Terminate(int exit_code, bool wait) const {
DPLOG(ERROR) << "Error waiting for process exit";
Exited(exit_code);
} else {
- // The process can't be terminated, perhaps because it has already
- // exited or is in the process of exiting. A non-zero timeout is necessary
- // here for the same reasons as above.
- DPLOG(ERROR) << "Unable to terminate process";
+ // The process can't be terminated, perhaps because it has already exited or
+ // is in the process of exiting. An error code of ERROR_ACCESS_DENIED is the
+ // undocumented-but-expected result if the process has already exited or
+ // started exiting when TerminateProcess is called, so don't print an error
+ // message in that case.
+ if (GetLastError() != ERROR_ACCESS_DENIED)
+ DPLOG(ERROR) << "Unable to terminate process";
+ // A non-zero timeout is necessary here for the same reasons as above.
if (::WaitForSingleObject(Handle(), kWaitMs) == WAIT_OBJECT_0) {
DWORD actual_exit;
Exited(::GetExitCodeProcess(Handle(), &actual_exit) ? actual_exit
diff --git a/chromium/base/profiler/metadata_recorder.cc b/chromium/base/profiler/metadata_recorder.cc
index baca29af823..fa490e746c9 100644
--- a/chromium/base/profiler/metadata_recorder.cc
+++ b/chromium/base/profiler/metadata_recorder.cc
@@ -4,6 +4,8 @@
#include "base/profiler/metadata_recorder.h"
+#include "base/metrics/histogram_macros.h"
+
namespace base {
MetadataRecorder::ItemInternal::ItemInternal() = default;
@@ -21,30 +23,46 @@ MetadataRecorder::~MetadataRecorder() = default;
void MetadataRecorder::Set(uint64_t name_hash, int64_t value) {
base::AutoLock lock(write_lock_);
- // Acquiring the |write_lock_| guarantees that two simultaneous writes don't
- // attempt to create items in the same slot. Use of memory_order_release
- // guarantees that all writes performed by other threads to the metadata items
- // will be seen by the time we reach this point.
+ // Acquiring the |write_lock_| ensures that:
+ //
+ // - We don't try to write into the same new slot at the same time as
+ // another thread
+ // - We see all writes by other threads (acquiring a mutex implies acquire
+ // semantics)
size_t item_slots_used = item_slots_used_.load(std::memory_order_relaxed);
for (size_t i = 0; i < item_slots_used; ++i) {
auto& item = items_[i];
if (item.name_hash == name_hash) {
item.value.store(value, std::memory_order_relaxed);
- item.is_active.store(true, std::memory_order_release);
+
+ const bool was_active =
+ item.is_active.exchange(true, std::memory_order_release);
+ if (!was_active)
+ inactive_item_count_--;
+
+ UMA_HISTOGRAM_COUNTS_10000("StackSamplingProfiler.MetadataSlotsUsed",
+ item_slots_used);
+
return;
}
}
- // There should always be room in this data structure because there are more
- // reserved slots than there are unique metadata names in Chromium.
- DCHECK_NE(item_slots_used, items_.size())
- << "Cannot add a new sampling profiler metadata item to an already full "
- "map.";
+ item_slots_used = TryReclaimInactiveSlots(item_slots_used);
+
+ UMA_HISTOGRAM_COUNTS_10000("StackSamplingProfiler.MetadataSlotsUsed",
+ item_slots_used + 1);
+
+ if (item_slots_used == items_.size()) {
+ // The metadata recorder is full, forcing us to drop this metadata. The
+ // above UMA histogram counting occupied metadata slots should help us set a
+ // max size that avoids this condition during normal Chrome use.
+ return;
+ }
// Wait until the item is fully created before setting |is_active| to true and
// incrementing |item_slots_used_|, which will signal to readers that the item
// is ready.
- auto& item = items_[item_slots_used_];
+ auto& item = items_[item_slots_used];
item.name_hash = name_hash;
item.value.store(value, std::memory_order_relaxed);
item.is_active.store(true, std::memory_order_release);
@@ -58,16 +76,42 @@ void MetadataRecorder::Remove(uint64_t name_hash) {
for (size_t i = 0; i < item_slots_used; ++i) {
auto& item = items_[i];
if (item.name_hash == name_hash) {
- // A removed item will occupy its slot indefinitely.
- item.is_active.store(false, std::memory_order_release);
+ // A removed item will occupy its slot until that slot is reclaimed.
+ const bool was_active =
+ item.is_active.exchange(false, std::memory_order_relaxed);
+ if (was_active)
+ inactive_item_count_++;
+
+ return;
}
}
}
-size_t MetadataRecorder::GetItems(ItemArray* const items) const {
- // TODO(charliea): Defragment the item array if we can successfully acquire
- // the write lock here. This will require either making this function
- // non-const or |items_| mutable.
+MetadataRecorder::ScopedGetItems::ScopedGetItems(
+ MetadataRecorder* metadata_recorder)
+ : metadata_recorder_(metadata_recorder),
+ auto_lock_(&metadata_recorder->read_lock_) {}
+
+MetadataRecorder::ScopedGetItems::~ScopedGetItems() {}
+
+// This function is marked as NO_THREAD_SAFETY_ANALYSIS because the analyzer
+// doesn't understand that the lock is acquired in the constructor initializer
+// list and can therefore be safely released here.
+size_t MetadataRecorder::ScopedGetItems::GetItems(
+ ProfileBuilder::MetadataItemArray* const items) NO_THREAD_SAFETY_ANALYSIS {
+ size_t item_count = metadata_recorder_->GetItems(items);
+ auto_lock_.Release();
+ return item_count;
+}
+
+std::unique_ptr<ProfileBuilder::MetadataProvider>
+MetadataRecorder::CreateMetadataProvider() {
+ return std::make_unique<MetadataRecorder::ScopedGetItems>(this);
+}
+
+size_t MetadataRecorder::GetItems(
+ ProfileBuilder::MetadataItemArray* const items) const {
+ read_lock_.AssertAcquired();
// If a writer adds a new item after this load, it will be ignored. We do
// this instead of calling item_slots_used_.load() explicitly in the for loop
@@ -87,12 +131,75 @@ size_t MetadataRecorder::GetItems(ItemArray* const items) const {
// Because we wait until |is_active| is set to consider an item active and
// that field is always set last, we ignore half-created items.
if (item.is_active.load(std::memory_order_acquire)) {
- (*items)[write_index++] =
- Item{item.name_hash, item.value.load(std::memory_order_relaxed)};
+ (*items)[write_index++] = ProfileBuilder::MetadataItem{
+ item.name_hash, item.value.load(std::memory_order_relaxed)};
}
}
return write_index;
}
+size_t MetadataRecorder::TryReclaimInactiveSlots(size_t item_slots_used) {
+ const size_t remaining_slots =
+ ProfileBuilder::MAX_METADATA_COUNT - item_slots_used;
+
+ if (inactive_item_count_ == 0 || inactive_item_count_ < remaining_slots) {
+ // This reclaiming threshold has a few nice properties:
+ //
+ // - It avoids reclaiming when no items have been removed
+ // - It makes doing so more likely as free slots become more scarce
+ // - It makes doing so less likely when the benefits are lower
+ return item_slots_used;
+ }
+
+ if (read_lock_.Try()) {
+ // The lock isn't already held by a reader or another thread reclaiming
+ // slots.
+ item_slots_used = ReclaimInactiveSlots(item_slots_used);
+ read_lock_.Release();
+ }
+
+ return item_slots_used;
+}
+
+size_t MetadataRecorder::ReclaimInactiveSlots(size_t item_slots_used) {
+ // From here until the end of the reclamation, we can safely use
+ // memory_order_relaxed for all reads and writes. We don't need
+ // memory_order_acquire because acquiring the write mutex gives acquire
+ // semantics and no other threads can write after we hold that mutex. We don't
+ // need memory_order_release because no readers can read until we release the
+ // read mutex, which itself has release semantics.
+ size_t first_inactive_item_idx = 0;
+ size_t last_active_item_idx = item_slots_used - 1;
+ while (first_inactive_item_idx < last_active_item_idx) {
+ ItemInternal& inactive_item = items_[first_inactive_item_idx];
+ ItemInternal& active_item = items_[last_active_item_idx];
+
+ if (inactive_item.is_active.load(std::memory_order_relaxed)) {
+ // Keep seeking forward to an inactive item.
+ ++first_inactive_item_idx;
+ continue;
+ }
+
+ if (!active_item.is_active.load(std::memory_order_relaxed)) {
+ // Keep seeking backward to an active item. Skipping over this item
+ // indicates that we're freeing the slot at this index.
+ --last_active_item_idx;
+ item_slots_used--;
+ continue;
+ }
+
+ inactive_item.name_hash = active_item.name_hash;
+ inactive_item.value.store(active_item.value.load(std::memory_order_relaxed),
+ std::memory_order_relaxed);
+ inactive_item.is_active.store(true, std::memory_order_relaxed);
+
+ ++first_inactive_item_idx;
+ --last_active_item_idx;
+ item_slots_used--;
+ }
+
+ item_slots_used_.store(item_slots_used, std::memory_order_relaxed);
+ return item_slots_used;
+}
} // namespace base
diff --git a/chromium/base/profiler/metadata_recorder.h b/chromium/base/profiler/metadata_recorder.h
index 346a5a1c024..faec7c275d7 100644
--- a/chromium/base/profiler/metadata_recorder.h
+++ b/chromium/base/profiler/metadata_recorder.h
@@ -9,7 +9,9 @@
#include <atomic>
#include <utility>
+#include "base/profiler/profile_builder.h"
#include "base/synchronization/lock.h"
+#include "base/thread_annotations.h"
namespace base {
@@ -19,6 +21,105 @@ namespace base {
// with the sample.
//
// Methods on this class are safe to call unsynchronized from arbitrary threads.
+//
+// This class was designed to read metadata from a single sampling thread and
+// write metadata from many Chrome threads within the same process. These other
+// threads might be suspended by the sampling thread at any time in order to
+// collect a sample.
+//
+// This class has a few notable constraints:
+//
+// A) If a lock that's required to read the metadata might be held while writing
+// the metadata, that lock must be acquirable *before* the thread is
+// suspended. Otherwise, the sampling thread might suspend the target thread
+// while it is holding the required lock, causing deadlock.
+//
+// Ramifications:
+//
+// - When retrieving items, lock acquisition (through
+// CreateMetadataProvider()) and actual item retrieval (through
+// MetadataProvider::GetItems()) are separate.
+//
+// B) We can't allocate data on the heap while reading the metadata items. This
+// is because, on many operating systems, there's a process-wide heap lock
+// that is held while allocating on the heap. If a thread is suspended while
+// holding this lock and the sampling thread then tries to allocate on the
+// heap to read the metadata, it will deadlock trying to acquire the heap
+// lock.
+//
+// Ramifications:
+//
+// - We hold and retrieve the metadata using a fixed-size array, which
+// allows readers to preallocate the data structure that we pass back
+// the metadata in.
+//
+// C) We shouldn't guard writes with a lock that also guards reads. It can take
+// ~30us from the time that the sampling thread requests that a thread be
+// suspended and the time that it actually happens. If all metadata writes
+// block their thread during that time, we're very likely to block all Chrome
+// threads for an additional 30us per sample.
+//
+// Ramifications:
+//
+// - We use two locks to guard the metadata: a read lock and a write
+// lock. Only the write lock is required to write into the metadata, and
+// only the read lock is required to read the metadata.
+//
+// - Because we can't guard reads and writes with the same lock, we have to
+// face the possibility of writes occurring during a read. This is
+// especially problematic because there's no way to read both the key and
+// value for an item atomically without using mutexes, which violates
+// constraint A). If the sampling thread were to see the following
+// interleaving of reads and writes:
+//
+// * Reader thread reads key for slot 0
+// * Writer thread removes item at slot 0
+// * Writer thread creates new item with different key in slot 0
+// * Reader thread reads value for slot 0
+//
+// then the reader would see an invalid value for the given key. Because
+// of this possibility, we keep slots reserved for a specific key even
+// after that item has been removed. We reclaim these slots on a
+// best-effort basis during writes when the metadata recorder has become
+// sufficiently full and we can acquire the read lock.
+//
+// - We use state stored in atomic data types to ensure that readers and
+// writers are synchronized about where data should be written to and
+// read from. We must use atomic data types to guarantee that there's no
+// instruction during a write after which the recorder is in an
+// inconsistent state that might yield garbage data for a reader.
+//
+// Here are a few of the many states the recorder can be in:
+//
+// - No thread is using the recorder.
+//
+// - A single writer is writing into the recorder without a simultaneous
+// read. The write will succeed.
+//
+// - A reader is reading from the recorder without a simultaneous write. The
+// read will succeed.
+//
+// - Multiple writers attempt to write into the recorder simultaneously. All
+// writers but one will block because only one can hold the write lock.
+//
+// - A writer is writing into the recorder, which hasn't reached the threshold
+// at which it will try to reclaim inactive slots. The writer won't try to
+// acquire the read lock to reclaim inactive slots. The reader will therefore
+// be able to immediately acquire the read lock, suspend the target thread,
+// and read the metadata.
+//
+// - A writer is writing into the recorder, the recorder has reached the
+// threshold at which it needs to reclaim inactive slots, and the writer
+// thread is now in the middle of reclaiming those slots when a reader
+// arrives. The reader will try to acquire the read lock before suspending the
+// thread but will block until the writer thread finishes reclamation and
+// releases the read lock. The reader will then be able to acquire the read
+// lock and suspend the target thread.
+//
+// - A reader is reading the recorder when a writer attempts to write. The write
+// will be successful. However, if the writer deems it necessary to reclaim
+// inactive slots, it will skip doing so because it won't be able to acquire
+// the read lock.
class BASE_EXPORT MetadataRecorder {
public:
MetadataRecorder();
@@ -35,22 +136,62 @@ class BASE_EXPORT MetadataRecorder {
// If such an item does not exist, this has no effect.
void Remove(uint64_t name_hash);
- struct Item {
- // The hash of the metadata name, as produced by base::HashMetricName().
- uint64_t name_hash;
- // The value of the metadata item.
- int64_t value;
- };
-
- static const size_t MAX_METADATA_COUNT = 50;
- typedef std::array<Item, MAX_METADATA_COUNT> ItemArray;
- // Retrieves the first |available_slots| items in the metadata recorder and
- // copies them into |items|, returning the number of metadata items that were
- // copied. To ensure that all items can be copied, |available slots| should be
- // greater than or equal to |MAX_METADATA_COUNT|.
- size_t GetItems(ItemArray* const items) const;
+ // Creates a MetadataProvider object for the recorder, which acquires the
+ // necessary exclusive read lock and provides access to the recorder's items
+ // via its GetItems() function. Reclaiming of inactive slots in the recorder
+  // can't occur while this object lives, so it should be created as close as
+  // possible to when it's needed. Calling GetItems() releases the lock held by the
+ // object and can therefore only be called once during the object's lifetime.
+ //
+ // This object should be created *before* suspending the target
+ // thread. Otherwise, that thread might be suspended while reclaiming inactive
+ // slots and holding the read lock, which would cause the sampling thread to
+ // deadlock.
+ //
+ // Example usage:
+ //
+ // MetadataRecorder r;
+ // base::ProfileBuilder::MetadataItemArray arr;
+ // size_t item_count;
+ // ...
+ // {
+ // auto get_items = r.CreateMetadataProvider();
+  //   item_count = get_items->GetItems(&arr);
+ // }
+ std::unique_ptr<ProfileBuilder::MetadataProvider> CreateMetadataProvider();
private:
+ // An object that provides access to a MetadataRecorder's items and holds the
+ // necessary exclusive read lock until either GetItems() is called or the
+ // object is destroyed.
+ //
+ // For usage and more details, see CreateMetadataProvider().
+ class SCOPED_LOCKABLE ScopedGetItems
+ : public ProfileBuilder::MetadataProvider {
+ public:
+ // Acquires an exclusive read lock on the metadata recorder which is held
+ // until either GetItems() is called or the object is destroyed.
+ ScopedGetItems(MetadataRecorder* metadata_recorder)
+ EXCLUSIVE_LOCK_FUNCTION(metadata_recorder->read_lock_);
+ ~ScopedGetItems() override UNLOCK_FUNCTION(metadata_recorder_->read_lock_);
+ ScopedGetItems(const ScopedGetItems&) = delete;
+ ScopedGetItems& operator=(const ScopedGetItems&) = delete;
+
+ // Retrieves the first |available_slots| items in the metadata recorder and
+ // copies them into |items|, returning the number of metadata items that
+  // were copied. To ensure that all items can be copied, |available_slots|
+ // should be greater than or equal to |MAX_METADATA_COUNT|.
+ //
+ // This function releases the lock held by the object and can therefore only
+ // be called once during the object's lifetime.
+ size_t GetItems(ProfileBuilder::MetadataItemArray* const items) override
+ EXCLUSIVE_LOCKS_REQUIRED(metadata_recorder_->read_lock_);
+
+ private:
+ const MetadataRecorder* const metadata_recorder_;
+ base::ReleasableAutoLock auto_lock_;
+ };
+
// TODO(charliea): Support large quantities of metadata efficiently.
struct ItemInternal {
ItemInternal();
@@ -77,6 +218,24 @@ class BASE_EXPORT MetadataRecorder {
std::atomic<int64_t> value;
};
+ // Attempts to free slots in the metadata map that are currently allocated to
+ // inactive items. May fail silently if the read lock is already held, in
+ // which case no slots will be freed. Returns the number of item slots used
+ // after the reclamation.
+ size_t TryReclaimInactiveSlots(size_t item_slots_used)
+ EXCLUSIVE_LOCKS_REQUIRED(write_lock_) LOCKS_EXCLUDED(read_lock_);
+ // Also protected by read_lock_, but current thread annotation limitations
+ // prevent us from using thread annotations with locks acquired through
+ // Lock::Try(). Updates item_slots_used_ to reflect the new item count and
+ // returns the number of item slots used after the reclamation.
+ size_t ReclaimInactiveSlots(size_t item_slots_used)
+ EXCLUSIVE_LOCKS_REQUIRED(write_lock_);
+
+ // Protected by read_lock_, but current thread annotation limitations
+ // prevent us from using thread annotations with locks acquired through
+ // Lock::Try().
+ size_t GetItems(ProfileBuilder::MetadataItemArray* const items) const;
+
// Metadata items that the recorder has seen. Rather than implementing the
// metadata recorder as a dense array, we implement it as a sparse array where
// removed metadata items keep their slot with their |is_active| bit set to
@@ -85,7 +244,7 @@ class BASE_EXPORT MetadataRecorder {
//
// For the rationale behind this design (along with others considered), see
// https://docs.google.com/document/d/18shLhVwuFbLl_jKZxCmOfRB98FmNHdKl0yZZZ3aEO4U/edit#.
- std::array<ItemInternal, MAX_METADATA_COUNT> items_;
+ std::array<ItemInternal, ProfileBuilder::MAX_METADATA_COUNT> items_;
// The number of item slots used in the metadata map.
//
@@ -95,9 +254,21 @@ class BASE_EXPORT MetadataRecorder {
// of its existence.
std::atomic<size_t> item_slots_used_{0};
- // A lock that guards against multiple threads trying to modify the same item
- // at once.
+ // The number of item slots occupied by inactive items.
+ size_t inactive_item_count_ GUARDED_BY(write_lock_) = 0;
+
+ // A lock that guards against multiple threads trying to manipulate items_,
+ // item_slots_used_, or inactive_item_count_ at the same time.
base::Lock write_lock_;
+
+ // A lock that guards against a reader trying to read items_ while inactive
+ // slots are being reclaimed.
+ //
+ // Note that we can't enforce that this lock is properly acquired through
+  // thread annotations because thread annotations don't understand that
+ // ScopedGetItems::GetItems() can only be called between ScopedGetItems's
+ // constructor and destructor.
+ base::Lock read_lock_;
};
} // namespace base
diff --git a/chromium/base/profiler/metadata_recorder_unittest.cc b/chromium/base/profiler/metadata_recorder_unittest.cc
index 8155687fbcb..42e8568ce43 100644
--- a/chromium/base/profiler/metadata_recorder_unittest.cc
+++ b/chromium/base/profiler/metadata_recorder_unittest.cc
@@ -5,20 +5,27 @@
#include "base/profiler/metadata_recorder.h"
#include "base/test/gtest_util.h"
+#include "base/test/metrics/histogram_tester.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
-bool operator==(const MetadataRecorder::Item& lhs,
- const MetadataRecorder::Item& rhs) {
+bool operator==(const base::ProfileBuilder::MetadataItem& lhs,
+ const base::ProfileBuilder::MetadataItem& rhs) {
return lhs.name_hash == rhs.name_hash && lhs.value == rhs.value;
}
+bool operator<(const base::ProfileBuilder::MetadataItem& lhs,
+ const base::ProfileBuilder::MetadataItem& rhs) {
+ return lhs.name_hash < rhs.name_hash;
+}
+
TEST(MetadataRecorderTest, GetItems_Empty) {
MetadataRecorder recorder;
- MetadataRecorder::ItemArray items;
- size_t item_count = recorder.GetItems(&items);
+ base::ProfileBuilder::MetadataItemArray items;
+
+ size_t item_count = recorder.CreateMetadataProvider()->GetItems(&items);
ASSERT_EQ(0u, item_count);
}
@@ -28,18 +35,23 @@ TEST(MetadataRecorderTest, Set_NewNameHash) {
recorder.Set(10, 20);
- MetadataRecorder::ItemArray items;
- size_t item_count = recorder.GetItems(&items);
- ASSERT_EQ(1u, item_count);
- ASSERT_EQ(10u, items[0].name_hash);
- ASSERT_EQ(20, items[0].value);
+ base::ProfileBuilder::MetadataItemArray items;
+ size_t item_count;
+ {
+ item_count = recorder.CreateMetadataProvider()->GetItems(&items);
+ ASSERT_EQ(1u, item_count);
+ ASSERT_EQ(10u, items[0].name_hash);
+ ASSERT_EQ(20, items[0].value);
+ }
recorder.Set(20, 30);
- item_count = recorder.GetItems(&items);
- ASSERT_EQ(2u, item_count);
- ASSERT_EQ(20u, items[1].name_hash);
- ASSERT_EQ(30, items[1].value);
+ {
+ item_count = recorder.CreateMetadataProvider()->GetItems(&items);
+ ASSERT_EQ(2u, item_count);
+ ASSERT_EQ(20u, items[1].name_hash);
+ ASSERT_EQ(30, items[1].value);
+ }
}
TEST(MetadataRecorderTest, Set_ExistingNameNash) {
@@ -47,8 +59,8 @@ TEST(MetadataRecorderTest, Set_ExistingNameNash) {
recorder.Set(10, 20);
recorder.Set(10, 30);
- MetadataRecorder::ItemArray items;
- size_t item_count = recorder.GetItems(&items);
+ base::ProfileBuilder::MetadataItemArray items;
+ size_t item_count = recorder.CreateMetadataProvider()->GetItems(&items);
ASSERT_EQ(1u, item_count);
ASSERT_EQ(10u, items[0].name_hash);
ASSERT_EQ(30, items[0].value);
@@ -56,10 +68,10 @@ TEST(MetadataRecorderTest, Set_ExistingNameNash) {
TEST(MetadataRecorderTest, Set_ReAddRemovedNameNash) {
MetadataRecorder recorder;
- MetadataRecorder::ItemArray items;
- std::vector<MetadataRecorder::Item> expected;
+ base::ProfileBuilder::MetadataItemArray items;
+ std::vector<base::ProfileBuilder::MetadataItem> expected;
for (size_t i = 0; i < items.size(); ++i) {
- expected.push_back(MetadataRecorder::Item{i, 0});
+ expected.push_back(base::ProfileBuilder::MetadataItem{i, 0});
recorder.Set(i, 0);
}
@@ -70,19 +82,20 @@ TEST(MetadataRecorderTest, Set_ReAddRemovedNameNash) {
recorder.Remove(3);
recorder.Set(3, 0);
- size_t item_count = recorder.GetItems(&items);
+ size_t item_count = recorder.CreateMetadataProvider()->GetItems(&items);
EXPECT_EQ(items.size(), item_count);
- ASSERT_THAT(expected, ::testing::ElementsAreArray(items));
+ ASSERT_THAT(expected, ::testing::UnorderedElementsAreArray(items));
}
TEST(MetadataRecorderTest, Set_AddPastMaxCount) {
MetadataRecorder recorder;
- MetadataRecorder::ItemArray items;
+ base::ProfileBuilder::MetadataItemArray items;
for (size_t i = 0; i < items.size(); ++i) {
recorder.Set(i, 0);
}
- ASSERT_DCHECK_DEATH(recorder.Set(items.size(), 0));
+ // This should fail silently.
+ recorder.Set(items.size(), 0);
}
TEST(MetadataRecorderTest, Remove) {
@@ -92,8 +105,8 @@ TEST(MetadataRecorderTest, Remove) {
recorder.Set(50, 60);
recorder.Remove(30);
- MetadataRecorder::ItemArray items;
- size_t item_count = recorder.GetItems(&items);
+ base::ProfileBuilder::MetadataItemArray items;
+ size_t item_count = recorder.CreateMetadataProvider()->GetItems(&items);
ASSERT_EQ(2u, item_count);
ASSERT_EQ(10u, items[0].name_hash);
ASSERT_EQ(20, items[0].value);
@@ -106,11 +119,63 @@ TEST(MetadataRecorderTest, Remove_DoesntExist) {
recorder.Set(10, 20);
recorder.Remove(20);
- MetadataRecorder::ItemArray items;
- size_t item_count = recorder.GetItems(&items);
+ base::ProfileBuilder::MetadataItemArray items;
+ size_t item_count = recorder.CreateMetadataProvider()->GetItems(&items);
ASSERT_EQ(1u, item_count);
ASSERT_EQ(10u, items[0].name_hash);
ASSERT_EQ(20, items[0].value);
}
+TEST(MetadataRecorderTest, ReclaimInactiveSlots) {
+ MetadataRecorder recorder;
+
+ std::set<base::ProfileBuilder::MetadataItem> items_set;
+ // Fill up the metadata map.
+ for (size_t i = 0; i < base::ProfileBuilder::MAX_METADATA_COUNT; ++i) {
+ recorder.Set(i, i);
+ items_set.insert(base::ProfileBuilder::MetadataItem{i, i});
+ }
+
+ // Remove every fourth entry to fragment the data.
+ size_t entries_removed = 0;
+ for (size_t i = 3; i < base::ProfileBuilder::MAX_METADATA_COUNT; i += 4) {
+ recorder.Remove(i);
+ ++entries_removed;
+ items_set.erase(base::ProfileBuilder::MetadataItem{i, i});
+ }
+
+ // Ensure that the inactive slots are reclaimed to make room for more entries.
+ for (size_t i = 1; i <= entries_removed; ++i) {
+ recorder.Set(i * 100, i * 100);
+ items_set.insert(base::ProfileBuilder::MetadataItem{i * 100, i * 100});
+ }
+
+ base::ProfileBuilder::MetadataItemArray items_arr;
+ std::copy(items_set.begin(), items_set.end(), items_arr.begin());
+
+ base::ProfileBuilder::MetadataItemArray recorder_items;
+ size_t recorder_item_count =
+ recorder.CreateMetadataProvider()->GetItems(&recorder_items);
+ ASSERT_EQ(recorder_item_count, base::ProfileBuilder::MAX_METADATA_COUNT);
+ ASSERT_THAT(recorder_items, ::testing::UnorderedElementsAreArray(items_arr));
+}
+
+TEST(MetadataRecorderTest, MetadataSlotsUsedUmaHistogram) {
+ MetadataRecorder recorder;
+ base::HistogramTester histogram_tester;
+
+ for (size_t i = 0; i < base::ProfileBuilder::MAX_METADATA_COUNT; ++i) {
+ recorder.Set(i * 10, i * 100);
+ }
+
+ EXPECT_THAT(
+ histogram_tester.GetAllSamples("StackSamplingProfiler.MetadataSlotsUsed"),
+ testing::ElementsAre(Bucket(1, 1), Bucket(2, 1), Bucket(3, 1),
+ Bucket(4, 1), Bucket(5, 1), Bucket(6, 1),
+ Bucket(7, 1), Bucket(8, 2), Bucket(10, 2),
+ Bucket(12, 2), Bucket(14, 3), Bucket(17, 3),
+ Bucket(20, 4), Bucket(24, 5), Bucket(29, 5),
+ Bucket(34, 6), Bucket(40, 8), Bucket(48, 3)));
+}
+
} // namespace base
diff --git a/chromium/base/profiler/profile_builder.cc b/chromium/base/profiler/profile_builder.cc
new file mode 100644
index 00000000000..ca3db2ac6b0
--- /dev/null
+++ b/chromium/base/profiler/profile_builder.cc
@@ -0,0 +1,7 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/profiler/profile_builder.h"
+
+const size_t base::ProfileBuilder::MAX_METADATA_COUNT;
diff --git a/chromium/base/profiler/profile_builder.h b/chromium/base/profiler/profile_builder.h
index 0202c21ea52..49355ef49df 100644
--- a/chromium/base/profiler/profile_builder.h
+++ b/chromium/base/profiler/profile_builder.h
@@ -26,13 +26,31 @@ class BASE_EXPORT ProfileBuilder {
// up modules from addresses.
virtual ModuleCache* GetModuleCache() = 0;
+ struct MetadataItem {
+ // The hash of the metadata name, as produced by base::HashMetricName().
+ uint64_t name_hash;
+ // The value of the metadata item.
+ int64_t value;
+ };
+
+ static constexpr size_t MAX_METADATA_COUNT = 50;
+ typedef std::array<MetadataItem, MAX_METADATA_COUNT> MetadataItemArray;
+
+ class MetadataProvider {
+ public:
+ MetadataProvider() = default;
+ virtual ~MetadataProvider() = default;
+
+ virtual size_t GetItems(ProfileBuilder::MetadataItemArray* const items) = 0;
+ };
+
// Records metadata to be associated with the current sample. To avoid
// deadlock on locks taken by the suspended profiled thread, implementations
// of this method must not execute any code that could take a lock, including
// heap allocation or use of CHECK/DCHECK/LOG statements. Generally
// implementations should simply atomically copy metadata state to be
// associated with the sample.
- virtual void RecordMetadata() {}
+ virtual void RecordMetadata(MetadataProvider* metadata_provider) {}
// Records a new set of frames. Invoked when sampling a sample completes.
virtual void OnSampleCompleted(std::vector<Frame> frames) = 0;
diff --git a/chromium/base/profiler/sample_metadata_unittest.cc b/chromium/base/profiler/sample_metadata_unittest.cc
index 27e177b3918..e38052f0b86 100644
--- a/chromium/base/profiler/sample_metadata_unittest.cc
+++ b/chromium/base/profiler/sample_metadata_unittest.cc
@@ -10,19 +10,29 @@
namespace base {
TEST(SampleMetadataTest, ScopedSampleMetadata) {
- MetadataRecorder::ItemArray items;
-
- ASSERT_EQ(0u, GetSampleMetadataRecorder()->GetItems(&items));
+ base::ProfileBuilder::MetadataItemArray items;
+ {
+ auto get_items = GetSampleMetadataRecorder()->CreateMetadataProvider();
+ ASSERT_EQ(0u, get_items->GetItems(&items));
+ }
{
ScopedSampleMetadata m("myname", 100);
- ASSERT_EQ(1u, GetSampleMetadataRecorder()->GetItems(&items));
- EXPECT_EQ(base::HashMetricName("myname"), items[0].name_hash);
- EXPECT_EQ(100, items[0].value);
+ {
+ ASSERT_EQ(1u,
+ GetSampleMetadataRecorder()->CreateMetadataProvider()->GetItems(
+ &items));
+ EXPECT_EQ(base::HashMetricName("myname"), items[0].name_hash);
+ EXPECT_EQ(100, items[0].value);
+ }
}
- ASSERT_EQ(0u, GetSampleMetadataRecorder()->GetItems(&items));
+ {
+ ASSERT_EQ(0u,
+ GetSampleMetadataRecorder()->CreateMetadataProvider()->GetItems(
+ &items));
+ }
}
} // namespace base
diff --git a/chromium/base/profiler/stack_sampler_impl.cc b/chromium/base/profiler/stack_sampler_impl.cc
index 2c70cc85522..6993664fd7f 100644
--- a/chromium/base/profiler/stack_sampler_impl.cc
+++ b/chromium/base/profiler/stack_sampler_impl.cc
@@ -8,6 +8,7 @@
#include "base/logging.h"
#include "base/profiler/profile_builder.h"
+#include "base/profiler/sample_metadata.h"
#include "base/profiler/thread_delegate.h"
#include "base/profiler/unwinder.h"
@@ -79,6 +80,12 @@ bool StackSamplerImpl::CopyStack(StackBuffer* stack_buffer,
uintptr_t bottom = 0;
const uint8_t* stack_copy_bottom = nullptr;
{
+ // The MetadataProvider must be created before the ScopedSuspendThread
+ // because it acquires a lock in its constructor that might otherwise be
+ // held by the target thread, resulting in deadlock.
+ std::unique_ptr<base::ProfileBuilder::MetadataProvider> get_metadata_items =
+ base::GetSampleMetadataRecorder()->CreateMetadataProvider();
+
// Allocation of the ScopedSuspendThread object itself is OK since it
// necessarily occurs before the thread is suspended by the object.
std::unique_ptr<ThreadDelegate::ScopedSuspendThread> suspend_thread =
@@ -102,7 +109,7 @@ bool StackSamplerImpl::CopyStack(StackBuffer* stack_buffer,
if (!thread_delegate_->CanCopyStack(bottom))
return false;
- profile_builder->RecordMetadata();
+ profile_builder->RecordMetadata(get_metadata_items.get());
stack_copy_bottom = CopyStackContentsAndRewritePointers(
reinterpret_cast<uint8_t*>(bottom), reinterpret_cast<uintptr_t*>(top),
diff --git a/chromium/base/profiler/stack_sampler_impl_unittest.cc b/chromium/base/profiler/stack_sampler_impl_unittest.cc
index 694b7837173..f86d4330e33 100644
--- a/chromium/base/profiler/stack_sampler_impl_unittest.cc
+++ b/chromium/base/profiler/stack_sampler_impl_unittest.cc
@@ -33,7 +33,8 @@ class TestProfileBuilder : public ProfileBuilder {
// ProfileBuilder
ModuleCache* GetModuleCache() override { return module_cache_; }
- void RecordMetadata() override {}
+ void RecordMetadata(
+ base::ProfileBuilder::MetadataProvider* metadata_provider) override {}
void OnSampleCompleted(std::vector<Frame> frames) override {}
void OnProfileCompleted(TimeDelta profile_duration,
TimeDelta sampling_period) override {}
@@ -60,8 +61,7 @@ class TestThreadDelegate : public ThreadDelegate {
// The register context will be initialized to
// *|thread_context| if non-null.
RegisterContext* thread_context = nullptr)
- : fake_stack_(fake_stack),
- thread_context_(thread_context) {}
+ : fake_stack_(fake_stack), thread_context_(thread_context) {}
TestThreadDelegate(const TestThreadDelegate&) = delete;
TestThreadDelegate& operator=(const TestThreadDelegate&) = delete;
@@ -596,7 +596,7 @@ TEST(StackSamplerImplTest, WalkStack_AuxThenNative) {
module_cache.InjectModuleForTesting(std::make_unique<TestModule>(1u, 1u));
FakeTestUnwinder aux_unwinder(
- {{{UnwindResult::UNRECOGNIZED_FRAME, {1u}}, {false}}});
+ {{UnwindResult::UNRECOGNIZED_FRAME, {1u}}, false});
FakeTestUnwinder native_unwinder({{UnwindResult::COMPLETED, {2u}}});
std::vector<Frame> stack = StackSamplerImpl::WalkStackForTesting(
diff --git a/chromium/base/profiler/stack_sampling_profiler_test_util.cc b/chromium/base/profiler/stack_sampling_profiler_test_util.cc
index e560e0c0855..b96bbeb3795 100644
--- a/chromium/base/profiler/stack_sampling_profiler_test_util.cc
+++ b/chromium/base/profiler/stack_sampling_profiler_test_util.cc
@@ -35,7 +35,7 @@ class TestProfileBuilder : public ProfileBuilder {
// ProfileBuilder:
ModuleCache* GetModuleCache() override { return module_cache_; }
- void RecordMetadata() override {}
+ void RecordMetadata(MetadataProvider* metadata_provider) override {}
void OnSampleCompleted(std::vector<Frame> sample) override {
EXPECT_TRUE(sample_.empty());
diff --git a/chromium/base/profiler/stack_sampling_profiler_unittest.cc b/chromium/base/profiler/stack_sampling_profiler_unittest.cc
index c52fcc2d4f5..d3035b93e54 100644
--- a/chromium/base/profiler/stack_sampling_profiler_unittest.cc
+++ b/chromium/base/profiler/stack_sampling_profiler_unittest.cc
@@ -172,7 +172,8 @@ class TestProfileBuilder : public ProfileBuilder {
// ProfileBuilder:
ModuleCache* GetModuleCache() override;
- void RecordMetadata() override;
+ void RecordMetadata(
+ base::ProfileBuilder::MetadataProvider* metadata_provider) override;
void OnSampleCompleted(std::vector<Frame> sample) override;
void OnProfileCompleted(TimeDelta profile_duration,
TimeDelta sampling_period) override;
@@ -202,7 +203,8 @@ ModuleCache* TestProfileBuilder::GetModuleCache() {
return module_cache_;
}
-void TestProfileBuilder::RecordMetadata() {
+void TestProfileBuilder::RecordMetadata(
+ base::ProfileBuilder::MetadataProvider* metadata_provider) {
++metadata_count_;
}
diff --git a/chromium/base/run_loop.cc b/chromium/base/run_loop.cc
index de03afc5ffc..f350a4298af 100644
--- a/chromium/base/run_loop.cc
+++ b/chromium/base/run_loop.cc
@@ -7,7 +7,6 @@
#include "base/bind.h"
#include "base/callback.h"
#include "base/cancelable_callback.h"
-#include "base/lazy_instance.h"
#include "base/message_loop/message_loop.h"
#include "base/no_destructor.h"
#include "base/single_thread_task_runner.h"
@@ -19,8 +18,10 @@ namespace base {
namespace {
-LazyInstance<ThreadLocalPointer<RunLoop::Delegate>>::Leaky tls_delegate =
- LAZY_INSTANCE_INITIALIZER;
+ThreadLocalPointer<RunLoop::Delegate>& GetTlsDelegate() {
+ static base::NoDestructor<ThreadLocalPointer<RunLoop::Delegate>> instance;
+ return *instance;
+}
// Runs |closure| immediately if this is called on |task_runner|, otherwise
// forwards |closure| to it.
@@ -33,10 +34,10 @@ void ProxyToTaskRunner(scoped_refptr<SequencedTaskRunner> task_runner,
task_runner->PostTask(FROM_HERE, std::move(closure));
}
-ThreadLocalPointer<RunLoop::ScopedRunTimeoutForTest>*
+ThreadLocalPointer<RunLoop::ScopedRunTimeoutForTest>&
ScopedRunTimeoutForTestTLS() {
static NoDestructor<ThreadLocalPointer<RunLoop::ScopedRunTimeoutForTest>> tls;
- return tls.get();
+ return *tls;
}
void OnRunTimeout(RunLoop* run_loop, OnceClosure on_timeout) {
@@ -51,29 +52,29 @@ RunLoop::ScopedRunTimeoutForTest::ScopedRunTimeoutForTest(
RepeatingClosure on_timeout)
: timeout_(timeout),
on_timeout_(std::move(on_timeout)),
- nested_timeout_(ScopedRunTimeoutForTestTLS()->Get()) {
+ nested_timeout_(ScopedRunTimeoutForTestTLS().Get()) {
DCHECK_GT(timeout_, TimeDelta());
DCHECK(on_timeout_);
- ScopedRunTimeoutForTestTLS()->Set(this);
+ ScopedRunTimeoutForTestTLS().Set(this);
}
RunLoop::ScopedRunTimeoutForTest::~ScopedRunTimeoutForTest() {
- ScopedRunTimeoutForTestTLS()->Set(nested_timeout_);
+ ScopedRunTimeoutForTestTLS().Set(nested_timeout_);
}
// static
const RunLoop::ScopedRunTimeoutForTest*
RunLoop::ScopedRunTimeoutForTest::Current() {
- return ScopedRunTimeoutForTestTLS()->Get();
+ return ScopedRunTimeoutForTestTLS().Get();
}
RunLoop::ScopedDisableRunTimeoutForTest::ScopedDisableRunTimeoutForTest()
- : nested_timeout_(ScopedRunTimeoutForTestTLS()->Get()) {
- ScopedRunTimeoutForTestTLS()->Set(nullptr);
+ : nested_timeout_(ScopedRunTimeoutForTestTLS().Get()) {
+ ScopedRunTimeoutForTestTLS().Set(nullptr);
}
RunLoop::ScopedDisableRunTimeoutForTest::~ScopedDisableRunTimeoutForTest() {
- ScopedRunTimeoutForTestTLS()->Set(nested_timeout_);
+ ScopedRunTimeoutForTestTLS().Set(nested_timeout_);
}
RunLoop::Delegate::Delegate() {
@@ -84,11 +85,14 @@ RunLoop::Delegate::Delegate() {
RunLoop::Delegate::~Delegate() {
DCHECK_CALLED_ON_VALID_THREAD(bound_thread_checker_);
+ DCHECK(active_run_loops_.empty());
// A RunLoop::Delegate may be destroyed before it is bound, if so it may still
// be on its creation thread (e.g. a Thread that fails to start) and
// shouldn't disrupt that thread's state.
- if (bound_)
- tls_delegate.Get().Set(nullptr);
+ if (bound_) {
+ DCHECK_EQ(this, GetTlsDelegate().Get());
+ GetTlsDelegate().Set(nullptr);
+ }
}
bool RunLoop::Delegate::ShouldQuitWhenIdle() {
@@ -102,19 +106,18 @@ void RunLoop::RegisterDelegateForCurrentThread(Delegate* delegate) {
DCHECK_CALLED_ON_VALID_THREAD(delegate->bound_thread_checker_);
// There can only be one RunLoop::Delegate per thread.
- DCHECK(!tls_delegate.Get().Get())
+ DCHECK(!GetTlsDelegate().Get())
<< "Error: Multiple RunLoop::Delegates registered on the same thread.\n\n"
"Hint: You perhaps instantiated a second "
"MessageLoop/ScopedTaskEnvironment on a thread that already had one?";
- tls_delegate.Get().Set(delegate);
+ GetTlsDelegate().Set(delegate);
delegate->bound_ = true;
}
RunLoop::RunLoop(Type type)
- : delegate_(tls_delegate.Get().Get()),
+ : delegate_(GetTlsDelegate().Get()),
type_(type),
- origin_task_runner_(ThreadTaskRunnerHandle::Get()),
- weak_factory_(this) {
+ origin_task_runner_(ThreadTaskRunnerHandle::Get()) {
DCHECK(delegate_) << "A RunLoop::Delegate must be bound to this thread prior "
"to using RunLoop.";
DCHECK(origin_task_runner_);
@@ -139,7 +142,7 @@ void RunLoop::RunWithTimeout(TimeDelta timeout) {
// TODO(crbug.com/905412): Use real-time for Run() timeouts so that they
// can be applied even in tests which mock TimeTicks::Now().
CancelableOnceClosure cancelable_timeout;
- ScopedRunTimeoutForTest* run_timeout = ScopedRunTimeoutForTestTLS()->Get();
+ ScopedRunTimeoutForTest* run_timeout = ScopedRunTimeoutForTestTLS().Get();
if (run_timeout) {
cancelable_timeout.Reset(
BindOnce(&OnRunTimeout, Unretained(this), run_timeout->on_timeout()));
@@ -147,23 +150,12 @@ void RunLoop::RunWithTimeout(TimeDelta timeout) {
FROM_HERE, cancelable_timeout.callback(), run_timeout->timeout());
}
- // It is okay to access this RunLoop from another sequence while Run() is
- // active as this RunLoop won't touch its state until after that returns (if
- // the RunLoop's state is accessed while processing Run(), it will be re-bound
- // to the accessing sequence for the remainder of that Run() -- accessing from
- // multiple sequences is still disallowed).
- DETACH_FROM_SEQUENCE(sequence_checker_);
-
DCHECK_EQ(this, delegate_->active_run_loops_.top());
const bool application_tasks_allowed =
delegate_->active_run_loops_.size() == 1U ||
type_ == Type::kNestableTasksAllowed;
delegate_->Run(application_tasks_allowed, timeout);
- // Rebind this RunLoop to the current thread after Run().
- DETACH_FROM_SEQUENCE(sequence_checker_);
- DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
-
AfterRun();
}
@@ -208,52 +200,52 @@ void RunLoop::QuitWhenIdle() {
quit_when_idle_received_ = true;
}
-Closure RunLoop::QuitClosure() {
- // TODO(gab): Fix bad usage and enable this check, http://crbug.com/715235.
- // DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+RepeatingClosure RunLoop::QuitClosure() {
+ // Obtaining the QuitClosure() is not thread-safe; either post the
+ // QuitClosure() from the run thread or invoke Quit() directly (which is
+ // thread-safe).
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
allow_quit_current_deprecated_ = false;
- // Need to use ProxyToTaskRunner() as WeakPtrs vended from
- // |weak_factory_| may only be accessed on |origin_task_runner_|.
- // TODO(gab): It feels wrong that QuitClosure() is bound to a WeakPtr.
- return Bind(&ProxyToTaskRunner, origin_task_runner_,
- Bind(&RunLoop::Quit, weak_factory_.GetWeakPtr()));
+ return BindRepeating(
+ &ProxyToTaskRunner, origin_task_runner_,
+ BindRepeating(&RunLoop::Quit, weak_factory_.GetWeakPtr()));
}
-Closure RunLoop::QuitWhenIdleClosure() {
- // TODO(gab): Fix bad usage and enable this check, http://crbug.com/715235.
- // DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+RepeatingClosure RunLoop::QuitWhenIdleClosure() {
+ // Obtaining the QuitWhenIdleClosure() is not thread-safe; either post the
+ // QuitWhenIdleClosure() from the run thread or invoke QuitWhenIdle() directly
+ // (which is thread-safe).
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
allow_quit_current_deprecated_ = false;
- // Need to use ProxyToTaskRunner() as WeakPtrs vended from
- // |weak_factory_| may only be accessed on |origin_task_runner_|.
- // TODO(gab): It feels wrong that QuitWhenIdleClosure() is bound to a WeakPtr.
- return Bind(&ProxyToTaskRunner, origin_task_runner_,
- Bind(&RunLoop::QuitWhenIdle, weak_factory_.GetWeakPtr()));
+ return BindRepeating(
+ &ProxyToTaskRunner, origin_task_runner_,
+ BindRepeating(&RunLoop::QuitWhenIdle, weak_factory_.GetWeakPtr()));
}
// static
bool RunLoop::IsRunningOnCurrentThread() {
- Delegate* delegate = tls_delegate.Get().Get();
+ Delegate* delegate = GetTlsDelegate().Get();
return delegate && !delegate->active_run_loops_.empty();
}
// static
bool RunLoop::IsNestedOnCurrentThread() {
- Delegate* delegate = tls_delegate.Get().Get();
+ Delegate* delegate = GetTlsDelegate().Get();
return delegate && delegate->active_run_loops_.size() > 1;
}
// static
void RunLoop::AddNestingObserverOnCurrentThread(NestingObserver* observer) {
- Delegate* delegate = tls_delegate.Get().Get();
+ Delegate* delegate = GetTlsDelegate().Get();
DCHECK(delegate);
delegate->nesting_observers_.AddObserver(observer);
}
// static
void RunLoop::RemoveNestingObserverOnCurrentThread(NestingObserver* observer) {
- Delegate* delegate = tls_delegate.Get().Get();
+ Delegate* delegate = GetTlsDelegate().Get();
DCHECK(delegate);
delegate->nesting_observers_.RemoveObserver(observer);
}
@@ -261,7 +253,7 @@ void RunLoop::RemoveNestingObserverOnCurrentThread(NestingObserver* observer) {
// static
void RunLoop::QuitCurrentDeprecated() {
DCHECK(IsRunningOnCurrentThread());
- Delegate* delegate = tls_delegate.Get().Get();
+ Delegate* delegate = GetTlsDelegate().Get();
DCHECK(delegate->active_run_loops_.top()->allow_quit_current_deprecated_)
<< "Please migrate off QuitCurrentDeprecated(), e.g. to QuitClosure().";
delegate->active_run_loops_.top()->Quit();
@@ -270,7 +262,7 @@ void RunLoop::QuitCurrentDeprecated() {
// static
void RunLoop::QuitCurrentWhenIdleDeprecated() {
DCHECK(IsRunningOnCurrentThread());
- Delegate* delegate = tls_delegate.Get().Get();
+ Delegate* delegate = GetTlsDelegate().Get();
DCHECK(delegate->active_run_loops_.top()->allow_quit_current_deprecated_)
<< "Please migrate off QuitCurrentWhenIdleDeprecated(), e.g. to "
"QuitWhenIdleClosure().";
@@ -278,18 +270,18 @@ void RunLoop::QuitCurrentWhenIdleDeprecated() {
}
// static
-Closure RunLoop::QuitCurrentWhenIdleClosureDeprecated() {
+RepeatingClosure RunLoop::QuitCurrentWhenIdleClosureDeprecated() {
// TODO(844016): Fix callsites and enable this check, or remove the API.
- // Delegate* delegate = tls_delegate.Get().Get();
+ // Delegate* delegate = GetTlsDelegate().Get();
// DCHECK(delegate->active_run_loops_.top()->allow_quit_current_deprecated_)
// << "Please migrate off QuitCurrentWhenIdleClosureDeprecated(), e.g to "
// "QuitWhenIdleClosure().";
- return Bind(&RunLoop::QuitCurrentWhenIdleDeprecated);
+ return BindRepeating(&RunLoop::QuitCurrentWhenIdleDeprecated);
}
#if DCHECK_IS_ON()
RunLoop::ScopedDisallowRunningForTesting::ScopedDisallowRunningForTesting()
- : current_delegate_(tls_delegate.Get().Get()),
+ : current_delegate_(GetTlsDelegate().Get()),
previous_run_allowance_(
current_delegate_ ? current_delegate_->allow_running_for_testing_
: false) {
@@ -298,7 +290,7 @@ RunLoop::ScopedDisallowRunningForTesting::ScopedDisallowRunningForTesting()
}
RunLoop::ScopedDisallowRunningForTesting::~ScopedDisallowRunningForTesting() {
- DCHECK_EQ(current_delegate_, tls_delegate.Get().Get());
+ DCHECK_EQ(current_delegate_, GetTlsDelegate().Get());
if (current_delegate_)
current_delegate_->allow_running_for_testing_ = previous_run_allowance_;
}
@@ -329,10 +321,10 @@ bool RunLoop::BeforeRun() {
if (quit_called_)
return false;
- auto& active_run_loops_ = delegate_->active_run_loops_;
- active_run_loops_.push(this);
+ auto& active_run_loops = delegate_->active_run_loops_;
+ active_run_loops.push(this);
- const bool is_nested = active_run_loops_.size() > 1;
+ const bool is_nested = active_run_loops.size() > 1;
if (is_nested) {
for (auto& observer : delegate_->nesting_observers_)
@@ -350,21 +342,19 @@ void RunLoop::AfterRun() {
running_ = false;
- auto& active_run_loops_ = delegate_->active_run_loops_;
- DCHECK_EQ(active_run_loops_.top(), this);
- active_run_loops_.pop();
-
- RunLoop* previous_run_loop =
- active_run_loops_.empty() ? nullptr : active_run_loops_.top();
+ auto& active_run_loops = delegate_->active_run_loops_;
+ DCHECK_EQ(active_run_loops.top(), this);
+ active_run_loops.pop();
- if (previous_run_loop) {
+ // Exiting a nested RunLoop?
+ if (!active_run_loops.empty()) {
for (auto& observer : delegate_->nesting_observers_)
observer.OnExitNestedRunLoop();
- }
- // Execute deferred Quit, if any:
- if (previous_run_loop && previous_run_loop->quit_called_)
- delegate_->Quit();
+ // Execute deferred Quit, if any:
+ if (active_run_loops.top()->quit_called_)
+ delegate_->Quit();
+ }
}
} // namespace base
diff --git a/chromium/base/run_loop.h b/chromium/base/run_loop.h
index 597d5de58f4..539e5884eb5 100644
--- a/chromium/base/run_loop.h
+++ b/chromium/base/run_loop.h
@@ -101,7 +101,7 @@ class BASE_EXPORT RunLoop {
// Quit() quits an earlier call to Run() immediately. QuitWhenIdle() quits an
// earlier call to Run() when there aren't any tasks or messages in the queue.
//
- // These methods are thread-safe but note that Quit() is best-effort when
+ // These methods are thread-safe but note that Quit() is asynchronous when
// called from another thread (will quit soon but tasks that were already
// queued on this RunLoop will get to run first).
//
@@ -112,25 +112,37 @@ class BASE_EXPORT RunLoop {
// RunLoop has already finished running has no effect.
//
// WARNING: You must NEVER assume that a call to Quit() or QuitWhenIdle() will
- // terminate the targetted message loop. If a nested RunLoop continues
+ // terminate the targeted message loop. If a nested RunLoop continues
// running, the target may NEVER terminate. It is very easy to livelock (run
// forever) in such a case.
void Quit();
void QuitWhenIdle();
- // Convenience methods to get a closure that safely calls Quit() or
- // QuitWhenIdle() (has no effect if the RunLoop instance is gone).
+ // Returns a RepeatingClosure that safely calls Quit() or QuitWhenIdle() (has
+ // no effect if the RunLoop instance is gone).
//
- // The resulting Closure is thread-safe (note however that invoking the
- // QuitClosure() from another thread than this RunLoop's will result in an
- // asynchronous rather than immediate Quit()).
+ // These methods must be called from the thread on which the RunLoop was
+ // created.
+ //
+ // Returned closures may be safely:
+ // * Passed to other threads.
+ // * Run() from other threads, though this will quit the RunLoop
+ // asynchronously.
+ // * Run() after the RunLoop has stopped or been destroyed, in which case
+ // they are a no-op.
+ // * Run() before RunLoop::Run(), in which case RunLoop::Run() returns
+ // immediately.
//
// Example:
// RunLoop run_loop;
- // PostTask(run_loop.QuitClosure());
+ // DoFooAsyncAndNotify(run_loop.QuitClosure());
// run_loop.Run();
- Closure QuitClosure();
- Closure QuitWhenIdleClosure();
+ //
+ // Note that Quit() itself is thread-safe and may be invoked directly if you
+ // have access to the RunLoop reference from another thread (e.g. from a
+ // capturing lambda or test observer).
+ RepeatingClosure QuitClosure();
+ RepeatingClosure QuitWhenIdleClosure();
// Returns true if there is an active RunLoop on this thread.
// Safe to call before RegisterDelegateForCurrentThread().
@@ -237,7 +249,7 @@ class BASE_EXPORT RunLoop {
// instance and increase readability.
static void QuitCurrentDeprecated();
static void QuitCurrentWhenIdleDeprecated();
- static Closure QuitCurrentWhenIdleClosureDeprecated();
+ static RepeatingClosure QuitCurrentWhenIdleClosureDeprecated();
// Configures all RunLoop::Run() calls on the current thread to run the
// supplied |on_timeout| callback if they run for longer than |timeout|.
@@ -339,10 +351,10 @@ class BASE_EXPORT RunLoop {
bool BeforeRun();
void AfterRun();
- // A copy of RunLoop::Delegate for the thread driven by tis RunLoop for quick
- // access without using TLS (also allows access to state from another sequence
- // during Run(), ref. |sequence_checker_| below).
- Delegate* delegate_;
+ // A cached reference of RunLoop::Delegate for the thread driven by this
+ // RunLoop for quick access without using TLS (also allows access to state
+ // from another sequence during Run(), ref. |sequence_checker_| below).
+ Delegate* const delegate_;
const Type type_;
@@ -374,7 +386,7 @@ class BASE_EXPORT RunLoop {
const scoped_refptr<SingleThreadTaskRunner> origin_task_runner_;
// WeakPtrFactory for QuitClosure safety.
- WeakPtrFactory<RunLoop> weak_factory_;
+ WeakPtrFactory<RunLoop> weak_factory_{this};
DISALLOW_COPY_AND_ASSIGN(RunLoop);
};
diff --git a/chromium/base/run_loop_unittest.cc b/chromium/base/run_loop_unittest.cc
index d9832e4a9bf..edea4b15134 100644
--- a/chromium/base/run_loop_unittest.cc
+++ b/chromium/base/run_loop_unittest.cc
@@ -309,7 +309,8 @@ TEST_P(RunLoopTest, RunWithTimeout) {
EXPECT_FALSE(task3_run);
}
-TEST_P(RunLoopTest, NestedRunWithTimeout) {
+// TODO(https://crbug.com/970187): This test is inherently flaky.
+TEST_P(RunLoopTest, DISABLED_NestedRunWithTimeout) {
// SimpleSingleThreadTaskRunner doesn't support delayed tasks.
if (GetParam() == RunLoopTestType::kTestDelegate)
return;
@@ -402,7 +403,7 @@ TEST_P(RunLoopTest, NestedRunWithTimeoutWhereInnerLoopHasALongerTimeout) {
// Verify that the QuitWhenIdleClosure() can run after the RunLoop has been
// deleted. It should have no effect.
TEST_P(RunLoopTest, QuitWhenIdleClosureAfterRunLoopScope) {
- Closure quit_when_idle_closure;
+ RepeatingClosure quit_when_idle_closure;
{
RunLoop run_loop;
quit_when_idle_closure = run_loop.QuitWhenIdleClosure();
diff --git a/chromium/base/sampling_heap_profiler/lock_free_address_hash_set.cc b/chromium/base/sampling_heap_profiler/lock_free_address_hash_set.cc
index d78498dd418..acfcdac3e91 100644
--- a/chromium/base/sampling_heap_profiler/lock_free_address_hash_set.cc
+++ b/chromium/base/sampling_heap_profiler/lock_free_address_hash_set.cc
@@ -43,7 +43,6 @@ void LockFreeAddressHashSet::Insert(void* key) {
}
}
// There are no empty nodes to reuse left in the bucket.
- DCHECK_EQ(node, nullptr);
// Create a new node first...
Node* new_node = new Node(key, bucket.load(std::memory_order_relaxed));
// ... and then publish the new chain.
diff --git a/chromium/base/sampling_heap_profiler/lock_free_address_hash_set.h b/chromium/base/sampling_heap_profiler/lock_free_address_hash_set.h
index a5113288711..1007859d7e2 100644
--- a/chromium/base/sampling_heap_profiler/lock_free_address_hash_set.h
+++ b/chromium/base/sampling_heap_profiler/lock_free_address_hash_set.h
@@ -9,6 +9,7 @@
#include <cstdint>
#include <vector>
+#include "base/compiler_specific.h"
#include "base/logging.h"
namespace base {
@@ -46,12 +47,12 @@ class BASE_EXPORT LockFreeAddressHashSet {
// Checks if the |key| is in the set. Can be executed concurrently with
// |Insert|, |Remove|, and |Contains| operations.
- bool Contains(void* key) const;
+ ALWAYS_INLINE bool Contains(void* key) const;
// Removes the |key| from the set. The key must be present in the set before
// the invocation.
// Concurrent execution of |Insert|, |Remove|, or |Copy| is not supported.
- void Remove(void* key);
+ ALWAYS_INLINE void Remove(void* key);
// Inserts the |key| into the set. The key must not be present in the set
// before the invocation.
@@ -73,28 +74,29 @@ class BASE_EXPORT LockFreeAddressHashSet {
friend class LockFreeAddressHashSetTest;
struct Node {
- Node(void* key, Node* next);
+ ALWAYS_INLINE Node(void* key, Node* next);
std::atomic<void*> key;
Node* next;
};
- static uint32_t Hash(void* key);
- Node* FindNode(void* key) const;
+ ALWAYS_INLINE static uint32_t Hash(void* key);
+ ALWAYS_INLINE Node* FindNode(void* key) const;
std::vector<std::atomic<Node*>> buckets_;
int size_ = 0;
const size_t bucket_mask_;
};
-inline LockFreeAddressHashSet::Node::Node(void* key, Node* next) : next(next) {
+ALWAYS_INLINE LockFreeAddressHashSet::Node::Node(void* key, Node* next)
+ : next(next) {
this->key.store(key, std::memory_order_relaxed);
}
-inline bool LockFreeAddressHashSet::Contains(void* key) const {
+ALWAYS_INLINE bool LockFreeAddressHashSet::Contains(void* key) const {
return FindNode(key) != nullptr;
}
-inline void LockFreeAddressHashSet::Remove(void* key) {
+ALWAYS_INLINE void LockFreeAddressHashSet::Remove(void* key) {
Node* node = FindNode(key);
DCHECK_NE(node, nullptr);
// We can never delete the node, nor detach it from the current bucket
@@ -104,7 +106,7 @@ inline void LockFreeAddressHashSet::Remove(void* key) {
--size_;
}
-inline LockFreeAddressHashSet::Node* LockFreeAddressHashSet::FindNode(
+ALWAYS_INLINE LockFreeAddressHashSet::Node* LockFreeAddressHashSet::FindNode(
void* key) const {
DCHECK_NE(key, nullptr);
const std::atomic<Node*>& bucket = buckets_[Hash(key) & bucket_mask_];
@@ -122,7 +124,7 @@ inline LockFreeAddressHashSet::Node* LockFreeAddressHashSet::FindNode(
}
// static
-inline uint32_t LockFreeAddressHashSet::Hash(void* key) {
+ALWAYS_INLINE uint32_t LockFreeAddressHashSet::Hash(void* key) {
// A simple fast hash function for addresses.
constexpr uintptr_t random_bits = static_cast<uintptr_t>(0x4bfdb9df5a6f243b);
uint64_t k = reinterpret_cast<uintptr_t>(key);
diff --git a/chromium/base/sampling_heap_profiler/poisson_allocation_sampler.cc b/chromium/base/sampling_heap_profiler/poisson_allocation_sampler.cc
index 515cf6c6083..1f0aaeb1409 100644
--- a/chromium/base/sampling_heap_profiler/poisson_allocation_sampler.cc
+++ b/chromium/base/sampling_heap_profiler/poisson_allocation_sampler.cc
@@ -17,7 +17,6 @@
#include "base/no_destructor.h"
#include "base/partition_alloc_buildflags.h"
#include "base/rand_util.h"
-#include "base/sampling_heap_profiler/lock_free_address_hash_set.h"
#include "build/build_config.h"
#if defined(OS_MACOSX) || defined(OS_ANDROID)
@@ -472,14 +471,6 @@ void PoissonAllocationSampler::DoRecordAlloc(intptr_t accumulated_bytes,
observer->SampleAdded(address, size, total_allocated, type, context);
}
-// static
-void PoissonAllocationSampler::RecordFree(void* address) {
- if (UNLIKELY(address == nullptr))
- return;
- if (UNLIKELY(sampled_addresses_set().Contains(address)))
- instance_->DoRecordFree(address);
-}
-
void PoissonAllocationSampler::DoRecordFree(void* address) {
if (UNLIKELY(ScopedMuteThreadSamples::IsMuted()))
return;
diff --git a/chromium/base/sampling_heap_profiler/poisson_allocation_sampler.h b/chromium/base/sampling_heap_profiler/poisson_allocation_sampler.h
index f0751e4470c..3178ef7be83 100644
--- a/chromium/base/sampling_heap_profiler/poisson_allocation_sampler.h
+++ b/chromium/base/sampling_heap_profiler/poisson_allocation_sampler.h
@@ -8,7 +8,9 @@
#include <vector>
#include "base/base_export.h"
+#include "base/compiler_specific.h"
#include "base/macros.h"
+#include "base/sampling_heap_profiler/lock_free_address_hash_set.h"
#include "base/synchronization/lock.h"
namespace base {
@@ -16,8 +18,6 @@ namespace base {
template <typename T>
class NoDestructor;
-class LockFreeAddressHashSet;
-
// This singleton class implements Poisson sampling of the incoming allocations
// stream. It hooks onto base::allocator and base::PartitionAlloc.
// An extra custom allocator can be hooked via SetHooksInstallCallback method.
@@ -86,7 +86,7 @@ class BASE_EXPORT PoissonAllocationSampler {
size_t,
AllocatorType,
const char* context);
- static void RecordFree(void* address);
+ ALWAYS_INLINE static void RecordFree(void* address);
static PoissonAllocationSampler* Get();
@@ -120,6 +120,14 @@ class BASE_EXPORT PoissonAllocationSampler {
DISALLOW_COPY_AND_ASSIGN(PoissonAllocationSampler);
};
+// static
+ALWAYS_INLINE void PoissonAllocationSampler::RecordFree(void* address) {
+ if (UNLIKELY(address == nullptr))
+ return;
+ if (UNLIKELY(sampled_addresses_set().Contains(address)))
+ instance_->DoRecordFree(address);
+}
+
} // namespace base
#endif // BASE_SAMPLING_HEAP_PROFILER_POISSON_ALLOCATION_SAMPLER_H_
diff --git a/chromium/base/sampling_heap_profiler/sampling_heap_profiler.cc b/chromium/base/sampling_heap_profiler/sampling_heap_profiler.cc
index f8736efe6e0..676becaa368 100644
--- a/chromium/base/sampling_heap_profiler/sampling_heap_profiler.cc
+++ b/chromium/base/sampling_heap_profiler/sampling_heap_profiler.cc
@@ -17,7 +17,6 @@
#include "base/no_destructor.h"
#include "base/partition_alloc_buildflags.h"
#include "base/sampling_heap_profiler/lock_free_address_hash_set.h"
-#include "base/threading/thread_id_name_manager.h"
#include "base/threading/thread_local_storage.h"
#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
#include "build/build_config.h"
@@ -94,7 +93,10 @@ SamplingHeapProfiler::Sample::Sample(const Sample&) = default;
SamplingHeapProfiler::Sample::~Sample() = default;
SamplingHeapProfiler::SamplingHeapProfiler() = default;
-SamplingHeapProfiler::~SamplingHeapProfiler() = default;
+SamplingHeapProfiler::~SamplingHeapProfiler() {
+ if (record_thread_names_)
+ base::ThreadIdNameManager::GetInstance()->RemoveObserver(this);
+}
uint32_t SamplingHeapProfiler::Start() {
#if defined(OS_ANDROID) && BUILDFLAG(CAN_UNWIND_WITH_CFI_TABLE) && \
@@ -124,10 +126,13 @@ void SamplingHeapProfiler::SetSamplingInterval(size_t sampling_interval) {
}
void SamplingHeapProfiler::SetRecordThreadNames(bool value) {
+ if (record_thread_names_ == value)
+ return;
record_thread_names_ = value;
if (value) {
- base::ThreadIdNameManager::GetInstance()->InstallSetNameCallback(
- base::BindRepeating(IgnoreResult(&UpdateAndGetThreadName)));
+ base::ThreadIdNameManager::GetInstance()->AddObserver(this);
+ } else {
+ base::ThreadIdNameManager::GetInstance()->RemoveObserver(this);
}
}
@@ -284,4 +289,8 @@ SamplingHeapProfiler* SamplingHeapProfiler::Get() {
return instance.get();
}
+void SamplingHeapProfiler::OnThreadNameChanged(const char* name) {
+ UpdateAndGetThreadName(name);
+}
+
} // namespace base
diff --git a/chromium/base/sampling_heap_profiler/sampling_heap_profiler.h b/chromium/base/sampling_heap_profiler/sampling_heap_profiler.h
index 398bf0e98a3..a1dca2eca61 100644
--- a/chromium/base/sampling_heap_profiler/sampling_heap_profiler.h
+++ b/chromium/base/sampling_heap_profiler/sampling_heap_profiler.h
@@ -14,6 +14,7 @@
#include "base/macros.h"
#include "base/sampling_heap_profiler/poisson_allocation_sampler.h"
#include "base/synchronization/lock.h"
+#include "base/threading/thread_id_name_manager.h"
namespace base {
@@ -25,7 +26,8 @@ class NoDestructor;
// record samples.
// The recorded samples can then be retrieved using GetSamples method.
class BASE_EXPORT SamplingHeapProfiler
- : private PoissonAllocationSampler::SamplesObserver {
+ : private PoissonAllocationSampler::SamplesObserver,
+ public base::ThreadIdNameManager::Observer {
public:
class BASE_EXPORT Sample {
public:
@@ -95,6 +97,9 @@ class BASE_EXPORT SamplingHeapProfiler
static void Init();
static SamplingHeapProfiler* Get();
+ // ThreadIdNameManager::Observer implementation:
+ void OnThreadNameChanged(const char* name) override;
+
private:
SamplingHeapProfiler();
~SamplingHeapProfiler() override;
diff --git a/chromium/base/scoped_generic_unittest.cc b/chromium/base/scoped_generic_unittest.cc
index f75adf004bf..73291a8e433 100644
--- a/chromium/base/scoped_generic_unittest.cc
+++ b/chromium/base/scoped_generic_unittest.cc
@@ -215,18 +215,18 @@ TEST(ScopedGenericTest, OwnershipTracking) {
std::unordered_set<int> freed;
TrackedIntTraits traits(&freed, &owners);
-#define ASSERT_OWNED(value, owner) \
- ASSERT_TRUE(base::ContainsKey(owners, value)); \
- ASSERT_EQ(&owner, owners[value]); \
- ASSERT_FALSE(base::ContainsKey(freed, value))
-
-#define ASSERT_UNOWNED(value) \
- ASSERT_FALSE(base::ContainsKey(owners, value)); \
- ASSERT_FALSE(base::ContainsKey(freed, value))
-
-#define ASSERT_FREED(value) \
- ASSERT_FALSE(base::ContainsKey(owners, value)); \
- ASSERT_TRUE(base::ContainsKey(freed, value))
+#define ASSERT_OWNED(value, owner) \
+ ASSERT_TRUE(base::Contains(owners, value)); \
+ ASSERT_EQ(&owner, owners[value]); \
+ ASSERT_FALSE(base::Contains(freed, value))
+
+#define ASSERT_UNOWNED(value) \
+ ASSERT_FALSE(base::Contains(owners, value)); \
+ ASSERT_FALSE(base::Contains(freed, value))
+
+#define ASSERT_FREED(value) \
+ ASSERT_FALSE(base::Contains(owners, value)); \
+ ASSERT_TRUE(base::Contains(freed, value))
// Constructor.
{
diff --git a/chromium/base/scoped_observer.h b/chromium/base/scoped_observer.h
index 7f1d6fba96a..f5ddc737a68 100644
--- a/chromium/base/scoped_observer.h
+++ b/chromium/base/scoped_observer.h
@@ -47,11 +47,13 @@ class ScopedObserver {
}
bool IsObserving(Source* source) const {
- return base::ContainsValue(sources_, source);
+ return base::Contains(sources_, source);
}
bool IsObservingSources() const { return !sources_.empty(); }
+ size_t GetSourcesCount() const { return sources_.size(); }
+
private:
Observer* observer_;
diff --git a/chromium/base/sequence_token.cc b/chromium/base/sequence_token.cc
index 0bf2b44a44f..3636c7f2ee7 100644
--- a/chromium/base/sequence_token.cc
+++ b/chromium/base/sequence_token.cc
@@ -5,8 +5,8 @@
#include "base/sequence_token.h"
#include "base/atomic_sequence_num.h"
-#include "base/lazy_instance.h"
#include "base/logging.h"
+#include "base/no_destructor.h"
#include "base/threading/thread_local.h"
namespace base {
@@ -17,11 +17,15 @@ base::AtomicSequenceNumber g_sequence_token_generator;
base::AtomicSequenceNumber g_task_token_generator;
-LazyInstance<ThreadLocalPointer<const SequenceToken>>::Leaky
- tls_current_sequence_token = LAZY_INSTANCE_INITIALIZER;
+ThreadLocalPointer<const SequenceToken>& GetTlsCurrentSequenceToken() {
+ static base::NoDestructor<ThreadLocalPointer<const SequenceToken>> instance;
+ return *instance;
+}
-LazyInstance<ThreadLocalPointer<const TaskToken>>::Leaky
- tls_current_task_token = LAZY_INSTANCE_INITIALIZER;
+ThreadLocalPointer<const TaskToken>& GetTlsCurrentTaskToken() {
+ static base::NoDestructor<ThreadLocalPointer<const TaskToken>> instance;
+ return *instance;
+}
} // namespace
@@ -47,7 +51,7 @@ SequenceToken SequenceToken::Create() {
SequenceToken SequenceToken::GetForCurrentThread() {
const SequenceToken* current_sequence_token =
- tls_current_sequence_token.Get().Get();
+ GetTlsCurrentSequenceToken().Get();
return current_sequence_token ? *current_sequence_token : SequenceToken();
}
@@ -68,25 +72,25 @@ TaskToken TaskToken::Create() {
}
TaskToken TaskToken::GetForCurrentThread() {
- const TaskToken* current_task_token = tls_current_task_token.Get().Get();
+ const TaskToken* current_task_token = GetTlsCurrentTaskToken().Get();
return current_task_token ? *current_task_token : TaskToken();
}
ScopedSetSequenceTokenForCurrentThread::ScopedSetSequenceTokenForCurrentThread(
const SequenceToken& sequence_token)
: sequence_token_(sequence_token), task_token_(TaskToken::Create()) {
- DCHECK(!tls_current_sequence_token.Get().Get());
- DCHECK(!tls_current_task_token.Get().Get());
- tls_current_sequence_token.Get().Set(&sequence_token_);
- tls_current_task_token.Get().Set(&task_token_);
+ DCHECK(!GetTlsCurrentSequenceToken().Get());
+ DCHECK(!GetTlsCurrentTaskToken().Get());
+ GetTlsCurrentSequenceToken().Set(&sequence_token_);
+ GetTlsCurrentTaskToken().Set(&task_token_);
}
ScopedSetSequenceTokenForCurrentThread::
~ScopedSetSequenceTokenForCurrentThread() {
- DCHECK_EQ(tls_current_sequence_token.Get().Get(), &sequence_token_);
- DCHECK_EQ(tls_current_task_token.Get().Get(), &task_token_);
- tls_current_sequence_token.Get().Set(nullptr);
- tls_current_task_token.Get().Set(nullptr);
+ DCHECK_EQ(GetTlsCurrentSequenceToken().Get(), &sequence_token_);
+ DCHECK_EQ(GetTlsCurrentTaskToken().Get(), &task_token_);
+ GetTlsCurrentSequenceToken().Set(nullptr);
+ GetTlsCurrentTaskToken().Set(nullptr);
}
} // namespace base
diff --git a/chromium/base/stl_util.h b/chromium/base/stl_util.h
index e05958ad283..e22b029157d 100644
--- a/chromium/base/stl_util.h
+++ b/chromium/base/stl_util.h
@@ -17,12 +17,15 @@
#include <map>
#include <set>
#include <string>
+#include <type_traits>
#include <unordered_map>
#include <unordered_set>
+#include <utility>
#include <vector>
#include "base/logging.h"
#include "base/optional.h"
+#include "base/template_util.h"
namespace base {
@@ -39,6 +42,43 @@ void IterateAndEraseIf(Container& container, Predicate pred) {
}
}
+template <typename Iter>
+constexpr bool IsRandomAccessIter =
+ std::is_same<typename std::iterator_traits<Iter>::iterator_category,
+ std::random_access_iterator_tag>::value;
+
+// Utility type traits used for specializing base::Contains() below.
+template <typename Container, typename Element, typename = void>
+struct HasFindWithNpos : std::false_type {};
+
+template <typename Container, typename Element>
+struct HasFindWithNpos<
+ Container,
+ Element,
+ void_t<decltype(std::declval<const Container&>().find(
+ std::declval<const Element&>()) != Container::npos)>>
+ : std::true_type {};
+
+template <typename Container, typename Element, typename = void>
+struct HasFindWithEnd : std::false_type {};
+
+template <typename Container, typename Element>
+struct HasFindWithEnd<Container,
+ Element,
+ void_t<decltype(std::declval<const Container&>().find(
+ std::declval<const Element&>()) !=
+ std::declval<const Container&>().end())>>
+ : std::true_type {};
+
+template <typename Container, typename Element, typename = void>
+struct HasContains : std::false_type {};
+
+template <typename Container, typename Element>
+struct HasContains<Container,
+ Element,
+ void_t<decltype(std::declval<const Container&>().contains(
+ std::declval<const Element&>()))>> : std::true_type {};
+
} // namespace internal
// C++14 implementation of C++17's std::size():
@@ -134,39 +174,250 @@ STLCount(const Container& container, const T& val) {
return std::count(container.begin(), container.end(), val);
}
-// Test to see if a set or map contains a particular key.
-// Returns true if the key is in the collection.
-template <typename Collection, typename Key>
-bool ContainsKey(const Collection& collection, const Key& key) {
- return collection.find(key) != collection.end();
+// General purpose implementation to check if |container| contains |value|.
+template <typename Container,
+ typename Value,
+ std::enable_if_t<
+ !internal::HasFindWithNpos<Container, Value>::value &&
+ !internal::HasFindWithEnd<Container, Value>::value &&
+ !internal::HasContains<Container, Value>::value>* = nullptr>
+bool Contains(const Container& container, const Value& value) {
+ using std::begin;
+ using std::end;
+ return std::find(begin(container), end(container), value) != end(container);
+}
+
+// Specialized Contains() implementation for when |container| has a find()
+// member function and a static npos member, but no contains() member function.
+template <typename Container,
+ typename Value,
+ std::enable_if_t<internal::HasFindWithNpos<Container, Value>::value &&
+ !internal::HasContains<Container, Value>::value>* =
+ nullptr>
+bool Contains(const Container& container, const Value& value) {
+ return container.find(value) != Container::npos;
+}
+
+// Specialized Contains() implementation for when |container| has a find()
+// and end() member function, but no contains() member function.
+template <typename Container,
+ typename Value,
+ std::enable_if_t<internal::HasFindWithEnd<Container, Value>::value &&
+ !internal::HasContains<Container, Value>::value>* =
+ nullptr>
+bool Contains(const Container& container, const Value& value) {
+ return container.find(value) != container.end();
+}
+
+// Specialized Contains() implementation for when |container| has a contains()
+// member function.
+template <
+ typename Container,
+ typename Value,
+ std::enable_if_t<internal::HasContains<Container, Value>::value>* = nullptr>
+bool Contains(const Container& container, const Value& value) {
+ return container.contains(value);
+}
+
+// O(1) implementation of const casting an iterator for any sequence,
+// associative or unordered associative container in the STL.
+//
+// Reference: https://stackoverflow.com/a/10669041
+template <typename Container,
+ typename ConstIter,
+ std::enable_if_t<!internal::IsRandomAccessIter<ConstIter>>* = nullptr>
+constexpr auto ConstCastIterator(Container& c, ConstIter it) {
+ return c.erase(it, it);
+}
+
+// Explicit overload for std::forward_list where erase() is named erase_after().
+template <typename T, typename Allocator>
+constexpr auto ConstCastIterator(
+ std::forward_list<T, Allocator>& c,
+ typename std::forward_list<T, Allocator>::const_iterator it) {
+// The erase_after(it, it) trick used below does not work for libstdc++ [1],
+// thus we need a different way.
+// TODO(crbug.com/972541): Remove this workaround once libstdc++ is fixed on all
+// platforms.
+//
+// [1] https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90857
+#if defined(__GLIBCXX__)
+ return c.insert_after(it, {});
+#else
+ return c.erase_after(it, it);
+#endif
+}
+
+// Specialized O(1) const casting for random access iterators. This is
+// necessary, because erase() is either not available (e.g. array-like
+// containers), or has O(n) complexity (e.g. std::deque or std::vector).
+template <typename Container,
+ typename ConstIter,
+ std::enable_if_t<internal::IsRandomAccessIter<ConstIter>>* = nullptr>
+constexpr auto ConstCastIterator(Container& c, ConstIter it) {
+ using std::begin;
+ using std::cbegin;
+ return begin(c) + (it - cbegin(c));
}
namespace internal {
-template <typename Collection>
-class HasKeyType {
- template <typename C>
- static std::true_type test(typename C::key_type*);
- template <typename C>
- static std::false_type test(...);
+template <typename Map, typename Key, typename Value>
+std::pair<typename Map::iterator, bool> InsertOrAssignImpl(Map& map,
+ Key&& key,
+ Value&& value) {
+ auto lower = map.lower_bound(key);
+ if (lower != map.end() && !map.key_comp()(key, lower->first)) {
+ // key already exists, perform assignment.
+ lower->second = std::forward<Value>(value);
+ return {lower, false};
+ }
- public:
- static constexpr bool value = decltype(test<Collection>(nullptr))::value;
-};
+ // key did not yet exist, insert it.
+ return {map.emplace_hint(lower, std::forward<Key>(key),
+ std::forward<Value>(value)),
+ true};
+}
+
+template <typename Map, typename Key, typename Value>
+typename Map::iterator InsertOrAssignImpl(Map& map,
+ typename Map::const_iterator hint,
+ Key&& key,
+ Value&& value) {
+ auto&& key_comp = map.key_comp();
+ if ((hint == map.begin() || key_comp(std::prev(hint)->first, key))) {
+ if (hint == map.end() || key_comp(key, hint->first)) {
+ // *(hint - 1) < key < *hint => key did not exist and hint is correct.
+ return map.emplace_hint(hint, std::forward<Key>(key),
+ std::forward<Value>(value));
+ }
+
+ if (!key_comp(hint->first, key)) {
+ // key == *hint => key already exists and hint is correct.
+ auto mutable_hint = ConstCastIterator(map, hint);
+ mutable_hint->second = std::forward<Value>(value);
+ return mutable_hint;
+ }
+ }
+
+ // hint was not helpful, dispatch to hintless version.
+ return InsertOrAssignImpl(map, std::forward<Key>(key),
+ std::forward<Value>(value))
+ .first;
+}
+
+template <typename Map, typename Key, typename... Args>
+std::pair<typename Map::iterator, bool> TryEmplaceImpl(Map& map,
+ Key&& key,
+ Args&&... args) {
+ auto lower = map.lower_bound(key);
+ if (lower != map.end() && !map.key_comp()(key, lower->first)) {
+ // key already exists, do nothing.
+ return {lower, false};
+ }
+
+ // key did not yet exist, insert it.
+ return {map.emplace_hint(lower, std::piecewise_construct,
+ std::forward_as_tuple(std::forward<Key>(key)),
+ std::forward_as_tuple(std::forward<Args>(args)...)),
+ true};
+}
+
+template <typename Map, typename Key, typename... Args>
+typename Map::iterator TryEmplaceImpl(Map& map,
+ typename Map::const_iterator hint,
+ Key&& key,
+ Args&&... args) {
+ auto&& key_comp = map.key_comp();
+ if ((hint == map.begin() || key_comp(std::prev(hint)->first, key))) {
+ if (hint == map.end() || key_comp(key, hint->first)) {
+ // *(hint - 1) < key < *hint => key did not exist and hint is correct.
+ return map.emplace_hint(
+ hint, std::piecewise_construct,
+ std::forward_as_tuple(std::forward<Key>(key)),
+ std::forward_as_tuple(std::forward<Args>(args)...));
+ }
+
+ if (!key_comp(hint->first, key)) {
+ // key == *hint => no-op, return correct hint.
+ return ConstCastIterator(map, hint);
+ }
+ }
+
+ // hint was not helpful, dispatch to hintless version.
+ return TryEmplaceImpl(map, std::forward<Key>(key),
+ std::forward<Args>(args)...)
+ .first;
+}
} // namespace internal
-// Test to see if a collection like a vector contains a particular value.
-// Returns true if the value is in the collection.
-// Don't use this on collections such as sets or maps. This is enforced by
-// disabling this method if the collection defines a key_type.
-template <typename Collection,
- typename Value,
- typename std::enable_if<!internal::HasKeyType<Collection>::value,
- int>::type = 0>
-bool ContainsValue(const Collection& collection, const Value& value) {
- return std::find(std::begin(collection), std::end(collection), value) !=
- std::end(collection);
+// Implementation of C++17's std::map::insert_or_assign as a free function.
+template <typename Map, typename Value>
+std::pair<typename Map::iterator, bool>
+InsertOrAssign(Map& map, const typename Map::key_type& key, Value&& value) {
+ return internal::InsertOrAssignImpl(map, key, std::forward<Value>(value));
+}
+
+template <typename Map, typename Value>
+std::pair<typename Map::iterator, bool>
+InsertOrAssign(Map& map, typename Map::key_type&& key, Value&& value) {
+ return internal::InsertOrAssignImpl(map, std::move(key),
+ std::forward<Value>(value));
+}
+
+// Implementation of C++17's std::map::insert_or_assign with hint as a free
+// function.
+template <typename Map, typename Value>
+typename Map::iterator InsertOrAssign(Map& map,
+ typename Map::const_iterator hint,
+ const typename Map::key_type& key,
+ Value&& value) {
+ return internal::InsertOrAssignImpl(map, hint, key,
+ std::forward<Value>(value));
+}
+
+template <typename Map, typename Value>
+typename Map::iterator InsertOrAssign(Map& map,
+ typename Map::const_iterator hint,
+ typename Map::key_type&& key,
+ Value&& value) {
+ return internal::InsertOrAssignImpl(map, hint, std::move(key),
+ std::forward<Value>(value));
+}
+
+// Implementation of C++17's std::map::try_emplace as a free function.
+template <typename Map, typename... Args>
+std::pair<typename Map::iterator, bool>
+TryEmplace(Map& map, const typename Map::key_type& key, Args&&... args) {
+ return internal::TryEmplaceImpl(map, key, std::forward<Args>(args)...);
+}
+
+template <typename Map, typename... Args>
+std::pair<typename Map::iterator, bool> TryEmplace(Map& map,
+ typename Map::key_type&& key,
+ Args&&... args) {
+ return internal::TryEmplaceImpl(map, std::move(key),
+ std::forward<Args>(args)...);
+}
+
+// Implementation of C++17's std::map::try_emplace with hint as a free
+// function.
+template <typename Map, typename... Args>
+typename Map::iterator TryEmplace(Map& map,
+ typename Map::const_iterator hint,
+ const typename Map::key_type& key,
+ Args&&... args) {
+ return internal::TryEmplaceImpl(map, hint, key, std::forward<Args>(args)...);
+}
+
+template <typename Map, typename... Args>
+typename Map::iterator TryEmplace(Map& map,
+ typename Map::const_iterator hint,
+ typename Map::key_type&& key,
+ Args&&... args) {
+ return internal::TryEmplaceImpl(map, hint, std::move(key),
+ std::forward<Args>(args)...);
}
// Returns true if the container is sorted.
diff --git a/chromium/base/stl_util_unittest.cc b/chromium/base/stl_util_unittest.cc
index 100a42e08a7..ab3fdb99b54 100644
--- a/chromium/base/stl_util_unittest.cc
+++ b/chromium/base/stl_util_unittest.cc
@@ -21,6 +21,7 @@
#include <unordered_set>
#include <vector>
+#include "base/containers/flat_set.h"
#include "base/containers/queue.h"
#include "base/strings/string16.h"
#include "base/strings/utf_string_conversions.h"
@@ -29,6 +30,9 @@
namespace {
+using ::testing::IsNull;
+using ::testing::Pair;
+
// Used as test case to ensure the various base::STLXxx functions don't require
// more than operators "<" and "==" on values stored in containers.
class ComparableValue {
@@ -86,6 +90,26 @@ void RunEraseIfTest() {
}
}
+template <typename Container>
+void RunConstCastIteratorTest() {
+ using std::begin;
+ using std::cbegin;
+
+ Container c = {1, 2, 3, 4, 5};
+ auto c_it = std::next(cbegin(c), 3);
+ auto it = base::ConstCastIterator(c, c_it);
+ static_assert(std::is_same<decltype(cbegin(std::declval<Container&>())),
+ decltype(c_it)>::value,
+ "c_it is not a constant iterator.");
+ static_assert(std::is_same<decltype(begin(std::declval<Container&>())),
+ decltype(it)>::value,
+ "it is not a iterator.");
+ EXPECT_EQ(c_it, it);
+ // Const casting the iterator should not modify the underlying container.
+ Container other = {1, 2, 3, 4, 5};
+ EXPECT_THAT(c, testing::ContainerEq(other));
+}
+
struct CustomIntHash {
size_t operator()(int elem) const { return std::hash<int>()(elem) + 1; }
};
@@ -280,6 +304,24 @@ TEST(STLUtilTest, GetUnderlyingContainer) {
}
}
+TEST(STLUtilTest, ConstCastIterator) {
+ // Sequence Containers
+ RunConstCastIteratorTest<std::forward_list<int>>();
+ RunConstCastIteratorTest<std::list<int>>();
+ RunConstCastIteratorTest<std::deque<int>>();
+ RunConstCastIteratorTest<std::vector<int>>();
+ RunConstCastIteratorTest<std::array<int, 5>>();
+ RunConstCastIteratorTest<int[5]>();
+
+ // Associative Containers
+ RunConstCastIteratorTest<std::set<int>>();
+ RunConstCastIteratorTest<std::multiset<int>>();
+
+ // Unordered Associative Containers
+ RunConstCastIteratorTest<std::unordered_set<int>>();
+ RunConstCastIteratorTest<std::unordered_multiset<int>>();
+}
+
TEST(STLUtilTest, STLIsSorted) {
{
std::set<int> set;
@@ -596,14 +638,137 @@ TEST(Erase, IsNotIn) {
EXPECT_EQ(expected, lhs);
}
-TEST(ContainsValue, OrdinaryArrays) {
+TEST(STLUtilTest, GenericContains) {
const char allowed_chars[] = {'a', 'b', 'c', 'd'};
- EXPECT_TRUE(ContainsValue(allowed_chars, 'a'));
- EXPECT_FALSE(ContainsValue(allowed_chars, 'z'));
- EXPECT_FALSE(ContainsValue(allowed_chars, 0));
+
+ EXPECT_TRUE(Contains(allowed_chars, 'a'));
+ EXPECT_FALSE(Contains(allowed_chars, 'z'));
+ EXPECT_FALSE(Contains(allowed_chars, 0));
const char allowed_chars_including_nul[] = "abcd";
- EXPECT_TRUE(ContainsValue(allowed_chars_including_nul, 0));
+ EXPECT_TRUE(Contains(allowed_chars_including_nul, 0));
+}
+
+TEST(STLUtilTest, ContainsWithFindAndNpos) {
+ std::string str = "abcd";
+
+ EXPECT_TRUE(Contains(str, 'a'));
+ EXPECT_FALSE(Contains(str, 'z'));
+ EXPECT_FALSE(Contains(str, 0));
+}
+
+TEST(STLUtilTest, ContainsWithFindAndEnd) {
+ std::set<int> set = {1, 2, 3, 4};
+
+ EXPECT_TRUE(Contains(set, 1));
+ EXPECT_FALSE(Contains(set, 5));
+ EXPECT_FALSE(Contains(set, 0));
+}
+
+TEST(STLUtilTest, ContainsWithContains) {
+ flat_set<int> set = {1, 2, 3, 4};
+
+ EXPECT_TRUE(Contains(set, 1));
+ EXPECT_FALSE(Contains(set, 5));
+ EXPECT_FALSE(Contains(set, 0));
+}
+
+TEST(STLUtilTest, InsertOrAssign) {
+ std::map<std::string, int> my_map;
+ auto result = InsertOrAssign(my_map, "Hello", 42);
+ EXPECT_THAT(*result.first, Pair("Hello", 42));
+ EXPECT_TRUE(result.second);
+
+ result = InsertOrAssign(my_map, "Hello", 43);
+ EXPECT_THAT(*result.first, Pair("Hello", 43));
+ EXPECT_FALSE(result.second);
+}
+
+TEST(STLUtilTest, InsertOrAssignHint) {
+ std::map<std::string, int> my_map;
+ auto result = InsertOrAssign(my_map, my_map.end(), "Hello", 42);
+ EXPECT_THAT(*result, Pair("Hello", 42));
+
+ result = InsertOrAssign(my_map, my_map.begin(), "Hello", 43);
+ EXPECT_THAT(*result, Pair("Hello", 43));
+}
+
+TEST(STLUtilTest, InsertOrAssignWrongHints) {
+ std::map<int, int> my_map;
+ // Since we insert keys in sorted order, my_map.begin() will be a wrong hint
+ // after the first iteration. Check that insertion happens anyway.
+ for (int i = 0; i < 10; ++i) {
+ SCOPED_TRACE(i);
+ auto result = InsertOrAssign(my_map, my_map.begin(), i, i);
+ EXPECT_THAT(*result, Pair(i, i));
+ }
+
+ // Overwrite the keys we just inserted. Since we no longer insert into the
+ // map, my_map.end() will be a wrong hint for all iterations but the last.
+ for (int i = 0; i < 10; ++i) {
+ SCOPED_TRACE(10 + i);
+ auto result = InsertOrAssign(my_map, my_map.end(), i, 10 + i);
+ EXPECT_THAT(*result, Pair(i, 10 + i));
+ }
+}
+
+TEST(STLUtilTest, TryEmplace) {
+ std::map<std::string, std::unique_ptr<int>> my_map;
+ auto result = TryEmplace(my_map, "Hello", nullptr);
+ EXPECT_THAT(*result.first, Pair("Hello", IsNull()));
+ EXPECT_TRUE(result.second);
+
+ auto new_value = std::make_unique<int>(42);
+ result = TryEmplace(my_map, "Hello", std::move(new_value));
+ EXPECT_THAT(*result.first, Pair("Hello", IsNull()));
+ EXPECT_FALSE(result.second);
+ // |new_value| should not be touched following a failed insertion.
+ ASSERT_NE(nullptr, new_value);
+ EXPECT_EQ(42, *new_value);
+
+ result = TryEmplace(my_map, "World", std::move(new_value));
+ EXPECT_EQ("World", result.first->first);
+ EXPECT_EQ(42, *result.first->second);
+ EXPECT_TRUE(result.second);
+ EXPECT_EQ(nullptr, new_value);
+}
+
+TEST(STLUtilTest, TryEmplaceHint) {
+ std::map<std::string, std::unique_ptr<int>> my_map;
+ auto result = TryEmplace(my_map, my_map.begin(), "Hello", nullptr);
+ EXPECT_THAT(*result, Pair("Hello", IsNull()));
+
+ auto new_value = std::make_unique<int>(42);
+ result = TryEmplace(my_map, result, "Hello", std::move(new_value));
+ EXPECT_THAT(*result, Pair("Hello", IsNull()));
+ // |new_value| should not be touched following a failed insertion.
+ ASSERT_NE(nullptr, new_value);
+ EXPECT_EQ(42, *new_value);
+
+ result = TryEmplace(my_map, result, "World", std::move(new_value));
+ EXPECT_EQ("World", result->first);
+ EXPECT_EQ(42, *result->second);
+ EXPECT_EQ(nullptr, new_value);
+}
+
+TEST(STLUtilTest, TryEmplaceWrongHints) {
+ std::map<int, int> my_map;
+ // Since we emplace keys in sorted order, my_map.begin() will be a wrong hint
+ // after the first iteration. Check that emplacement happens anyway.
+ for (int i = 0; i < 10; ++i) {
+ SCOPED_TRACE(i);
+ auto result = TryEmplace(my_map, my_map.begin(), i, i);
+ EXPECT_THAT(*result, Pair(i, i));
+ }
+
+ // Fail to overwrite the keys we just inserted. Since we no longer emplace
+ // into the map, my_map.end() will be a wrong hint for all tried emplacements
+ // but the last.
+ for (int i = 0; i < 10; ++i) {
+ SCOPED_TRACE(10 + i);
+ auto result = TryEmplace(my_map, my_map.end(), i, 10 + i);
+ EXPECT_THAT(*result, Pair(i, i));
+ }
}
TEST(STLUtilTest, OptionalOrNullptr) {
diff --git a/chromium/base/strings/string_number_conversions_fuzzer.cc b/chromium/base/strings/string_number_conversions_fuzzer.cc
index 2fed7de9c55..0088cae324d 100644
--- a/chromium/base/strings/string_number_conversions_fuzzer.cc
+++ b/chromium/base/strings/string_number_conversions_fuzzer.cc
@@ -10,8 +10,59 @@
#include "base/strings/string_number_conversions.h"
+template <class NumberType, class StringPieceType, class StringType>
+void CheckRoundtripsT(const uint8_t* data,
+ const size_t size,
+ StringType (*num_to_string)(NumberType),
+ bool (*string_to_num)(StringPieceType, NumberType*)) {
+ // Ensure we can read a NumberType from |data|
+ if (size < sizeof(NumberType))
+ return;
+ const NumberType v1 = *reinterpret_cast<const NumberType*>(data);
+
+ // Because we started with an arbitrary NumberType value, not an arbitrary
+ // string, we expect that the function |string_to_num| (e.g. StringToInt) will
+ // return true, indicating a perfect conversion.
+ NumberType v2;
+ CHECK(string_to_num(num_to_string(v1), &v2));
+
+ // Given that this was a perfect conversion, we expect the original NumberType
+ // value to equal the newly parsed one.
+ CHECK_EQ(v1, v2);
+}
+
+template <class NumberType>
+void CheckRoundtrips(const uint8_t* data,
+ const size_t size,
+ bool (*string_to_num)(base::StringPiece, NumberType*)) {
+ return CheckRoundtripsT<NumberType, base::StringPiece, std::string>(
+ data, size, &base::NumberToString, string_to_num);
+}
+
+template <class NumberType>
+void CheckRoundtrips16(const uint8_t* data,
+ const size_t size,
+ bool (*string_to_num)(base::StringPiece16,
+ NumberType*)) {
+ return CheckRoundtripsT<NumberType, base::StringPiece16, base::string16>(
+ data, size, &base::NumberToString16, string_to_num);
+}
+
// Entry point for LibFuzzer.
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ // For each instantiation of NumberToString f and its corresponding StringTo*
+ // function g, check that f(g(x)) = x holds for fuzzer-determined values of x.
+ CheckRoundtrips<int>(data, size, &base::StringToInt);
+ CheckRoundtrips16<int>(data, size, &base::StringToInt);
+ CheckRoundtrips<unsigned int>(data, size, &base::StringToUint);
+ CheckRoundtrips16<unsigned int>(data, size, &base::StringToUint);
+ CheckRoundtrips<int64_t>(data, size, &base::StringToInt64);
+ CheckRoundtrips16<int64_t>(data, size, &base::StringToInt64);
+ CheckRoundtrips<uint64_t>(data, size, &base::StringToUint64);
+ CheckRoundtrips16<uint64_t>(data, size, &base::StringToUint64);
+ CheckRoundtrips<size_t>(data, size, &base::StringToSizeT);
+ CheckRoundtrips16<size_t>(data, size, &base::StringToSizeT);
+
base::StringPiece string_piece_input(reinterpret_cast<const char*>(data),
size);
std::string string_input(reinterpret_cast<const char*>(data), size);
diff --git a/chromium/base/synchronization/cancellation_flag.h b/chromium/base/synchronization/cancellation_flag.h
deleted file mode 100644
index 39094e2dc08..00000000000
--- a/chromium/base/synchronization/cancellation_flag.h
+++ /dev/null
@@ -1,20 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_SYNCHRONIZATION_CANCELLATION_FLAG_H_
-#define BASE_SYNCHRONIZATION_CANCELLATION_FLAG_H_
-
-#include "base/synchronization/atomic_flag.h"
-
-namespace base {
-
-// Use inheritance instead of "using" to allow forward declaration of "class
-// CancellationFlag".
-// TODO(fdoray): Replace CancellationFlag with AtomicFlag throughout the
-// codebase and delete this file. crbug.com/630251
-class CancellationFlag : public AtomicFlag {};
-
-} // namespace base
-
-#endif // BASE_SYNCHRONIZATION_CANCELLATION_FLAG_H_
diff --git a/chromium/base/syslog_logging.cc b/chromium/base/syslog_logging.cc
index 6d139b70986..cb9e26f6dfe 100644
--- a/chromium/base/syslog_logging.cc
+++ b/chromium/base/syslog_logging.cc
@@ -6,9 +6,11 @@
#if defined(OS_WIN)
#include <windows.h>
+#include <sddl.h>
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/debug/stack_trace.h"
+#include "base/win/win_util.h"
#elif defined(OS_LINUX)
// <syslog.h> defines LOG_INFO, LOG_WARNING macros that could conflict with
// base::LOG_INFO, base::LOG_WARNING.
@@ -29,6 +31,7 @@ namespace {
std::string* g_event_source_name = nullptr;
uint16_t g_category = 0;
uint32_t g_event_id = 0;
+base::string16* g_user_sid = nullptr;
} // namespace
@@ -39,11 +42,16 @@ void SetEventSource(const std::string& name,
g_event_source_name = new std::string(name);
g_category = category;
g_event_id = event_id;
+ DCHECK_EQ(nullptr, g_user_sid);
+ g_user_sid = new base::string16();
+ base::win::GetUserSidString(g_user_sid);
}
void ResetEventSourceForTesting() {
delete g_event_source_name;
g_event_source_name = nullptr;
+ delete g_user_sid;
+ g_user_sid = nullptr;
}
#endif // defined(OS_WIN)
@@ -90,10 +98,18 @@ EventLogMessage::~EventLogMessage() {
break;
}
LPCSTR strings[1] = {message.data()};
- if (!ReportEventA(event_log_handle, log_type, g_category, g_event_id, nullptr,
- 1, 0, strings, nullptr)) {
+ PSID user_sid = nullptr;
+ if (!::ConvertStringSidToSid(g_user_sid->c_str(), &user_sid)) {
+ stream() << " !!ERROR GETTING USER SID!!";
+ }
+
+ if (!ReportEventA(event_log_handle, log_type, g_category, g_event_id,
+ user_sid, 1, 0, strings, nullptr)) {
stream() << " !!NOT ADDED TO EVENTLOG!!";
}
+
+ if (user_sid != nullptr)
+ ::LocalFree(user_sid);
#elif defined(OS_LINUX)
const char kEventSource[] = "chrome";
openlog(kEventSource, LOG_NOWAIT | LOG_PID, LOG_USER);
diff --git a/chromium/base/system/sys_info.cc b/chromium/base/system/sys_info.cc
index 5328e0002c8..64e6a8a0a7c 100644
--- a/chromium/base/system/sys_info.cc
+++ b/chromium/base/system/sys_info.cc
@@ -11,10 +11,10 @@
#include "base/callback.h"
#include "base/command_line.h"
#include "base/feature_list.h"
-#include "base/lazy_instance.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/metrics/field_trial_params.h"
+#include "base/no_destructor.h"
#include "base/system/sys_info_internal.h"
#include "base/task/post_task.h"
#include "base/task/task_traits.h"
@@ -89,12 +89,12 @@ bool DetectLowEndDevice() {
return (ram_size_mb > 0 && ram_size_mb <= GetLowMemoryDeviceThresholdMB());
}
-static LazyInstance<internal::LazySysInfoValue<bool, DetectLowEndDevice>>::Leaky
- g_lazy_low_end_device = LAZY_INSTANCE_INITIALIZER;
-
// static
bool SysInfo::IsLowEndDeviceImpl() {
- return g_lazy_low_end_device.Get().value();
+ static base::NoDestructor<
+ internal::LazySysInfoValue<bool, DetectLowEndDevice>>
+ instance;
+ return instance->value();
}
#endif
@@ -107,15 +107,15 @@ std::string SysInfo::HardwareModelName() {
void SysInfo::GetHardwareInfo(base::OnceCallback<void(HardwareInfo)> callback) {
#if defined(OS_WIN)
base::PostTaskAndReplyWithResult(
- base::CreateCOMSTATaskRunnerWithTraits({}).get(), FROM_HERE,
+ base::CreateCOMSTATaskRunner({}).get(), FROM_HERE,
base::BindOnce(&GetHardwareInfoSync), std::move(callback));
#elif defined(OS_ANDROID) || defined(OS_MACOSX)
base::PostTaskAndReplyWithResult(
FROM_HERE, base::BindOnce(&GetHardwareInfoSync), std::move(callback));
#elif defined(OS_LINUX)
- base::PostTaskWithTraitsAndReplyWithResult(
- FROM_HERE, {base::MayBlock()}, base::BindOnce(&GetHardwareInfoSync),
- std::move(callback));
+ base::PostTaskAndReplyWithResult(FROM_HERE, {ThreadPool(), base::MayBlock()},
+ base::BindOnce(&GetHardwareInfoSync),
+ std::move(callback));
#else
NOTIMPLEMENTED();
base::PostTask(FROM_HERE,
diff --git a/chromium/base/system/sys_info_fuchsia.cc b/chromium/base/system/sys_info_fuchsia.cc
index 51a2dd6d767..7a6a8a72b87 100644
--- a/chromium/base/system/sys_info_fuchsia.cc
+++ b/chromium/base/system/sys_info_fuchsia.cc
@@ -6,7 +6,10 @@
#include <zircon/syscalls.h>
+#include "base/fuchsia/fuchsia_logging.h"
#include "base/logging.h"
+#include "base/threading/scoped_blocking_call.h"
+#include "build/build_config.h"
namespace base {
@@ -17,8 +20,8 @@ int64_t SysInfo::AmountOfPhysicalMemoryImpl() {
// static
int64_t SysInfo::AmountOfAvailablePhysicalMemoryImpl() {
- // TODO(fuchsia): https://crbug.com/706592 This is not exposed.
- NOTREACHED();
+ // TODO(https://crbug.com/986608): Implement this.
+ NOTIMPLEMENTED_LOG_ONCE();
return 0;
}
@@ -32,4 +35,62 @@ int64_t SysInfo::AmountOfVirtualMemory() {
return 0;
}
+// static
+std::string SysInfo::OperatingSystemName() {
+ return "Fuchsia";
+}
+
+// static
+int64_t SysInfo::AmountOfFreeDiskSpace(const FilePath& path) {
+ base::ScopedBlockingCall scoped_blocking_call(FROM_HERE,
+ base::BlockingType::MAY_BLOCK);
+ NOTIMPLEMENTED_LOG_ONCE();
+ return -1;
+}
+
+// static
+int64_t SysInfo::AmountOfTotalDiskSpace(const FilePath& path) {
+ base::ScopedBlockingCall scoped_blocking_call(FROM_HERE,
+ base::BlockingType::MAY_BLOCK);
+ NOTIMPLEMENTED_LOG_ONCE();
+ return -1;
+}
+
+// static
+std::string SysInfo::OperatingSystemVersion() {
+ char result[64] = {};
+ zx_status_t status = zx_system_get_version(result, sizeof(result));
+ if (status != ZX_OK) {
+ ZX_DLOG(WARNING, status) << "zx_system_get_version";
+ return std::string();
+ }
+ return result;
+}
+
+// static
+void SysInfo::OperatingSystemVersionNumbers(int32_t* major_version,
+ int32_t* minor_version,
+ int32_t* bugfix_version) {
+ // Fuchsia doesn't have OS version numbers.
+ *major_version = 0;
+ *minor_version = 0;
+ *bugfix_version = 0;
+}
+
+// static
+std::string SysInfo::OperatingSystemArchitecture() {
+#if defined(ARCH_CPU_X86_64)
+ return "x86_64";
+#elif defined(ARCH_CPU_ARM64)
+ return "aarch64";
+#else
+#error Unsupported architecture.
+#endif
+}
+
+// static
+size_t SysInfo::VMAllocationGranularity() {
+ return getpagesize();
+}
+
} // namespace base
diff --git a/chromium/base/system/sys_info_posix.cc b/chromium/base/system/sys_info_posix.cc
index 97003954171..58c0c4f63c3 100644
--- a/chromium/base/system/sys_info_posix.cc
+++ b/chromium/base/system/sys_info_posix.cc
@@ -9,6 +9,7 @@
#include <stdint.h>
#include <string.h>
#include <sys/param.h>
+#include <sys/resource.h>
#include <sys/utsname.h>
#include <unistd.h>
@@ -20,10 +21,6 @@
#include "base/threading/scoped_blocking_call.h"
#include "build/build_config.h"
-#if !defined(OS_FUCHSIA)
-#include <sys/resource.h>
-#endif
-
#if defined(OS_ANDROID)
#include <sys/vfs.h>
#define statvfs statfs // Android uses a statvfs-like statfs struct and call.
@@ -38,7 +35,7 @@
namespace {
-#if !defined(OS_OPENBSD) && !defined(OS_FUCHSIA)
+#if !defined(OS_OPENBSD)
int NumberOfProcessors() {
// sysconf returns the number of "logical" (not "physical") processors on both
// Mac and Linux. So we get the number of max available "logical" processors.
@@ -64,9 +61,8 @@ int NumberOfProcessors() {
base::LazyInstance<base::internal::LazySysInfoValue<int, NumberOfProcessors>>::
Leaky g_lazy_number_of_processors = LAZY_INSTANCE_INITIALIZER;
-#endif // !defined(OS_OPENBSD) && !defined(OS_FUCHSIA)
+#endif // !defined(OS_OPENBSD)
-#if !defined(OS_FUCHSIA)
int64_t AmountOfVirtualMemory() {
struct rlimit limit;
int result = getrlimit(RLIMIT_DATA, &limit);
@@ -80,7 +76,6 @@ int64_t AmountOfVirtualMemory() {
base::LazyInstance<
base::internal::LazySysInfoValue<int64_t, AmountOfVirtualMemory>>::Leaky
g_lazy_virtual_memory = LAZY_INSTANCE_INITIALIZER;
-#endif // !defined(OS_FUCHSIA)
#if defined(OS_LINUX)
bool IsStatsZeroIfUnlimited(const base::FilePath& path) {
@@ -97,7 +92,7 @@ bool IsStatsZeroIfUnlimited(const base::FilePath& path) {
}
return false;
}
-#endif
+#endif // defined(OS_LINUX)
bool GetDiskSpaceInfo(const base::FilePath& path,
int64_t* available_bytes,
@@ -132,18 +127,16 @@ bool GetDiskSpaceInfo(const base::FilePath& path,
namespace base {
-#if !defined(OS_OPENBSD) && !defined(OS_FUCHSIA)
+#if !defined(OS_OPENBSD)
int SysInfo::NumberOfProcessors() {
return g_lazy_number_of_processors.Get().value();
}
-#endif
+#endif // !defined(OS_OPENBSD)
-#if !defined(OS_FUCHSIA)
// static
int64_t SysInfo::AmountOfVirtualMemory() {
return g_lazy_virtual_memory.Get().value();
}
-#endif
// static
int64_t SysInfo::AmountOfFreeDiskSpace(const FilePath& path) {
@@ -177,7 +170,7 @@ std::string SysInfo::OperatingSystemName() {
}
return std::string(info.sysname);
}
-#endif
+#endif  // !defined(OS_MACOSX) && !defined(OS_ANDROID)
#if !defined(OS_MACOSX) && !defined(OS_ANDROID) && !defined(OS_CHROMEOS)
// static
diff --git a/chromium/base/system/sys_info_unittest.cc b/chromium/base/system/sys_info_unittest.cc
index e4e3bee2d36..80efb99d938 100644
--- a/chromium/base/system/sys_info_unittest.cc
+++ b/chromium/base/system/sys_info_unittest.cc
@@ -115,7 +115,8 @@ TEST_F(SysInfoTest, MAYBE_AmountOfTotalDiskSpace) {
EXPECT_GT(SysInfo::AmountOfTotalDiskSpace(tmp_path), 0) << tmp_path.value();
}
-#if defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX)
+#if defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX) || \
+ defined(OS_FUCHSIA)
TEST_F(SysInfoTest, OperatingSystemVersionNumbers) {
int32_t os_major_version = -1;
int32_t os_minor_version = -1;
diff --git a/chromium/base/task/OWNERS b/chromium/base/task/OWNERS
index 42f0e57dc18..75024e6bffb 100644
--- a/chromium/base/task/OWNERS
+++ b/chromium/base/task/OWNERS
@@ -3,6 +3,7 @@ gab@chromium.org
robliao@chromium.org
alexclarke@chromium.org
altimin@chromium.org
+carlscab@google.com
skyostil@chromium.org
# TEAM: scheduler-dev@chromium.org
diff --git a/chromium/base/task/README.md b/chromium/base/task/README.md
index a3710c54f2f..e437e4809c9 100644
--- a/chromium/base/task/README.md
+++ b/chromium/base/task/README.md
@@ -11,3 +11,4 @@ Documentation:
* [Threading and tasks](/docs/threading_and_tasks.md)
* [Callbacks](/docs/callback.md)
+* [Vision for future API changes](https://docs.google.com/document/d/1pySz2xeJ6kLlbzDnS2jqAC1F8T_6pLEV8pgaMfURXAw/edit)
diff --git a/chromium/base/task/common/task_annotator_unittest.cc b/chromium/base/task/common/task_annotator_unittest.cc
index 43c5a575752..de8aa17ab2d 100644
--- a/chromium/base/task/common/task_annotator_unittest.cc
+++ b/chromium/base/task/common/task_annotator_unittest.cc
@@ -218,9 +218,9 @@ TEST_F(TaskAnnotatorBacktraceIntegrationTest, MultipleThreads) {
// SingleThreadTaskRunner) to verify that TaskAnnotator can capture backtraces
// for PostTasks back-and-forth between these.
auto main_thread_a = ThreadTaskRunnerHandle::Get();
- auto task_runner_b = CreateSingleThreadTaskRunnerWithTraits({});
- auto task_runner_c = CreateSequencedTaskRunnerWithTraits(
- {base::MayBlock(), base::WithBaseSyncPrimitives()});
+ auto task_runner_b = CreateSingleThreadTaskRunner({ThreadPool()});
+ auto task_runner_c = CreateSequencedTaskRunner(
+ {ThreadPool(), base::MayBlock(), base::WithBaseSyncPrimitives()});
const Location& location_a0 = FROM_HERE;
const Location& location_a1 = FROM_HERE;
diff --git a/chromium/base/task/lazy_task_runner.cc b/chromium/base/task/lazy_task_runner.cc
index fc80c8e7a10..883da7c79ec 100644
--- a/chromium/base/task/lazy_task_runner.cc
+++ b/chromium/base/task/lazy_task_runner.cc
@@ -6,7 +6,7 @@
#include <utility>
-#include "base/lazy_instance.h"
+#include "base/lazy_instance_helpers.h"
#include "base/logging.h"
#include "base/task/post_task.h"
@@ -45,20 +45,20 @@ LazyTaskRunner<SequencedTaskRunner, false>::Create() {
// LazySequencedTaskRunner.
DCHECK_EQ(thread_mode_, SingleThreadTaskRunnerThreadMode::SHARED);
- return CreateSequencedTaskRunnerWithTraits(traits_);
+ return CreateSequencedTaskRunner(traits_);
}
template <>
scoped_refptr<SingleThreadTaskRunner>
LazyTaskRunner<SingleThreadTaskRunner, false>::Create() {
- return CreateSingleThreadTaskRunnerWithTraits(traits_, thread_mode_);
+ return CreateSingleThreadTaskRunner(traits_, thread_mode_);
}
#if defined(OS_WIN)
template <>
scoped_refptr<SingleThreadTaskRunner>
LazyTaskRunner<SingleThreadTaskRunner, true>::Create() {
- return CreateCOMSTATaskRunnerWithTraits(traits_, thread_mode_);
+ return CreateCOMSTATaskRunner(traits_, thread_mode_);
}
#endif
diff --git a/chromium/base/task/lazy_task_runner.h b/chromium/base/task/lazy_task_runner.h
index 68d19a6877e..fb1c430ef4f 100644
--- a/chromium/base/task/lazy_task_runner.h
+++ b/chromium/base/task/lazy_task_runner.h
@@ -10,7 +10,6 @@
#include "base/atomicops.h"
#include "base/callback.h"
#include "base/compiler_specific.h"
-#include "base/lazy_instance_helpers.h"
#include "base/sequenced_task_runner.h"
#include "base/single_thread_task_runner.h"
#include "base/task/common/checked_lock.h"
@@ -32,15 +31,15 @@
//
// IMPORTANT: Only use this API as a last resort. Prefer storing a
// (Sequenced|SingleThread)TaskRunner returned by
-// base::Create(Sequenced|SingleThread|COMSTA)TaskRunnerWithTraits() as a member
-// on an object accessible by all PostTask() call sites.
+// base::Create(Sequenced|SingleThread|COMSTA)TaskRunner() as a member on an
+// object accessible by all PostTask() call sites.
//
// Example usage 1:
//
// namespace {
// base::LazySequencedTaskRunner g_sequenced_task_runner =
// LAZY_SEQUENCED_TASK_RUNNER_INITIALIZER(
-// base::TaskTraits(base::MayBlock(),
+// base::TaskTraits(base::ThreadPool(), base::MayBlock(),
// base::TaskPriority::USER_VISIBLE));
// } // namespace
//
@@ -54,7 +53,8 @@
//
// namespace {
// base::LazySequencedTaskRunner g_sequenced_task_task_runner =
-// LAZY_SEQUENCED_TASK_RUNNER_INITIALIZER({base::MayBlock()});
+// LAZY_SEQUENCED_TASK_RUNNER_INITIALIZER(
+// base::TaskTraits(base::ThreadPool(), base::MayBlock()));
// } // namespace
//
// // Code from different files can access the SequencedTaskRunner via this
diff --git a/chromium/base/task/lazy_task_runner_unittest.cc b/chromium/base/task/lazy_task_runner_unittest.cc
index 98619dd7b72..dae11ea4f47 100644
--- a/chromium/base/task/lazy_task_runner_unittest.cc
+++ b/chromium/base/task/lazy_task_runner_unittest.cc
@@ -22,27 +22,29 @@ namespace base {
namespace {
LazySequencedTaskRunner g_sequenced_task_runner_user_visible =
- LAZY_SEQUENCED_TASK_RUNNER_INITIALIZER({TaskPriority::USER_VISIBLE});
+ LAZY_SEQUENCED_TASK_RUNNER_INITIALIZER(
+ TaskTraits(ThreadPool(), TaskPriority::USER_VISIBLE));
LazySequencedTaskRunner g_sequenced_task_runner_user_blocking =
- LAZY_SEQUENCED_TASK_RUNNER_INITIALIZER({TaskPriority::USER_BLOCKING});
+ LAZY_SEQUENCED_TASK_RUNNER_INITIALIZER(
+ TaskTraits(ThreadPool(), TaskPriority::USER_BLOCKING));
LazySingleThreadTaskRunner g_single_thread_task_runner_user_visible =
LAZY_SINGLE_THREAD_TASK_RUNNER_INITIALIZER(
- {TaskPriority::USER_VISIBLE},
+ TaskTraits(ThreadPool(), TaskPriority::USER_VISIBLE),
SingleThreadTaskRunnerThreadMode::SHARED);
LazySingleThreadTaskRunner g_single_thread_task_runner_user_blocking =
LAZY_SINGLE_THREAD_TASK_RUNNER_INITIALIZER(
- {TaskPriority::USER_BLOCKING},
+ TaskTraits(ThreadPool(), TaskPriority::USER_BLOCKING),
SingleThreadTaskRunnerThreadMode::SHARED);
#if defined(OS_WIN)
LazyCOMSTATaskRunner g_com_sta_task_runner_user_visible =
LAZY_COM_STA_TASK_RUNNER_INITIALIZER(
- {TaskPriority::USER_VISIBLE},
+ TaskTraits(ThreadPool(), TaskPriority::USER_VISIBLE),
SingleThreadTaskRunnerThreadMode::SHARED);
LazyCOMSTATaskRunner g_com_sta_task_runner_user_blocking =
LAZY_COM_STA_TASK_RUNNER_INITIALIZER(
- {TaskPriority::USER_BLOCKING},
+ TaskTraits(ThreadPool(), TaskPriority::USER_BLOCKING),
SingleThreadTaskRunnerThreadMode::SHARED);
#endif // defined(OS_WIN)
diff --git a/chromium/base/task/post_task.cc b/chromium/base/task/post_task.cc
index aef08cc4586..ada4a00c7ab 100644
--- a/chromium/base/task/post_task.cc
+++ b/chromium/base/task/post_task.cc
@@ -25,7 +25,7 @@ class PostTaskAndReplyWithTraitsTaskRunner
private:
bool PostTask(const Location& from_here, OnceClosure task) override {
- PostTaskWithTraits(from_here, traits_, std::move(task));
+ ::base::PostTask(from_here, traits_, std::move(task));
return true;
}
@@ -61,88 +61,129 @@ bool PostTask(const Location& from_here, OnceClosure task) {
bool PostDelayedTask(const Location& from_here,
OnceClosure task,
TimeDelta delay) {
- return PostDelayedTaskWithTraits(from_here, TaskTraits(), std::move(task),
- delay);
+ return PostDelayedTask(from_here, {ThreadPool()}, std::move(task), delay);
}
bool PostTaskAndReply(const Location& from_here,
OnceClosure task,
OnceClosure reply) {
- return PostTaskWithTraitsAndReply(from_here, TaskTraits(), std::move(task),
- std::move(reply));
+ return PostTaskAndReply(from_here, {ThreadPool()}, std::move(task),
+ std::move(reply));
}
-bool PostTaskWithTraits(const Location& from_here,
- const TaskTraits& traits,
- OnceClosure task) {
- return PostDelayedTaskWithTraits(from_here, traits, std::move(task),
- TimeDelta());
+bool PostTask(const Location& from_here,
+ const TaskTraits& traits,
+ OnceClosure task) {
+ return PostDelayedTask(from_here, traits, std::move(task), TimeDelta());
}
-bool PostDelayedTaskWithTraits(const Location& from_here,
- const TaskTraits& traits,
- OnceClosure task,
- TimeDelta delay) {
+bool PostDelayedTask(const Location& from_here,
+ const TaskTraits& traits,
+ OnceClosure task,
+ TimeDelta delay) {
const TaskTraits adjusted_traits = GetTaskTraitsWithExplicitPriority(traits);
return GetTaskExecutorForTraits(adjusted_traits)
- ->PostDelayedTaskWithTraits(from_here, adjusted_traits, std::move(task),
- delay);
+ ->PostDelayedTask(from_here, adjusted_traits, std::move(task), delay);
}
-bool PostTaskWithTraitsAndReply(const Location& from_here,
- const TaskTraits& traits,
- OnceClosure task,
- OnceClosure reply) {
+bool PostTaskAndReply(const Location& from_here,
+ const TaskTraits& traits,
+ OnceClosure task,
+ OnceClosure reply) {
return PostTaskAndReplyWithTraitsTaskRunner(traits).PostTaskAndReply(
from_here, std::move(task), std::move(reply));
}
-scoped_refptr<TaskRunner> CreateTaskRunnerWithTraits(const TaskTraits& traits) {
- const TaskTraits adjusted_traits = GetTaskTraitsWithExplicitPriority(traits);
- return GetTaskExecutorForTraits(adjusted_traits)
- ->CreateTaskRunnerWithTraits(adjusted_traits);
+scoped_refptr<TaskRunner> CreateTaskRunner(const TaskTraits& traits) {
+ return GetTaskExecutorForTraits(traits)->CreateTaskRunner(traits);
}
-scoped_refptr<SequencedTaskRunner> CreateSequencedTaskRunnerWithTraits(
+scoped_refptr<SequencedTaskRunner> CreateSequencedTaskRunner(
const TaskTraits& traits) {
- const TaskTraits adjusted_traits = GetTaskTraitsWithExplicitPriority(traits);
- return GetTaskExecutorForTraits(adjusted_traits)
- ->CreateSequencedTaskRunnerWithTraits(adjusted_traits);
+ return GetTaskExecutorForTraits(traits)->CreateSequencedTaskRunner(traits);
}
scoped_refptr<UpdateableSequencedTaskRunner>
-CreateUpdateableSequencedTaskRunnerWithTraits(const TaskTraits& traits) {
+CreateUpdateableSequencedTaskRunner(const TaskTraits& traits) {
DCHECK(ThreadPoolInstance::Get())
<< "Ref. Prerequisite section of post_task.h.\n\n"
"Hint: if this is in a unit test, you're likely merely missing a "
"base::test::ScopedTaskEnvironment member in your fixture.\n";
DCHECK(traits.use_thread_pool())
<< "The base::UseThreadPool() trait is mandatory with "
- "CreateUpdateableSequencedTaskRunnerWithTraits().";
+ "CreateUpdateableSequencedTaskRunner().";
CHECK_EQ(traits.extension_id(),
TaskTraitsExtensionStorage::kInvalidExtensionId)
<< "Extension traits cannot be used with "
- "CreateUpdateableSequencedTaskRunnerWithTraits().";
+ "CreateUpdateableSequencedTaskRunner().";
const TaskTraits adjusted_traits = GetTaskTraitsWithExplicitPriority(traits);
return static_cast<internal::ThreadPoolImpl*>(ThreadPoolInstance::Get())
- ->CreateUpdateableSequencedTaskRunnerWithTraits(adjusted_traits);
+ ->CreateUpdateableSequencedTaskRunner(adjusted_traits);
+}
+
+scoped_refptr<SingleThreadTaskRunner> CreateSingleThreadTaskRunner(
+ const TaskTraits& traits,
+ SingleThreadTaskRunnerThreadMode thread_mode) {
+ return GetTaskExecutorForTraits(traits)->CreateSingleThreadTaskRunner(
+ traits, thread_mode);
+}
+
+#if defined(OS_WIN)
+scoped_refptr<SingleThreadTaskRunner> CreateCOMSTATaskRunner(
+ const TaskTraits& traits,
+ SingleThreadTaskRunnerThreadMode thread_mode) {
+ return GetTaskExecutorForTraits(traits)->CreateCOMSTATaskRunner(traits,
+ thread_mode);
+}
+#endif // defined(OS_WIN)
+
+// TODO(crbug.com/968047): Update all call sites and remove these forwarding
+// wrappers.
+bool PostTaskWithTraits(const Location& from_here,
+ const TaskTraits& traits,
+ OnceClosure task) {
+ return PostTask(from_here, traits, std::move(task));
+}
+
+bool PostDelayedTaskWithTraits(const Location& from_here,
+ const TaskTraits& traits,
+ OnceClosure task,
+ TimeDelta delay) {
+ return PostDelayedTask(from_here, traits, std::move(task), delay);
+}
+
+bool PostTaskWithTraitsAndReply(const Location& from_here,
+ const TaskTraits& traits,
+ OnceClosure task,
+ OnceClosure reply) {
+ return PostTaskAndReply(from_here, traits, std::move(task), std::move(reply));
+}
+
+scoped_refptr<TaskRunner> CreateTaskRunnerWithTraits(const TaskTraits& traits) {
+ return CreateTaskRunner(traits);
+}
+
+scoped_refptr<SequencedTaskRunner> CreateSequencedTaskRunnerWithTraits(
+ const TaskTraits& traits) {
+ return CreateSequencedTaskRunner(traits);
+}
+
+scoped_refptr<UpdateableSequencedTaskRunner>
+CreateUpdateableSequencedTaskRunnerWithTraits(const TaskTraits& traits) {
+ return CreateUpdateableSequencedTaskRunner(traits);
}
scoped_refptr<SingleThreadTaskRunner> CreateSingleThreadTaskRunnerWithTraits(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode) {
- const TaskTraits adjusted_traits = GetTaskTraitsWithExplicitPriority(traits);
- return GetTaskExecutorForTraits(adjusted_traits)
- ->CreateSingleThreadTaskRunnerWithTraits(adjusted_traits, thread_mode);
+ return CreateSingleThreadTaskRunner(traits, thread_mode);
}
#if defined(OS_WIN)
scoped_refptr<SingleThreadTaskRunner> CreateCOMSTATaskRunnerWithTraits(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode) {
- const TaskTraits adjusted_traits = GetTaskTraitsWithExplicitPriority(traits);
- return GetTaskExecutorForTraits(adjusted_traits)
- ->CreateCOMSTATaskRunnerWithTraits(adjusted_traits, thread_mode);
+ return CreateCOMSTATaskRunner(traits, thread_mode);
}
#endif // defined(OS_WIN)
diff --git a/chromium/base/task/post_task.h b/chromium/base/task/post_task.h
index 3f6c18a76f5..09bd82c02cd 100644
--- a/chromium/base/task/post_task.h
+++ b/chromium/base/task/post_task.h
@@ -32,21 +32,21 @@ namespace base {
// PostTask(FROM_HERE, BindOnce(...));
//
// To post a high priority one-off task to respond to a user interaction:
-// PostTaskWithTraits(
+// PostTask(
// FROM_HERE,
-// {TaskPriority::USER_BLOCKING},
+// {ThreadPool(), TaskPriority::USER_BLOCKING},
// BindOnce(...));
//
// To post tasks that must run in sequence with default traits:
// scoped_refptr<SequencedTaskRunner> task_runner =
-// CreateSequencedTaskRunnerWithTraits(TaskTraits());
+// CreateSequencedTaskRunner({ThreadPool()});
// task_runner->PostTask(FROM_HERE, BindOnce(...));
// task_runner->PostTask(FROM_HERE, BindOnce(...));
//
// To post tasks that may block, must run in sequence and can be skipped on
// shutdown:
// scoped_refptr<SequencedTaskRunner> task_runner =
-// CreateSequencedTaskRunnerWithTraits(
+// CreateSequencedTaskRunner(
// {MayBlock(), TaskShutdownBehavior::SKIP_ON_SHUTDOWN});
// task_runner->PostTask(FROM_HERE, BindOnce(...));
// task_runner->PostTask(FROM_HERE, BindOnce(...));
@@ -78,24 +78,27 @@ namespace base {
// have to worry about this. You will encounter DCHECKs or nullptr dereferences
// if this is violated. For tests, prefer base::test::ScopedTaskEnvironment.
-// Equivalent to calling PostTaskWithTraits with default TaskTraits.
+// Equivalent to calling PostTask with default TaskTraits.
BASE_EXPORT bool PostTask(const Location& from_here, OnceClosure task);
+inline bool PostTask(OnceClosure task,
+ const Location& from_here = Location::Current()) {
+ return PostTask(from_here, std::move(task));
+}
-// Equivalent to calling PostDelayedTaskWithTraits with default TaskTraits.
+// Equivalent to calling PostDelayedTask with default TaskTraits.
//
-// Use PostDelayedTaskWithTraits to specify a BEST_EFFORT priority if the task
-// doesn't have to run as soon as |delay| expires.
+// Use PostDelayedTask to specify a BEST_EFFORT priority if the task doesn't
+// have to run as soon as |delay| expires.
BASE_EXPORT bool PostDelayedTask(const Location& from_here,
OnceClosure task,
TimeDelta delay);
-// Equivalent to calling PostTaskWithTraitsAndReply with default TaskTraits.
+// Equivalent to calling PostTaskAndReply with default TaskTraits.
BASE_EXPORT bool PostTaskAndReply(const Location& from_here,
OnceClosure task,
OnceClosure reply);
-// Equivalent to calling PostTaskWithTraitsAndReplyWithResult with default
-// TaskTraits.
+// Equivalent to calling PostTaskAndReplyWithResult with default TaskTraits.
//
// Though RepeatingCallback is convertible to OnceCallback, we need a
// CallbackType template since we can not use template deduction and object
@@ -109,15 +112,15 @@ template <template <typename> class CallbackType,
bool PostTaskAndReplyWithResult(const Location& from_here,
CallbackType<TaskReturnType()> task,
CallbackType<void(ReplyArgType)> reply) {
- return PostTaskWithTraitsAndReplyWithResult(
- from_here, TaskTraits(), std::move(task), std::move(reply));
+ return PostTaskAndReplyWithResult(from_here, {ThreadPool()}, std::move(task),
+ std::move(reply));
}
// Posts |task| with specific |traits|. Returns false if the task definitely
// won't run because of current shutdown state.
-BASE_EXPORT bool PostTaskWithTraits(const Location& from_here,
- const TaskTraits& traits,
- OnceClosure task);
+BASE_EXPORT bool PostTask(const Location& from_here,
+ const TaskTraits& traits,
+ OnceClosure task);
// Posts |task| with specific |traits|. |task| will not run before |delay|
// expires. Returns false if the task definitely won't run because of current
@@ -125,20 +128,20 @@ BASE_EXPORT bool PostTaskWithTraits(const Location& from_here,
//
// Specify a BEST_EFFORT priority via |traits| if the task doesn't have to run
// as soon as |delay| expires.
-BASE_EXPORT bool PostDelayedTaskWithTraits(const Location& from_here,
- const TaskTraits& traits,
- OnceClosure task,
- TimeDelta delay);
+BASE_EXPORT bool PostDelayedTask(const Location& from_here,
+ const TaskTraits& traits,
+ OnceClosure task,
+ TimeDelta delay);
// Posts |task| with specific |traits| and posts |reply| on the caller's
// execution context (i.e. same sequence or thread and same TaskTraits if
// applicable) when |task| completes. Returns false if the task definitely won't
// run because of current shutdown state. Can only be called when
// SequencedTaskRunnerHandle::IsSet().
-BASE_EXPORT bool PostTaskWithTraitsAndReply(const Location& from_here,
- const TaskTraits& traits,
- OnceClosure task,
- OnceClosure reply);
+BASE_EXPORT bool PostTaskAndReply(const Location& from_here,
+ const TaskTraits& traits,
+ OnceClosure task,
+ OnceClosure reply);
// Posts |task| with specific |traits| and posts |reply| with the return value
// of |task| as argument on the caller's execution context (i.e. same sequence
@@ -155,13 +158,12 @@ template <template <typename> class CallbackType,
typename TaskReturnType,
typename ReplyArgType,
typename = EnableIfIsBaseCallback<CallbackType>>
-bool PostTaskWithTraitsAndReplyWithResult(
- const Location& from_here,
- const TaskTraits& traits,
- CallbackType<TaskReturnType()> task,
- CallbackType<void(ReplyArgType)> reply) {
+bool PostTaskAndReplyWithResult(const Location& from_here,
+ const TaskTraits& traits,
+ CallbackType<TaskReturnType()> task,
+ CallbackType<void(ReplyArgType)> reply) {
auto* result = new std::unique_ptr<TaskReturnType>();
- return PostTaskWithTraitsAndReply(
+ return PostTaskAndReply(
from_here, traits,
BindOnce(&internal::ReturnAsParamAdapter<TaskReturnType>, std::move(task),
result),
@@ -169,15 +171,30 @@ bool PostTaskWithTraitsAndReplyWithResult(
std::move(reply), Owned(result)));
}
+// Temporary wrapper for PostTaskAndReplyWithResult.
+// TODO(crbug.com/968047): Update all call sites and remove.
+template <template <typename> class CallbackType,
+ typename TaskReturnType,
+ typename ReplyArgType,
+ typename = EnableIfIsBaseCallback<CallbackType>>
+bool PostTaskWithTraitsAndReplyWithResult(
+ const Location& from_here,
+ const TaskTraits& traits,
+ CallbackType<TaskReturnType()> task,
+ CallbackType<void(ReplyArgType)> reply) {
+ return PostTaskAndReplyWithResult(from_here, traits, std::move(task),
+ std::move(reply));
+}
+
// Returns a TaskRunner whose PostTask invocations result in scheduling tasks
// using |traits|. Tasks may run in any order and in parallel.
-BASE_EXPORT scoped_refptr<TaskRunner> CreateTaskRunnerWithTraits(
+BASE_EXPORT scoped_refptr<TaskRunner> CreateTaskRunner(
const TaskTraits& traits);
// Returns a SequencedTaskRunner whose PostTask invocations result in scheduling
// tasks using |traits|. Tasks run one at a time in posting order.
-BASE_EXPORT scoped_refptr<SequencedTaskRunner>
-CreateSequencedTaskRunnerWithTraits(const TaskTraits& traits);
+BASE_EXPORT scoped_refptr<SequencedTaskRunner> CreateSequencedTaskRunner(
+ const TaskTraits& traits);
// Returns a task runner whose PostTask invocations result in scheduling tasks
// using |traits|. The priority in |traits| can be updated at any time via
@@ -191,7 +208,7 @@ CreateSequencedTaskRunnerWithTraits(const TaskTraits& traits);
// - base::ThreadPolicy must be specified if the priority of the task runner
// will ever be increased from BEST_EFFORT.
BASE_EXPORT scoped_refptr<UpdateableSequencedTaskRunner>
-CreateUpdateableSequencedTaskRunnerWithTraits(const TaskTraits& traits);
+CreateUpdateableSequencedTaskRunner(const TaskTraits& traits);
// Returns a SingleThreadTaskRunner whose PostTask invocations result in
// scheduling tasks using |traits| on a thread determined by |thread_mode|. See
@@ -202,12 +219,11 @@ CreateUpdateableSequencedTaskRunnerWithTraits(const TaskTraits& traits);
//
// If all you need is to make sure that tasks don't run concurrently (e.g.
// because they access a data structure which is not thread-safe), use
-// CreateSequencedTaskRunnerWithTraits(). Only use this if you rely on a thread-
-// affine API (it might be safer to assume thread-affinity when dealing with
+// CreateSequencedTaskRunner(). Only use this if you rely on a thread-affine API
+// (it might be safer to assume thread-affinity when dealing with
// under-documented third-party APIs, e.g. other OS') or share data across tasks
// using thread-local storage.
-BASE_EXPORT scoped_refptr<SingleThreadTaskRunner>
-CreateSingleThreadTaskRunnerWithTraits(
+BASE_EXPORT scoped_refptr<SingleThreadTaskRunner> CreateSingleThreadTaskRunner(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode =
SingleThreadTaskRunnerThreadMode::SHARED);
@@ -225,6 +241,38 @@ CreateSingleThreadTaskRunnerWithTraits(
// implementation is free to share apartments or create new apartments as
// necessary. In either case, care should be taken to make sure COM pointers are
// not smuggled across apartments.
+BASE_EXPORT scoped_refptr<SingleThreadTaskRunner> CreateCOMSTATaskRunner(
+ const TaskTraits& traits,
+ SingleThreadTaskRunnerThreadMode thread_mode =
+ SingleThreadTaskRunnerThreadMode::SHARED);
+#endif // defined(OS_WIN)
+
+// Temporary wrappers for the task posting APIs while we remove the "WithTraits"
+// suffix.
+// TODO(crbug.com/968047): Update all call sites and remove.
+BASE_EXPORT bool PostTaskWithTraits(const Location& from_here,
+ const TaskTraits& traits,
+ OnceClosure task);
+BASE_EXPORT bool PostDelayedTaskWithTraits(const Location& from_here,
+ const TaskTraits& traits,
+ OnceClosure task,
+ TimeDelta delay);
+BASE_EXPORT bool PostTaskWithTraitsAndReply(const Location& from_here,
+ const TaskTraits& traits,
+ OnceClosure task,
+ OnceClosure reply);
+BASE_EXPORT scoped_refptr<TaskRunner> CreateTaskRunnerWithTraits(
+ const TaskTraits& traits);
+BASE_EXPORT scoped_refptr<SequencedTaskRunner>
+CreateSequencedTaskRunnerWithTraits(const TaskTraits& traits);
+BASE_EXPORT scoped_refptr<UpdateableSequencedTaskRunner>
+CreateUpdateableSequencedTaskRunnerWithTraits(const TaskTraits& traits);
+BASE_EXPORT scoped_refptr<SingleThreadTaskRunner>
+CreateSingleThreadTaskRunnerWithTraits(
+ const TaskTraits& traits,
+ SingleThreadTaskRunnerThreadMode thread_mode =
+ SingleThreadTaskRunnerThreadMode::SHARED);
+#if defined(OS_WIN)
BASE_EXPORT scoped_refptr<SingleThreadTaskRunner>
CreateCOMSTATaskRunnerWithTraits(const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode =
diff --git a/chromium/base/task/post_task_unittest.cc b/chromium/base/task/post_task_unittest.cc
index b3274c4cac5..35d1f0dce50 100644
--- a/chromium/base/task/post_task_unittest.cc
+++ b/chromium/base/task/post_task_unittest.cc
@@ -26,47 +26,44 @@ namespace {
class MockTaskExecutor : public TaskExecutor {
public:
MockTaskExecutor() {
- ON_CALL(*this, PostDelayedTaskWithTraitsMock(_, _, _, _))
+ ON_CALL(*this, PostDelayedTaskMock(_, _, _, _))
.WillByDefault(Invoke([this](const Location& from_here,
const TaskTraits& traits,
OnceClosure& task, TimeDelta delay) {
return runner_->PostDelayedTask(from_here, std::move(task), delay);
}));
- ON_CALL(*this, CreateTaskRunnerWithTraits(_))
- .WillByDefault(Return(runner_));
- ON_CALL(*this, CreateSequencedTaskRunnerWithTraits(_))
- .WillByDefault(Return(runner_));
- ON_CALL(*this, CreateSingleThreadTaskRunnerWithTraits(_, _))
+ ON_CALL(*this, CreateTaskRunner(_)).WillByDefault(Return(runner_));
+ ON_CALL(*this, CreateSequencedTaskRunner(_)).WillByDefault(Return(runner_));
+ ON_CALL(*this, CreateSingleThreadTaskRunner(_, _))
.WillByDefault(Return(runner_));
#if defined(OS_WIN)
- ON_CALL(*this, CreateCOMSTATaskRunnerWithTraits(_, _))
- .WillByDefault(Return(runner_));
+ ON_CALL(*this, CreateCOMSTATaskRunner(_, _)).WillByDefault(Return(runner_));
#endif // defined(OS_WIN)
}
// TaskExecutor:
// Helper because gmock doesn't support move-only types.
- bool PostDelayedTaskWithTraits(const Location& from_here,
- const TaskTraits& traits,
- OnceClosure task,
- TimeDelta delay) override {
- return PostDelayedTaskWithTraitsMock(from_here, traits, task, delay);
+ bool PostDelayedTask(const Location& from_here,
+ const TaskTraits& traits,
+ OnceClosure task,
+ TimeDelta delay) override {
+ return PostDelayedTaskMock(from_here, traits, task, delay);
}
- MOCK_METHOD4(PostDelayedTaskWithTraitsMock,
+ MOCK_METHOD4(PostDelayedTaskMock,
bool(const Location& from_here,
const TaskTraits& traits,
OnceClosure& task,
TimeDelta delay));
- MOCK_METHOD1(CreateTaskRunnerWithTraits,
+ MOCK_METHOD1(CreateTaskRunner,
scoped_refptr<TaskRunner>(const TaskTraits& traits));
- MOCK_METHOD1(CreateSequencedTaskRunnerWithTraits,
+ MOCK_METHOD1(CreateSequencedTaskRunner,
scoped_refptr<SequencedTaskRunner>(const TaskTraits& traits));
- MOCK_METHOD2(CreateSingleThreadTaskRunnerWithTraits,
+ MOCK_METHOD2(CreateSingleThreadTaskRunner,
scoped_refptr<SingleThreadTaskRunner>(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode));
#if defined(OS_WIN)
- MOCK_METHOD2(CreateCOMSTATaskRunnerWithTraits,
+ MOCK_METHOD2(CreateCOMSTATaskRunner,
scoped_refptr<SingleThreadTaskRunner>(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode));
@@ -103,34 +100,33 @@ TEST_F(PostTaskTestWithExecutor, PostTaskToThreadPool) {
EXPECT_TRUE(PostTask(FROM_HERE, DoNothing()));
EXPECT_FALSE(executor_.runner()->HasPendingTask());
- EXPECT_TRUE(PostTaskWithTraits(FROM_HERE, {MayBlock()}, DoNothing()));
+ EXPECT_TRUE(PostTask(FROM_HERE, {ThreadPool(), MayBlock()}, DoNothing()));
EXPECT_FALSE(executor_.runner()->HasPendingTask());
- EXPECT_TRUE(PostTaskWithTraits(FROM_HERE, {ThreadPool()}, DoNothing()));
+ EXPECT_TRUE(PostTask(FROM_HERE, {ThreadPool()}, DoNothing()));
EXPECT_FALSE(executor_.runner()->HasPendingTask());
// Task runners without extension should not be the executor's.
- auto task_runner = CreateTaskRunnerWithTraits({});
+ auto task_runner = CreateTaskRunner({ThreadPool()});
EXPECT_NE(executor_.runner(), task_runner);
- auto sequenced_task_runner = CreateSequencedTaskRunnerWithTraits({});
+ auto sequenced_task_runner = CreateSequencedTaskRunner({ThreadPool()});
EXPECT_NE(executor_.runner(), sequenced_task_runner);
- auto single_thread_task_runner = CreateSingleThreadTaskRunnerWithTraits({});
+ auto single_thread_task_runner = CreateSingleThreadTaskRunner({ThreadPool()});
EXPECT_NE(executor_.runner(), single_thread_task_runner);
#if defined(OS_WIN)
- auto comsta_task_runner = CreateCOMSTATaskRunnerWithTraits({});
+ auto comsta_task_runner = CreateCOMSTATaskRunner({});
EXPECT_NE(executor_.runner(), comsta_task_runner);
#endif // defined(OS_WIN)
// Thread pool task runners should not be the executor's.
- task_runner = CreateTaskRunnerWithTraits({ThreadPool()});
+ task_runner = CreateTaskRunner({ThreadPool()});
EXPECT_NE(executor_.runner(), task_runner);
- sequenced_task_runner = CreateSequencedTaskRunnerWithTraits({ThreadPool()});
+ sequenced_task_runner = CreateSequencedTaskRunner({ThreadPool()});
EXPECT_NE(executor_.runner(), sequenced_task_runner);
- single_thread_task_runner =
- CreateSingleThreadTaskRunnerWithTraits({ThreadPool()});
+ single_thread_task_runner = CreateSingleThreadTaskRunner({ThreadPool()});
EXPECT_NE(executor_.runner(), single_thread_task_runner);
#if defined(OS_WIN)
- comsta_task_runner = CreateCOMSTATaskRunnerWithTraits({ThreadPool()});
+ comsta_task_runner = CreateCOMSTATaskRunner({ThreadPool()});
EXPECT_NE(executor_.runner(), comsta_task_runner);
#endif // defined(OS_WIN)
}
@@ -139,27 +135,24 @@ TEST_F(PostTaskTestWithExecutor, PostTaskToTaskExecutor) {
// Tasks with extension should go to the executor.
{
TaskTraits traits = {TestExtensionBoolTrait()};
- EXPECT_CALL(executor_, PostDelayedTaskWithTraitsMock(_, traits, _, _))
- .Times(1);
- EXPECT_TRUE(PostTaskWithTraits(FROM_HERE, traits, DoNothing()));
+ EXPECT_CALL(executor_, PostDelayedTaskMock(_, traits, _, _)).Times(1);
+ EXPECT_TRUE(PostTask(FROM_HERE, traits, DoNothing()));
EXPECT_TRUE(executor_.runner()->HasPendingTask());
executor_.runner()->ClearPendingTasks();
}
{
TaskTraits traits = {MayBlock(), TestExtensionBoolTrait()};
- EXPECT_CALL(executor_, PostDelayedTaskWithTraitsMock(_, traits, _, _))
- .Times(1);
- EXPECT_TRUE(PostTaskWithTraits(FROM_HERE, traits, DoNothing()));
+ EXPECT_CALL(executor_, PostDelayedTaskMock(_, traits, _, _)).Times(1);
+ EXPECT_TRUE(PostTask(FROM_HERE, traits, DoNothing()));
EXPECT_TRUE(executor_.runner()->HasPendingTask());
executor_.runner()->ClearPendingTasks();
}
{
TaskTraits traits = {TestExtensionEnumTrait::kB, TestExtensionBoolTrait()};
- EXPECT_CALL(executor_, PostDelayedTaskWithTraitsMock(_, traits, _, _))
- .Times(1);
- EXPECT_TRUE(PostTaskWithTraits(FROM_HERE, traits, DoNothing()));
+ EXPECT_CALL(executor_, PostDelayedTaskMock(_, traits, _, _)).Times(1);
+ EXPECT_TRUE(PostTask(FROM_HERE, traits, DoNothing()));
EXPECT_TRUE(executor_.runner()->HasPendingTask());
executor_.runner()->ClearPendingTasks();
}
@@ -167,22 +160,18 @@ TEST_F(PostTaskTestWithExecutor, PostTaskToTaskExecutor) {
// Task runners with extension should be the executor's.
{
TaskTraits traits = {TestExtensionBoolTrait()};
- EXPECT_CALL(executor_, CreateTaskRunnerWithTraits(traits)).Times(1);
- auto task_runner = CreateTaskRunnerWithTraits(traits);
+ EXPECT_CALL(executor_, CreateTaskRunner(traits)).Times(1);
+ auto task_runner = CreateTaskRunner(traits);
EXPECT_EQ(executor_.runner(), task_runner);
- EXPECT_CALL(executor_, CreateSequencedTaskRunnerWithTraits(traits))
- .Times(1);
- auto sequenced_task_runner = CreateSequencedTaskRunnerWithTraits(traits);
+ EXPECT_CALL(executor_, CreateSequencedTaskRunner(traits)).Times(1);
+ auto sequenced_task_runner = CreateSequencedTaskRunner(traits);
EXPECT_EQ(executor_.runner(), sequenced_task_runner);
- EXPECT_CALL(executor_, CreateSingleThreadTaskRunnerWithTraits(traits, _))
- .Times(1);
- auto single_thread_task_runner =
- CreateSingleThreadTaskRunnerWithTraits(traits);
+ EXPECT_CALL(executor_, CreateSingleThreadTaskRunner(traits, _)).Times(1);
+ auto single_thread_task_runner = CreateSingleThreadTaskRunner(traits);
EXPECT_EQ(executor_.runner(), single_thread_task_runner);
#if defined(OS_WIN)
- EXPECT_CALL(executor_, CreateCOMSTATaskRunnerWithTraits(traits, _))
- .Times(1);
- auto comsta_task_runner = CreateCOMSTATaskRunnerWithTraits(traits);
+ EXPECT_CALL(executor_, CreateCOMSTATaskRunner(traits, _)).Times(1);
+ auto comsta_task_runner = CreateCOMSTATaskRunner(traits);
EXPECT_EQ(executor_.runner(), comsta_task_runner);
#endif // defined(OS_WIN)
}
@@ -200,10 +189,11 @@ TEST_F(PostTaskTestWithExecutor, PriorityInherited) {
TaskTraits traits = {TestExtensionBoolTrait()};
TaskTraits traits_with_inherited_priority = traits;
traits_with_inherited_priority.InheritPriority(TaskPriority::BEST_EFFORT);
- EXPECT_CALL(executor_, PostDelayedTaskWithTraitsMock(
- _, traits_with_inherited_priority, _, _))
+ EXPECT_FALSE(traits_with_inherited_priority.priority_set_explicitly());
+ EXPECT_CALL(executor_,
+ PostDelayedTaskMock(_, traits_with_inherited_priority, _, _))
.Times(1);
- EXPECT_TRUE(PostTaskWithTraits(FROM_HERE, traits, DoNothing()));
+ EXPECT_TRUE(PostTask(FROM_HERE, traits, DoNothing()));
EXPECT_TRUE(executor_.runner()->HasPendingTask());
executor_.runner()->ClearPendingTasks();
}
diff --git a/chromium/base/task/promise/abstract_promise.cc b/chromium/base/task/promise/abstract_promise.cc
index df73b511656..fa3893e58a0 100644
--- a/chromium/base/task/promise/abstract_promise.cc
+++ b/chromium/base/task/promise/abstract_promise.cc
@@ -3,9 +3,11 @@
// found in the LICENSE file.
#include "base/task/promise/abstract_promise.h"
+
#include "base/bind.h"
#include "base/lazy_instance.h"
#include "base/sequenced_task_runner.h"
+#include "base/task/promise/dependent_list.h"
#include "base/threading/sequenced_task_runner_handle.h"
namespace base {
@@ -13,33 +15,40 @@ namespace internal {
AbstractPromise::~AbstractPromise() {
#if DCHECK_IS_ON()
- CheckedAutoLock lock(GetCheckedLock());
+ {
+ CheckedAutoLock lock(GetCheckedLock());
- DCHECK(!must_catch_ancestor_that_could_reject_ ||
- passed_catch_responsibility_)
- << "Promise chain ending at " << from_here_.ToString()
- << " didn't have a catch for potentially rejecting promise here "
- << must_catch_ancestor_that_could_reject_->from_here().ToString();
+ DCHECK(!must_catch_ancestor_that_could_reject_ ||
+ passed_catch_responsibility_)
+ << "Promise chain ending at " << from_here_.ToString()
+ << " didn't have a catch for potentially rejecting promise here "
+ << must_catch_ancestor_that_could_reject_->from_here().ToString();
- DCHECK(!this_must_catch_ || passed_catch_responsibility_)
- << "Potentially rejecting promise at " << from_here_.ToString()
- << " doesn't have a catch .";
+ DCHECK(!this_must_catch_ || passed_catch_responsibility_)
+ << "Potentially rejecting promise at " << from_here_.ToString()
+ << " doesn't have a catch.";
+ }
#endif
+
+ // If we're not settled we might be retaining some promises which need to be
+ // released to prevent memory leaks. If we are settled this does nothing.
+ OnCanceled();
}
bool AbstractPromise::IsCanceled() const {
if (dependents_.IsCanceled())
return true;
- const Executor* executor = GetExecutor();
+ const PromiseExecutor* executor = GetExecutor();
return executor && executor->IsCancelled();
}
const AbstractPromise* AbstractPromise::FindNonCurriedAncestor() const {
const AbstractPromise* promise = this;
- while (promise->IsResolvedWithPromise()) {
- promise =
- unique_any_cast<scoped_refptr<AbstractPromise>>(promise->value_).get();
+ while (
+ const scoped_refptr<AbstractPromise>* curried_promise =
+ unique_any_cast<scoped_refptr<AbstractPromise>>(&promise->value_)) {
+ promise = curried_promise->get();
}
return promise;
}
@@ -53,23 +62,29 @@ void AbstractPromise::AddAsDependentForAllPrerequisites() {
// although that'll be done lazily (only once they resolve/reject, so there
// is a possibility the DCHECKs might be racy.
- for (AdjacencyListNode& node : prerequisites_->prerequisite_list) {
- node.dependent_node.dependent = this;
+ for (DependentList::Node& node : *prerequisites_->prerequisite_list()) {
+ node.dependent() = this;
- // If |node.prerequisite| was canceled then early out because
+ // If |node.prerequisite()| was canceled then early out because
// |prerequisites_->prerequisite_list| will have been cleared.
- if (!node.prerequisite->InsertDependentOnAnyThread(&node.dependent_node))
+ DCHECK(node.prerequisite());
+ if (!node.prerequisite()->InsertDependentOnAnyThread(&node))
break;
}
}
bool AbstractPromise::InsertDependentOnAnyThread(DependentList::Node* node) {
- scoped_refptr<AbstractPromise>& dependent = node->dependent;
+ scoped_refptr<AbstractPromise>& dependent = node->dependent();
+
+ // Used to ensure no reference to the dependent is kept in case the Promise is
+ // already settled.
+ scoped_refptr<AbstractPromise> dependent_to_release;
#if DCHECK_IS_ON()
{
CheckedAutoLock lock(GetCheckedLock());
- node->dependent->MaybeInheritChecks(this);
+ DCHECK(node->dependent()) << from_here_.ToString();
+ node->dependent()->MaybeInheritChecks(this);
}
#endif
@@ -79,16 +94,37 @@ bool AbstractPromise::InsertDependentOnAnyThread(DependentList::Node* node) {
case DependentList::InsertResult::SUCCESS:
break;
- case DependentList::InsertResult::FAIL_PROMISE_RESOLVED:
- dependent->OnPrerequisiteResolved();
+ case DependentList::InsertResult::FAIL_PROMISE_RESOLVED: {
+ AbstractPromise* curried_promise = GetCurriedPromise();
+ if (curried_promise) {
+ // Try and reinsert |node| in the curried ancestor.
+ node->SetPrerequisite(curried_promise);
+ return curried_promise->InsertDependentOnAnyThread(node);
+ } else {
+ dependent_to_release = std::move(dependent);
+ node->RetainSettledPrerequisite();
+ dependent_to_release->OnPrerequisiteResolved(this);
+ }
break;
+ }
- case DependentList::InsertResult::FAIL_PROMISE_REJECTED:
- dependent->OnPrerequisiteRejected(this);
+ case DependentList::InsertResult::FAIL_PROMISE_REJECTED: {
+ AbstractPromise* curried_promise = GetCurriedPromise();
+ if (curried_promise) {
+ // Try and reinsert |node| in the curried ancestor.
+ node->SetPrerequisite(curried_promise);
+ return curried_promise->InsertDependentOnAnyThread(node);
+ } else {
+ dependent_to_release = std::move(dependent);
+ node->RetainSettledPrerequisite();
+ dependent_to_release->OnPrerequisiteRejected(this);
+ }
break;
+ }
case DependentList::InsertResult::FAIL_PROMISE_CANCELED:
- return dependent->OnPrerequisiteCancelled();
+ dependent_to_release = std::move(dependent);
+ return dependent_to_release->OnPrerequisiteCancelled(this);
}
return true;
@@ -111,12 +147,12 @@ CheckedLock& AbstractPromise::GetCheckedLock() {
void AbstractPromise::DoubleMoveDetector::CheckForDoubleMoveErrors(
const base::Location& new_dependent_location,
- Executor::ArgumentPassingType new_dependent_executor_type) {
+ PromiseExecutor::ArgumentPassingType new_dependent_executor_type) {
switch (new_dependent_executor_type) {
- case Executor::ArgumentPassingType::kNoCallback:
+ case PromiseExecutor::ArgumentPassingType::kNoCallback:
return;
- case Executor::ArgumentPassingType::kNormal:
+ case PromiseExecutor::ArgumentPassingType::kNormal:
DCHECK(!dependent_move_only_promise_)
<< "Can't mix move only and non-move only " << callback_type_
<< "callback arguments for the same " << callback_type_
@@ -127,7 +163,7 @@ void AbstractPromise::DoubleMoveDetector::CheckForDoubleMoveErrors(
std::make_unique<Location>(new_dependent_location);
return;
- case Executor::ArgumentPassingType::kMove:
+ case PromiseExecutor::ArgumentPassingType::kMove:
DCHECK(!dependent_move_only_promise_ ||
*dependent_move_only_promise_ == new_dependent_location)
<< "Can't have multiple move only " << callback_type_
@@ -152,7 +188,7 @@ void AbstractPromise::MaybeInheritChecks(AbstractPromise* prerequisite) {
// Inherit |prerequisite|'s resolve ancestor if it doesn't have a resolve
// callback.
if (prerequisite->resolve_argument_passing_type_ ==
- Executor::ArgumentPassingType::kNoCallback) {
+ PromiseExecutor::ArgumentPassingType::kNoCallback) {
ancestor_that_could_resolve_ = prerequisite->ancestor_that_could_resolve_;
}
@@ -167,7 +203,7 @@ void AbstractPromise::MaybeInheritChecks(AbstractPromise* prerequisite) {
if (!ancestor_that_could_reject_) {
// Inherit |prerequisite|'s reject ancestor if it doesn't have a Catch.
if (prerequisite->reject_argument_passing_type_ ==
- Executor::ArgumentPassingType::kNoCallback) {
+ PromiseExecutor::ArgumentPassingType::kNoCallback) {
ancestor_that_could_reject_ = prerequisite->ancestor_that_could_reject_;
}
@@ -182,7 +218,7 @@ void AbstractPromise::MaybeInheritChecks(AbstractPromise* prerequisite) {
if (!must_catch_ancestor_that_could_reject_) {
// Inherit |prerequisite|'s must catch ancestor if it doesn't have a Catch.
if (prerequisite->reject_argument_passing_type_ ==
- Executor::ArgumentPassingType::kNoCallback) {
+ PromiseExecutor::ArgumentPassingType::kNoCallback) {
must_catch_ancestor_that_could_reject_ =
prerequisite->must_catch_ancestor_that_could_reject_;
}
@@ -224,33 +260,44 @@ AbstractPromise::DoubleMoveDetector::~DoubleMoveDetector() = default;
#endif
-const AbstractPromise::Executor* AbstractPromise::GetExecutor() const {
- return base::unique_any_cast<Executor>(&value_);
+AbstractPromise* AbstractPromise::GetCurriedPromise() {
+ if (scoped_refptr<AbstractPromise>* curried_promise_refptr =
+ unique_any_cast<scoped_refptr<AbstractPromise>>(&value_)) {
+ return curried_promise_refptr->get();
+ } else {
+ return nullptr;
+ }
}
-AbstractPromise::Executor::PrerequisitePolicy
-AbstractPromise::GetPrerequisitePolicy() {
- Executor* executor = GetExecutor();
+const PromiseExecutor* AbstractPromise::GetExecutor() const {
+ return base::unique_any_cast<PromiseExecutor>(&value_);
+}
+
+PromiseExecutor::PrerequisitePolicy AbstractPromise::GetPrerequisitePolicy() {
+ PromiseExecutor* executor = GetExecutor();
if (!executor) {
// If there's no executor it's because the promise has already run. We
// can't run again however. The only circumstance in which we expect
// GetPrerequisitePolicy() to be called after execution is when it was
- // resolved with a promise.
- DCHECK(IsResolvedWithPromise());
- return Executor::PrerequisitePolicy::kNever;
+ // resolved with a promise or we're already settled.
+ DCHECK(IsSettled());
+ return PromiseExecutor::PrerequisitePolicy::kNever;
}
return executor->GetPrerequisitePolicy();
}
-AbstractPromise* AbstractPromise::GetFirstRejectedPrerequisite() const {
+AbstractPromise* AbstractPromise::GetFirstSettledPrerequisite() const {
if (!prerequisites_)
return nullptr;
- return reinterpret_cast<AbstractPromise*>(
- prerequisites_->first_rejecting_promise.load(std::memory_order_acquire));
+ return prerequisites_->GetFirstSettledPrerequisite();
}
void AbstractPromise::Execute() {
- if (IsCanceled()) {
+ const PromiseExecutor* executor = GetExecutor();
+ DCHECK(executor || dependents_.IsCanceled())
+ << from_here_.ToString() << " value_ contains " << value_.type();
+
+ if (!executor || executor->IsCancelled()) {
OnCanceled();
return;
}
@@ -258,108 +305,90 @@ void AbstractPromise::Execute() {
#if DCHECK_IS_ON()
// Clear |must_catch_ancestor_that_could_reject_| if we can catch it.
if (reject_argument_passing_type_ !=
- Executor::ArgumentPassingType::kNoCallback) {
+ PromiseExecutor::ArgumentPassingType::kNoCallback) {
CheckedAutoLock lock(GetCheckedLock());
must_catch_ancestor_that_could_reject_ = nullptr;
}
#endif
- if (IsResolvedWithPromise()) {
- bool settled = DispatchIfNonCurriedRootSettled();
- DCHECK(settled);
-
- prerequisites_->prerequisite_list.clear();
- return;
- }
-
- DCHECK(GetExecutor()) << from_here_.ToString() << " value_ contains "
- << value_.type();
+ DCHECK(!IsResolvedWithPromise());
// This is likely to delete the executor.
GetExecutor()->Execute(this);
}
-bool AbstractPromise::DispatchIfNonCurriedRootSettled() {
- AbstractPromise* curried_root = FindNonCurriedAncestor();
- if (!curried_root->IsSettled())
- return false;
-
-#if DCHECK_IS_ON()
- {
- CheckedAutoLock lock(GetCheckedLock());
- MaybeInheritChecks(curried_root);
- }
-#endif
-
- if (curried_root->IsResolved()) {
- OnResolveDispatchReadyDependents();
- } else if (curried_root->IsRejected()) {
- OnRejectDispatchReadyDependents();
- } else {
- DCHECK(curried_root->IsCanceled());
- OnPrerequisiteCancelled();
+void AbstractPromise::ReplaceCurriedPrerequisite(
+ AbstractPromise* curried_prerequisite,
+ AbstractPromise* replacement) {
+ DCHECK(curried_prerequisite->IsResolved() ||
+ curried_prerequisite->IsRejected());
+ DCHECK(curried_prerequisite->IsResolvedWithPromise());
+ DCHECK(replacement);
+ for (DependentList::Node& node : *prerequisites_->prerequisite_list()) {
+ if (node.prerequisite() == curried_prerequisite) {
+ node.Reset(replacement, this);
+ replacement->InsertDependentOnAnyThread(&node);
+ return;
+ }
}
- return true;
+ NOTREACHED();
}
-void AbstractPromise::OnPrerequisiteResolved() {
- if (IsResolvedWithPromise()) {
- bool settled = DispatchIfNonCurriedRootSettled();
- DCHECK(settled);
- return;
- }
+void AbstractPromise::OnPrerequisiteResolved(
+ AbstractPromise* resolved_prerequisite) {
+ DCHECK(resolved_prerequisite->IsResolved());
switch (GetPrerequisitePolicy()) {
- case Executor::PrerequisitePolicy::kAll:
+ case PromiseExecutor::PrerequisitePolicy::kAll:
if (prerequisites_->DecrementPrerequisiteCountAndCheckIfZero())
DispatchPromise();
break;
- case Executor::PrerequisitePolicy::kAny:
+ case PromiseExecutor::PrerequisitePolicy::kAny:
// PrerequisitePolicy::kAny should resolve immediately.
- DispatchPromise();
+ if (prerequisites_->MarkPrerequisiteAsSettling(resolved_prerequisite))
+ DispatchPromise();
break;
- case Executor::PrerequisitePolicy::kNever:
+ case PromiseExecutor::PrerequisitePolicy::kNever:
break;
}
}
void AbstractPromise::OnPrerequisiteRejected(
- AbstractPromise* rejected_promise) {
- DCHECK(rejected_promise->IsRejected());
- uintptr_t expected = 0;
- // Promises::All (or Race if we add that) can have multiple prerequsites and
- // it will reject as soon as any prerequsite rejects. Multiple prerequsites
- // can reject, but we wish to record only the first one.
- bool is_first_rejection =
- prerequisites_->first_rejecting_promise.compare_exchange_strong(
- expected, reinterpret_cast<uintptr_t>(rejected_promise),
- std::memory_order_acq_rel);
- // We only want to dispatch a promise the first time a prerequisite is
- // rejected because the executors can only be invoked once.
- if (is_first_rejection)
+ AbstractPromise* rejected_prerequisite) {
+ DCHECK(rejected_prerequisite->IsRejected());
+
+ // Promises::All (or Race if we add that) can have multiple prerequisites and
+ // it will reject as soon as any prerequisite rejects. Multiple prerequisites
+ // can reject, but we wish to record only the first one. Also we can only
+ // invoke executors once.
+ if (prerequisites_->MarkPrerequisiteAsSettling(rejected_prerequisite)) {
DispatchPromise();
+ }
}
-bool AbstractPromise::OnPrerequisiteCancelled() {
+bool AbstractPromise::OnPrerequisiteCancelled(
+ AbstractPromise* canceled_prerequisite) {
switch (GetPrerequisitePolicy()) {
- case Executor::PrerequisitePolicy::kAll:
+ case PromiseExecutor::PrerequisitePolicy::kAll:
// PrerequisitePolicy::kAll should cancel immediately.
OnCanceled();
return false;
- case Executor::PrerequisitePolicy::kAny:
+ case PromiseExecutor::PrerequisitePolicy::kAny:
// PrerequisitePolicy::kAny should only cancel if all if it's
// pre-requisites have been canceled.
if (prerequisites_->DecrementPrerequisiteCountAndCheckIfZero()) {
OnCanceled();
return false;
+ } else {
+ prerequisites_->RemoveCanceledPrerequisite(canceled_prerequisite);
}
return true;
- case Executor::PrerequisitePolicy::kNever:
- // If we we where resolved with a promise then we can't have had
+ case PromiseExecutor::PrerequisitePolicy::kNever:
+      // If we were resolved with a promise then we can't have had
// PrerequisitePolicy::kAny or PrerequisitePolicy::kNever before the
// executor was replaced with the curried promise, so pass on
// cancellation.
@@ -370,38 +399,81 @@ bool AbstractPromise::OnPrerequisiteCancelled() {
}
void AbstractPromise::OnResolveDispatchReadyDependents() {
- DependentList::Node* dependent_list = dependents_.ConsumeOnceForResolve();
- dependent_list = NonThreadSafeReverseList(dependent_list);
-
- // Propagate resolve to dependents.
- DependentList::Node* next;
- for (DependentList::Node* node = dependent_list; node; node = next) {
- // We want to release |node->dependent| but we need to do so before
- // we post a task to execute |dependent| on what might be another thread.
- scoped_refptr<AbstractPromise> dependent = std::move(node->dependent);
- // OnPrerequisiteResolved might post a task which destructs |node| on
- // another thread so load |node->next| now.
- next = node->next.load(std::memory_order_relaxed);
- dependent->OnPrerequisiteResolved();
- }
+ class Visitor : public DependentList::Visitor {
+ public:
+ explicit Visitor(AbstractPromise* resolved_prerequisite)
+ : resolved_prerequisite_(resolved_prerequisite) {}
+
+ private:
+ void Visit(scoped_refptr<AbstractPromise> dependent) override {
+ dependent->OnPrerequisiteResolved(resolved_prerequisite_);
+ }
+ AbstractPromise* const resolved_prerequisite_;
+ };
+
+ Visitor visitor(this);
+ dependents_.ResolveAndConsumeAllDependents(&visitor);
}
void AbstractPromise::OnRejectDispatchReadyDependents() {
- DependentList::Node* dependent_list = dependents_.ConsumeOnceForReject();
- dependent_list = NonThreadSafeReverseList(dependent_list);
-
- // Propagate rejection to dependents. We always propagate rejection
- // immediately.
- DependentList::Node* next;
- for (DependentList::Node* node = dependent_list; node; node = next) {
- // We want to release |node->dependent| but we need to do so before
- // we post a task to execute |dependent| on what might be another thread.
- scoped_refptr<AbstractPromise> dependent = std::move(node->dependent);
- // OnPrerequisiteRejected might post a task which destructs |node| on
- // another thread so load |node->next| now.
- next = node->next.load(std::memory_order_relaxed);
- dependent->OnPrerequisiteRejected(this);
- }
+ class Visitor : public DependentList::Visitor {
+ public:
+ explicit Visitor(AbstractPromise* rejected_prerequisite)
+ : rejected_prerequisite_(rejected_prerequisite) {}
+
+ private:
+ void Visit(scoped_refptr<AbstractPromise> dependent) override {
+ dependent->OnPrerequisiteRejected(rejected_prerequisite_);
+ }
+ AbstractPromise* const rejected_prerequisite_;
+ };
+
+ Visitor visitor(this);
+ dependents_.RejectAndConsumeAllDependents(&visitor);
+}
+
+void AbstractPromise::OnResolveMakeDependantsUseCurriedPrerequisite(
+ AbstractPromise* non_curried_root) {
+ class Visitor : public DependentList::Visitor {
+ public:
+ explicit Visitor(AbstractPromise* resolved_prerequisite,
+ AbstractPromise* non_curried_root)
+ : resolved_prerequisite_(resolved_prerequisite),
+ non_curried_root_(non_curried_root) {}
+
+ private:
+ void Visit(scoped_refptr<AbstractPromise> dependent) override {
+ dependent->ReplaceCurriedPrerequisite(resolved_prerequisite_,
+ non_curried_root_);
+ }
+ AbstractPromise* const resolved_prerequisite_;
+ AbstractPromise* const non_curried_root_;
+ };
+
+ Visitor visitor(this, non_curried_root);
+ dependents_.ResolveAndConsumeAllDependents(&visitor);
+}
+
+void AbstractPromise::OnRejectMakeDependantsUseCurriedPrerequisite(
+ AbstractPromise* non_curried_root) {
+ class Visitor : public DependentList::Visitor {
+ public:
+ explicit Visitor(AbstractPromise* rejected_prerequisite,
+ AbstractPromise* non_curried_root)
+ : rejected_prerequisite_(rejected_prerequisite),
+ non_curried_root_(non_curried_root) {}
+
+ private:
+ void Visit(scoped_refptr<AbstractPromise> dependent) override {
+ dependent->ReplaceCurriedPrerequisite(rejected_prerequisite_,
+ non_curried_root_);
+ }
+ AbstractPromise* const rejected_prerequisite_;
+ AbstractPromise* const non_curried_root_;
+ };
+
+ Visitor visitor(this, non_curried_root);
+ dependents_.RejectAndConsumeAllDependents(&visitor);
}
void AbstractPromise::DispatchPromise() {
@@ -413,10 +485,22 @@ void AbstractPromise::DispatchPromise() {
}
void AbstractPromise::OnCanceled() {
- if (dependents_.IsCanceled() || dependents_.IsResolved() ||
- dependents_.IsRejected()) {
+ class Visitor : public DependentList::Visitor {
+ public:
+ explicit Visitor(AbstractPromise* canceled_prerequisite)
+ : canceled_prerequisite_(canceled_prerequisite) {}
+
+ private:
+ void Visit(scoped_refptr<AbstractPromise> dependent) override {
+ dependent->OnPrerequisiteCancelled(canceled_prerequisite_);
+ }
+
+ AbstractPromise* const canceled_prerequisite_;
+ };
+
+ Visitor visitor(this);
+ if (!dependents_.CancelAndConsumeAllDependents(&visitor))
return;
- }
// The executor could be keeping a promise alive, but it's never going to run
// so clear it.
@@ -429,28 +513,8 @@ void AbstractPromise::OnCanceled() {
}
#endif
- DependentList::Node* dependent_list = dependents_.ConsumeOnceForCancel();
-
- // Propagate cancellation to dependents.
- while (dependent_list) {
- scoped_refptr<AbstractPromise> dependent =
- std::move(dependent_list->dependent);
- dependent_list = dependent_list->next.load(std::memory_order_relaxed);
- dependent->OnPrerequisiteCancelled();
- }
-
- // We need to release any AdjacencyListNodes we own to prevent memory leaks
- // due to refcount cycles. We can't just clear |prerequisite_list| (which
- // contains DependentList::Node) because in the case of multiple prerequisites
- // they may not have all be settled, which means some will want to traverse
- // their |dependent_list| which includes this promise. This is a problem
- // because there isn't a conveniant way of removing ourself from their
- // |dependent_list|. It's sufficient however to simply null our references.
- if (prerequisites_) {
- for (AdjacencyListNode& node : prerequisites_->prerequisite_list) {
- node.prerequisite = nullptr;
- }
- }
+ if (prerequisites_)
+ prerequisites_->Clear();
}
void AbstractPromise::OnResolved() {
@@ -458,149 +522,136 @@ void AbstractPromise::OnResolved() {
DCHECK(executor_can_resolve_ || IsResolvedWithPromise())
<< from_here_.ToString();
#endif
- if (IsResolvedWithPromise()) {
- scoped_refptr<AbstractPromise> curried_promise =
- unique_any_cast<scoped_refptr<AbstractPromise>>(value_);
-
- if (DispatchIfNonCurriedRootSettled()) {
- prerequisites_->prerequisite_list.clear();
- } else {
- // The curried promise isn't already settled we need to throw away any
- // existing dependencies and make |curried_promise| the only dependency of
- // this promise.
-
- if (!curried_promise->prerequisites_)
- curried_promise->prerequisites_ = std::make_unique<AdjacencyList>();
-
+ if (AbstractPromise* curried_promise = GetCurriedPromise()) {
#if DCHECK_IS_ON()
- {
- CheckedAutoLock lock(GetCheckedLock());
- ancestor_that_could_resolve_ = nullptr;
- ancestor_that_could_reject_ = nullptr;
- }
+ {
+ CheckedAutoLock lock(GetCheckedLock());
+ MaybeInheritChecks(curried_promise);
+ }
#endif
- prerequisites_->ResetWithSingleDependency(curried_promise);
- AddAsDependentForAllPrerequisites();
+
+    // If there are settled curried ancestors that we can skip, then do so.
+ while (curried_promise->IsSettled()) {
+ if (curried_promise->IsCanceled()) {
+ OnCanceled();
+ return;
+ }
+ const scoped_refptr<AbstractPromise>* curried_ancestor =
+ unique_any_cast<scoped_refptr<AbstractPromise>>(
+ &curried_promise->value_);
+ if (curried_ancestor) {
+ curried_promise = curried_ancestor->get();
+ } else {
+ break;
+ }
}
+
+ OnResolveMakeDependantsUseCurriedPrerequisite(curried_promise);
} else {
OnResolveDispatchReadyDependents();
-
- // We need to release any AdjacencyListNodes we own to prevent memory leaks
- // due to refcount cycles.
- if (prerequisites_)
- prerequisites_->prerequisite_list.clear();
}
+
+ if (prerequisites_)
+ prerequisites_->Clear();
}
void AbstractPromise::OnRejected() {
- // Rejection with a rejected promise doesn't need special handling.
- DCHECK(!IsResolvedWithPromise() ||
- unique_any_cast<scoped_refptr<AbstractPromise>>(value_)->IsRejected());
#if DCHECK_IS_ON()
DCHECK(executor_can_reject_) << from_here_.ToString();
#endif
- OnRejectDispatchReadyDependents();
-
- // We need to release any AdjacencyListNodes we own to prevent memory leaks
- // due to refcount cycles. We can't just clear |prerequisite_list| (which
- // contains DependentList::Node) because in the case of multiple prerequisites
- // they may not have all be settled, which means some will want to traverse
- // their |dependent_list| which includes this promise. This is a problem
- // because there isn't a conveniant way of removing ourself from their
- // |dependent_list|. It's sufficient however to simply null our references.
- if (prerequisites_) {
- for (AdjacencyListNode& node : prerequisites_->prerequisite_list) {
- node.prerequisite = nullptr;
- }
- }
-}
-
-// static
-DependentList::Node* AbstractPromise::NonThreadSafeReverseList(
- DependentList::Node* list) {
- DependentList::Node* prev = nullptr;
- while (list) {
- DependentList::Node* next = list->next.load(std::memory_order_relaxed);
- list->next.store(prev, std::memory_order_relaxed);
- prev = list;
- list = next;
- }
- return prev;
-}
-AbstractPromise::AdjacencyListNode::AdjacencyListNode() = default;
+ if (AbstractPromise* curried_promise = GetCurriedPromise()) {
+#if DCHECK_IS_ON()
+ {
+ CheckedAutoLock lock(GetCheckedLock());
+ MaybeInheritChecks(curried_promise);
+ }
+#endif
-AbstractPromise::AdjacencyListNode::AdjacencyListNode(
- scoped_refptr<AbstractPromise> promise)
- : prerequisite(std::move(promise)) {}
+    // If there are settled curried ancestors that we can skip, then do so.
+ while (curried_promise->IsSettled()) {
+ if (curried_promise->IsCanceled()) {
+ OnCanceled();
+ return;
+ }
+ const scoped_refptr<AbstractPromise>* curried_ancestor =
+ unique_any_cast<scoped_refptr<AbstractPromise>>(
+ &curried_promise->value_);
+ if (curried_ancestor) {
+ curried_promise = curried_ancestor->get();
+ } else {
+ break;
+ }
+ }
-AbstractPromise::AdjacencyListNode::~AdjacencyListNode() = default;
+ OnRejectMakeDependantsUseCurriedPrerequisite(curried_promise);
+ } else {
+ OnRejectDispatchReadyDependents();
+ }
-AbstractPromise::AdjacencyListNode::AdjacencyListNode(
- AdjacencyListNode&& other) noexcept = default;
+ if (prerequisites_)
+ prerequisites_->Clear();
+}
AbstractPromise::AdjacencyList::AdjacencyList() = default;
-AbstractPromise::AdjacencyList::AdjacencyList(
- scoped_refptr<AbstractPromise> prerequisite)
- : prerequisite_list(1), action_prerequisite_count(1) {
- prerequisite_list[0].prerequisite = std::move(prerequisite);
+AbstractPromise::AdjacencyList::AdjacencyList(AbstractPromise* prerequisite)
+ : prerequisite_list_(1), action_prerequisite_count_(1) {
+ prerequisite_list_[0].SetPrerequisite(prerequisite);
}
AbstractPromise::AdjacencyList::AdjacencyList(
- std::vector<AdjacencyListNode> nodes)
- : prerequisite_list(std::move(nodes)),
- action_prerequisite_count(prerequisite_list.size()) {}
+ std::vector<DependentList::Node> nodes)
+ : prerequisite_list_(std::move(nodes)),
+ action_prerequisite_count_(prerequisite_list_.size()) {}
AbstractPromise::AdjacencyList::~AdjacencyList() = default;
bool AbstractPromise::AdjacencyList::
DecrementPrerequisiteCountAndCheckIfZero() {
- return action_prerequisite_count.fetch_sub(1, std::memory_order_acq_rel) == 1;
-}
-
-void AbstractPromise::AdjacencyList::ResetWithSingleDependency(
- scoped_refptr<AbstractPromise> prerequisite) {
- prerequisite_list.clear();
- prerequisite_list.push_back(AdjacencyListNode{std::move(prerequisite)});
- action_prerequisite_count = 1;
-}
-
-AbstractPromise::Executor::~Executor() {
- vtable_->destructor(storage_);
-}
-
-AbstractPromise::Executor::PrerequisitePolicy
-AbstractPromise::Executor::GetPrerequisitePolicy() const {
- return vtable_->get_prerequsite_policy(storage_);
-}
-
-bool AbstractPromise::Executor::IsCancelled() const {
- return vtable_->is_cancelled(storage_);
+ return action_prerequisite_count_.fetch_sub(1, std::memory_order_acq_rel) ==
+ 1;
}
-#if DCHECK_IS_ON()
-AbstractPromise::Executor::ArgumentPassingType
-AbstractPromise::Executor::ResolveArgumentPassingType() const {
- return vtable_->resolve_argument_passing_type(storage_);
-}
-
-AbstractPromise::Executor::ArgumentPassingType
-AbstractPromise::Executor::RejectArgumentPassingType() const {
- return vtable_->reject_argument_passing_type(storage_);
-}
-
-bool AbstractPromise::Executor::CanResolve() const {
- return vtable_->can_resolve(storage_);
+// For PrerequisitePolicy::kAll this is called for the first rejected
+// prerequisite. For PrerequisitePolicy::kAny this is called for the first
+// resolving or rejecting prerequisite.
+bool AbstractPromise::AdjacencyList::MarkPrerequisiteAsSettling(
+ AbstractPromise* settled_prerequisite) {
+ DCHECK(settled_prerequisite->IsSettled());
+ uintptr_t expected = 0;
+ return first_settled_prerequisite_.compare_exchange_strong(
+ expected, reinterpret_cast<uintptr_t>(settled_prerequisite),
+ std::memory_order_acq_rel);
}
-bool AbstractPromise::Executor::CanReject() const {
- return vtable_->can_reject(storage_);
+void AbstractPromise::AdjacencyList::RemoveCanceledPrerequisite(
+ AbstractPromise* canceled_prerequisite) {
+ DCHECK(canceled_prerequisite->IsCanceled());
+ for (DependentList::Node& node : prerequisite_list_) {
+ if (node.prerequisite() == canceled_prerequisite) {
+ node.ClearPrerequisite();
+ return;
+ }
+ }
+ NOTREACHED() << "Couldn't find canceled_prerequisite "
+ << canceled_prerequisite->from_here().ToString();
}
-#endif
-void AbstractPromise::Executor::Execute(AbstractPromise* promise) {
- return vtable_->execute(storage_, promise);
+void AbstractPromise::AdjacencyList::Clear() {
+ // If there's only one prerequisite we can just clear |prerequisite_list_|
+ // which deals with potential refcounting cycles due to curried promises.
+ if (prerequisite_list_.size() == 1) {
+ prerequisite_list_.clear();
+ } else {
+    // If there are multiple prerequisites we can't do that because the
+ // DependentList::Nodes may still be in use by some of them. Instead we
+ // release our prerequisite references and rely on refcounting to release
+ // the owning AbstractPromise.
+ for (DependentList::Node& node : prerequisite_list_) {
+ node.ClearPrerequisite();
+ }
+ }
}
} // namespace internal
diff --git a/chromium/base/task/promise/abstract_promise.h b/chromium/base/task/promise/abstract_promise.h
index ba921e5bb2b..0a4ef6ac926 100644
--- a/chromium/base/task/promise/abstract_promise.h
+++ b/chromium/base/task/promise/abstract_promise.h
@@ -14,11 +14,242 @@
#include "base/no_destructor.h"
#include "base/task/common/checked_lock.h"
#include "base/task/promise/dependent_list.h"
+#include "base/task/promise/promise_executor.h"
#include "base/thread_annotations.h"
namespace base {
class TaskRunner;
+// AbstractPromise Memory Management.
+//
+// Consider a chain of promises: P1, P2 & P3
+//
+// Before resolve:
+// * P1 needs an external reference (such as a Promise<> handle or it has been
+// posted) to keep it alive
+// * P2 is kept alive by P1
+// * P3 is kept alive by P2
+//
+// ------------
+// Key: | P1 | P1 doesn't have an
+// ═ Denotes a reference is held | | AdjacencyList
+// ─ Denotes a raw pointer | Dependants |
+// -----|------
+// | ^
+// ╔════════════╗ | |
+// ↓ ║ ↓ |
+// ------------ ---║--------|--
+// | P2 | | dependent_ | |
+// | |==| | | P2's AdjacencyList
+// | Dependants | | prerequisite_ |
+// -----|------ ---------------
+// | ^
+// ╔════════════╗ | |
+// ↓ ║ ↓ |
+// ------------ ---║--------|--
+// | P3 | | dependent_ | |
+// | |==| | | P3's AdjacencyList
+// | Dependants | | prerequisite_ |
+// ---|-------- ---------------
+// |
+// ↓
+// null
+//
+//
+// After P1's executor runs, P2's |prerequisite_| link is upgraded by
+// OnResolveDispatchReadyDependents (which incirectly calls
+// RetainSettledPrerequisite) from a raw pointer to a reference. This is done to
+// ensure P1's |value_| is available when P2's executor runs.
+//
+// ------------
+// | P1 | P1 doesn't have an
+// | | AdjacencyList
+// | Dependants |
+// -----|------
+// | ^
+// ╔════════════╗ | ║
+// ↓ ║ ↓ ║
+// ------------ ---║--------║--
+// | P2 | | dependent_ ║ |
+// | |==| ║ | P2's AdjacencyList
+// | Dependants | | prerequisite_ |
+// -----|------ ---------------
+// | ^
+// ╔════════════╗ | |
+// ↓ ║ ↓ |
+// ------------ ---║--------|--
+// | P3 | | dependent_ | |
+// | |==| | | P3's AdjacencyList
+// | Dependants | | prerequisite_ |
+// ---|-------- ---------------
+// |
+// ↓
+// null
+//
+//
+// After P2's executor runs, its AdjacencyList is cleared. Unless there's
+// external references, at this stage P1 will be deleted. P3's |prerequisite_|
+// is upgraded from a raw pointer to a reference to ensure P2's |value_| is
+// available when P3's executor runs.
+//
+// ------------
+// | P1 | P1 doesn't have an
+// | | AdjacencyList
+// | Dependants |
+// -----|------
+// |
+// null | null
+// ^ ↓ ^
+// ------------ ---|--------|--
+// | P2 | | dependent_ | |
+// | |==| | | P2's AdjacencyList
+// | Dependants | | prerequisite_ |
+// -----|------ ---------------
+// | ^
+// ╔════════════╗ | ║
+// ↓ ║ ↓ ║
+// ------------ ---║--------║--
+// | P3 | | dependent_ ║ |
+// | |==| ║ | P3's AdjacencyList
+// | Dependants | | prerequisite_ |
+// ---|-------- ---------------
+// |
+// ↓
+// null
+//
+// =============================================================================
+// Consider a promise P1 that is resolved with an unresolved promise P2, and P3
+// which depends on P1.
+//
+// 1) Initially P1 doesn't have an AdjacencyList and must be kept alive by an
+// external reference. P1 keeps P3 alive.
+//
+// 2) P1's executor resolves with P2 and P3 is modified to have P2 as a
+// dependent instead of P1. P1 has a reference to P2, but it needs an
+// external reference to keep alive.
+//
+// 3) When P2's executor runs, P3's executor is scheduled and P3's
+// |prerequisite_| link to P2 is upgraded to a reference. So P3 keeps P2
+// alive.
+//
+// 4) When P3's executor runs, its AdjacencyList is cleared. At this stage
+//    unless there are external references P2 and P3 will be deleted.
+//
+//
+// 1. --------------
+// | P1 value_ |
+// | | P1 doesn't have an AdjacencyList
+// | Dependants |
+// ---|----------
+// | ^
+// ↓ ║
+// ------------ ------------║---
+// | P3 | | dependent_ ║ |
+// | |==| ║ | P3's AdjacencyList
+// | Dependants | | prerequisite_ |
+// ---|-------- ---------------
+// |
+// ↓
+// null
+//
+// 2. ------------
+// | P2 |
+// ╔══════>| | P2 doesn't have an
+// ║ | Dependants | AdjacencyList
+// ║ -----|------
+// ║ | ^
+// ------║------- | |
+// | P1 value_ | | |
+// | | | |
+// | Dependants | | |
+// -------------- | |
+// ╔═════════╗ ┌────────────┘ |
+// ↓ ║ ↓ ┌──────────┘
+// ------------ ---║--------|---
+// | P3 | | dependent_ | |
+// | |==| | | P3's AdjacencyList
+// | Dependants | | prerequisite_ |
+// ---|-------- ---------------
+// |
+// ↓
+// null
+// 3. ------------
+// | P2 |
+// | | P2 doesn't have an
+// | Dependants | AdjacencyList
+// -----|------
+// | ^
+// | ║
+// | ║
+// | ║
+// | ║
+// | ║
+// ╔═════════╗ ┌────────────┘ ║
+// ↓ ║ ↓ ╔══════════╝
+// ------------ ---║--------║---
+// | P3 | | dependent_ ║ |
+// | |==| ║ | P3's AdjacencyList
+// | Dependants | | prerequisite_ |
+// ---|-------- ---------------
+// |
+// ↓
+// null
+//
+//
+// 4. ------------
+// | P2 |
+// | | P2 doesn't have an
+// | Dependants | AdjacencyList
+// ------------
+//
+//
+//
+//
+//
+//
+//
+//
+// ------------
+// | P3 | P3 doesn't have an AdjacencyList anymore.
+// | |
+// | Dependants |
+// ---|--------
+// |
+// ↓
+// null
+//
+// =============================================================================
+// Consider an all promise Pall with dependents P1, P2 & P3:
+//
+// Before resolve P1, P2 & P3 keep Pall alive. If say P2 rejects then Pall
+// keeps P2 alive, however all the dependents in Pall's AdjacencyList are
+// cleared. When there are no external references to P1, P2 & P3 then Pall
+// will get deleted too if it has no external references.
+//
+// Pall's AdjacencyList
+// ------------ ----------------
+// | P1 | | |
+// | | <─────────── prerequisite_ |
+// | Dependants────────────>| dependent_══════════════════╗
+// ------------ | | ↓
+// |----------------| ---------
+// ------------ | | | |
+// | P2 | <─────────── prerequisite_ | | Pall |
+// | | | dependent_════════════>| |
+// | Dependants────────────>| | | |
+// ------------ | | ---------
+// |----------------| ^
+// ------------ | | ║
+// | P3 | <─────────── prerequisite_ | ║
+// | | | dependent_══════════════════╝
+// | Dependants────────────>| |
+// ------------ ----------------
+//
+//
+// In general a promise's AdjacencyList only retains prerequisites after the
+// promise has resolved. It is necessary to retain the prerequisites because a
+// ThenOn or CatchOn can be added after the promise has resolved.
+
// std::variant, std::tuple and other templates can't contain void but they can
// contain the empty type Void. This is the same idea as std::monospace.
struct Void {};
@@ -107,25 +338,20 @@ namespace internal {
class BASE_EXPORT AbstractPromise
: public RefCountedThreadSafe<AbstractPromise> {
public:
- struct AdjacencyList;
-
- template <typename ConstructType, typename DerivedExecutorType>
- struct ConstructWith {};
+ class AdjacencyList;
- template <typename ConstructType,
- typename DerivedExecutorType,
- typename... ExecutorArgs>
+ template <typename ConstructType>
static scoped_refptr<AbstractPromise> Create(
- scoped_refptr<TaskRunner>&& task_runner,
+ const scoped_refptr<TaskRunner>& task_runner,
const Location& from_here,
std::unique_ptr<AdjacencyList> prerequisites,
RejectPolicy reject_policy,
- ConstructWith<ConstructType, DerivedExecutorType> tag,
- ExecutorArgs&&... executor_args) {
+ ConstructType tag,
+ PromiseExecutor::Data&& executor_data) noexcept {
scoped_refptr<AbstractPromise> promise = subtle::AdoptRefIfNeeded(
- new internal::AbstractPromise(
- std::move(task_runner), from_here, std::move(prerequisites),
- reject_policy, tag, std::forward<ExecutorArgs>(executor_args)...),
+ new internal::AbstractPromise(task_runner, from_here,
+ std::move(prerequisites), reject_policy,
+ tag, std::move(executor_data)),
AbstractPromise::kRefCountPreference);
// It's important this is called after |promise| has been initialized
// because otherwise it could trigger a scoped_refptr destructor on another
@@ -134,19 +360,41 @@ class BASE_EXPORT AbstractPromise
return promise;
}
+ template <typename ConstructType>
+ static scoped_refptr<AbstractPromise> CreateNoPrerequisitePromise(
+ const Location& from_here,
+ RejectPolicy reject_policy,
+ ConstructType tag,
+ PromiseExecutor::Data&& executor_data) noexcept {
+ scoped_refptr<AbstractPromise> promise =
+ subtle::AdoptRefIfNeeded(new internal::AbstractPromise(
+ nullptr, from_here, nullptr, reject_policy,
+ tag, std::move(executor_data)),
+ AbstractPromise::kRefCountPreference);
+ return promise;
+ }
+
AbstractPromise(const AbstractPromise&) = delete;
AbstractPromise& operator=(const AbstractPromise&) = delete;
const Location& from_here() const { return from_here_; }
+ bool IsSettled() const { return dependents_.IsSettled(); }
bool IsCanceled() const;
+
+ // It's an error (result will be racy) to call these if unsettled.
bool IsRejected() const { return dependents_.IsRejected(); }
bool IsResolved() const { return dependents_.IsResolved(); }
- bool IsSettled() const { return dependents_.IsSettled(); }
+
+ bool IsRejectedForTesting() const {
+ return dependents_.IsRejectedForTesting();
+ }
+ bool IsResolvedForTesting() const {
+ return dependents_.IsResolvedForTesting();
+ }
bool IsResolvedWithPromise() const {
- return value_.type() ==
- TypeId::From<scoped_refptr<internal::AbstractPromise>>();
+ return value_.type() == TypeId::From<scoped_refptr<AbstractPromise>>();
}
const unique_any& value() const { return FindNonCurriedAncestor()->value_; }
@@ -180,179 +428,27 @@ class BASE_EXPORT AbstractPromise
const_cast<const AbstractPromise*>(this)->FindNonCurriedAncestor());
}
+ // Returns nullptr if there isn't a curried promise.
+ const AbstractPromise* GetCurriedPromise() const;
+
// Sets the |value_| to |t|. The caller should call OnResolved() or
// OnRejected() afterwards.
template <typename T>
void emplace(T&& t) {
DCHECK(GetExecutor() != nullptr) << "Only valid to emplace once";
value_ = std::forward<T>(t);
+ static_assert(!std::is_same<std::decay_t<T>, AbstractPromise*>::value,
+ "Use scoped_refptr<AbstractPromise> instead");
}
template <typename T, typename... Args>
void emplace(in_place_type_t<T>, Args&&... args) {
DCHECK(GetExecutor() != nullptr) << "Only valid to emplace once";
value_.emplace<T>(std::forward<Args>(args)...);
+ static_assert(!std::is_same<std::decay_t<T>, AbstractPromise*>::value,
+ "Use scoped_refptr<AbstractPromise> instead");
}
- // Unresolved promises have an executor which invokes one of the callbacks
- // associated with the promise. Once the callback has been invoked the
- // Executor is destroyed.
- //
- // Ideally Executor would be a pure virtual class, but we want to store these
- // inline to reduce the number of memory allocations (small object
- // optimization). The problem is even though placement new returns the same
- // address it was allocated at, you have to use the returned pointer. Casting
- // the buffer to the derived class is undefined behavior. STL implementations
- // usually store an extra pointer, but there we have opted for implementing
- // our own VTable to save a little bit of memory.
- class BASE_EXPORT Executor {
- public:
- // Constructs |Derived| in place.
- template <typename Derived, typename... Args>
- explicit Executor(in_place_type_t<Derived>, Args&&... args) {
- static_assert(sizeof(Derived) <= MaxSize, "Derived is too big");
- static_assert(sizeof(Executor) <= sizeof(AnyInternal::InlineAlloc),
- "Executor is too big");
- vtable_ = &VTableHelper<Derived>::vtable_;
- new (storage_) Derived(std::forward<Args>(args)...);
- }
-
- ~Executor();
-
- // Controls whether or not a promise should wait for its prerequisites
- // before becoming eligible for execution.
- enum class PrerequisitePolicy : uint8_t {
- // Wait for all prerequisites to resolve (or any to reject) before
- // becoming eligible for execution. If any prerequisites are canceled it
- // will be canceled too.
- kAll,
-
- // Wait for any prerequisite to resolve or reject before becoming eligible
- // for execution. If all prerequisites are canceled it will be canceled
- // too.
- kAny,
-
- // Never become eligible for execution. Cancellation is ignored.
- kNever,
- };
-
- // Returns the associated PrerequisitePolicy.
- PrerequisitePolicy GetPrerequisitePolicy() const;
-
- // NB if there is both a resolve and a reject executor we require them to
- // be both canceled at the same time.
- bool IsCancelled() const;
-
- // Describes an executor callback.
- enum class ArgumentPassingType : uint8_t {
- // No callback. E.g. the RejectArgumentPassingType in a promise with a
- // resolve callback but no reject callback.
- kNoCallback,
-
- // Executor callback argument passed by value or by reference.
- kNormal,
-
- // Executor callback argument passed by r-value reference.
- kMove,
- };
-
-#if DCHECK_IS_ON()
- // Returns details of the resolve and reject executor callbacks if any. This
- // data is used to diagnose double moves and missing catches.
- ArgumentPassingType ResolveArgumentPassingType() const;
- ArgumentPassingType RejectArgumentPassingType() const;
- bool CanResolve() const;
- bool CanReject() const;
-#endif
-
- // Invokes the associate callback for |promise|. If the callback was
- // cancelled it should call |promise->OnCanceled()|. If the callback
- // resolved it should store the resolve result via |promise->emplace()| and
- // call |promise->OnResolved()|. If the callback was rejected it should
- // store the reject result in |promise->state()| and call
- // |promise->OnResolved()|.
- // Caution the Executor will be destructed when |promise->state()| is
- // written to.
- void Execute(AbstractPromise* promise);
-
- private:
- static constexpr size_t MaxSize = sizeof(void*) * 2;
-
- struct VTable {
- void (*destructor)(void* self);
- PrerequisitePolicy (*get_prerequsite_policy)(const void* self);
- bool (*is_cancelled)(const void* self);
-#if DCHECK_IS_ON()
- ArgumentPassingType (*resolve_argument_passing_type)(const void* self);
- ArgumentPassingType (*reject_argument_passing_type)(const void* self);
- bool (*can_resolve)(const void* self);
- bool (*can_reject)(const void* self);
-#endif
- void (*execute)(void* self, AbstractPromise* promise);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(VTable);
- };
-
- template <typename DerivedType>
- struct VTableHelper {
- VTableHelper(const VTableHelper& other) = delete;
- VTableHelper& operator=(const VTableHelper& other) = delete;
-
- static void Destructor(void* self) {
- static_cast<DerivedType*>(self)->~DerivedType();
- }
-
- static PrerequisitePolicy GetPrerequisitePolicy(const void* self) {
- return static_cast<const DerivedType*>(self)->GetPrerequisitePolicy();
- }
-
- static bool IsCancelled(const void* self) {
- return static_cast<const DerivedType*>(self)->IsCancelled();
- }
-
-#if DCHECK_IS_ON()
- static ArgumentPassingType ResolveArgumentPassingType(const void* self) {
- return static_cast<const DerivedType*>(self)
- ->ResolveArgumentPassingType();
- }
-
- static ArgumentPassingType RejectArgumentPassingType(const void* self) {
- return static_cast<const DerivedType*>(self)
- ->RejectArgumentPassingType();
- }
-
- static bool CanResolve(const void* self) {
- return static_cast<const DerivedType*>(self)->CanResolve();
- }
-
- static bool CanReject(const void* self) {
- return static_cast<const DerivedType*>(self)->CanReject();
- }
-#endif
-
- static void Execute(void* self, AbstractPromise* promise) {
- return static_cast<DerivedType*>(self)->Execute(promise);
- }
-
- static constexpr VTable vtable_ = {
- &VTableHelper::Destructor,
- &VTableHelper::GetPrerequisitePolicy,
- &VTableHelper::IsCancelled,
-#if DCHECK_IS_ON()
- &VTableHelper::ResolveArgumentPassingType,
- &VTableHelper::RejectArgumentPassingType,
- &VTableHelper::CanResolve,
- &VTableHelper::CanReject,
-#endif
- &VTableHelper::Execute,
- };
- };
-
- const VTable* vtable_;
- char storage_[MaxSize];
- };
-
// Signals that this promise was cancelled. If executor hasn't run yet, this
// will prevent it from running and cancels any dependent promises unless they
// have PrerequisitePolicy::kAny, in which case they will only be canceled if
@@ -368,57 +464,76 @@ class BASE_EXPORT AbstractPromise
// scheduled for execution.
void OnRejected();
- struct BASE_EXPORT AdjacencyListNode {
- AdjacencyListNode();
- explicit AdjacencyListNode(scoped_refptr<AbstractPromise> prerequisite);
- explicit AdjacencyListNode(AdjacencyListNode&& other) noexcept;
- ~AdjacencyListNode();
-
- scoped_refptr<AbstractPromise> prerequisite;
- DependentList::Node dependent_node;
- };
-
// This is separate from AbstractPromise to reduce the memory footprint of
// regular PostTask without promise chains.
- struct BASE_EXPORT AdjacencyList {
+ class BASE_EXPORT AdjacencyList {
+ public:
AdjacencyList();
- explicit AdjacencyList(scoped_refptr<AbstractPromise> prerequisite);
- explicit AdjacencyList(std::vector<AdjacencyListNode> prerequisite_list);
~AdjacencyList();
- void ResetWithSingleDependency(scoped_refptr<AbstractPromise> prerequisite);
+ explicit AdjacencyList(AbstractPromise* prerequisite);
+ explicit AdjacencyList(std::vector<DependentList::Node> prerequisite_list);
bool DecrementPrerequisiteCountAndCheckIfZero();
- std::vector<AdjacencyListNode> prerequisite_list;
+  // Called for each prerequisite that resolves or rejects for
+ // PrerequisitePolicy::kAny and each prerequisite that rejects for
+ // PrerequisitePolicy::kAll. This saves |settled_prerequisite| and returns
+ // true iff called for the first time.
+ bool MarkPrerequisiteAsSettling(AbstractPromise* settled_prerequisite);
+
+ // Invoked when this promise is notified that |canceled_prerequisite| is
+ // cancelled. Clears the reference to |canceled_prerequisite| in this
+  // AdjacencyList to ensure it is not accessed later when Clear() is
+ // called.
+ void RemoveCanceledPrerequisite(AbstractPromise* canceled_prerequisite);
+
+ std::vector<DependentList::Node>* prerequisite_list() {
+ return &prerequisite_list_;
+ }
+
+ AbstractPromise* GetFirstSettledPrerequisite() const {
+ return reinterpret_cast<AbstractPromise*>(
+ first_settled_prerequisite_.load(std::memory_order_acquire));
+ }
+
+ void Clear();
+
+ private:
+ std::vector<DependentList::Node> prerequisite_list_;
// PrerequisitePolicy::kAny waits for at most 1 resolve or N cancellations.
// PrerequisitePolicy::kAll waits for N resolves or at most 1 cancellation.
// PrerequisitePolicy::kNever doesn't use this.
- std::atomic_int action_prerequisite_count;
+ std::atomic_int action_prerequisite_count_;
- // Stores the address of the first rejecting promise. The purpose of this is
- // two-fold, first to ensure that Promises::All/Race return the first
- // prerequisite that rejected and secondly to prevent the executor from
- // being run multiple times if there's multiple rejection.
- std::atomic<uintptr_t> first_rejecting_promise{0};
+  // For PrerequisitePolicy::kAll the address of the first rejected
+  // prerequisite if any.
+  // For PrerequisitePolicy::kAny the address of the first rejected or
+  // resolved prerequisite if any.
+ std::atomic<uintptr_t> first_settled_prerequisite_{0};
};
- const std::vector<AdjacencyListNode>* prerequisite_list() const {
+ const std::vector<DependentList::Node>* prerequisite_list() const {
if (!prerequisites_)
return nullptr;
- return &prerequisites_->prerequisite_list;
+ return prerequisites_->prerequisite_list();
}
// Returns the first and only prerequisite AbstractPromise. It's an error to
// call this if the number of prerequisites isn't exactly one.
AbstractPromise* GetOnlyPrerequisite() const {
DCHECK(prerequisites_);
- DCHECK_EQ(prerequisites_->prerequisite_list.size(), 1u);
- return prerequisites_->prerequisite_list[0].prerequisite.get();
+ const std::vector<DependentList::Node>* prerequisite_list =
+ prerequisites_->prerequisite_list();
+ DCHECK_EQ(prerequisite_list->size(), 1u);
+ return (*prerequisite_list)[0].prerequisite();
}
- AbstractPromise* GetFirstRejectedPrerequisite() const;
+ // For PrerequisitePolicy::kAll returns the first rejected prerequisite if
+ // any. For PrerequisitePolicy::kAny returns the first rejected or resolved
+  // prerequisite if any.
+ AbstractPromise* GetFirstSettledPrerequisite() const;
// Calls |RunExecutor()| or posts a task to do so if |from_here_| is not
// nullopt.
@@ -429,20 +544,16 @@ class BASE_EXPORT AbstractPromise
private:
friend base::RefCountedThreadSafe<AbstractPromise>;
- template <typename ConstructType,
- typename DerivedExecutorType,
- typename... ExecutorArgs>
- AbstractPromise(scoped_refptr<TaskRunner>&& task_runner,
+ template <typename ConstructType>
+ AbstractPromise(const scoped_refptr<TaskRunner>& task_runner,
const Location& from_here,
std::unique_ptr<AdjacencyList> prerequisites,
RejectPolicy reject_policy,
- ConstructWith<ConstructType, DerivedExecutorType>,
- ExecutorArgs&&... executor_args) noexcept
- : task_runner_(std::move(task_runner)),
+ ConstructType tag,
+ PromiseExecutor::Data&& executor_data) noexcept
+ : task_runner_(task_runner),
from_here_(std::move(from_here)),
- value_(in_place_type_t<Executor>(),
- in_place_type_t<DerivedExecutorType>(),
- std::forward<ExecutorArgs>(executor_args)...),
+ value_(in_place_type_t<PromiseExecutor>(), std::move(executor_data)),
#if DCHECK_IS_ON()
reject_policy_(reject_policy),
resolve_argument_passing_type_(
@@ -452,7 +563,7 @@ class BASE_EXPORT AbstractPromise
executor_can_resolve_(GetExecutor()->CanResolve()),
executor_can_reject_(GetExecutor()->CanReject()),
#endif
- dependents_(ConstructType()),
+ dependents_(tag),
prerequisites_(std::move(prerequisites)) {
#if DCHECK_IS_ON()
{
@@ -475,17 +586,20 @@ class BASE_EXPORT AbstractPromise
NOINLINE ~AbstractPromise();
- // Returns the associated Executor if there is one.
- const Executor* GetExecutor() const;
+ // Returns the curried promise if there is one or null otherwise.
+ AbstractPromise* GetCurriedPromise();
+
+ // Returns the associated PromiseExecutor if there is one.
+ const PromiseExecutor* GetExecutor() const;
- Executor* GetExecutor() {
- return const_cast<Executor*>(
+ PromiseExecutor* GetExecutor() {
+ return const_cast<PromiseExecutor*>(
const_cast<const AbstractPromise*>(this)->GetExecutor());
}
// With the exception of curried promises, this may only be called before the
// executor has run.
- Executor::PrerequisitePolicy GetPrerequisitePolicy();
+ PromiseExecutor::PrerequisitePolicy GetPrerequisitePolicy();
void AddAsDependentForAllPrerequisites();
@@ -498,14 +612,14 @@ class BASE_EXPORT AbstractPromise
// Checks if the promise is now ready to be executed and if so posts it on the
// given task runner.
- void OnPrerequisiteResolved();
+ void OnPrerequisiteResolved(AbstractPromise* resolved_prerequisite);
// Schedules the promise for execution.
- void OnPrerequisiteRejected(AbstractPromise* rejected_promise);
+ void OnPrerequisiteRejected(AbstractPromise* rejected_prerequisite);
// Returns true if we are still potentially eligible to run despite the
// cancellation.
- bool OnPrerequisiteCancelled();
+ bool OnPrerequisiteCancelled(AbstractPromise* canceled_prerequisite);
// This promise was resolved, post any dependent promises that are now ready
// as a result.
@@ -515,6 +629,16 @@ class BASE_EXPORT AbstractPromise
// as a result.
void OnRejectDispatchReadyDependents();
+ // This promise was resolved with a curried promise, make any dependent
+ // promises depend on |non_curried_root| instead.
+ void OnResolveMakeDependantsUseCurriedPrerequisite(
+ AbstractPromise* non_curried_root);
+
+ // This promise was rejected with a curried promise, make any dependent
+ // promises depend on |non_curried_root| instead.
+ void OnRejectMakeDependantsUseCurriedPrerequisite(
+ AbstractPromise* non_curried_root);
+
void DispatchPromise();
// Reverses |list| so dependents can be dispatched in the order they where
@@ -522,9 +646,8 @@ class BASE_EXPORT AbstractPromise
static DependentList::Node* NonThreadSafeReverseList(
DependentList::Node* list);
- // Finds the non-curried root, and if settled ready dependents are posted.
- // Returns true if the non-curried root was settled.
- bool DispatchIfNonCurriedRootSettled();
+ void ReplaceCurriedPrerequisite(AbstractPromise* curried_prerequisite,
+ AbstractPromise* replacement);
scoped_refptr<TaskRunner> task_runner_;
@@ -536,6 +659,18 @@ class BASE_EXPORT AbstractPromise
// * Rejected<T>
// * scoped_refptr<AbstractPromise> (for curried promises - i.e. a promise
// which is resolved with a promise).
+ //
+ // The state transitions which occur during Execute() (which is once only) are
+ // like so:
+ //
+ // ┌────────── Executor ─────────┐
+ // | | │
+ // | | │
+ // ↓ | ↓
+ // Resolved<T> | Rejected<T>
+ // ↓
+ // scoped_refptr<AbstractPromise>
+ //
unique_any value_;
#if DCHECK_IS_ON()
@@ -547,8 +682,8 @@ class BASE_EXPORT AbstractPromise
// Cached because we need to access these values after the Executor they came
// from has gone away.
- const Executor::ArgumentPassingType resolve_argument_passing_type_;
- const Executor::ArgumentPassingType reject_argument_passing_type_;
+ const PromiseExecutor::ArgumentPassingType resolve_argument_passing_type_;
+ const PromiseExecutor::ArgumentPassingType reject_argument_passing_type_;
const bool executor_can_resolve_;
const bool executor_can_reject_;
@@ -586,7 +721,7 @@ class BASE_EXPORT AbstractPromise
void CheckForDoubleMoveErrors(
const base::Location& new_dependent_location,
- Executor::ArgumentPassingType new_dependent_executor_type);
+ PromiseExecutor::ArgumentPassingType new_dependent_executor_type);
private:
const Location from_here_;
@@ -626,11 +761,6 @@ class BASE_EXPORT AbstractPromise
std::unique_ptr<AdjacencyList> prerequisites_;
};
-// static
-template <typename T>
-const AbstractPromise::Executor::VTable
- AbstractPromise::Executor::VTableHelper<T>::vtable_;
-
} // namespace internal
} // namespace base
diff --git a/chromium/base/task/promise/abstract_promise_unittest.cc b/chromium/base/task/promise/abstract_promise_unittest.cc
index e8a63e4f3a5..ebee591e4e8 100644
--- a/chromium/base/task/promise/abstract_promise_unittest.cc
+++ b/chromium/base/task/promise/abstract_promise_unittest.cc
@@ -4,10 +4,12 @@
#include "base/task/promise/abstract_promise.h"
+#include "base/task/post_task.h"
#include "base/test/bind_test_util.h"
#include "base/test/do_nothing_promise.h"
#include "base/test/gtest_util.h"
#include "base/test/scoped_task_environment.h"
+#include "base/test/test_simple_task_runner.h"
#include "base/threading/thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -25,13 +27,25 @@
using testing::ElementsAre;
using ArgumentPassingType =
- base::internal::AbstractPromise::Executor::ArgumentPassingType;
+ base::internal::PromiseExecutor::ArgumentPassingType;
-using PrerequisitePolicy =
- base::internal::AbstractPromise::Executor::PrerequisitePolicy;
+using PrerequisitePolicy = base::internal::PromiseExecutor::PrerequisitePolicy;
namespace base {
namespace internal {
+namespace {
+
+size_t CountTasksRunUntilIdle(
+ const scoped_refptr<TestSimpleTaskRunner>& task_runner) {
+ size_t count = 0;
+ while (task_runner->HasPendingTask()) {
+ count += task_runner->NumPendingTasks();
+ task_runner->RunPendingTasks();
+ }
+ return count;
+}
+
+} // namespace
class TestExecutor {
public:
@@ -104,7 +118,7 @@ class AbstractPromiseTest : public testing::Test {
std::unique_ptr<AbstractPromise::AdjacencyList> prerequisites;
PrerequisitePolicy prerequisite_policy =
- AbstractPromise::Executor::PrerequisitePolicy::kAll;
+ PromiseExecutor::PrerequisitePolicy::kAll;
bool executor_can_resolve = true;
@@ -184,17 +198,18 @@ class AbstractPromiseTest : public testing::Test {
}
operator scoped_refptr<AbstractPromise>() {
- return AbstractPromise::Create(
- std::move(settings.task_runner), settings.from_here,
- std::move(settings.prerequisites), settings.reject_policy,
- AbstractPromise::ConstructWith<DependentList::ConstructUnresolved,
- TestExecutor>(),
- settings.prerequisite_policy,
+ PromiseExecutor::Data executor_data(
+ in_place_type_t<TestExecutor>(), settings.prerequisite_policy,
#if DCHECK_IS_ON()
settings.resolve_executor_type, settings.reject_executor_type,
settings.executor_can_resolve, settings.executor_can_reject,
#endif
std::move(settings.callback));
+
+ return AbstractPromise::Create(
+ settings.task_runner, settings.from_here,
+ std::move(settings.prerequisites), settings.reject_policy,
+ DependentList::ConstructUnresolved(), std::move(executor_data));
}
private:
@@ -204,9 +219,9 @@ class AbstractPromiseTest : public testing::Test {
PromiseSettingsBuilder ThenPromise(Location from_here,
scoped_refptr<AbstractPromise> parent) {
PromiseSettingsBuilder builder(
- from_here, parent ? std::make_unique<AbstractPromise::AdjacencyList>(
- std::move(parent))
- : std::make_unique<AbstractPromise::AdjacencyList>());
+ from_here,
+ parent ? std::make_unique<AbstractPromise::AdjacencyList>(parent.get())
+ : std::make_unique<AbstractPromise::AdjacencyList>());
builder.With(BindOnce([](AbstractPromise* p) {
AbstractPromise* prerequisite = p->GetOnlyPrerequisite();
if (prerequisite->IsResolved()) {
@@ -216,6 +231,8 @@ class AbstractPromiseTest : public testing::Test {
// Consistent with BaseThenAndCatchExecutor::ProcessNullExecutor.
p->emplace(scoped_refptr<AbstractPromise>(prerequisite));
p->OnResolved();
+ } else {
+ NOTREACHED();
}
}));
return builder;
@@ -224,9 +241,9 @@ class AbstractPromiseTest : public testing::Test {
PromiseSettingsBuilder CatchPromise(Location from_here,
scoped_refptr<AbstractPromise> parent) {
PromiseSettingsBuilder builder(
- from_here, parent ? std::make_unique<AbstractPromise::AdjacencyList>(
- std::move(parent))
- : std::make_unique<AbstractPromise::AdjacencyList>());
+ from_here,
+ parent ? std::make_unique<AbstractPromise::AdjacencyList>(parent.get())
+ : std::make_unique<AbstractPromise::AdjacencyList>());
builder.With(CallbackResultType::kNoCallback)
.With(CallbackResultType::kCanResolve)
.WithResolve(ArgumentPassingType::kNoCallback)
@@ -240,6 +257,8 @@ class AbstractPromiseTest : public testing::Test {
} else if (prerequisite->IsRejected()) {
p->emplace(Resolved<void>());
p->OnResolved();
+ } else {
+ NOTREACHED();
}
}));
return builder;
@@ -247,22 +266,19 @@ class AbstractPromiseTest : public testing::Test {
PromiseSettingsBuilder AllPromise(
Location from_here,
- std::vector<internal::AbstractPromise::AdjacencyListNode>
- prerequisite_list) {
+ std::vector<internal::DependentList::Node> prerequisite_list) {
PromiseSettingsBuilder builder(
from_here, std::make_unique<AbstractPromise::AdjacencyList>(
std::move(prerequisite_list)));
builder.With(PrerequisitePolicy::kAll)
.With(BindOnce([](AbstractPromise* p) {
- // Reject if any prerequisites rejected.
- for (const AbstractPromise::AdjacencyListNode& node :
- *p->prerequisite_list()) {
- if (node.prerequisite->IsRejected()) {
- p->emplace(Rejected<void>());
- p->OnRejected();
- return;
- }
+ AbstractPromise* first_settled = p->GetFirstSettledPrerequisite();
+ if (first_settled && first_settled->IsRejected()) {
+ p->emplace(Rejected<void>());
+ p->OnRejected();
+ return;
}
+
p->emplace(Resolved<void>());
p->OnResolved();
}));
@@ -271,22 +287,19 @@ class AbstractPromiseTest : public testing::Test {
PromiseSettingsBuilder AnyPromise(
Location from_here,
- std::vector<internal::AbstractPromise::AdjacencyListNode>
- prerequisite_list) {
+ std::vector<internal::DependentList::Node> prerequisite_list) {
PromiseSettingsBuilder builder(
from_here, std::make_unique<AbstractPromise::AdjacencyList>(
std::move(prerequisite_list)));
builder.With(PrerequisitePolicy::kAny)
.With(BindOnce([](AbstractPromise* p) {
- // Reject if any prerequisites rejected.
- for (const AbstractPromise::AdjacencyListNode& node :
- *p->prerequisite_list()) {
- if (node.prerequisite->IsRejected()) {
- p->emplace(Rejected<void>());
- p->OnRejected();
- return;
- }
+ AbstractPromise* first_settled = p->GetFirstSettledPrerequisite();
+ if (first_settled && first_settled->IsRejected()) {
+ p->emplace(Rejected<void>());
+ p->OnRejected();
+ return;
}
+
p->emplace(Resolved<void>());
p->OnResolved();
}));
@@ -299,26 +312,26 @@ class AbstractPromiseTest : public testing::Test {
TEST_F(AbstractPromiseTest, UnfulfilledPromise) {
scoped_refptr<AbstractPromise> promise =
DoNothingPromiseBuilder(FROM_HERE).SetCanResolve(true);
- EXPECT_FALSE(promise->IsResolved());
- EXPECT_FALSE(promise->IsRejected());
+ EXPECT_FALSE(promise->IsResolvedForTesting());
+ EXPECT_FALSE(promise->IsRejectedForTesting());
EXPECT_FALSE(promise->IsCanceled());
}
TEST_F(AbstractPromiseTest, OnResolve) {
scoped_refptr<AbstractPromise> promise =
DoNothingPromiseBuilder(FROM_HERE).SetCanResolve(true);
- EXPECT_FALSE(promise->IsResolved());
+ EXPECT_FALSE(promise->IsResolvedForTesting());
promise->OnResolved();
- EXPECT_TRUE(promise->IsResolved());
+ EXPECT_TRUE(promise->IsResolvedForTesting());
}
TEST_F(AbstractPromiseTest, OnReject) {
scoped_refptr<AbstractPromise> promise =
DoNothingPromiseBuilder(FROM_HERE).SetCanReject(true).SetRejectPolicy(
RejectPolicy::kCatchNotRequired);
- EXPECT_FALSE(promise->IsRejected());
+ EXPECT_FALSE(promise->IsRejectedForTesting());
promise->OnRejected();
- EXPECT_TRUE(promise->IsRejected());
+ EXPECT_TRUE(promise->IsRejectedForTesting());
}
TEST_F(AbstractPromiseTest, ExecuteOnResolve) {
@@ -328,9 +341,9 @@ TEST_F(AbstractPromiseTest, ExecuteOnResolve) {
p->OnResolved();
}));
- EXPECT_FALSE(promise->IsResolved());
+ EXPECT_FALSE(promise->IsResolvedForTesting());
promise->Execute();
- EXPECT_TRUE(promise->IsResolved());
+ EXPECT_TRUE(promise->IsResolvedForTesting());
}
TEST_F(AbstractPromiseTest, ExecuteOnReject) {
@@ -343,9 +356,9 @@ TEST_F(AbstractPromiseTest, ExecuteOnReject) {
p->OnRejected();
}));
- EXPECT_FALSE(promise->IsRejected());
+ EXPECT_FALSE(promise->IsRejectedForTesting());
promise->Execute();
- EXPECT_TRUE(promise->IsRejected());
+ EXPECT_TRUE(promise->IsRejectedForTesting());
}
TEST_F(AbstractPromiseTest, ExecutionChain) {
@@ -358,15 +371,15 @@ TEST_F(AbstractPromiseTest, ExecutionChain) {
p1->OnResolved();
- EXPECT_FALSE(p2->IsResolved());
- EXPECT_FALSE(p3->IsResolved());
- EXPECT_FALSE(p4->IsResolved());
- EXPECT_FALSE(p5->IsResolved());
+ EXPECT_FALSE(p2->IsResolvedForTesting());
+ EXPECT_FALSE(p3->IsResolvedForTesting());
+ EXPECT_FALSE(p4->IsResolvedForTesting());
+ EXPECT_FALSE(p5->IsResolvedForTesting());
RunLoop().RunUntilIdle();
- EXPECT_TRUE(p1->IsResolved());
- EXPECT_TRUE(p3->IsResolved());
- EXPECT_TRUE(p4->IsResolved());
- EXPECT_TRUE(p5->IsResolved());
+ EXPECT_TRUE(p1->IsResolvedForTesting());
+ EXPECT_TRUE(p3->IsResolvedForTesting());
+ EXPECT_TRUE(p4->IsResolvedForTesting());
+ EXPECT_TRUE(p5->IsResolvedForTesting());
}
TEST_F(AbstractPromiseTest, MoveExecutionChain) {
@@ -387,15 +400,15 @@ TEST_F(AbstractPromiseTest, MoveExecutionChain) {
p1->OnResolved();
- EXPECT_FALSE(p2->IsResolved());
- EXPECT_FALSE(p3->IsResolved());
- EXPECT_FALSE(p4->IsResolved());
- EXPECT_FALSE(p5->IsResolved());
+ EXPECT_FALSE(p2->IsResolvedForTesting());
+ EXPECT_FALSE(p3->IsResolvedForTesting());
+ EXPECT_FALSE(p4->IsResolvedForTesting());
+ EXPECT_FALSE(p5->IsResolvedForTesting());
RunLoop().RunUntilIdle();
- EXPECT_TRUE(p1->IsResolved());
- EXPECT_TRUE(p3->IsResolved());
- EXPECT_TRUE(p4->IsResolved());
- EXPECT_TRUE(p5->IsResolved());
+ EXPECT_TRUE(p1->IsResolvedForTesting());
+ EXPECT_TRUE(p3->IsResolvedForTesting());
+ EXPECT_TRUE(p4->IsResolvedForTesting());
+ EXPECT_TRUE(p5->IsResolvedForTesting());
}
TEST_F(AbstractPromiseTest, MoveResolveCatchExecutionChain) {
@@ -440,15 +453,15 @@ TEST_F(AbstractPromiseTest, MoveResolveCatchExecutionChain) {
p1->OnResolved();
- EXPECT_FALSE(p2->IsRejected());
- EXPECT_FALSE(p3->IsResolved());
- EXPECT_FALSE(p4->IsRejected());
- EXPECT_FALSE(p5->IsResolved());
+ EXPECT_FALSE(p2->IsRejectedForTesting());
+ EXPECT_FALSE(p3->IsResolvedForTesting());
+ EXPECT_FALSE(p4->IsRejectedForTesting());
+ EXPECT_FALSE(p5->IsResolvedForTesting());
RunLoop().RunUntilIdle();
- EXPECT_TRUE(p2->IsRejected());
- EXPECT_TRUE(p3->IsResolved());
- EXPECT_TRUE(p4->IsRejected());
- EXPECT_TRUE(p5->IsResolved());
+ EXPECT_TRUE(p2->IsRejectedForTesting());
+ EXPECT_TRUE(p3->IsResolvedForTesting());
+ EXPECT_TRUE(p4->IsRejectedForTesting());
+ EXPECT_TRUE(p5->IsResolvedForTesting());
}
TEST_F(AbstractPromiseTest, MoveResolveCatchExecutionChainType2) {
@@ -528,23 +541,23 @@ TEST_F(AbstractPromiseTest, MoveResolveCatchExecutionChainType2) {
}));
p1->OnResolved();
- EXPECT_FALSE(p2->IsRejected());
- EXPECT_FALSE(p3->IsRejected());
- EXPECT_FALSE(p4->IsResolved());
- EXPECT_FALSE(p5->IsResolved());
- EXPECT_FALSE(p6->IsRejected());
- EXPECT_FALSE(p7->IsRejected());
- EXPECT_FALSE(p8->IsResolved());
- EXPECT_FALSE(p9->IsResolved());
+ EXPECT_FALSE(p2->IsRejectedForTesting());
+ EXPECT_FALSE(p3->IsRejectedForTesting());
+ EXPECT_FALSE(p4->IsResolvedForTesting());
+ EXPECT_FALSE(p5->IsResolvedForTesting());
+ EXPECT_FALSE(p6->IsRejectedForTesting());
+ EXPECT_FALSE(p7->IsRejectedForTesting());
+ EXPECT_FALSE(p8->IsResolvedForTesting());
+ EXPECT_FALSE(p9->IsResolvedForTesting());
RunLoop().RunUntilIdle();
- EXPECT_TRUE(p2->IsRejected());
- EXPECT_TRUE(p3->IsRejected());
- EXPECT_TRUE(p4->IsResolved());
- EXPECT_TRUE(p5->IsResolved());
- EXPECT_TRUE(p6->IsRejected());
- EXPECT_TRUE(p7->IsRejected());
- EXPECT_TRUE(p8->IsResolved());
- EXPECT_TRUE(p9->IsResolved());
+ EXPECT_TRUE(p2->IsRejectedForTesting());
+ EXPECT_TRUE(p3->IsRejectedForTesting());
+ EXPECT_TRUE(p4->IsResolvedForTesting());
+ EXPECT_TRUE(p5->IsResolvedForTesting());
+ EXPECT_TRUE(p6->IsRejectedForTesting());
+ EXPECT_TRUE(p7->IsRejectedForTesting());
+ EXPECT_TRUE(p8->IsResolvedForTesting());
+ EXPECT_TRUE(p9->IsResolvedForTesting());
}
TEST_F(AbstractPromiseTest, MixedMoveAndNormalExecutionChain) {
@@ -563,15 +576,15 @@ TEST_F(AbstractPromiseTest, MixedMoveAndNormalExecutionChain) {
p1->OnResolved();
- EXPECT_FALSE(p2->IsResolved());
- EXPECT_FALSE(p3->IsResolved());
- EXPECT_FALSE(p4->IsResolved());
- EXPECT_FALSE(p5->IsResolved());
+ EXPECT_FALSE(p2->IsResolvedForTesting());
+ EXPECT_FALSE(p3->IsResolvedForTesting());
+ EXPECT_FALSE(p4->IsResolvedForTesting());
+ EXPECT_FALSE(p5->IsResolvedForTesting());
RunLoop().RunUntilIdle();
- EXPECT_TRUE(p1->IsResolved());
- EXPECT_TRUE(p3->IsResolved());
- EXPECT_TRUE(p4->IsResolved());
- EXPECT_TRUE(p5->IsResolved());
+ EXPECT_TRUE(p1->IsResolvedForTesting());
+ EXPECT_TRUE(p3->IsResolvedForTesting());
+ EXPECT_TRUE(p4->IsResolvedForTesting());
+ EXPECT_TRUE(p5->IsResolvedForTesting());
}
TEST_F(AbstractPromiseTest, MoveAtEndOfChain) {
@@ -592,15 +605,15 @@ TEST_F(AbstractPromiseTest, BranchedExecutionChain) {
p1->OnResolved();
- EXPECT_FALSE(p2->IsResolved());
- EXPECT_FALSE(p3->IsResolved());
- EXPECT_FALSE(p4->IsResolved());
- EXPECT_FALSE(p5->IsResolved());
+ EXPECT_FALSE(p2->IsResolvedForTesting());
+ EXPECT_FALSE(p3->IsResolvedForTesting());
+ EXPECT_FALSE(p4->IsResolvedForTesting());
+ EXPECT_FALSE(p5->IsResolvedForTesting());
RunLoop().RunUntilIdle();
- EXPECT_TRUE(p2->IsResolved());
- EXPECT_TRUE(p3->IsResolved());
- EXPECT_TRUE(p4->IsResolved());
- EXPECT_TRUE(p5->IsResolved());
+ EXPECT_TRUE(p2->IsResolvedForTesting());
+ EXPECT_TRUE(p3->IsResolvedForTesting());
+ EXPECT_TRUE(p4->IsResolvedForTesting());
+ EXPECT_TRUE(p5->IsResolvedForTesting());
}
TEST_F(AbstractPromiseTest, PrerequisiteAlreadyResolved) {
@@ -610,9 +623,9 @@ TEST_F(AbstractPromiseTest, PrerequisiteAlreadyResolved) {
scoped_refptr<AbstractPromise> p2 = ThenPromise(FROM_HERE, p1);
- EXPECT_FALSE(p2->IsResolved());
+ EXPECT_FALSE(p2->IsResolvedForTesting());
RunLoop().RunUntilIdle();
- EXPECT_TRUE(p2->IsResolved());
+ EXPECT_TRUE(p2->IsResolvedForTesting());
}
TEST_F(AbstractPromiseTest, PrerequisiteAlreadyRejected) {
@@ -624,14 +637,16 @@ TEST_F(AbstractPromiseTest, PrerequisiteAlreadyRejected) {
scoped_refptr<AbstractPromise> p2 =
CatchPromise(FROM_HERE, p1)
.With(BindLambdaForTesting([&](AbstractPromise* p) {
- EXPECT_EQ(p->GetFirstRejectedPrerequisite(), p1);
+ EXPECT_TRUE(
+ p->GetFirstSettledPrerequisite()->IsRejectedForTesting());
+ EXPECT_EQ(p->GetFirstSettledPrerequisite(), p1);
p->emplace(Resolved<void>());
p->OnResolved();
}));
- EXPECT_FALSE(p2->IsResolved());
+ EXPECT_FALSE(p2->IsResolvedForTesting());
RunLoop().RunUntilIdle();
- EXPECT_TRUE(p2->IsResolved());
+ EXPECT_TRUE(p2->IsResolvedForTesting());
}
TEST_F(AbstractPromiseTest, MultipleResolvedPrerequisitePolicyALL) {
@@ -644,12 +659,11 @@ TEST_F(AbstractPromiseTest, MultipleResolvedPrerequisitePolicyALL) {
scoped_refptr<AbstractPromise> p4 =
DoNothingPromiseBuilder(FROM_HERE).SetCanResolve(true);
- std::vector<internal::AbstractPromise::AdjacencyListNode> prerequisite_list(
- 4);
- prerequisite_list[0].prerequisite = p1;
- prerequisite_list[1].prerequisite = p2;
- prerequisite_list[2].prerequisite = p3;
- prerequisite_list[3].prerequisite = p4;
+ std::vector<internal::DependentList::Node> prerequisite_list(4);
+ prerequisite_list[0].SetPrerequisite(p1.get());
+ prerequisite_list[1].SetPrerequisite(p2.get());
+ prerequisite_list[2].SetPrerequisite(p3.get());
+ prerequisite_list[3].SetPrerequisite(p4.get());
scoped_refptr<AbstractPromise> all_promise =
AllPromise(FROM_HERE, std::move(prerequisite_list));
@@ -659,11 +673,11 @@ TEST_F(AbstractPromiseTest, MultipleResolvedPrerequisitePolicyALL) {
p3->OnResolved();
RunLoop().RunUntilIdle();
- EXPECT_FALSE(all_promise->IsResolved());
+ EXPECT_FALSE(all_promise->IsResolvedForTesting());
p4->OnResolved();
RunLoop().RunUntilIdle();
- EXPECT_TRUE(all_promise->IsResolved());
+ EXPECT_TRUE(all_promise->IsResolvedForTesting());
}
TEST_F(AbstractPromiseTest,
@@ -678,11 +692,10 @@ TEST_F(AbstractPromiseTest,
}
scoped_refptr<AbstractPromise> promise[num_promises];
- std::vector<internal::AbstractPromise::AdjacencyListNode> prerequisite_list(
- num_promises);
+ std::vector<internal::DependentList::Node> prerequisite_list(num_promises);
for (int i = 0; i < num_promises; i++) {
promise[i] = DoNothingPromiseBuilder(FROM_HERE).SetCanResolve(true);
- prerequisite_list[i].prerequisite = promise[i];
+ prerequisite_list[i].SetPrerequisite(promise[i].get());
}
scoped_refptr<AbstractPromise> all_promise =
@@ -691,15 +704,17 @@ TEST_F(AbstractPromiseTest,
RunLoop run_loop;
scoped_refptr<AbstractPromise> p2 =
ThenPromise(FROM_HERE, all_promise)
- .With(BindLambdaForTesting(
- [&](AbstractPromise* p) { run_loop.Quit(); }));
+ .With(BindLambdaForTesting([&](AbstractPromise* p) {
+ run_loop.Quit();
+ p->OnResolved();
+ }));
for (int i = 0; i < num_promises; i++) {
thread[i % num_threads]->task_runner()->PostTask(
FROM_HERE, BindOnce(
[](scoped_refptr<AbstractPromise> all_promise,
scoped_refptr<AbstractPromise> promise) {
- EXPECT_FALSE(all_promise->IsResolved());
+ EXPECT_FALSE(all_promise->IsResolvedForTesting());
promise->OnResolved();
},
all_promise, promise[i]));
@@ -708,10 +723,10 @@ TEST_F(AbstractPromiseTest,
run_loop.Run();
for (int i = 0; i < num_promises; i++) {
- EXPECT_TRUE(promise[i]->IsResolved());
+ EXPECT_TRUE(promise[i]->IsResolvedForTesting());
}
- EXPECT_TRUE(all_promise->IsResolved());
+ EXPECT_TRUE(all_promise->IsResolvedForTesting());
for (int i = 0; i < num_threads; i++) {
thread[i]->Stop();
@@ -732,18 +747,19 @@ TEST_F(AbstractPromiseTest, SingleRejectPrerequisitePolicyALL) {
DoNothingPromiseBuilder(FROM_HERE).SetCanReject(true).SetCanResolve(
false);
- std::vector<internal::AbstractPromise::AdjacencyListNode> prerequisite_list(
- 4);
- prerequisite_list[0].prerequisite = p1;
- prerequisite_list[1].prerequisite = p2;
- prerequisite_list[2].prerequisite = p3;
- prerequisite_list[3].prerequisite = p4;
+ std::vector<internal::DependentList::Node> prerequisite_list(4);
+ prerequisite_list[0].SetPrerequisite(p1.get());
+ prerequisite_list[1].SetPrerequisite(p2.get());
+ prerequisite_list[2].SetPrerequisite(p3.get());
+ prerequisite_list[3].SetPrerequisite(p4.get());
scoped_refptr<AbstractPromise> all_promise =
AllPromise(FROM_HERE, std::move(prerequisite_list))
.With(CallbackResultType::kCanResolveOrReject)
.With(BindLambdaForTesting([&](AbstractPromise* p) {
- EXPECT_EQ(p->GetFirstRejectedPrerequisite(), p3);
+ EXPECT_TRUE(
+ p->GetFirstSettledPrerequisite()->IsRejectedForTesting());
+ EXPECT_EQ(p->GetFirstSettledPrerequisite(), p3);
p->emplace(Rejected<void>());
p->OnRejected();
}));
@@ -752,8 +768,8 @@ TEST_F(AbstractPromiseTest, SingleRejectPrerequisitePolicyALL) {
p3->OnRejected();
RunLoop().RunUntilIdle();
- EXPECT_TRUE(all_promise->IsRejected());
- EXPECT_TRUE(p5->IsResolved());
+ EXPECT_TRUE(all_promise->IsRejectedForTesting());
+ EXPECT_TRUE(p5->IsResolvedForTesting());
}
TEST_F(AbstractPromiseTest, MultipleRejectPrerequisitePolicyALL) {
@@ -770,19 +786,19 @@ TEST_F(AbstractPromiseTest, MultipleRejectPrerequisitePolicyALL) {
DoNothingPromiseBuilder(FROM_HERE).SetCanReject(true).SetCanResolve(
false);
- std::vector<internal::AbstractPromise::AdjacencyListNode> prerequisite_list(
- 4);
- prerequisite_list[0].prerequisite = p1;
- prerequisite_list[1].prerequisite = p2;
- prerequisite_list[2].prerequisite = p3;
- prerequisite_list[3].prerequisite = p4;
+ std::vector<internal::DependentList::Node> prerequisite_list(4);
+ prerequisite_list[0].SetPrerequisite(p1.get());
+ prerequisite_list[1].SetPrerequisite(p2.get());
+ prerequisite_list[2].SetPrerequisite(p3.get());
+ prerequisite_list[3].SetPrerequisite(p4.get());
scoped_refptr<AbstractPromise> all_promise =
AllPromise(FROM_HERE, std::move(prerequisite_list))
.With(CallbackResultType::kCanResolveOrReject)
.With(BindLambdaForTesting([&](AbstractPromise* p) {
- if (AbstractPromise* rejected = p->GetFirstRejectedPrerequisite()) {
- EXPECT_EQ(rejected, p2);
+ AbstractPromise* settled = p->GetFirstSettledPrerequisite();
+ if (settled && settled->IsRejected()) {
+ EXPECT_EQ(settled, p2);
p->emplace(Rejected<void>());
p->OnRejected();
} else {
@@ -794,7 +810,9 @@ TEST_F(AbstractPromiseTest, MultipleRejectPrerequisitePolicyALL) {
CatchPromise(FROM_HERE, all_promise)
.With(BindLambdaForTesting([&](AbstractPromise* p) {
EXPECT_FALSE(p->IsSettled()); // Should only happen once.
- EXPECT_EQ(p->GetFirstRejectedPrerequisite(), all_promise);
+ EXPECT_TRUE(
+ p->GetFirstSettledPrerequisite()->IsRejectedForTesting());
+ EXPECT_EQ(p->GetFirstSettledPrerequisite(), all_promise);
p->emplace(Resolved<void>());
p->OnResolved();
}));
@@ -803,11 +821,11 @@ TEST_F(AbstractPromiseTest, MultipleRejectPrerequisitePolicyALL) {
p1->OnRejected();
p3->OnRejected();
RunLoop().RunUntilIdle();
- EXPECT_TRUE(all_promise->IsRejected());
- EXPECT_TRUE(p5->IsResolved());
+ EXPECT_TRUE(all_promise->IsRejectedForTesting());
+ EXPECT_TRUE(p5->IsResolvedForTesting());
}
-TEST_F(AbstractPromiseTest, SingleResolvedPrerequisitesPrerequisitePolicyANY) {
+TEST_F(AbstractPromiseTest, SingleResolvedPrerequisitePolicyANY) {
scoped_refptr<AbstractPromise> p1 =
DoNothingPromiseBuilder(FROM_HERE).SetCanResolve(true);
scoped_refptr<AbstractPromise> p2 =
@@ -817,19 +835,43 @@ TEST_F(AbstractPromiseTest, SingleResolvedPrerequisitesPrerequisitePolicyANY) {
scoped_refptr<AbstractPromise> p4 =
DoNothingPromiseBuilder(FROM_HERE).SetCanResolve(true);
- std::vector<internal::AbstractPromise::AdjacencyListNode> prerequisite_list(
- 4);
- prerequisite_list[0].prerequisite = p1;
- prerequisite_list[1].prerequisite = p2;
- prerequisite_list[2].prerequisite = p3;
- prerequisite_list[3].prerequisite = p4;
+ std::vector<internal::DependentList::Node> prerequisite_list(4);
+ prerequisite_list[0].SetPrerequisite(p1.get());
+ prerequisite_list[1].SetPrerequisite(p2.get());
+ prerequisite_list[2].SetPrerequisite(p3.get());
+ prerequisite_list[3].SetPrerequisite(p4.get());
scoped_refptr<AbstractPromise> any_promise =
AnyPromise(FROM_HERE, std::move(prerequisite_list));
p2->OnResolved();
RunLoop().RunUntilIdle();
- EXPECT_TRUE(any_promise->IsResolved());
+ EXPECT_TRUE(any_promise->IsResolvedForTesting());
+}
+
+TEST_F(AbstractPromiseTest, MultipleResolvedPrerequisitePolicyANY) {
+ scoped_refptr<AbstractPromise> p1 =
+ DoNothingPromiseBuilder(FROM_HERE).SetCanResolve(true);
+ scoped_refptr<AbstractPromise> p2 =
+ DoNothingPromiseBuilder(FROM_HERE).SetCanResolve(true);
+ scoped_refptr<AbstractPromise> p3 =
+ DoNothingPromiseBuilder(FROM_HERE).SetCanResolve(true);
+ scoped_refptr<AbstractPromise> p4 =
+ DoNothingPromiseBuilder(FROM_HERE).SetCanResolve(true);
+
+ std::vector<internal::DependentList::Node> prerequisite_list(4);
+ prerequisite_list[0].SetPrerequisite(p1.get());
+ prerequisite_list[1].SetPrerequisite(p2.get());
+ prerequisite_list[2].SetPrerequisite(p3.get());
+ prerequisite_list[3].SetPrerequisite(p4.get());
+
+ scoped_refptr<AbstractPromise> any_promise =
+ AnyPromise(FROM_HERE, std::move(prerequisite_list));
+
+ p1->OnResolved();
+ p2->OnResolved();
+ RunLoop().RunUntilIdle();
+ EXPECT_TRUE(any_promise->IsResolvedForTesting());
}
TEST_F(AbstractPromiseTest, SingleRejectPrerequisitePolicyANY) {
@@ -846,12 +888,11 @@ TEST_F(AbstractPromiseTest, SingleRejectPrerequisitePolicyANY) {
DoNothingPromiseBuilder(FROM_HERE).SetCanReject(true).SetCanResolve(
false);
- std::vector<internal::AbstractPromise::AdjacencyListNode> prerequisite_list(
- 4);
- prerequisite_list[0].prerequisite = p1;
- prerequisite_list[1].prerequisite = p2;
- prerequisite_list[2].prerequisite = p3;
- prerequisite_list[3].prerequisite = p4;
+ std::vector<internal::DependentList::Node> prerequisite_list(4);
+ prerequisite_list[0].SetPrerequisite(p1.get());
+ prerequisite_list[1].SetPrerequisite(p2.get());
+ prerequisite_list[2].SetPrerequisite(p3.get());
+ prerequisite_list[3].SetPrerequisite(p4.get());
scoped_refptr<AbstractPromise> any_promise =
AnyPromise(FROM_HERE, std::move(prerequisite_list))
@@ -861,8 +902,8 @@ TEST_F(AbstractPromiseTest, SingleRejectPrerequisitePolicyANY) {
p3->OnRejected();
RunLoop().RunUntilIdle();
- EXPECT_TRUE(any_promise->IsRejected());
- EXPECT_TRUE(p5->IsResolved());
+ EXPECT_TRUE(any_promise->IsRejectedForTesting());
+ EXPECT_TRUE(p5->IsResolvedForTesting());
}
TEST_F(AbstractPromiseTest, SingleResolvePrerequisitePolicyANY) {
@@ -875,12 +916,11 @@ TEST_F(AbstractPromiseTest, SingleResolvePrerequisitePolicyANY) {
scoped_refptr<AbstractPromise> p4 =
DoNothingPromiseBuilder(FROM_HERE).SetCanResolve(true);
- std::vector<internal::AbstractPromise::AdjacencyListNode> prerequisite_list(
- 4);
- prerequisite_list[0].prerequisite = p1;
- prerequisite_list[1].prerequisite = p2;
- prerequisite_list[2].prerequisite = p3;
- prerequisite_list[3].prerequisite = p4;
+ std::vector<internal::DependentList::Node> prerequisite_list(4);
+ prerequisite_list[0].SetPrerequisite(p1.get());
+ prerequisite_list[1].SetPrerequisite(p2.get());
+ prerequisite_list[2].SetPrerequisite(p3.get());
+ prerequisite_list[3].SetPrerequisite(p4.get());
scoped_refptr<AbstractPromise> any_promise =
AnyPromise(FROM_HERE, std::move(prerequisite_list));
@@ -889,8 +929,8 @@ TEST_F(AbstractPromiseTest, SingleResolvePrerequisitePolicyANY) {
p3->OnResolved();
RunLoop().RunUntilIdle();
- EXPECT_TRUE(any_promise->IsResolved());
- EXPECT_TRUE(p5->IsResolved());
+ EXPECT_TRUE(any_promise->IsResolvedForTesting());
+ EXPECT_TRUE(p5->IsResolvedForTesting());
}
TEST_F(AbstractPromiseTest, IsCanceled) {
@@ -980,11 +1020,10 @@ TEST_F(AbstractPromiseTest, CancelationPrerequisitePolicyALL) {
scoped_refptr<AbstractPromise> p3 =
DoNothingPromiseBuilder(FROM_HERE).SetCanResolve(true);
- std::vector<internal::AbstractPromise::AdjacencyListNode> prerequisite_list(
- 3);
- prerequisite_list[0].prerequisite = p1;
- prerequisite_list[1].prerequisite = p2;
- prerequisite_list[2].prerequisite = p3;
+ std::vector<internal::DependentList::Node> prerequisite_list(3);
+ prerequisite_list[0].SetPrerequisite(p1.get());
+ prerequisite_list[1].SetPrerequisite(p2.get());
+ prerequisite_list[2].SetPrerequisite(p3.get());
scoped_refptr<AbstractPromise> all_promise =
AllPromise(FROM_HERE, std::move(prerequisite_list));
@@ -1001,11 +1040,10 @@ TEST_F(AbstractPromiseTest, CancelationPrerequisitePolicyANY) {
scoped_refptr<AbstractPromise> p3 =
DoNothingPromiseBuilder(FROM_HERE).SetCanResolve(true);
- std::vector<internal::AbstractPromise::AdjacencyListNode> prerequisite_list(
- 3);
- prerequisite_list[0].prerequisite = p1;
- prerequisite_list[1].prerequisite = p2;
- prerequisite_list[2].prerequisite = p3;
+ std::vector<internal::DependentList::Node> prerequisite_list(3);
+ prerequisite_list[0].SetPrerequisite(p1.get());
+ prerequisite_list[1].SetPrerequisite(p2.get());
+ prerequisite_list[2].SetPrerequisite(p3.get());
scoped_refptr<AbstractPromise> any_promise =
AnyPromise(FROM_HERE, std::move(prerequisite_list));
@@ -1026,11 +1064,10 @@ TEST_F(AbstractPromiseTest, AlreadyCanceledPrerequisitePolicyALL) {
scoped_refptr<AbstractPromise> p3 =
DoNothingPromiseBuilder(FROM_HERE).SetCanResolve(true);
- std::vector<internal::AbstractPromise::AdjacencyListNode> prerequisite_list(
- 3);
- prerequisite_list[0].prerequisite = p1;
- prerequisite_list[1].prerequisite = p2;
- prerequisite_list[2].prerequisite = p3;
+ std::vector<internal::DependentList::Node> prerequisite_list(3);
+ prerequisite_list[0].SetPrerequisite(p1.get());
+ prerequisite_list[1].SetPrerequisite(p2.get());
+ prerequisite_list[2].SetPrerequisite(p3.get());
p2->OnCanceled();
scoped_refptr<AbstractPromise> all_promise =
@@ -1047,11 +1084,10 @@ TEST_F(AbstractPromiseTest, SomeAlreadyCanceledPrerequisitePolicyANY) {
scoped_refptr<AbstractPromise> p3 =
DoNothingPromiseBuilder(FROM_HERE).SetCanResolve(true);
- std::vector<internal::AbstractPromise::AdjacencyListNode> prerequisite_list(
- 3);
- prerequisite_list[0].prerequisite = p1;
- prerequisite_list[1].prerequisite = p2;
- prerequisite_list[2].prerequisite = p3;
+ std::vector<internal::DependentList::Node> prerequisite_list(3);
+ prerequisite_list[0].SetPrerequisite(p1.get());
+ prerequisite_list[1].SetPrerequisite(p2.get());
+ prerequisite_list[2].SetPrerequisite(p3.get());
p2->OnCanceled();
scoped_refptr<AbstractPromise> any_promise =
@@ -1068,11 +1104,10 @@ TEST_F(AbstractPromiseTest, AllAlreadyCanceledPrerequisitePolicyANY) {
scoped_refptr<AbstractPromise> p3 =
DoNothingPromiseBuilder(FROM_HERE).SetCanResolve(true);
- std::vector<internal::AbstractPromise::AdjacencyListNode> prerequisite_list(
- 3);
- prerequisite_list[0].prerequisite = p1;
- prerequisite_list[1].prerequisite = p2;
- prerequisite_list[2].prerequisite = p3;
+ std::vector<internal::DependentList::Node> prerequisite_list(3);
+ prerequisite_list[0].SetPrerequisite(p1.get());
+ prerequisite_list[1].SetPrerequisite(p2.get());
+ prerequisite_list[2].SetPrerequisite(p3.get());
p1->OnCanceled();
p2->OnCanceled();
p3->OnCanceled();
@@ -1083,6 +1118,62 @@ TEST_F(AbstractPromiseTest, AllAlreadyCanceledPrerequisitePolicyANY) {
EXPECT_TRUE(any_promise->IsCanceled());
}
+TEST_F(AbstractPromiseTest, CurriedResolvedPromiseAny) {
+ scoped_refptr<AbstractPromise> p0 =
+ DoNothingPromiseBuilder(FROM_HERE).SetCanResolve(true);
+ scoped_refptr<AbstractPromise> p2 =
+ DoNothingPromiseBuilder(FROM_HERE).SetCanResolve(true);
+ scoped_refptr<AbstractPromise> p1 =
+ ThenPromise(FROM_HERE, p0)
+ .With(BindOnce(
+ [](scoped_refptr<AbstractPromise> p2, AbstractPromise* p) {
+ p->emplace(std::move(p2));
+ p->OnResolved();
+ },
+ p2))
+ .With(PrerequisitePolicy::kAny);
+
+ scoped_refptr<TestSimpleTaskRunner> task_runner =
+ MakeRefCounted<TestSimpleTaskRunner>();
+ scoped_refptr<AbstractPromise> p3 =
+ ThenPromise(FROM_HERE, p1).With(task_runner);
+
+ p0->OnResolved();
+ p2->OnResolved();
+ RunLoop().RunUntilIdle();
+
+ // |p3| should run.
+ EXPECT_EQ(1u, CountTasksRunUntilIdle(task_runner));
+}
+
+TEST_F(AbstractPromiseTest, CurriedRejectedPromiseAny) {
+ scoped_refptr<AbstractPromise> p0 =
+ DoNothingPromiseBuilder(FROM_HERE).SetCanResolve(true);
+ scoped_refptr<AbstractPromise> p2 =
+ DoNothingPromiseBuilder(FROM_HERE).SetCanReject(true);
+ scoped_refptr<AbstractPromise> p1 =
+ ThenPromise(FROM_HERE, p0)
+ .With(BindOnce(
+ [](scoped_refptr<AbstractPromise> p2, AbstractPromise* p) {
+ p->emplace(std::move(p2));
+ p->OnResolved();
+ },
+ p2))
+ .With(PrerequisitePolicy::kAny);
+
+ scoped_refptr<TestSimpleTaskRunner> task_runner =
+ MakeRefCounted<TestSimpleTaskRunner>();
+ scoped_refptr<AbstractPromise> p3 =
+ CatchPromise(FROM_HERE, p1).With(task_runner);
+
+ p0->OnResolved();
+ p2->OnRejected();
+ RunLoop().RunUntilIdle();
+
+ // |p3| should run.
+ EXPECT_EQ(1u, CountTasksRunUntilIdle(task_runner));
+}
+
TEST_F(AbstractPromiseTest,
ABSTRACT_PROMISE_DEATH_TEST(DetectResolveDoubleMoveHazard)) {
scoped_refptr<AbstractPromise> p0 = ThenPromise(FROM_HERE, nullptr);
@@ -1881,6 +1972,43 @@ TEST_F(AbstractPromiseTest, UnresolvedCurriedPromise) {
EXPECT_THAT(run_order, ElementsAre(3, 4));
}
+TEST_F(AbstractPromiseTest, NeverResolvedCurriedPromise) {
+ scoped_refptr<AbstractPromise> p1 =
+ DoNothingPromiseBuilder(FROM_HERE).SetCanResolve(true);
+ std::vector<int> run_order;
+
+ // Promise |p3| will be resolved with.
+ scoped_refptr<AbstractPromise> p2 =
+ DoNothingPromiseBuilder(FROM_HERE).SetCanResolve(true);
+
+ scoped_refptr<AbstractPromise> p3 =
+ ThenPromise(FROM_HERE, p1)
+ .With(BindLambdaForTesting([&](AbstractPromise* p) {
+ run_order.push_back(3);
+ // Resolve with a promise.
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, BindOnce(&AbstractPromise::Execute, p2));
+ p->emplace(p2);
+ p->OnResolved();
+
+ EXPECT_TRUE(p3->IsResolvedWithPromise());
+ }));
+
+ scoped_refptr<AbstractPromise> p4 =
+ ThenPromise(FROM_HERE, p3)
+ .With(BindLambdaForTesting([&](AbstractPromise* p) {
+ run_order.push_back(4);
+ p->emplace(Resolved<void>());
+ p->OnResolved();
+ }));
+
+ p1->OnResolved();
+ RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(3));
+
+ // This shouldn't leak.
+}
+
TEST_F(AbstractPromiseTest, CanceledCurriedPromise) {
scoped_refptr<AbstractPromise> p1 =
DoNothingPromiseBuilder(FROM_HERE).SetCanResolve(true);
@@ -1896,8 +2024,10 @@ TEST_F(AbstractPromiseTest, CanceledCurriedPromise) {
// Resolve with a promise.
ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, BindOnce(&AbstractPromise::Execute, p2));
+ EXPECT_TRUE(p2->IsCanceled());
p->emplace(p2);
p->OnResolved();
+ EXPECT_TRUE(p->IsCanceled());
}));
scoped_refptr<AbstractPromise> p4 =
@@ -2002,7 +2132,7 @@ TEST_F(AbstractPromiseTest, CurriedPromiseChainType2) {
p1->OnResolved();
RunLoop().RunUntilIdle();
- EXPECT_TRUE(p4->IsResolved());
+ EXPECT_TRUE(p4->IsResolvedForTesting());
EXPECT_EQ(p1.get(), p4->FindNonCurriedAncestor());
}
@@ -2050,7 +2180,7 @@ TEST_F(AbstractPromiseTest, CatchCurriedPromise) {
scoped_refptr<AbstractPromise> p2 =
ThenPromise(FROM_HERE, p0)
.With(BindOnce(
- [&](scoped_refptr<AbstractPromise> p1, AbstractPromise* p) {
+ [](scoped_refptr<AbstractPromise> p1, AbstractPromise* p) {
// Resolve with a promise that can and does reject.
ThreadTaskRunnerHandle::Get()->PostTask(
FROM_HERE, BindOnce(&AbstractPromise::Execute, p1));
@@ -2062,10 +2192,93 @@ TEST_F(AbstractPromiseTest, CatchCurriedPromise) {
scoped_refptr<AbstractPromise> p3 = CatchPromise(FROM_HERE, p2);
p0->OnResolved();
- EXPECT_FALSE(p3->IsResolved());
+ EXPECT_FALSE(p3->IsResolvedForTesting());
+
+ RunLoop().RunUntilIdle();
+ EXPECT_TRUE(p3->IsResolvedForTesting());
+}
+
+TEST_F(AbstractPromiseTest, ManuallyResolveWithNonSettledCurriedPromise) {
+ scoped_refptr<AbstractPromise> p0 =
+ DoNothingPromiseBuilder(FROM_HERE).SetCanResolve(true);
+
+ scoped_refptr<AbstractPromise> p1 =
+ DoNothingPromiseBuilder(FROM_HERE).SetCanResolve(true);
+
+ scoped_refptr<AbstractPromise> p2 = ThenPromise(FROM_HERE, p1);
+ p1->emplace(p0);
+ p1->OnResolved();
RunLoop().RunUntilIdle();
- EXPECT_TRUE(p3->IsResolved());
+ EXPECT_TRUE(p1->IsResolvedForTesting());
+ EXPECT_FALSE(p2->IsResolvedForTesting());
+
+ p0->OnResolved();
+ RunLoop().RunUntilIdle();
+ EXPECT_TRUE(p2->IsResolvedForTesting());
+}
+
+TEST_F(AbstractPromiseTest, ExecuteCalledOnceForLateResolvedCurriedPromise) {
+ scoped_refptr<AbstractPromise> p0 =
+ DoNothingPromiseBuilder(FROM_HERE).SetCanResolve(true);
+
+ scoped_refptr<AbstractPromise> p1 =
+ DoNothingPromiseBuilder(FROM_HERE).SetCanResolve(true);
+
+ scoped_refptr<TestSimpleTaskRunner> task_runner =
+ MakeRefCounted<TestSimpleTaskRunner>();
+ scoped_refptr<AbstractPromise> p2 =
+ ThenPromise(FROM_HERE, p0)
+ .With(BindLambdaForTesting([&](AbstractPromise* p) {
+ p->emplace(p1);
+ p->OnResolved();
+ }))
+ .With(task_runner);
+
+ scoped_refptr<AbstractPromise> p3 =
+ ThenPromise(FROM_HERE, p1).With(task_runner);
+
+ p0->OnResolved();
+ // |p2| should run but not |p3|.
+ EXPECT_EQ(1u, CountTasksRunUntilIdle(task_runner));
+ EXPECT_FALSE(p3->IsResolvedForTesting());
+
+ p1->OnResolved();
+ // |p3| should run.
+ EXPECT_EQ(1u, CountTasksRunUntilIdle(task_runner));
+ EXPECT_TRUE(p3->IsResolvedForTesting());
+}
+
+TEST_F(AbstractPromiseTest, ExecuteCalledOnceForLateRejectedCurriedPromise) {
+ scoped_refptr<AbstractPromise> p0 =
+ DoNothingPromiseBuilder(FROM_HERE).SetCanResolve(true);
+
+ scoped_refptr<AbstractPromise> p1 =
+ DoNothingPromiseBuilder(FROM_HERE).SetCanReject(true).SetRejectPolicy(
+ RejectPolicy::kCatchNotRequired);
+
+ scoped_refptr<TestSimpleTaskRunner> task_runner =
+ MakeRefCounted<TestSimpleTaskRunner>();
+ scoped_refptr<AbstractPromise> p2 =
+ ThenPromise(FROM_HERE, p0)
+ .With(BindLambdaForTesting([&](AbstractPromise* p) {
+ p->emplace(p1);
+ p->OnResolved();
+ }))
+ .With(task_runner);
+
+ scoped_refptr<AbstractPromise> p3 =
+ CatchPromise(FROM_HERE, p1).With(task_runner);
+
+ p0->OnResolved();
+ // |p2| should run but not |p3|.
+ EXPECT_EQ(1u, CountTasksRunUntilIdle(task_runner));
+ EXPECT_FALSE(p3->IsResolvedForTesting());
+
+ p1->OnRejected();
+ // |p3| should run.
+ EXPECT_EQ(1u, CountTasksRunUntilIdle(task_runner));
+ EXPECT_TRUE(p3->IsResolvedForTesting());
}
TEST_F(AbstractPromiseTest, ThreadHopping) {
@@ -2121,12 +2334,12 @@ TEST_F(AbstractPromiseTest, ThreadHopping) {
p1->OnResolved();
- EXPECT_FALSE(p5->IsResolved());
+ EXPECT_FALSE(p5->IsResolvedForTesting());
run_loop.Run();
- EXPECT_TRUE(p2->IsResolved());
- EXPECT_TRUE(p3->IsResolved());
- EXPECT_TRUE(p4->IsResolved());
- EXPECT_TRUE(p5->IsResolved());
+ EXPECT_TRUE(p2->IsResolvedForTesting());
+ EXPECT_TRUE(p3->IsResolvedForTesting());
+ EXPECT_TRUE(p4->IsResolvedForTesting());
+ EXPECT_TRUE(p5->IsResolvedForTesting());
thread_a->Stop();
thread_b->Stop();
@@ -2135,7 +2348,7 @@ TEST_F(AbstractPromiseTest, ThreadHopping) {
TEST_F(AbstractPromiseTest, MutipleThreadsAddingDependants) {
constexpr int num_threads = 4;
- constexpr int num_promises = 10000;
+ constexpr int num_promises = 100000;
std::unique_ptr<Thread> thread[num_threads];
for (int i = 0; i < num_threads; i++) {
@@ -2154,6 +2367,7 @@ TEST_F(AbstractPromiseTest, MutipleThreadsAddingDependants) {
int count = pending_count.fetch_sub(1, std::memory_order_acq_rel);
if (count == 1)
run_loop.Quit();
+ p->OnResolved();
});
// Post a bunch of tasks on multiple threads that create Then promises
@@ -2180,5 +2394,58 @@ TEST_F(AbstractPromiseTest, MutipleThreadsAddingDependants) {
}
}
+TEST_F(AbstractPromiseTest, SingleRejectPrerequisitePolicyALLModified) {
+ // Regression test to ensure cross thread rejection works as intended. Loop
+ // increases chances of catching any bugs.
+ for (size_t i = 0; i < 1000; ++i) {
+ scoped_refptr<AbstractPromise> p1 =
+ DoNothingPromiseBuilder(FROM_HERE).SetCanReject(true).SetCanResolve(
+ false);
+ scoped_refptr<AbstractPromise> p2 =
+ DoNothingPromiseBuilder(FROM_HERE).SetCanReject(true).SetCanResolve(
+ false);
+ scoped_refptr<AbstractPromise> p3 =
+ DoNothingPromiseBuilder(FROM_HERE).SetCanReject(true).SetCanResolve(
+ false);
+ scoped_refptr<AbstractPromise> p4 =
+ DoNothingPromiseBuilder(FROM_HERE).SetCanReject(true).SetCanResolve(
+ false);
+
+ std::vector<internal::DependentList::Node> prerequisite_list(4);
+ prerequisite_list[0].SetPrerequisite(p1.get());
+ prerequisite_list[1].SetPrerequisite(p2.get());
+ prerequisite_list[2].SetPrerequisite(p3.get());
+ prerequisite_list[3].SetPrerequisite(p4.get());
+
+ scoped_refptr<AbstractPromise> all_promise =
+ AllPromise(FROM_HERE, std::move(prerequisite_list))
+ .With(CallbackResultType::kCanResolveOrReject)
+ .With(BindLambdaForTesting([&](AbstractPromise* p) {
+ p->emplace(Rejected<void>());
+ p->OnRejected();
+ }));
+
+ base::PostTaskWithTraits(
+ FROM_HERE, {},
+ base::Bind([](scoped_refptr<AbstractPromise> p2) { p2->OnRejected(); },
+ p2));
+
+ RunLoop run_loop;
+ scoped_refptr<AbstractPromise> p5 =
+ CatchPromise(FROM_HERE, all_promise)
+ .With(BindLambdaForTesting([&](AbstractPromise* p) {
+ p->emplace(Resolved<void>());
+ p->OnResolved();
+ run_loop.Quit();
+ }));
+
+ p3->OnRejected();
+ run_loop.Run();
+ EXPECT_TRUE(all_promise->IsRejected());
+ EXPECT_TRUE(p5->IsResolved());
+ scoped_task_environment_.RunUntilIdle();
+ }
+}
+
} // namespace internal
} // namespace base
diff --git a/chromium/base/task/promise/all_container_executor.h b/chromium/base/task/promise/all_container_executor.h
index 4045fe65adf..e7e95d43aea 100644
--- a/chromium/base/task/promise/all_container_executor.h
+++ b/chromium/base/task/promise/all_container_executor.h
@@ -20,45 +20,37 @@ class AllContainerPromiseExecutor {
public:
bool IsCancelled() const { return false; }
- AbstractPromise::Executor::PrerequisitePolicy GetPrerequisitePolicy() const {
- return AbstractPromise::Executor::PrerequisitePolicy::kAll;
+ PromiseExecutor::PrerequisitePolicy GetPrerequisitePolicy() const {
+ return PromiseExecutor::PrerequisitePolicy::kAll;
}
+ struct VoidResolveType {};
+ struct NonVoidResolveType {};
+
+ using ResolveTypeTag = std::conditional_t<std::is_void<ResolveType>::value,
+ VoidResolveType,
+ NonVoidResolveType>;
+
void Execute(AbstractPromise* promise) {
// All is rejected if any prerequisites are rejected.
- if (AbstractPromise* rejected = promise->GetFirstRejectedPrerequisite()) {
- AllPromiseRejectHelper<Rejected<RejectType>>::Reject(promise, rejected);
+ AbstractPromise* first_settled = promise->GetFirstSettledPrerequisite();
+ if (first_settled && first_settled->IsRejected()) {
+ AllPromiseRejectHelper<Rejected<RejectType>>::Reject(promise,
+ first_settled);
promise->OnRejected();
return;
}
- const std::vector<AbstractPromise::AdjacencyListNode>* prerequisite_list =
- promise->prerequisite_list();
- DCHECK(prerequisite_list);
- using NonVoidResolveType = ToNonVoidT<ResolveType>;
- Resolved<std::vector<NonVoidResolveType>> result;
- result.value.reserve(prerequisite_list->size());
-
- for (const auto& node : *prerequisite_list) {
- DCHECK(node.prerequisite->IsResolved());
- result.value.push_back(
- ArgMoveSemanticsHelper<
- NonVoidResolveType,
- Resolved<NonVoidResolveType>>::Get(node.prerequisite.get()));
- }
-
- promise->emplace(std::move(result));
+ ResolveInternal(promise, ResolveTypeTag());
promise->OnResolved();
}
#if DCHECK_IS_ON()
- AbstractPromise::Executor::ArgumentPassingType ResolveArgumentPassingType()
- const {
+ PromiseExecutor::ArgumentPassingType ResolveArgumentPassingType() const {
return UseMoveSemantics<ResolveType>::argument_passing_type;
}
- AbstractPromise::Executor::ArgumentPassingType RejectArgumentPassingType()
- const {
+ PromiseExecutor::ArgumentPassingType RejectArgumentPassingType() const {
return UseMoveSemantics<RejectType>::argument_passing_type;
}
@@ -68,6 +60,33 @@ class AllContainerPromiseExecutor {
bool CanReject() const { return !std::is_same<RejectType, NoReject>::value; }
#endif
+
+ private:
+ // For containers of Promise<void> there is no point resolving with
+ // std::vector<Void>.
+ void ResolveInternal(AbstractPromise* promise, VoidResolveType) {
+ promise->emplace(Resolved<void>());
+ }
+
+ void ResolveInternal(AbstractPromise* promise, NonVoidResolveType) {
+ using NonVoidResolveType = ToNonVoidT<ResolveType>;
+ Resolved<std::vector<NonVoidResolveType>> result;
+
+ const std::vector<DependentList::Node>* prerequisite_list =
+ promise->prerequisite_list();
+ DCHECK(prerequisite_list);
+ result.value.reserve(prerequisite_list->size());
+
+ for (const auto& node : *prerequisite_list) {
+ DCHECK(node.prerequisite()->IsResolved());
+ result.value.push_back(
+ ArgMoveSemanticsHelper<
+ NonVoidResolveType,
+ Resolved<NonVoidResolveType>>::Get(node.prerequisite()));
+ }
+
+ promise->emplace(std::move(result));
+ }
};
template <typename Container, typename ContainerT>
@@ -76,23 +95,32 @@ struct AllContainerHelper;
template <typename Container, typename ResolveType, typename RejectType>
struct AllContainerHelper<Container, Promise<ResolveType, RejectType>> {
using PromiseResolve = std::vector<ToNonVoidT<ResolveType>>;
- using PromiseType = Promise<PromiseResolve, RejectType>;
+
+ // As an optimization we don't return std::vector<ResolveType> for void
+ // ResolveType.
+ using PromiseType = std::conditional_t<std::is_void<ResolveType>::value,
+ Promise<void, RejectType>,
+ Promise<PromiseResolve, RejectType>>;
static PromiseType All(const Location& from_here, const Container& promises) {
size_t i = 0;
- std::vector<AbstractPromise::AdjacencyListNode> prerequisite_list(
- promises.size());
+ std::vector<DependentList::Node> prerequisite_list(promises.size());
+ // TODO(alexclarke): Move construction of this list and AbstractPromise out
+ // of line to reduce template bloat.
for (auto& promise : promises) {
- prerequisite_list[i++].prerequisite = promise.abstract_promise_;
+ prerequisite_list[i++].SetPrerequisite(promise.abstract_promise_.get());
}
+
+ internal::PromiseExecutor::Data executor_data(
+ (in_place_type_t<
+ AllContainerPromiseExecutor<ResolveType, RejectType>>()));
+
return PromiseType(AbstractPromise::Create(
nullptr, from_here,
std::make_unique<AbstractPromise::AdjacencyList>(
std::move(prerequisite_list)),
- RejectPolicy::kMustCatchRejection,
- AbstractPromise::ConstructWith<
- DependentList::ConstructUnresolved,
- AllContainerPromiseExecutor<ResolveType, RejectType>>()));
+ RejectPolicy::kMustCatchRejection, DependentList::ConstructUnresolved(),
+ std::move(executor_data)));
}
};
diff --git a/chromium/base/task/promise/all_tuple_executor.h b/chromium/base/task/promise/all_tuple_executor.h
index 545ef95fd28..455c2272caf 100644
--- a/chromium/base/task/promise/all_tuple_executor.h
+++ b/chromium/base/task/promise/all_tuple_executor.h
@@ -29,13 +29,13 @@ struct TupleConstructor<Tuple, std::index_sequence<Indices...>> {
// Resolves |result| with a std::tuple of the promise results of the dependent
// promises.
static void ConstructTuple(
- const std::vector<AbstractPromise::AdjacencyListNode>* prerequisite_list,
+ const std::vector<DependentList::Node>* prerequisite_list,
AbstractPromise* result) {
DCHECK_EQ(sizeof...(Indices), prerequisite_list->size());
result->emplace(
in_place_type_t<Resolved<Tuple>>(),
GetResolvedValueFromPromise<std::tuple_element_t<Indices, Tuple>>(
- (*prerequisite_list)[Indices].prerequisite.get())...);
+ (*prerequisite_list)[Indices].prerequisite())...);
}
};
@@ -56,19 +56,20 @@ class AllTuplePromiseExecutor {
bool IsCancelled() const { return false; }
- AbstractPromise::Executor::PrerequisitePolicy GetPrerequisitePolicy() const {
- return AbstractPromise::Executor::PrerequisitePolicy::kAll;
+ PromiseExecutor::PrerequisitePolicy GetPrerequisitePolicy() const {
+ return PromiseExecutor::PrerequisitePolicy::kAll;
}
void Execute(AbstractPromise* promise) {
// All is rejected if any prerequisites are rejected.
- if (AbstractPromise* rejected = promise->GetFirstRejectedPrerequisite()) {
- AllPromiseRejectHelper<RejectT>::Reject(promise, rejected);
+ AbstractPromise* first_settled = promise->GetFirstSettledPrerequisite();
+ if (first_settled && first_settled->IsRejected()) {
+ AllPromiseRejectHelper<RejectT>::Reject(promise, first_settled);
promise->OnRejected();
return;
}
- const std::vector<AbstractPromise::AdjacencyListNode>* prerequisite_list =
+ const std::vector<DependentList::Node>* prerequisite_list =
promise->prerequisite_list();
DCHECK(prerequisite_list);
TupleConstructor<ResolveTuple>::ConstructTuple(prerequisite_list, promise);
@@ -76,13 +77,11 @@ class AllTuplePromiseExecutor {
}
#if DCHECK_IS_ON()
- AbstractPromise::Executor::ArgumentPassingType ResolveArgumentPassingType()
- const {
+ PromiseExecutor::ArgumentPassingType ResolveArgumentPassingType() const {
return UseMoveSemantics<ResolveTuple>::argument_passing_type;
}
- AbstractPromise::Executor::ArgumentPassingType RejectArgumentPassingType()
- const {
+ PromiseExecutor::ArgumentPassingType RejectArgumentPassingType() const {
return UseMoveSemantics<RejectType>::argument_passing_type;
}
diff --git a/chromium/base/task/promise/dependent_list.cc b/chromium/base/task/promise/dependent_list.cc
index e1c1a0a72fe..6cbbf345ef2 100644
--- a/chromium/base/task/promise/dependent_list.cc
+++ b/chromium/base/task/promise/dependent_list.cc
@@ -4,115 +4,279 @@
#include "base/task/promise/dependent_list.h"
+#include <algorithm>
+#include <atomic>
+#include <cstdint>
+
+#include "base/logging.h"
#include "base/task/promise/abstract_promise.h"
namespace base {
namespace internal {
-DependentList::DependentList(ConstructUnresolved) : head_(0) {}
+// static
+DependentList::Node* DependentList::ReverseList(DependentList::Node* list) {
+ DependentList::Node* prev = nullptr;
+ while (list) {
+ DependentList::Node* next = list->next_;
+ list->next_ = prev;
+ prev = list;
+ list = next;
+ }
+ return prev;
+}
+
+// static
+void DependentList::DispatchAll(DependentList::Node* head,
+ DependentList::Visitor* visitor,
+ bool retain_prerequsites) {
+ head = ReverseList(head);
+ DependentList::Node* next = nullptr;
+ while (head) {
+ next = head->next_;
+ if (retain_prerequsites)
+ head->RetainSettledPrerequisite();
+ // |visitor| might delete the node, so no access to node past this
+ // call!
+ visitor->Visit(std::move(head->dependent_));
+ head = next;
+ }
+}
+
+DependentList::Visitor::~Visitor() = default;
+
+DependentList::Node::Node() = default;
+
+DependentList::Node::Node(Node&& other) noexcept {
+ prerequisite_ = other.prerequisite_.load(std::memory_order_relaxed);
+ other.prerequisite_ = 0;
+ dependent_ = std::move(other.dependent_);
+ DCHECK_EQ(other.next_, nullptr);
+}
+
+DependentList::Node::Node(AbstractPromise* prerequisite,
+ scoped_refptr<AbstractPromise> dependent)
+ : prerequisite_(reinterpret_cast<intptr_t>(prerequisite)),
+ dependent_(std::move(dependent)) {}
+
+DependentList::Node::~Node() {
+ ClearPrerequisite();
+}
+
+DependentList::DependentList(State initial_state)
+ : data_(CreateData(nullptr,
+ initial_state,
+ initial_state == State::kUnresolved ? kAllowInserts
+ : kBlockInserts)) {}
+
+DependentList::DependentList(ConstructUnresolved)
+ : DependentList(State::kUnresolved) {}
-DependentList::DependentList(ConstructResolved) : head_(kResolvedSentinel) {}
+DependentList::DependentList(ConstructResolved)
+ : DependentList(State::kResolved) {}
-DependentList::DependentList(ConstructRejected) : head_(kRejectedSentinel) {}
+DependentList::DependentList(ConstructRejected)
+ : DependentList(State::kRejected) {}
DependentList::~DependentList() = default;
-DependentList::Node::Node() = default;
+void DependentList::Node::Reset(AbstractPromise* prerequisite,
+ scoped_refptr<AbstractPromise> dependent) {
+ SetPrerequisite(prerequisite);
+ dependent_ = std::move(dependent);
+ next_ = nullptr;
+}
+
+void DependentList::Node::SetPrerequisite(AbstractPromise* prerequisite) {
+ DCHECK(prerequisite);
+ intptr_t prev_value = prerequisite_.exchange(
+ reinterpret_cast<intptr_t>(prerequisite), std::memory_order_acq_rel);
-DependentList::Node::Node(Node&& other) {
- dependent = std::move(other.dependent);
- DCHECK_EQ(other.next, nullptr);
+ if (prev_value & kIsRetained)
+ reinterpret_cast<AbstractPromise*>(prev_value & ~kIsRetained)->Release();
}
-DependentList::Node::~Node() = default;
+AbstractPromise* DependentList::Node::prerequisite() const {
+ return reinterpret_cast<AbstractPromise*>(
+ prerequisite_.load(std::memory_order_acquire) & ~kIsRetained);
+}
+
+void DependentList::Node::RetainSettledPrerequisite() {
+ intptr_t prerequisite = prerequisite_.load(std::memory_order_acquire);
+ DCHECK((prerequisite & kIsRetained) == 0) << "May only be called once";
+ if (!prerequisite)
+ return;
+
+ // Mark as retained, note we could have another thread trying to call
+ // ClearPrerequisite.
+ if (prerequisite_.compare_exchange_strong(
+ prerequisite, prerequisite | kIsRetained, std::memory_order_release,
+ std::memory_order_acquire)) {
+ reinterpret_cast<AbstractPromise*>(prerequisite)->AddRef();
+ }
+}
+
+void DependentList::Node::ClearPrerequisite() {
+ intptr_t prerequisite = prerequisite_.exchange(0, std::memory_order_acq_rel);
+ if (prerequisite & kIsRetained)
+ reinterpret_cast<AbstractPromise*>(prerequisite & ~kIsRetained)->Release();
+}
DependentList::InsertResult DependentList::Insert(Node* node) {
- // This method uses std::memory_order_acquire semantics on read (the failure
- // case of compare_exchange_weak() below is a read) to ensure setting
- // |node->next| happens-after all memory modifications applied to |prev_head|
- // before it became |head_|. Conversely it uses std::memory_order_release
- // semantics on write to ensure that all memory modifications applied to
- // |node| happened-before it becomes |head_|.
- DCHECK(!node->next);
- uintptr_t prev_head = head_.load(std::memory_order_acquire);
- do {
- if (prev_head == kResolvedSentinel) {
- node->next = 0;
- return InsertResult::FAIL_PROMISE_RESOLVED;
+ DCHECK(!node->next_);
+
+ // std::memory_order_acquire for happens-after relation with
+ // SettleAndDispatchAllDependents completing and thus this call returning
+ // an error.
+ uintptr_t prev_data = data_.load(std::memory_order_acquire);
+ bool did_insert = false;
+ while (IsAllowingInserts(prev_data) && !did_insert) {
+ node->next_ = ExtractHead(prev_data);
+
+ // On success std::memory_order_release so that all memory operations become
+ // visible in SettleAndDispatchAllDependents when iterating the list.
+ // On failure std::memory_order_acquire for happens-after relation with
+ // SettleAndDispatchAllDependents completing and thus this call
+ // returning an error.
+ //
+ // Note: ABA is not an issue here as we do not care that head_ might now be
+ // pointing to a different node (but with same address) we only need to
+ // guarantee that node->next points to the current head (which is now the
+ // new node but with the same address so node->next is still valid).
+ if (data_.compare_exchange_weak(
+ prev_data, CreateData(node, ExtractState(prev_data), kAllowInserts),
+ std::memory_order_seq_cst, std::memory_order_seq_cst)) {
+ did_insert = true;
+ } else {
+ // Cleanup in case the loop terminates
+ node->next_ = nullptr;
}
+ }
+
+ if (did_insert) {
+ return InsertResult::SUCCESS;
+ }
- if (prev_head == kRejectedSentinel) {
- node->next = 0;
+ switch (ExtractState(prev_data)) {
+ case State::kResolved:
+ return InsertResult::FAIL_PROMISE_RESOLVED;
+
+ case State::kRejected:
return InsertResult::FAIL_PROMISE_REJECTED;
- }
- if (prev_head == kCanceledSentinel) {
- node->next = 0;
+ case State::kCanceled:
return InsertResult::FAIL_PROMISE_CANCELED;
+
+ case State::kUnresolved:
+ // We must have inserted, as inserts must be allowed if in state
+ // kUnresolved
+ NOTREACHED();
+ return InsertResult::SUCCESS;
+ }
+}
+
+bool DependentList::SettleAndDispatchAllDependents(const State settled_state,
+ Visitor* visitor) {
+ DCHECK_NE(settled_state, State::kUnresolved);
+
+ // Whether this invocation won the settlement race. If so, it will now keep
+ // dispatching all nodes in as many attempts as it takes to win the race
+ // against Insert()'s.
+ bool did_set_state = false;
+
+ // This load, and the ones in for compare_exchange_weak failures can be
+ // std::memory_order_relaxed as we do not make any ordering guarantee when
+ // this method returns false.
+ uintptr_t prev_data = data_.load(std::memory_order_seq_cst);
+ while (true) {
+ if (!did_set_state && ExtractState(prev_data) != State::kUnresolved) {
+ // Somebody else set the state.
+ return false;
+ }
+
+ if (!IsListEmpty(prev_data)) {
+ // List is not empty and we might have set the state previously.
+ // We need to:
+ // * Settle the state (or leave as is if we already did).
+ // * allow_inserts. As we are going to dispatch the nodes we need to
+ // make sure that other threads can still insert to the list
+ // (instead of just resolving themselves) to preserve dispatch order.
+ // * Take ownership of all nodes currently in the queue, i.e. set the
+ // head to nullptr, marking it empty
+ DCHECK_EQ(ExtractState(prev_data),
+ did_set_state ? settled_state : State::kUnresolved);
+ DCHECK(IsAllowingInserts(prev_data));
+ uintptr_t new_data = CreateData(nullptr, settled_state, kAllowInserts);
+
+ // On success std::memory_order_acquire for happens-after relation with
+ // the last successful Insert().
+ if (!data_.compare_exchange_weak(prev_data, new_data,
+ std::memory_order_seq_cst,
+ std::memory_order_relaxed)) {
+ continue;
+ }
+ did_set_state = true;
+ // We don't want to retain prerequisites when cancelling.
+ DispatchAll(ExtractHead(prev_data), visitor,
+ settled_state != State::kCanceled);
+ prev_data = new_data;
}
- node->next = reinterpret_cast<Node*>(prev_head);
- } while (!head_.compare_exchange_weak(
- prev_head, reinterpret_cast<uintptr_t>(node), std::memory_order_release,
- std::memory_order_acquire));
- return InsertResult::SUCCESS;
-}
-
-DependentList::Node* DependentList::ConsumeOnceForResolve() {
- // The Consume*() methods require std::memory_order_acq_rel semantics because:
- // * Need release semantics to ensure that future calls to Insert() (which
- // will fail) happen-after memory modifications performed prior to this
- // Consume*().
- // * Need acquire semantics to synchronize with the last Insert() and ensure
- // all memory modifications applied to |head_| before the last Insert()
- // happen-before this Consume*().
- uintptr_t prev_head = std::atomic_exchange_explicit(
- &head_, kResolvedSentinel, std::memory_order_acq_rel);
- DCHECK_NE(prev_head, kResolvedSentinel);
- DCHECK_NE(prev_head, kRejectedSentinel);
- DCHECK_NE(prev_head, kCanceledSentinel);
- return reinterpret_cast<Node*>(prev_head);
-}
-
-DependentList::Node* DependentList::ConsumeOnceForReject() {
- uintptr_t prev_head = std::atomic_exchange_explicit(
- &head_, kRejectedSentinel, std::memory_order_acq_rel);
- DCHECK_NE(prev_head, kResolvedSentinel);
- DCHECK_NE(prev_head, kRejectedSentinel);
- DCHECK_NE(prev_head, kCanceledSentinel);
- return reinterpret_cast<Node*>(prev_head);
-}
-
-DependentList::Node* DependentList::ConsumeOnceForCancel() {
- uintptr_t prev_head = std::atomic_exchange_explicit(
- &head_, kCanceledSentinel, std::memory_order_acq_rel);
- DCHECK_NE(prev_head, kResolvedSentinel);
- DCHECK_NE(prev_head, kRejectedSentinel);
- DCHECK_NE(prev_head, kCanceledSentinel);
- return reinterpret_cast<Node*>(prev_head);
+ // List is empty and we might have set the state previously, so we
+ // can settle the state (or leave as is if we already did) and freeze
+ // the list.
+ DCHECK(IsListEmpty(prev_data));
+ DCHECK_EQ(ExtractState(prev_data),
+ did_set_state ? settled_state : State::kUnresolved);
+ // On success std::memory_order_release for happens-before relation with
+ // Insert returning an error.
+ if (data_.compare_exchange_weak(
+ prev_data, CreateData(nullptr, settled_state, kBlockInserts),
+ std::memory_order_seq_cst, std::memory_order_relaxed)) {
+ // Inserts no longer allowed, state settled and list is empty. We are
+ // done!
+ return true;
+ }
+ }
}
+// The following methods do not make any ordering guarantees, false return
+// values are always stale. A true return value just means that
+// SettleAndDispatchAllDependents was called but it gives no guarantees as
+// whether any of the nodes was dispatched or the call finished. Thus
+// std::memory_order_relaxed.
+
bool DependentList::IsSettled() const {
- uintptr_t value = head_.load(std::memory_order_acquire);
- return value == kResolvedSentinel || value == kRejectedSentinel ||
- value == kCanceledSentinel;
+ return ExtractState(data_.load(std::memory_order_seq_cst)) !=
+ State::kUnresolved;
}
bool DependentList::IsResolved() const {
- return head_.load(std::memory_order_acquire) == kResolvedSentinel;
+ DCHECK(IsSettled()) << "This check is racy";
+ return ExtractState(data_.load(std::memory_order_seq_cst)) ==
+ State::kResolved;
}
bool DependentList::IsRejected() const {
- return head_.load(std::memory_order_acquire) == kRejectedSentinel;
+ DCHECK(IsSettled()) << "This check is racy";
+ return ExtractState(data_.load(std::memory_order_seq_cst)) ==
+ State::kRejected;
}
bool DependentList::IsCanceled() const {
- return head_.load(std::memory_order_acquire) == kCanceledSentinel;
+ return ExtractState(data_.load(std::memory_order_seq_cst)) ==
+ State::kCanceled;
}
-constexpr uintptr_t DependentList::kResolvedSentinel;
-constexpr uintptr_t DependentList::kRejectedSentinel;
-constexpr uintptr_t DependentList::kCanceledSentinel;
+bool DependentList::IsResolvedForTesting() const {
+ return ExtractState(data_.load(std::memory_order_seq_cst)) ==
+ State::kResolved;
+}
+
+bool DependentList::IsRejectedForTesting() const {
+ return ExtractState(data_.load(std::memory_order_seq_cst)) ==
+ State::kRejected;
+}
} // namespace internal
} // namespace base
diff --git a/chromium/base/task/promise/dependent_list.h b/chromium/base/task/promise/dependent_list.h
index 02e80e41ec9..020bdbfc77f 100644
--- a/chromium/base/task/promise/dependent_list.h
+++ b/chromium/base/task/promise/dependent_list.h
@@ -6,6 +6,8 @@
#define BASE_TASK_PROMISE_DEPENDENT_LIST_H_
#include <atomic>
+#include <cstdint>
+#include <type_traits>
#include "base/base_export.h"
#include "base/logging.h"
@@ -15,13 +17,24 @@
namespace base {
namespace internal {
+// Returns 2^N where N is the smallest N possible so that 2^N > value.
+constexpr uintptr_t NextPowerOfTwo(uintptr_t value) {
+ // Keep setting 1's to the right of the first one until there are only 1's. In
+ // each iteration we double the number of 1's that we set. At last add 1 and
+ // we have the next power of 2.
+ for (size_t i = 1; i < sizeof(uintptr_t) * 8; i <<= 1) {
+ value |= value >> i;
+ }
+ return value + 1;
+}
+
class AbstractPromise;
// AbstractPromise needs to know which promises depend upon it. This lock free
// class stores the list of dependents. This is not a general purpose list
-// because the data can only be consumed once. This class' methods have implicit
-// acquire/release semantics (i.e., callers can assume the result they get
-// happens-after memory changes which lead to it).
+// because the data can only be consumed once.
+//
+// This class is thread safe.
class BASE_EXPORT DependentList {
public:
struct ConstructUnresolved {};
@@ -31,8 +44,12 @@ class BASE_EXPORT DependentList {
explicit DependentList(ConstructUnresolved);
explicit DependentList(ConstructResolved);
explicit DependentList(ConstructRejected);
+
~DependentList();
+ DependentList(const DependentList&) = delete;
+ DependentList& operator=(const DependentList&) = delete;
+
enum class InsertResult {
SUCCESS,
FAIL_PROMISE_RESOLVED,
@@ -40,44 +57,244 @@ class BASE_EXPORT DependentList {
FAIL_PROMISE_CANCELED,
};
- struct BASE_EXPORT Node {
+ // Align Node on an 8-byte boundary to ensure the first 3 bits are 0 and can
+ // be used to store additional state (see static_asserts below).
+ class BASE_EXPORT alignas(8) Node {
+ public:
Node();
explicit Node(Node&& other) noexcept;
+
+ // Constructs a Node, |prerequisite| will not be retained unless
+ // RetainSettledPrerequisite is called.
+ Node(AbstractPromise* prerequisite,
+ scoped_refptr<AbstractPromise> dependent);
~Node();
- scoped_refptr<AbstractPromise> dependent;
- std::atomic<Node*> next{nullptr};
+ // Caution: this is not thread safe.
+ void Reset(AbstractPromise* prerequisite,
+ scoped_refptr<AbstractPromise> dependent);
+
+ // Expected prerequisite usage:
+ // 1. prerequisite = null on creation (or is constructed with a value)
+ // 2. (optional, once only) SetPrerequisite(value)
+ // 3. (maybe, once only) RetainSettledPrerequisite();
+ // 4. (maybe) ClearPrerequisite()
+ // 5. Destructor called
+
+ // Can be called on any thread.
+ void SetPrerequisite(AbstractPromise* prerequisite);
+
+ // Can be called on any thread.
+ AbstractPromise* prerequisite() const;
+
+ scoped_refptr<AbstractPromise>& dependent() { return dependent_; }
+
+ const scoped_refptr<AbstractPromise>& dependent() const {
+ return dependent_;
+ }
+
+ Node* next() const { return next_; }
+
+ // Calls AddRef on |prerequisite()| and marks the prerequisite as being
+ // retained. The |prerequisite()| will be released by Node's destructor or
+ // a call to ClearPrerequisite. Does nothing if called more than once.
+ // Can be called on any thread at any time. Can be called once only.
+ void RetainSettledPrerequisite();
+
+ // Calls Release() if the prerequisite was retained and then sets
+ // |prerequisite_| to zero. Can be called on any thread at any time. Can be
+ // called more than once.
+ void ClearPrerequisite();
+
+ private:
+ friend class DependentList;
+
+ void MarkAsRetained() { prerequisite_ |= kIsRetained; }
+
+ // An AbstractPromise* where the LSB is a flag which specifies if it's
+ // retained or not.
+ // A reference for |prerequisite_| is acquired with an explicit call to
+ // AddRef() if it's resolved or rejected.
+ std::atomic<intptr_t> prerequisite_{0};
+
+ scoped_refptr<AbstractPromise> dependent_;
+ Node* next_ = nullptr;
+
+ static constexpr intptr_t kIsRetained = 1;
};
- // Insert will only succeed if one of the Consume operations hasn't been
- // called yet. |node| must outlive DependentList, and it can't be altered
- // after Insert or the release barrier will be ineffective.
+ // Insert will only succeed if neither ResolveAndConsumeAllDependents nor
+ // RejectAndConsumeAllDependents nor CancelAndConsumeAllDependents have been
+ // called yet. If the call succeeds, |node| must remain valid pointer until it
+ // is consumed by one of the *AndConsumeAllDependents methods. If none of
+ // those methods is called |node| must only be valid for the duration of this
+ // call. Nodes will be consumed in the same order as they are inserted.
InsertResult Insert(Node* node);
- // A ConsumeXXX function may only be called once.
- Node* ConsumeOnceForResolve();
+ // Callback for *AndConsumeAllDependents methods.
+ // TODO(carlscab): Consider using a callable object instead.
+ class BASE_EXPORT Visitor {
+ public:
+ virtual ~Visitor();
+ // Called from the *AndConsumeAllDependents methods for each node.
+ // |dependent| is the consumed (i.e. moved) from the one associated with the
+ // node. It is fine if the pointer to the node becomes invalid inside this
+ // call (i.e it is fine to delete the node).
+ virtual void Visit(scoped_refptr<AbstractPromise> dependent) = 0;
+ };
+
+ // The following *AndConsumeAllDependents methods will settle the list and
+ // consume all previously inserted nodes. It is guaranteed that Insert()
+ // failures will happen-after all nodes have been consumed. In particular that
+ // means that if an Insert happens while we are still consuming nodes the
+ // Insert will succeed and the node will be appended to the list of nodes to
+ // consume and eventually be consumed.
+ //
+ // ATTENTION: Calls to any of this methods will fail if itself or a different
+ // consume method has been previously called. ResolveAndConsumeAllDependents
+ // and RejectAndConsumeAllDependents will DCHECK on failures and
+ // CancelAndConsumeAllDependents will return false if it fails.
+
+ void ResolveAndConsumeAllDependents(Visitor* visitor) {
+ const bool success =
+ SettleAndDispatchAllDependents(State::kResolved, visitor);
+ DCHECK(success) << "Was already settled";
+ }
- // A ConsumeXXX function may only be called once.
- Node* ConsumeOnceForReject();
+ void RejectAndConsumeAllDependents(Visitor* visitor) {
+ const bool success =
+ SettleAndDispatchAllDependents(State::kRejected, visitor);
+ DCHECK(success) << "Was already settled";
+ }
- // A ConsumeXXX function may only be called once.
- Node* ConsumeOnceForCancel();
+ // TODO(alexclarke): Consider DCHECK for failures which would also allow us to
+ // greatly simplify SettleAndDispatchAllDependents
+ bool CancelAndConsumeAllDependents(Visitor* visitor) {
+ return SettleAndDispatchAllDependents(State::kCanceled, visitor);
+ }
+ // Returns true if any of IsResolved, IsRejected, or IsCanceled would return
+ // true
bool IsSettled() const;
+
+ // Returns true if (Resolve/Reject/Cancel)AndConsumeAllDependents
+ // has resolved/rejected/canceled the promise, respectively.
+ //
+ // ATTENTION: No guarantees are made as of whether the
+ // (Resolve/Reject/Cancel)AndConsumeAllDependents method is still executing.
+ bool IsCanceled() const;
+
+ // DCHECKs if not settled.
bool IsResolved() const;
+
+ // DCHECKs if not settled.
bool IsRejected() const;
- bool IsCanceled() const;
+
+ // Like the above but doesn't DCHECK if unsettled.
+ bool IsResolvedForTesting() const;
+ bool IsRejectedForTesting() const;
private:
- std::atomic<uintptr_t> head_;
+ // The data for this class is:
+ // * head: Pointer to the head of the list of Node instances
+ // * allow_inserts: flag indicating whether further inserts are allowed
+ // * state: State value
+ //
+ // We store all this information in a uintptr_t to support atomic operations
+ // as follows:
+ // PP...PPPFSS
+ // * P: Pointer to the head of the list of Node instances (head)
 + // * F: Flag indicating whether further inserts are allowed (allow_inserts)
+ // * S: State value (state)
+ //
+ // The various *Mask constants contain the bit masks for the various fields.
+ //
+ // Inserts can be allowed in any of the states, but they MUST be allowed in
+ // State::kUnresolved. Inserts are allowed while in one of the settled states
 + // while the SettleAndDispatchAllDependents is dispatching nodes. This is done
 + // to preserve dispatch order. Once all nodes have been dispatched (i.e.
+ // the list is empty), the allow_inserts is atomically (making sure list is
+ // still empty) set to false. From that point on Inserts will fail.
+ //
+ // All valid state transitions start from State::kUnresolved i.e. only the
+ // first call to SettleAndDispatchAllDependents will be able to settle the
+ // state and succeed, all others will fail.
+ //
+ // The Is(Resolved|Rejected|Canceled) methods must return true while we are
+ // dispatching nodes. That is we need to access the settled state while we are
 + // still dispatching nodes. Thus we need an extra bit (allow_inserts) so that
+ // Insert can determine whether to insert or fail when there is a settled
+ // state.
+
+ enum class InsertPolicy {
+ kAllow,
+ kBlock,
+ };
+ static constexpr auto kAllowInserts = InsertPolicy::kAllow;
+ static constexpr auto kBlockInserts = InsertPolicy::kBlock;
+
+ enum class State {
+ kUnresolved = 0,
+ kResolved,
+ kRejected,
+ kCanceled,
+ kLastValue = kCanceled
+ };
+
+ static constexpr uintptr_t kStateMask =
+ NextPowerOfTwo(static_cast<uintptr_t>(State::kLastValue)) - 1;
+ static constexpr uintptr_t kAllowInsertsBitMask = kStateMask + 1;
+ static constexpr uintptr_t kHeadMask = ~(kAllowInsertsBitMask | kStateMask);
+
+ static_assert(
+ std::alignment_of<Node>() > kAllowInsertsBitMask,
+ "Will not be able to hold the Node* and all the state in a uintptr_t");
+
+ static State ExtractState(uintptr_t data) {
+ return static_cast<State>(data & kStateMask);
+ }
+
+ static DependentList::Node* ExtractHead(uintptr_t data) {
+ return reinterpret_cast<DependentList::Node*>(data & kHeadMask);
+ }
+
+ static bool IsListEmpty(uintptr_t data) {
+ return ExtractHead(data) == nullptr;
+ }
+
+ static bool IsAllowingInserts(uintptr_t data) {
+ return data & kAllowInsertsBitMask;
+ }
+
+ static uintptr_t CreateData(Node* head,
+ State state,
+ InsertPolicy insert_policy) {
+ DCHECK_EQ(uintptr_t(head), uintptr_t(head) & kHeadMask)
+ << "Node doesn't have enough alignment";
+ DCHECK(insert_policy == kAllowInserts || head == nullptr)
+ << "List must be empty if no more inserts are allowed";
+ DCHECK(insert_policy == kAllowInserts || state != State::kUnresolved)
+ << "Can not block inserts and remain in kUnresolved state";
+ return reinterpret_cast<uintptr_t>(head) |
+ (insert_policy == kAllowInserts ? kAllowInsertsBitMask : 0) |
+ (static_cast<uintptr_t>(state) & kStateMask);
+ }
+
+ explicit DependentList(State initial_state);
+
+ // Settles the list and consumes all previously inserted nodes. If the list is
+ // already settled it does nothing and returns false, true otherwise.
+ bool SettleAndDispatchAllDependents(State settled_state, Visitor* visitor);
+
+ static DependentList::Node* ReverseList(DependentList::Node* list);
- // Special values for |head_| which correspond to various states. If |head_|
- // contains one of these then Insert() will fail.
- static constexpr uintptr_t kResolvedSentinel = 1;
- static constexpr uintptr_t kRejectedSentinel = 2;
- static constexpr uintptr_t kCanceledSentinel = 3;
+ // Goes through the list starting at |head| consuming node->dependent and
+ // passing it to the provided |visitor|.
+ static void DispatchAll(DependentList::Node* head,
+ DependentList::Visitor* visitor,
+ bool retain_prerequsites);
- DISALLOW_COPY_AND_ASSIGN(DependentList);
+ std::atomic<uintptr_t> data_;
};
} // namespace internal
diff --git a/chromium/base/task/promise/dependent_list_unittest.cc b/chromium/base/task/promise/dependent_list_unittest.cc
index 7c3c1e3fc15..9373aff595a 100644
--- a/chromium/base/task/promise/dependent_list_unittest.cc
+++ b/chromium/base/task/promise/dependent_list_unittest.cc
@@ -3,18 +3,52 @@
// found in the LICENSE file.
#include "base/task/promise/dependent_list.h"
+
+#include <cstdint>
+#include <limits>
+
+#include "base/memory/scoped_refptr.h"
+#include "base/task/promise/abstract_promise.h"
+#include "base/test/do_nothing_promise.h"
+#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
namespace internal {
+namespace {
+
+class PushBackVisitor : public DependentList::Visitor {
+ public:
+ void Visit(scoped_refptr<AbstractPromise> dependent) override {
+ dependents_.push_back(dependent.get());
+ }
+
+ const std::vector<AbstractPromise*> visited_dependents() const {
+ return dependents_;
+ }
+
+ private:
+ std::vector<AbstractPromise*> dependents_;
+};
+
+class FailTestVisitor : public DependentList::Visitor {
+ public:
+ void Visit(scoped_refptr<AbstractPromise> dependent) override {
+ ADD_FAILURE();
+ }
+};
+
+} // namespace
+
+using ::testing::ElementsAre;
TEST(DependentList, ConstructUnresolved) {
DependentList list(DependentList::ConstructUnresolved{});
DependentList::Node node;
EXPECT_EQ(DependentList::InsertResult::SUCCESS, list.Insert(&node));
- EXPECT_FALSE(list.IsRejected());
+ EXPECT_FALSE(list.IsRejectedForTesting());
EXPECT_FALSE(list.IsCanceled());
- EXPECT_FALSE(list.IsResolved());
+ EXPECT_FALSE(list.IsResolvedForTesting());
EXPECT_FALSE(list.IsSettled());
}
@@ -40,7 +74,7 @@ TEST(DependentList, ConstructRejected) {
EXPECT_TRUE(list.IsSettled());
}
-TEST(DependentList, ConsumeOnceForResolve) {
+TEST(DependentList, ResolveAndConsumeAllDependents) {
DependentList list(DependentList::ConstructUnresolved{});
DependentList::Node node1;
DependentList::Node node2;
@@ -49,18 +83,19 @@ TEST(DependentList, ConsumeOnceForResolve) {
EXPECT_EQ(DependentList::InsertResult::SUCCESS, list.Insert(&node2));
EXPECT_EQ(DependentList::InsertResult::SUCCESS, list.Insert(&node3));
- EXPECT_FALSE(list.IsResolved());
+ EXPECT_FALSE(list.IsResolvedForTesting());
EXPECT_FALSE(list.IsSettled());
- DependentList::Node* result = list.ConsumeOnceForResolve();
+
+ std::vector<AbstractPromise*> expected_dependants = {node1.dependent().get(),
+ node2.dependent().get(),
+ node3.dependent().get()};
+
+ PushBackVisitor visitor;
+ list.ResolveAndConsumeAllDependents(&visitor);
EXPECT_TRUE(list.IsResolved());
- EXPECT_FALSE(list.IsRejected());
- EXPECT_FALSE(list.IsCanceled());
EXPECT_TRUE(list.IsSettled());
- EXPECT_EQ(&node3, result);
- EXPECT_EQ(&node2, result->next.load());
- EXPECT_EQ(&node1, result->next.load()->next.load());
- EXPECT_EQ(nullptr, result->next.load()->next.load()->next.load());
+ EXPECT_EQ(expected_dependants, visitor.visited_dependents());
// Can't insert any more nodes.
DependentList::Node node4;
@@ -68,7 +103,7 @@ TEST(DependentList, ConsumeOnceForResolve) {
list.Insert(&node4));
}
-TEST(DependentList, ConsumeOnceForReject) {
+TEST(DependentList, RejectAndConsumeAllDependents) {
DependentList list(DependentList::ConstructUnresolved{});
DependentList::Node node1;
DependentList::Node node2;
@@ -77,18 +112,18 @@ TEST(DependentList, ConsumeOnceForReject) {
EXPECT_EQ(DependentList::InsertResult::SUCCESS, list.Insert(&node2));
EXPECT_EQ(DependentList::InsertResult::SUCCESS, list.Insert(&node3));
- EXPECT_FALSE(list.IsRejected());
+ EXPECT_FALSE(list.IsResolvedForTesting());
EXPECT_FALSE(list.IsSettled());
- DependentList::Node* result = list.ConsumeOnceForReject();
+ std::vector<AbstractPromise*> expected_dependants = {node1.dependent().get(),
+ node2.dependent().get(),
+ node3.dependent().get()};
+
+ PushBackVisitor visitor;
+ list.RejectAndConsumeAllDependents(&visitor);
EXPECT_TRUE(list.IsRejected());
- EXPECT_FALSE(list.IsResolved());
- EXPECT_FALSE(list.IsCanceled());
EXPECT_TRUE(list.IsSettled());
- EXPECT_EQ(&node3, result);
- EXPECT_EQ(&node2, result->next.load());
- EXPECT_EQ(&node1, result->next.load()->next.load());
- EXPECT_EQ(nullptr, result->next.load()->next.load()->next.load());
+ EXPECT_EQ(expected_dependants, visitor.visited_dependents());
// Can't insert any more nodes.
DependentList::Node node4;
@@ -96,7 +131,7 @@ TEST(DependentList, ConsumeOnceForReject) {
list.Insert(&node4));
}
-TEST(DependentList, ConsumeOnceForCancel) {
+TEST(DependentList, CancelAndConsumeAllDependents) {
DependentList list(DependentList::ConstructUnresolved{});
DependentList::Node node1;
DependentList::Node node2;
@@ -105,18 +140,18 @@ TEST(DependentList, ConsumeOnceForCancel) {
EXPECT_EQ(DependentList::InsertResult::SUCCESS, list.Insert(&node2));
EXPECT_EQ(DependentList::InsertResult::SUCCESS, list.Insert(&node3));
- EXPECT_FALSE(list.IsCanceled());
+ EXPECT_FALSE(list.IsResolvedForTesting());
EXPECT_FALSE(list.IsSettled());
- DependentList::Node* result = list.ConsumeOnceForCancel();
+ std::vector<AbstractPromise*> expected_dependants = {node1.dependent().get(),
+ node2.dependent().get(),
+ node3.dependent().get()};
+
+ PushBackVisitor visitor;
+ EXPECT_TRUE(list.CancelAndConsumeAllDependents(&visitor));
EXPECT_TRUE(list.IsCanceled());
- EXPECT_FALSE(list.IsResolved());
- EXPECT_FALSE(list.IsRejected());
EXPECT_TRUE(list.IsSettled());
- EXPECT_EQ(&node3, result);
- EXPECT_EQ(&node2, result->next.load());
- EXPECT_EQ(&node1, result->next.load()->next.load());
- EXPECT_EQ(nullptr, result->next.load()->next.load()->next.load());
+ EXPECT_EQ(expected_dependants, visitor.visited_dependents());
// Can't insert any more nodes.
DependentList::Node node4;
@@ -124,5 +159,51 @@ TEST(DependentList, ConsumeOnceForCancel) {
list.Insert(&node4));
}
+TEST(DependentList, CancelAndConsumeAllDependentsFailsIfAlreadySettled) {
+ DependentList list(DependentList::ConstructUnresolved{});
+
+ FailTestVisitor visitor;
+ list.ResolveAndConsumeAllDependents(&visitor);
+
+ EXPECT_FALSE(list.CancelAndConsumeAllDependents(&visitor));
+
+ EXPECT_FALSE(list.IsCanceled());
+ EXPECT_TRUE(list.IsResolved());
+}
+
+TEST(DependentList, NextPowerOfTwo) {
+ static_assert(NextPowerOfTwo(0) == 1u, "");
+ static_assert(NextPowerOfTwo(1) == 2u, "");
+ static_assert(NextPowerOfTwo(2) == 4u, "");
+ static_assert(NextPowerOfTwo(3) == 4u, "");
+ static_assert(NextPowerOfTwo(4) == 8u, "");
+ static_assert(NextPowerOfTwo((1ull << 21) + (1ull << 19)) == 1ull << 22, "");
+ static_assert(NextPowerOfTwo(std::numeric_limits<uintptr_t>::max() >> 1) ==
+ 1ull << (sizeof(uintptr_t) * 8 - 1),
+ "");
+ static_assert(NextPowerOfTwo(std::numeric_limits<uintptr_t>::max()) == 0u,
+ "");
+}
+
+TEST(DependentListNode, Simple) {
+ DependentList::Node node;
+ EXPECT_EQ(nullptr, node.prerequisite());
+
+ scoped_refptr<AbstractPromise> p = DoNothingPromiseBuilder(FROM_HERE);
+ EXPECT_TRUE(p->HasOneRef());
+ node.SetPrerequisite(p.get());
+ EXPECT_EQ(p.get(), node.prerequisite());
+ EXPECT_TRUE(p->HasOneRef());
+
+ EXPECT_TRUE(p->HasOneRef());
+ node.RetainSettledPrerequisite();
+ EXPECT_EQ(p.get(), node.prerequisite());
+ EXPECT_FALSE(p->HasOneRef());
+
+ node.ClearPrerequisite();
+ EXPECT_EQ(nullptr, node.prerequisite());
+ EXPECT_TRUE(p->HasOneRef());
+}
+
} // namespace internal
} // namespace base
diff --git a/chromium/base/task/promise/finally_executor.cc b/chromium/base/task/promise/finally_executor.cc
index 27cc023bfb0..52da56785be 100644
--- a/chromium/base/task/promise/finally_executor.cc
+++ b/chromium/base/task/promise/finally_executor.cc
@@ -7,7 +7,8 @@
namespace base {
namespace internal {
-FinallyExecutorCommon::FinallyExecutorCommon(internal::CallbackBase&& callback)
+FinallyExecutorCommon::FinallyExecutorCommon(
+ internal::CallbackBase&& callback) noexcept
: callback_(std::move(callback)) {}
FinallyExecutorCommon::~FinallyExecutorCommon() = default;
diff --git a/chromium/base/task/promise/finally_executor.h b/chromium/base/task/promise/finally_executor.h
index ec47b7433b7..6dcffd8a267 100644
--- a/chromium/base/task/promise/finally_executor.h
+++ b/chromium/base/task/promise/finally_executor.h
@@ -14,10 +14,10 @@ namespace internal {
// Exists to reduce template bloat.
class BASE_EXPORT FinallyExecutorCommon {
public:
- explicit FinallyExecutorCommon(CallbackBase&& callback);
+ explicit FinallyExecutorCommon(CallbackBase&& callback) noexcept;
~FinallyExecutorCommon();
- // AbstractPromise::Executor:
+ // PromiseExecutor:
bool IsCancelled() const;
CallbackBase callback_;
@@ -31,19 +31,15 @@ class FinallyExecutor {
public:
using CallbackReturnT = typename CallbackTraits<CallbackT>::ReturnType;
- explicit FinallyExecutor(CallbackT&& callback)
- : common_(std::move(callback)) {
- static_assert(sizeof(CallbackBase) == sizeof(CallbackT),
- "We assume it's possible to cast from CallbackBase to "
- "CallbackT");
- }
+ explicit FinallyExecutor(CallbackBase&& callback) noexcept
+ : common_(std::move(callback)) {}
~FinallyExecutor() = default;
bool IsCancelled() const { return common_.IsCancelled(); }
- AbstractPromise::Executor::PrerequisitePolicy GetPrerequisitePolicy() const {
- return AbstractPromise::Executor::PrerequisitePolicy::kAll;
+ PromiseExecutor::PrerequisitePolicy GetPrerequisitePolicy() const {
+ return PromiseExecutor::PrerequisitePolicy::kAll;
}
void Execute(AbstractPromise* promise) {
@@ -62,14 +58,12 @@ class FinallyExecutor {
}
#if DCHECK_IS_ON()
- AbstractPromise::Executor::ArgumentPassingType ResolveArgumentPassingType()
- const {
- return AbstractPromise::Executor::ArgumentPassingType::kNormal;
+ PromiseExecutor::ArgumentPassingType ResolveArgumentPassingType() const {
+ return PromiseExecutor::ArgumentPassingType::kNormal;
}
- AbstractPromise::Executor::ArgumentPassingType RejectArgumentPassingType()
- const {
- return AbstractPromise::Executor::ArgumentPassingType::kNormal;
+ PromiseExecutor::ArgumentPassingType RejectArgumentPassingType() const {
+ return PromiseExecutor::ArgumentPassingType::kNormal;
}
bool CanResolve() const {
diff --git a/chromium/base/task/promise/helpers.cc b/chromium/base/task/promise/helpers.cc
new file mode 100644
index 00000000000..35dc37d69fb
--- /dev/null
+++ b/chromium/base/task/promise/helpers.cc
@@ -0,0 +1,64 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/promise/helpers.h"
+
+#include "base/bind_helpers.h"
+#include "base/task/promise/no_op_promise_executor.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+
+namespace base {
+namespace internal {
+
+PromiseHolder::PromiseHolder(scoped_refptr<internal::AbstractPromise> promise)
+ : promise_(std::move(promise)) {}
+
+PromiseHolder::~PromiseHolder() {
+ // Detect if the promise was not executed and if so cancel to ensure memory
+ // is released.
+ if (promise_)
+ promise_->OnCanceled();
+}
+
+PromiseHolder::PromiseHolder(PromiseHolder&& other)
+ : promise_(std::move(other.promise_)) {}
+
+scoped_refptr<internal::AbstractPromise> PromiseHolder::Unwrap() const {
+ return std::move(promise_);
+}
+
+scoped_refptr<TaskRunner> GetCurrentSequence() {
+ return SequencedTaskRunnerHandle::Get();
+}
+
+DoNothing ToCallbackBase(DoNothing task) {
+ return task;
+}
+
+scoped_refptr<AbstractPromise> ConstructAbstractPromiseWithSinglePrerequisite(
+ const scoped_refptr<TaskRunner>& task_runner,
+ const Location& from_here,
+ AbstractPromise* prerequsite,
+ internal::PromiseExecutor::Data&& executor_data) noexcept {
+ return internal::AbstractPromise::Create(
+ task_runner, from_here,
+ std::make_unique<AbstractPromise::AdjacencyList>(prerequsite),
+ RejectPolicy::kMustCatchRejection,
+ internal::DependentList::ConstructUnresolved(), std::move(executor_data));
+}
+
+scoped_refptr<AbstractPromise> ConstructManualPromiseResolverPromise(
+ const Location& from_here,
+ RejectPolicy reject_policy,
+ bool can_resolve,
+ bool can_reject) {
+ return internal::AbstractPromise::CreateNoPrerequisitePromise(
+ from_here, reject_policy, internal::DependentList::ConstructUnresolved(),
+ internal::PromiseExecutor::Data(
+ in_place_type_t<internal::NoOpPromiseExecutor>(), can_resolve,
+ can_reject));
+}
+
+} // namespace internal
+} // namespace base
diff --git a/chromium/base/task/promise/helpers.h b/chromium/base/task/promise/helpers.h
index a428992a5ec..9a14be91114 100644
--- a/chromium/base/task/promise/helpers.h
+++ b/chromium/base/task/promise/helpers.h
@@ -15,11 +15,23 @@
#include "base/task/promise/promise_result.h"
namespace base {
+class DoNothing;
+
namespace internal {
+// A wrapper around SequencedTaskRunnerHandle::Get(). This file is included by
+// base/task_runner.h which means we can't include anything that depends on
+// that!
+scoped_refptr<TaskRunner> BASE_EXPORT GetCurrentSequence();
+
template <typename T>
using ToNonVoidT = std::conditional_t<std::is_void<T>::value, Void, T>;
+// Tag dispatch helper for PostTaskExecutor and ThenAndCatchExecutor.
+struct CouldResolveOrReject {};
+struct CanOnlyResolve {};
+struct CanOnlyReject {};
+
// PromiseCallbackTraits computes the resolve and reject types of a Promise
// from the return type of a resolve or reject callback.
//
@@ -35,6 +47,7 @@ template <typename T>
struct PromiseCallbackTraits {
using ResolveType = T;
using RejectType = NoReject;
+ using TagType = CanOnlyResolve;
static constexpr bool could_resolve = true;
static constexpr bool could_reject = false;
};
@@ -43,6 +56,7 @@ template <typename T>
struct PromiseCallbackTraits<Resolved<T>> {
using ResolveType = T;
using RejectType = NoReject;
+ using TagType = CanOnlyResolve;
static constexpr bool could_resolve = true;
static constexpr bool could_reject = false;
};
@@ -51,6 +65,7 @@ template <typename T>
struct PromiseCallbackTraits<Rejected<T>> {
using ResolveType = NoResolve;
using RejectType = T;
+ using TagType = CanOnlyReject;
static constexpr bool could_resolve = false;
static constexpr bool could_reject = true;
};
@@ -59,6 +74,7 @@ template <typename Reject>
struct PromiseCallbackTraits<Promise<NoResolve, Reject>> {
using ResolveType = NoResolve;
using RejectType = Reject;
+ using TagType = CanOnlyReject;
static constexpr bool could_resolve = false;
static constexpr bool could_reject = true;
};
@@ -67,6 +83,7 @@ template <typename Resolve>
struct PromiseCallbackTraits<Promise<Resolve, NoReject>> {
using ResolveType = Resolve;
using RejectType = NoReject;
+ using TagType = CanOnlyResolve;
static constexpr bool could_resolve = true;
static constexpr bool could_reject = false;
};
@@ -75,6 +92,7 @@ template <typename Resolve, typename Reject>
struct PromiseCallbackTraits<Promise<Resolve, Reject>> {
using ResolveType = Resolve;
using RejectType = Reject;
+ using TagType = CouldResolveOrReject;
static constexpr bool could_resolve = true;
static constexpr bool could_reject = true;
};
@@ -83,6 +101,7 @@ template <typename Reject>
struct PromiseCallbackTraits<PromiseResult<NoResolve, Reject>> {
using ResolveType = NoResolve;
using RejectType = Reject;
+ using TagType = CanOnlyReject;
static constexpr bool could_resolve = false;
static constexpr bool could_reject = true;
};
@@ -91,6 +110,7 @@ template <typename Resolve>
struct PromiseCallbackTraits<PromiseResult<Resolve, NoReject>> {
using ResolveType = Resolve;
using RejectType = NoReject;
+ using TagType = CanOnlyResolve;
static constexpr bool could_resolve = true;
static constexpr bool could_reject = false;
};
@@ -99,6 +119,7 @@ template <typename Resolve, typename Reject>
struct PromiseCallbackTraits<PromiseResult<Resolve, Reject>> {
using ResolveType = Resolve;
using RejectType = Reject;
+ using TagType = CouldResolveOrReject;
static constexpr bool could_resolve = true;
static constexpr bool could_reject = true;
};
@@ -136,11 +157,9 @@ struct UseMoveSemantics : public std::integral_constant<bool, UseMove<T>()> {
static_assert(!std::is_rvalue_reference<T>::value,
"Promise<T&&> not supported");
- static constexpr AbstractPromise::Executor::ArgumentPassingType
- argument_passing_type =
- UseMove<T>()
- ? AbstractPromise::Executor::ArgumentPassingType::kMove
- : AbstractPromise::Executor::ArgumentPassingType::kNormal;
+ static constexpr PromiseExecutor::ArgumentPassingType argument_passing_type =
+ UseMove<T>() ? PromiseExecutor::ArgumentPassingType::kMove
+ : PromiseExecutor::ArgumentPassingType::kNormal;
};
// A std::tuple is deemed to need move semantics if any of it's members need
@@ -148,11 +167,10 @@ struct UseMoveSemantics : public std::integral_constant<bool, UseMove<T>()> {
template <typename... Ts>
struct UseMoveSemantics<std::tuple<Ts...>>
: public std::integral_constant<bool, any_of({UseMove<Ts>()...})> {
- static constexpr AbstractPromise::Executor::ArgumentPassingType
- argument_passing_type =
- any_of({UseMove<Ts>()...})
- ? AbstractPromise::Executor::ArgumentPassingType::kMove
- : AbstractPromise::Executor::ArgumentPassingType::kNormal;
+ static constexpr PromiseExecutor::ArgumentPassingType argument_passing_type =
+ any_of({UseMove<Ts>()...})
+ ? PromiseExecutor::ArgumentPassingType::kMove
+ : PromiseExecutor::ArgumentPassingType::kNormal;
};
// CallbackTraits extracts properties relevant to Promises from a callback.
@@ -179,9 +197,8 @@ struct CallbackTraits<T()> {
using ArgType = void;
using ReturnType = T;
using SignatureType = T();
- static constexpr AbstractPromise::Executor::ArgumentPassingType
- argument_passing_type =
- AbstractPromise::Executor::ArgumentPassingType::kNormal;
+ static constexpr PromiseExecutor::ArgumentPassingType argument_passing_type =
+ PromiseExecutor::ArgumentPassingType::kNormal;
};
template <typename T, typename Arg>
@@ -191,8 +208,8 @@ struct CallbackTraits<T(Arg)> {
using ArgType = Arg;
using ReturnType = T;
using SignatureType = T(Arg);
- static constexpr AbstractPromise::Executor::ArgumentPassingType
- argument_passing_type = UseMoveSemantics<Arg>::argument_passing_type;
+ static constexpr PromiseExecutor::ArgumentPassingType argument_passing_type =
+ UseMoveSemantics<Arg>::argument_passing_type;
};
template <typename T, typename... Args>
@@ -205,11 +222,21 @@ struct CallbackTraits<T(Args...)> {
using SignatureType = T(Args...);
// If any arguments need move semantics, treat as if they all do.
- static constexpr AbstractPromise::Executor::ArgumentPassingType
- argument_passing_type =
- any_of({UseMoveSemantics<Args>::value...})
- ? AbstractPromise::Executor::ArgumentPassingType::kMove
- : AbstractPromise::Executor::ArgumentPassingType::kNormal;
+ static constexpr PromiseExecutor::ArgumentPassingType argument_passing_type =
+ any_of({UseMoveSemantics<Args>::value...})
+ ? PromiseExecutor::ArgumentPassingType::kMove
+ : PromiseExecutor::ArgumentPassingType::kNormal;
+};
+
+template <>
+struct CallbackTraits<DoNothing> {
+ using ResolveType = void;
+ using RejectType = NoReject;
+ using ArgType = void;
+ using ReturnType = void;
+ using SignatureType = void();
+ static constexpr PromiseExecutor::ArgumentPassingType argument_passing_type =
+ PromiseExecutor::ArgumentPassingType::kNormal;
};
// Adaptors for OnceCallback and RepeatingCallback
@@ -553,49 +580,77 @@ struct RunHelper<OnceCallback<CbResult(CbArgs...)>,
}
};
+// For use with base::Bind*. Cancels the promise if the callback was not run by
+// the time the callback is deleted.
+class BASE_EXPORT PromiseHolder {
+ public:
+ explicit PromiseHolder(scoped_refptr<internal::AbstractPromise> promise);
+
+ ~PromiseHolder();
+
+ PromiseHolder(PromiseHolder&& other);
+
+ scoped_refptr<internal::AbstractPromise> Unwrap() const;
+
+ private:
+ mutable scoped_refptr<internal::AbstractPromise> promise_;
+};
+
+} // namespace internal
+
+template <>
+struct BindUnwrapTraits<internal::PromiseHolder> {
+ static scoped_refptr<internal::AbstractPromise> Unwrap(
+ const internal::PromiseHolder& o) {
+ return o.Unwrap();
+ }
+};
+
+namespace internal {
+
// Used by ManualPromiseResolver<> to generate callbacks.
template <typename T, typename... Args>
class PromiseCallbackHelper {
public:
- using Callback = base::OnceCallback<void(Args&&...)>;
- using RepeatingCallback = base::RepeatingCallback<void(Args&&...)>;
+ using Callback = base::OnceCallback<void(Args...)>;
+ using RepeatingCallback = base::RepeatingCallback<void(Args...)>;
static Callback GetResolveCallback(scoped_refptr<AbstractPromise>& promise) {
return base::BindOnce(
- [](scoped_refptr<AbstractPromise> promise, Args&&... args) {
+ [](scoped_refptr<AbstractPromise> promise, Args... args) {
promise->emplace(Resolved<T>{std::forward<Args>(args)...});
promise->OnResolved();
},
- promise);
+ PromiseHolder(promise));
}
static RepeatingCallback GetRepeatingResolveCallback(
scoped_refptr<AbstractPromise>& promise) {
return base::BindRepeating(
- [](scoped_refptr<AbstractPromise> promise, Args&&... args) {
+ [](scoped_refptr<AbstractPromise> promise, Args... args) {
promise->emplace(Resolved<T>{std::forward<Args>(args)...});
promise->OnResolved();
},
- promise);
+ PromiseHolder(promise));
}
static Callback GetRejectCallback(scoped_refptr<AbstractPromise>& promise) {
return base::BindOnce(
- [](scoped_refptr<AbstractPromise> promise, Args&&... args) {
+ [](scoped_refptr<AbstractPromise> promise, Args... args) {
promise->emplace(Rejected<T>{std::forward<Args>(args)...});
promise->OnRejected();
},
- promise);
+ PromiseHolder(promise));
}
static RepeatingCallback GetRepeatingRejectCallback(
scoped_refptr<AbstractPromise>& promise) {
return base::BindRepeating(
- [](scoped_refptr<AbstractPromise> promise, Args&&... args) {
+ [](scoped_refptr<AbstractPromise> promise, Args... args) {
promise->emplace(Rejected<T>{std::forward<Args>(args)...});
promise->OnRejected();
},
- promise);
+ PromiseHolder(promise));
}
};
@@ -605,13 +660,13 @@ class PromiseCallbackHelper {
template <typename PromiseType, typename CallbackArgType>
struct IsValidPromiseArg {
static constexpr bool value =
- std::is_same<PromiseType, std::decay_t<CallbackArgType>>::value;
+ std::is_convertible<PromiseType, std::decay_t<CallbackArgType>>::value;
};
template <typename PromiseType, typename CallbackArgType>
struct IsValidPromiseArg<PromiseType&, CallbackArgType> {
static constexpr bool value =
- std::is_same<PromiseType&, CallbackArgType>::value;
+ std::is_convertible<PromiseType&, CallbackArgType>::value;
};
// This template helps assign the reject value from a prerequisite into the
@@ -626,6 +681,41 @@ struct AllPromiseRejectHelper {
// TODO(alexclarke): Specalize AllPromiseRejectHelper for variants.
+// To reduce template bloat executors hold CallbackBase. These functions convert
+// various types to CallbackBase.
+DoNothing BASE_EXPORT ToCallbackBase(DoNothing task);
+
+template <typename CallbackT>
+CallbackBase&& ToCallbackBase(CallbackT&& task) {
+ static_assert(sizeof(CallbackBase) == sizeof(CallbackT),
+ "We assume it's possible to cast from CallbackBase to "
+ "CallbackT");
+ return static_cast<CallbackBase&&>(task);
+}
+
+template <typename CallbackT>
+CallbackBase&& ToCallbackBase(const CallbackT&& task) {
+ static_assert(sizeof(CallbackBase) == sizeof(CallbackT),
+ "We assume it's possible to cast from CallbackBase to "
+ "CallbackT");
+ return static_cast<CallbackBase&&>(const_cast<CallbackT&&>(task));
+}
+
+// Helps reduce template bloat by moving AbstractPromise construction out of
+// line.
+scoped_refptr<AbstractPromise> BASE_EXPORT
+ConstructAbstractPromiseWithSinglePrerequisite(
+ const scoped_refptr<TaskRunner>& task_runner,
+ const Location& from_here,
+ AbstractPromise* prerequsite,
+ internal::PromiseExecutor::Data&& executor_data) noexcept;
+
+scoped_refptr<AbstractPromise> BASE_EXPORT
+ConstructManualPromiseResolverPromise(const Location& from_here,
+ RejectPolicy reject_policy,
+ bool can_resolve,
+ bool can_reject);
+
} // namespace internal
} // namespace base
diff --git a/chromium/base/task/promise/helpers_unittest.cc b/chromium/base/task/promise/helpers_unittest.cc
index edaecce62d2..a8fe61dd3af 100644
--- a/chromium/base/task/promise/helpers_unittest.cc
+++ b/chromium/base/task/promise/helpers_unittest.cc
@@ -6,8 +6,10 @@
#include "base/bind.h"
#include "base/task/promise/promise.h"
+#include "base/task_runner.h"
#include "base/test/bind_test_util.h"
#include "base/test/do_nothing_promise.h"
+#include "base/test/gtest_util.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
@@ -280,5 +282,125 @@ TEST(RunHelper, CallbackIntArgumentArgumentVoidResult) {
EXPECT_EQ(result->value().type(), TypeId::From<Resolved<void>>());
}
+TEST(PromiseCallbackHelper, GetResolveCallback) {
+ PromiseCallbackHelper<int, int> helper;
+ scoped_refptr<AbstractPromise> promise =
+ DoNothingPromiseBuilder(FROM_HERE).SetCanResolve(true);
+
+ OnceCallback<void(int)> resolve_cb = helper.GetResolveCallback(promise);
+
+ std::move(resolve_cb).Run(1234);
+
+ EXPECT_EQ(unique_any_cast<Resolved<int>>(promise->value()).value, 1234);
+}
+
+TEST(PromiseCallbackHelper, GetResolveReferenceCallback) {
+ int foo = 123;
+ PromiseCallbackHelper<int&, int&> helper;
+ scoped_refptr<AbstractPromise> promise =
+ DoNothingPromiseBuilder(FROM_HERE).SetCanResolve(true);
+
+ OnceCallback<void(int&)> resolve_cb = helper.GetResolveCallback(promise);
+
+ std::move(resolve_cb).Run(foo);
+
+ EXPECT_EQ(&unique_any_cast<Resolved<int&>>(promise->value()).value, &foo);
+}
+
+TEST(PromiseCallbackHelper, GetRejectCallback) {
+ PromiseCallbackHelper<int, int> helper;
+ scoped_refptr<AbstractPromise> promise =
+ DoNothingPromiseBuilder(FROM_HERE).SetCanReject(true).SetRejectPolicy(
+ RejectPolicy::kCatchNotRequired);
+
+ OnceCallback<void(int)> reject_cb = helper.GetRejectCallback(promise);
+
+ std::move(reject_cb).Run(1234);
+
+ EXPECT_EQ(unique_any_cast<Rejected<int>>(promise->value()).value, 1234);
+}
+
+TEST(PromiseCallbackHelper, GetRejectReferenceCallback) {
+ int foo = 123;
+ PromiseCallbackHelper<int&, int&> helper;
+ scoped_refptr<AbstractPromise> promise =
+ DoNothingPromiseBuilder(FROM_HERE).SetCanReject(true).SetRejectPolicy(
+ RejectPolicy::kCatchNotRequired);
+
+ OnceCallback<void(int&)> reject_cb = helper.GetRejectCallback(promise);
+
+ std::move(reject_cb).Run(foo);
+
+ EXPECT_EQ(&unique_any_cast<Rejected<int&>>(promise->value()).value, &foo);
+}
+
+TEST(PromiseCallbackHelper, GetRepeatingResolveCallback) {
+ PromiseCallbackHelper<int, int> helper;
+ scoped_refptr<AbstractPromise> promise =
+ DoNothingPromiseBuilder(FROM_HERE).SetCanResolve(true);
+
+ RepeatingCallback<void(int)> resolve_cb =
+ helper.GetRepeatingResolveCallback(promise);
+
+ resolve_cb.Run(1234);
+
+ EXPECT_EQ(unique_any_cast<Resolved<int>>(promise->value()).value, 1234);
+
+ // Can't run |resolve_cb| more than once.
+ EXPECT_DCHECK_DEATH({ resolve_cb.Run(1234); });
+}
+
+TEST(PromiseCallbackHelper, GetRepeatingResolveReferenceCallback) {
+ int foo = 123;
+ PromiseCallbackHelper<int&, int&> helper;
+ scoped_refptr<AbstractPromise> promise =
+ DoNothingPromiseBuilder(FROM_HERE).SetCanResolve(true);
+
+ RepeatingCallback<void(int&)> resolve_cb =
+ helper.GetRepeatingResolveCallback(promise);
+
+ resolve_cb.Run(foo);
+
+ EXPECT_EQ(&unique_any_cast<Resolved<int&>>(promise->value()).value, &foo);
+
+ // Can't run |resolve_cb| more than once.
+ EXPECT_DCHECK_DEATH({ resolve_cb.Run(foo); });
+}
+
+TEST(PromiseCallbackHelper, GetRepeatingRejectCallback) {
+ PromiseCallbackHelper<int, int> helper;
+ scoped_refptr<AbstractPromise> promise =
+ DoNothingPromiseBuilder(FROM_HERE).SetCanReject(true).SetRejectPolicy(
+ RejectPolicy::kCatchNotRequired);
+
+ RepeatingCallback<void(int)> reject_cb =
+ helper.GetRepeatingRejectCallback(promise);
+
+ reject_cb.Run(1234);
+
+ EXPECT_EQ(unique_any_cast<Rejected<int>>(promise->value()).value, 1234);
+
+ // Can't run |reject_cb| more than once.
+ EXPECT_DCHECK_DEATH({ reject_cb.Run(1234); });
+}
+
+TEST(PromiseCallbackHelper, GetRepeatingRejectReferenceCallback) {
+ int foo = 123;
+ PromiseCallbackHelper<int&, int&> helper;
+ scoped_refptr<AbstractPromise> promise =
+ DoNothingPromiseBuilder(FROM_HERE).SetCanReject(true).SetRejectPolicy(
+ RejectPolicy::kCatchNotRequired);
+
+ RepeatingCallback<void(int&)> reject_cb =
+ helper.GetRepeatingRejectCallback(promise);
+
+ reject_cb.Run(foo);
+
+ EXPECT_EQ(&unique_any_cast<Rejected<int&>>(promise->value()).value, &foo);
+
+ // Can't run |reject_cb| more than once.
+ EXPECT_DCHECK_DEATH({ reject_cb.Run(foo); });
+}
+
} // namespace internal
} // namespace base
diff --git a/chromium/base/task/promise/no_op_promise_executor.cc b/chromium/base/task/promise/no_op_promise_executor.cc
index 36d5d47aca7..a74cde60bee 100644
--- a/chromium/base/task/promise/no_op_promise_executor.cc
+++ b/chromium/base/task/promise/no_op_promise_executor.cc
@@ -18,9 +18,9 @@ NoOpPromiseExecutor::NoOpPromiseExecutor(bool can_resolve, bool can_reject)
NoOpPromiseExecutor::~NoOpPromiseExecutor() {}
-AbstractPromise::Executor::PrerequisitePolicy
-NoOpPromiseExecutor::GetPrerequisitePolicy() const {
- return AbstractPromise::Executor::PrerequisitePolicy::kNever;
+PromiseExecutor::PrerequisitePolicy NoOpPromiseExecutor::GetPrerequisitePolicy()
+ const {
+ return PromiseExecutor::PrerequisitePolicy::kNever;
}
bool NoOpPromiseExecutor::IsCancelled() const {
@@ -28,14 +28,14 @@ bool NoOpPromiseExecutor::IsCancelled() const {
}
#if DCHECK_IS_ON()
-AbstractPromise::Executor::ArgumentPassingType
+PromiseExecutor::ArgumentPassingType
NoOpPromiseExecutor::ResolveArgumentPassingType() const {
- return AbstractPromise::Executor::ArgumentPassingType::kNoCallback;
+ return PromiseExecutor::ArgumentPassingType::kNoCallback;
}
-AbstractPromise::Executor::ArgumentPassingType
+PromiseExecutor::ArgumentPassingType
NoOpPromiseExecutor::RejectArgumentPassingType() const {
- return AbstractPromise::Executor::ArgumentPassingType::kNoCallback;
+ return PromiseExecutor::ArgumentPassingType::kNoCallback;
}
bool NoOpPromiseExecutor::CanResolve() const {
@@ -55,12 +55,10 @@ scoped_refptr<internal::AbstractPromise> NoOpPromiseExecutor::Create(
bool can_resolve,
bool can_reject,
RejectPolicy reject_policy) {
- return internal::AbstractPromise::Create(
- nullptr, from_here, nullptr, reject_policy,
- internal::AbstractPromise::ConstructWith<
- internal::DependentList::ConstructUnresolved,
- internal::NoOpPromiseExecutor>(),
- can_resolve, can_reject);
+ return AbstractPromise::CreateNoPrerequisitePromise(
+ from_here, reject_policy, DependentList::ConstructUnresolved(),
+ PromiseExecutor::Data(in_place_type_t<NoOpPromiseExecutor>(), can_resolve,
+ can_reject));
}
} // namespace internal
diff --git a/chromium/base/task/promise/no_op_promise_executor.h b/chromium/base/task/promise/no_op_promise_executor.h
index d65dbc36b4a..e3e03a7cc74 100644
--- a/chromium/base/task/promise/no_op_promise_executor.h
+++ b/chromium/base/task/promise/no_op_promise_executor.h
@@ -24,14 +24,12 @@ class BASE_EXPORT NoOpPromiseExecutor {
bool can_reject,
RejectPolicy reject_policy);
- AbstractPromise::Executor::PrerequisitePolicy GetPrerequisitePolicy() const;
+ PromiseExecutor::PrerequisitePolicy GetPrerequisitePolicy() const;
bool IsCancelled() const;
#if DCHECK_IS_ON()
- AbstractPromise::Executor::ArgumentPassingType ResolveArgumentPassingType()
- const;
- AbstractPromise::Executor::ArgumentPassingType RejectArgumentPassingType()
- const;
+ PromiseExecutor::ArgumentPassingType ResolveArgumentPassingType() const;
+ PromiseExecutor::ArgumentPassingType RejectArgumentPassingType() const;
bool CanResolve() const;
bool CanReject() const;
#endif
diff --git a/chromium/base/task/promise/post_task_executor.h b/chromium/base/task/promise/post_task_executor.h
new file mode 100644
index 00000000000..0be6ce298ac
--- /dev/null
+++ b/chromium/base/task/promise/post_task_executor.h
@@ -0,0 +1,95 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_PROMISE_POST_TASK_EXECUTOR_H_
+#define BASE_TASK_PROMISE_POST_TASK_EXECUTOR_H_
+
+#include "base/bind_helpers.h"
+#include "base/macros.h"
+#include "base/task/promise/abstract_promise.h"
+#include "base/task/promise/helpers.h"
+
+namespace base {
+namespace internal {
+
+// PromiseExecutor for use by PostTask.
+template <typename ReturnType>
+class PostTaskExecutor {
+ public:
+ // Extract properties from |ReturnType|.
+ using CallbackTraits = PromiseCallbackTraits<ReturnType>;
+ using ReturnedPromiseResolveT = typename CallbackTraits::ResolveType;
+ using ReturnedPromiseRejectT = typename CallbackTraits::RejectType;
+ using ResolveStorage = Resolved<ReturnedPromiseResolveT>;
+ using RejectStorage = Rejected<ReturnedPromiseRejectT>;
+
+ explicit PostTaskExecutor(CallbackBase&& task) noexcept
+ : task_(std::move(task)) {}
+
+ explicit PostTaskExecutor(DoNothing task) noexcept : task_(task.Once()) {}
+
+ PromiseExecutor::PrerequisitePolicy GetPrerequisitePolicy() const {
+ return PromiseExecutor::PrerequisitePolicy::kAll;
+ }
+
+ bool IsCancelled() const { return task_.IsCancelled(); }
+
+#if DCHECK_IS_ON()
+ PromiseExecutor::ArgumentPassingType ResolveArgumentPassingType() const {
+ return PromiseExecutor::ArgumentPassingType::kNoCallback;
+ }
+
+ PromiseExecutor::ArgumentPassingType RejectArgumentPassingType() const {
+ return PromiseExecutor::ArgumentPassingType::kNoCallback;
+ }
+
+ bool CanResolve() const { return CallbackTraits::could_resolve; }
+
+ bool CanReject() const { return CallbackTraits::could_reject; }
+#endif
+
+ NOINLINE void Execute(AbstractPromise* promise) {
+ static_assert(sizeof(CallbackBase) == sizeof(OnceCallback<ReturnType()>),
+ "We assume it's possible to cast from CallbackBase to "
+ "OnceCallback<ReturnType()>");
+ OnceCallback<ReturnType()>* task =
+ static_cast<OnceCallback<ReturnType()>*>(&task_);
+ internal::RunHelper<OnceCallback<ReturnType()>, void, ResolveStorage,
+ RejectStorage>::Run(std::move(*task), nullptr, promise);
+
+ using CheckResultTagType =
+ typename internal::PromiseCallbackTraits<ReturnType>::TagType;
+
+ CheckResultType(promise, CheckResultTagType());
+ }
+
+ private:
+ static void CheckResultType(AbstractPromise* promise, CouldResolveOrReject) {
+ if (promise->IsResolvedWithPromise() ||
+ promise->value().type() == TypeId::From<ResolveStorage>()) {
+ promise->OnResolved();
+ } else {
+ DCHECK_EQ(promise->value().type(), TypeId::From<RejectStorage>())
+ << " See " << promise->from_here().ToString();
+ promise->OnRejected();
+ }
+ }
+
+ static void CheckResultType(AbstractPromise* promise, CanOnlyResolve) {
+ promise->OnResolved();
+ }
+
+ static void CheckResultType(AbstractPromise* promise, CanOnlyReject) {
+ promise->OnRejected();
+ }
+
+ CallbackBase task_;
+
+ DISALLOW_COPY_AND_ASSIGN(PostTaskExecutor);
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_TASK_PROMISE_POST_TASK_EXECUTOR_H_
diff --git a/chromium/base/task/promise/post_task_executor_unittest.cc b/chromium/base/task/promise/post_task_executor_unittest.cc
new file mode 100644
index 00000000000..8e86aaefad4
--- /dev/null
+++ b/chromium/base/task/promise/post_task_executor_unittest.cc
@@ -0,0 +1,68 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/promise/post_task_executor.h"
+
+#include "base/bind.h"
+#include "base/task/promise/abstract_promise.h"
+#include "base/task/promise/helpers.h"
+#include "base/task_runner.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace internal {
+
+class PostTaskExecutorTest : public testing::Test {
+ public:
+ template <typename CallbackT>
+ scoped_refptr<internal::AbstractPromise> CreatePostTaskPromise(
+ const Location& from_here,
+ CallbackT&& task) {
+ // Extract properties from |task| callback.
+ using CallbackTraits = CallbackTraits<std::decay_t<CallbackT>>;
+
+ internal::PromiseExecutor::Data executor_data(
+ in_place_type_t<
+ internal::PostTaskExecutor<typename CallbackTraits::ReturnType>>(),
+ internal::ToCallbackBase(std::move(task)));
+
+ return AbstractPromise::CreateNoPrerequisitePromise(
+ from_here, RejectPolicy::kMustCatchRejection,
+ internal::DependentList::ConstructUnresolved(),
+ std::move(executor_data));
+ }
+};
+
+TEST_F(PostTaskExecutorTest, OnceClosure) {
+ bool run = false;
+
+ scoped_refptr<AbstractPromise> p = CreatePostTaskPromise(
+ FROM_HERE, BindOnce([](bool* run) { *run = true; }, &run));
+
+ p->Execute();
+
+ EXPECT_TRUE(run);
+}
+
+TEST_F(PostTaskExecutorTest, RepeatingClosure) {
+ bool run = false;
+
+ scoped_refptr<AbstractPromise> p = CreatePostTaskPromise(
+ FROM_HERE, BindRepeating([](bool* run) { *run = true; }, &run));
+
+ p->Execute();
+
+ EXPECT_TRUE(run);
+}
+
+TEST_F(PostTaskExecutorTest, DoNothing) {
+ // Check it compiles and the executor doesn't crash when run.
+ scoped_refptr<AbstractPromise> p =
+ CreatePostTaskPromise(FROM_HERE, DoNothing());
+
+ p->Execute();
+}
+
+} // namespace internal
+} // namespace base
diff --git a/chromium/base/task/promise/promise.h b/chromium/base/task/promise/promise.h
index 793f605fc73..4927bde1cac 100644
--- a/chromium/base/task/promise/promise.h
+++ b/chromium/base/task/promise/promise.h
@@ -5,7 +5,7 @@
#ifndef BASE_TASK_PROMISE_PROMISE_H_
#define BASE_TASK_PROMISE_PROMISE_H_
-#include "base/task/post_task.h"
+#include "base/run_loop.h"
#include "base/task/promise/all_container_executor.h"
#include "base/task/promise/all_tuple_executor.h"
#include "base/task/promise/finally_executor.h"
@@ -13,10 +13,14 @@
#include "base/task/promise/no_op_promise_executor.h"
#include "base/task/promise/promise_result.h"
#include "base/task/promise/then_and_catch_executor.h"
-#include "base/threading/sequenced_task_runner_handle.h"
+#include "base/task/task_traits.h"
namespace base {
+// We can't include post_task.h here so we forward declare it.
+BASE_EXPORT scoped_refptr<TaskRunner> CreateTaskRunner(
+ const TaskTraits& traits);
+
// Inspired by ES6 promises, Promise<> is a PostTask based callback system for
// asynchronous operations. An operation can resolve (succeed) with a value and
// optionally reject (fail) with a different result. Interested parties can be
@@ -47,22 +51,6 @@ class Promise {
scoped_refptr<internal::AbstractPromise> abstract_promise) noexcept
: abstract_promise_(std::move(abstract_promise)) {}
- // Constructs an unresolved promise for use by a ManualPromiseResolver<> and
- // TaskRunner::PostPromise.
- Promise(scoped_refptr<TaskRunner> task_runner,
- const Location& location,
- RejectPolicy reject_policy)
- : abstract_promise_(internal::AbstractPromise::Create(
- std::move(task_runner),
- location,
- nullptr,
- reject_policy,
- internal::AbstractPromise::ConstructWith<
- internal::DependentList::ConstructUnresolved,
- internal::NoOpPromiseExecutor>(),
- /* can_resolve */ !std::is_same<ResolveType, NoResolve>::value,
- /* can_reject */ !std::is_same<RejectType, NoReject>::value)) {}
-
NOINLINE ~Promise() = default;
operator bool() const { return !!abstract_promise_; }
@@ -72,6 +60,57 @@ class Promise {
return abstract_promise_->IsCanceled();
}
+ // Waits until the promise has settled and if resolved it returns the resolved
+ // value.
+ template <typename T = ResolveType,
+ std::enable_if_t<!std::is_reference<T>::value &&
+ !std::is_void<T>::value>* = nullptr>
+ T TakeResolveValueForTesting() {
+ static_assert(!std::is_same<NoResolve, T>::value,
+ "A NoResolve promise can't resolve.");
+ if (!abstract_promise_->IsSettled()) {
+ RunLoop run_loop;
+ FinallyHere(FROM_HERE, run_loop.QuitClosure());
+ run_loop.Run();
+ }
+ DCHECK(abstract_promise_->IsResolved())
+ << "Can't take resolved value, promise wasn't resolved.";
+ return std::move(
+ unique_any_cast<Resolved<T>>(&abstract_promise_->TakeValue().value())
+ ->value);
+ }
+
+ // Waits until the promise has settled and if rejected it returns the rejected
+ // value.
+ template <typename T = RejectType,
+ std::enable_if_t<!std::is_reference<T>::value &&
+ !std::is_void<T>::value>* = nullptr>
+ T TakeRejectValueForTesting() {
+ static_assert(!std::is_same<NoReject, T>::value,
+ "A NoReject promise can't reject.");
+ if (!abstract_promise_->IsSettled()) {
+ RunLoop run_loop;
+ FinallyHere(FROM_HERE, run_loop.QuitClosure());
+ run_loop.Run();
+ }
+ abstract_promise_->IgnoreUncaughtCatchForTesting();
+ DCHECK(abstract_promise_->IsRejected())
+ << "Can't take rejected value, promise wasn't rejected.";
+ return std::move(
+ unique_any_cast<Rejected<T>>(&abstract_promise_->TakeValue().value())
+ ->value);
+ }
+
+ bool IsResolvedForTesting() const {
+ DCHECK(abstract_promise_);
+ return abstract_promise_->IsResolvedForTesting();
+ }
+
+ bool IsRejectedForTesting() const {
+ DCHECK(abstract_promise_);
+ return abstract_promise_->IsRejectedForTesting();
+ }
+
// A task to execute |on_reject| is posted on |task_runner| as soon as this
// promise (or an uncaught ancestor) is rejected. A Promise<> for the return
// value of |on_reject| is returned.
@@ -82,11 +121,15 @@ class Promise {
// 2. Promise<Resolve, Reject> where the result is a curried promise.
//
// If a promise has multiple Catches they will be run in order of creation.
+ //
+ // |task_runner| is const-ref to avoid bloat due to the destructor (which posts a
+ // task).
template <typename RejectCb>
- NOINLINE auto CatchOn(scoped_refptr<TaskRunner> task_runner,
+ NOINLINE auto CatchOn(const scoped_refptr<TaskRunner>& task_runner,
const Location& from_here,
- RejectCb&& on_reject) noexcept {
+ RejectCb on_reject) noexcept {
DCHECK(abstract_promise_);
+ DCHECK(!on_reject.is_null());
// Extract properties from the |on_reject| callback.
using RejectCallbackTraits = internal::CallbackTraits<RejectCb>;
@@ -117,36 +160,31 @@ class Promise {
std::is_const<std::remove_reference_t<RejectCallbackArgT>>::value,
"Google C++ Style: References in function parameters must be const.");
- return Promise<ReturnedPromiseResolveT,
- ReturnedPromiseRejectT>(internal::AbstractPromise::Create(
- std::move(task_runner), from_here,
- std::make_unique<internal::AbstractPromise::AdjacencyList>(
- abstract_promise_),
- RejectPolicy::kMustCatchRejection,
- internal::AbstractPromise::ConstructWith<
- internal::DependentList::ConstructUnresolved,
- internal::ThenAndCatchExecutor<
- OnceClosure, // Never called.
- OnceCallback<typename RejectCallbackTraits::SignatureType>,
- internal::NoCallback, RejectType,
- Resolved<ReturnedPromiseResolveT>,
- Rejected<ReturnedPromiseRejectT>>>(),
- OnceClosure(),
- static_cast<OnceCallback<typename RejectCallbackTraits::SignatureType>>(
- std::forward<RejectCb>(on_reject))));
+ internal::PromiseExecutor::Data executor_data(
+ in_place_type_t<internal::ThenAndCatchExecutor<
+ OnceClosure, // Never called.
+ OnceCallback<typename RejectCallbackTraits::SignatureType>,
+ internal::NoCallback, RejectType, Resolved<ReturnedPromiseResolveT>,
+ Rejected<ReturnedPromiseRejectT>>>(),
+ OnceClosure(), internal::ToCallbackBase(std::move(on_reject)));
+
+ return Promise<ReturnedPromiseResolveT, ReturnedPromiseRejectT>(
+ ConstructAbstractPromiseWithSinglePrerequisite(
+ task_runner, from_here, abstract_promise_.get(),
+ std::move(executor_data)));
}
template <typename RejectCb>
auto CatchOn(const TaskTraits& traits,
const Location& from_here,
RejectCb&& on_reject) noexcept {
- return CatchOn(CreateTaskRunnerWithTraits(traits), from_here,
+ return CatchOn(CreateTaskRunner(traits), from_here,
std::forward<RejectCb>(on_reject));
}
template <typename RejectCb>
auto CatchHere(const Location& from_here, RejectCb&& on_reject) noexcept {
- return CatchOn(SequencedTaskRunnerHandle::Get(), from_here,
+ return CatchOn(internal::GetCurrentSequence(), from_here,
std::forward<RejectCb>(on_reject));
}
@@ -160,11 +198,15 @@ class Promise {
// 2. Promise<Resolve, Reject> where the result is a curried promise.
//
// If a promise has multiple Thens they will be run in order of creation.
+ //
+ // |task_runner| is const-ref to avoid bloat due to the destructor (which posts a
+ // task).
template <typename ResolveCb>
- NOINLINE auto ThenOn(scoped_refptr<TaskRunner> task_runner,
+ NOINLINE auto ThenOn(const scoped_refptr<TaskRunner>& task_runner,
const Location& from_here,
- ResolveCb&& on_resolve) noexcept {
+ ResolveCb on_resolve) noexcept {
DCHECK(abstract_promise_);
+ DCHECK(!on_resolve.is_null());
// Extract properties from the |on_resolve| callback.
using ResolveCallbackTraits =
@@ -194,33 +236,31 @@ class Promise {
std::is_const<std::remove_reference_t<ResolveCallbackArgT>>::value,
"Google C++ Style: References in function parameters must be const.");
+ internal::PromiseExecutor::Data executor_data(
+ in_place_type_t<internal::ThenAndCatchExecutor<
+ OnceCallback<typename ResolveCallbackTraits::SignatureType>,
+ OnceClosure, ResolveType, internal::NoCallback,
+ Resolved<ReturnedPromiseResolveT>,
+ Rejected<ReturnedPromiseRejectT>>>(),
+ internal::ToCallbackBase(std::move(on_resolve)), OnceClosure());
+
return Promise<ReturnedPromiseResolveT, ReturnedPromiseRejectT>(
- internal::AbstractPromise::Create(
- std::move(task_runner), from_here,
- std::make_unique<internal::AbstractPromise::AdjacencyList>(
- abstract_promise_),
- RejectPolicy::kMustCatchRejection,
- internal::AbstractPromise::ConstructWith<
- internal::DependentList::ConstructUnresolved,
- internal::ThenAndCatchExecutor<
- OnceCallback<typename ResolveCallbackTraits::SignatureType>,
- OnceClosure, ResolveType, internal::NoCallback,
- Resolved<ReturnedPromiseResolveT>,
- Rejected<ReturnedPromiseRejectT>>>(),
- std::forward<ResolveCb>(on_resolve), OnceClosure()));
+ ConstructAbstractPromiseWithSinglePrerequisite(
+ task_runner, from_here, abstract_promise_.get(),
+ std::move(executor_data)));
}
template <typename ResolveCb>
auto ThenOn(const TaskTraits& traits,
const Location& from_here,
ResolveCb&& on_resolve) noexcept {
- return ThenOn(CreateTaskRunnerWithTraits(traits), from_here,
+ return ThenOn(CreateTaskRunner(traits), from_here,
std::forward<ResolveCb>(on_resolve));
}
template <typename ResolveCb>
auto ThenHere(const Location& from_here, ResolveCb&& on_resolve) noexcept {
- return ThenOn(SequencedTaskRunnerHandle::Get(), from_here,
+ return ThenOn(internal::GetCurrentSequence(), from_here,
std::forward<ResolveCb>(on_resolve));
}
@@ -241,12 +281,17 @@ class Promise {
// Note if either |on_resolve| or |on_reject| are canceled (due to weak
// pointer invalidation), then the other must be canceled at the same time as
// well. This restriction only applies to this form of ThenOn/ThenHere.
+ //
+ // |task_runner| is const-ref to avoid bloat due to the destructor (which posts a
+ // task).
template <typename ResolveCb, typename RejectCb>
- NOINLINE auto ThenOn(scoped_refptr<TaskRunner> task_runner,
+ NOINLINE auto ThenOn(const scoped_refptr<TaskRunner>& task_runner,
const Location& from_here,
- ResolveCb&& on_resolve,
- RejectCb&& on_reject) noexcept {
+ ResolveCb on_resolve,
+ RejectCb on_reject) noexcept {
DCHECK(abstract_promise_);
+ DCHECK(!on_resolve.is_null());
+ DCHECK(!on_reject.is_null());
// Extract properties from the |on_resolve| and |on_reject| callbacks.
using ResolveCallbackTraits = internal::CallbackTraits<ResolveCb>;
@@ -290,24 +335,19 @@ class Promise {
std::is_const<std::remove_reference_t<RejectCallbackArgT>>::value,
"Google C++ Style: References in function parameters must be const.");
- return Promise<ReturnedPromiseResolveT,
- ReturnedPromiseRejectT>(internal::AbstractPromise::Create(
- std::move(task_runner), from_here,
- std::make_unique<internal::AbstractPromise::AdjacencyList>(
- abstract_promise_),
- RejectPolicy::kMustCatchRejection,
- internal::AbstractPromise::ConstructWith<
- internal::DependentList::ConstructUnresolved,
- internal::ThenAndCatchExecutor<
- OnceCallback<typename ResolveCallbackTraits::SignatureType>,
- OnceCallback<typename RejectCallbackTraits::SignatureType>,
- ResolveType, RejectType, Resolved<ReturnedPromiseResolveT>,
- Rejected<ReturnedPromiseRejectT>>>(),
- static_cast<
- OnceCallback<typename ResolveCallbackTraits::SignatureType>>(
- std::forward<ResolveCb>(on_resolve)),
- static_cast<OnceCallback<typename RejectCallbackTraits::SignatureType>>(
- std::forward<RejectCb>(on_reject))));
+ internal::PromiseExecutor::Data executor_data(
+ in_place_type_t<internal::ThenAndCatchExecutor<
+ OnceCallback<typename ResolveCallbackTraits::SignatureType>,
+ OnceCallback<typename RejectCallbackTraits::SignatureType>,
+ ResolveType, RejectType, Resolved<ReturnedPromiseResolveT>,
+ Rejected<ReturnedPromiseRejectT>>>(),
+ internal::ToCallbackBase(std::move(on_resolve)),
+ internal::ToCallbackBase(std::move(on_reject)));
+
+ return Promise<ReturnedPromiseResolveT, ReturnedPromiseRejectT>(
+ ConstructAbstractPromiseWithSinglePrerequisite(
+ task_runner, from_here, abstract_promise_.get(),
+ std::move(executor_data)));
}
template <typename ResolveCb, typename RejectCb>
@@ -315,7 +355,7 @@ class Promise {
const Location& from_here,
ResolveCb&& on_resolve,
RejectCb&& on_reject) noexcept {
- return ThenOn(CreateTaskRunnerWithTraits(traits), from_here,
+ return ThenOn(CreateTaskRunner(traits), from_here,
std::forward<ResolveCb>(on_resolve),
std::forward<RejectCb>(on_reject));
}
@@ -324,7 +364,7 @@ class Promise {
auto ThenHere(const Location& from_here,
ResolveCb&& on_resolve,
RejectCb&& on_reject) noexcept {
- return ThenOn(SequencedTaskRunnerHandle::Get(), from_here,
+ return ThenOn(internal::GetCurrentSequence(), from_here,
std::forward<ResolveCb>(on_resolve),
std::forward<RejectCb>(on_reject));
}
@@ -335,10 +375,13 @@ class Promise {
// promises, this doesn't return a Promise that is resolved or rejected with
// the parent's value if |finally_callback| returns void. (We could support
// this if needed it but it seems unlikely to be used).
+ //
+ // |task_runner| is const-ref to avoid bloat due to the destructor (which posts a
+ // task).
template <typename FinallyCb>
- NOINLINE auto FinallyOn(scoped_refptr<TaskRunner> task_runner,
+ NOINLINE auto FinallyOn(const scoped_refptr<TaskRunner>& task_runner,
const Location& from_here,
- FinallyCb&& finally_callback) noexcept {
+ FinallyCb finally_callback) noexcept {
DCHECK(abstract_promise_);
// Extract properties from |finally_callback| callback.
@@ -350,33 +393,31 @@ class Promise {
static_assert(std::is_void<CallbackArgT>::value,
"|finally_callback| callback must have no arguments");
+ internal::PromiseExecutor::Data executor_data(
+ in_place_type_t<internal::FinallyExecutor<
+ OnceCallback<typename CallbackTraits::ReturnType()>,
+ Resolved<ReturnedPromiseResolveT>,
+ Rejected<ReturnedPromiseRejectT>>>(),
+ internal::ToCallbackBase(std::move(finally_callback)));
+
return Promise<ReturnedPromiseResolveT, ReturnedPromiseRejectT>(
- internal::AbstractPromise::Create(
- std::move(task_runner), from_here,
- std::make_unique<internal::AbstractPromise::AdjacencyList>(
- abstract_promise_),
- RejectPolicy::kMustCatchRejection,
- internal::AbstractPromise::ConstructWith<
- internal::DependentList::ConstructUnresolved,
- internal::FinallyExecutor<
- OnceCallback<typename CallbackTraits::ReturnType()>,
- Resolved<ReturnedPromiseResolveT>,
- Rejected<ReturnedPromiseRejectT>>>(),
- std::forward<FinallyCb>(finally_callback)));
+ ConstructAbstractPromiseWithSinglePrerequisite(
+ task_runner, from_here, abstract_promise_.get(),
+ std::move(executor_data)));
}
template <typename FinallyCb>
auto FinallyOn(const TaskTraits& traits,
const Location& from_here,
FinallyCb&& finally_callback) noexcept {
- return FinallyOn(CreateTaskRunnerWithTraits(traits), from_here,
+ return FinallyOn(CreateTaskRunner(traits), from_here,
std::move(finally_callback));
}
template <typename FinallyCb>
auto FinallyHere(const Location& from_here,
FinallyCb&& finally_callback) noexcept {
- return FinallyOn(SequencedTaskRunnerHandle::Get(), from_here,
+ return FinallyOn(internal::GetCurrentSequence(), from_here,
std::move(finally_callback));
}
@@ -384,14 +425,16 @@ class Promise {
NOINLINE static Promise<ResolveType, RejectType> CreateResolved(
const Location& from_here,
Args&&... args) noexcept {
+ internal::PromiseExecutor::Data executor_data(
+ in_place_type_t<internal::NoOpPromiseExecutor>(),
+ /* can_resolve */ true,
+ /* can_reject */ false);
+
scoped_refptr<internal::AbstractPromise> promise(
internal::AbstractPromise::Create(
nullptr, from_here, nullptr, RejectPolicy::kMustCatchRejection,
- internal::AbstractPromise::ConstructWith<
- internal::DependentList::ConstructResolved,
- internal::NoOpPromiseExecutor>(),
- /* can_resolve */ true,
- /* can_reject */ false));
+ internal::DependentList::ConstructResolved(),
+ std::move(executor_data)));
promise->emplace(in_place_type_t<Resolved<ResolveType>>(),
std::forward<Args>(args)...);
return Promise<ResolveType, RejectType>(std::move(promise));
@@ -401,14 +444,16 @@ class Promise {
NOINLINE static Promise<ResolveType, RejectType> CreateRejected(
const Location& from_here,
Args&&... args) noexcept {
+ internal::PromiseExecutor::Data executor_data(
+ in_place_type_t<internal::NoOpPromiseExecutor>(),
+ /* can_resolve */ false,
+ /* can_reject */ true);
+
scoped_refptr<internal::AbstractPromise> promise(
internal::AbstractPromise::Create(
nullptr, from_here, nullptr, RejectPolicy::kMustCatchRejection,
- internal::AbstractPromise::ConstructWith<
- internal::DependentList::ConstructRejected,
- internal::NoOpPromiseExecutor>(),
- /* can_resolve */ false,
- /* can_reject */ true));
+ internal::DependentList::ConstructResolved(),
+ std::move(executor_data)));
promise->emplace(in_place_type_t<Rejected<RejectType>>(),
std::forward<Args>(args)...);
return Promise<ResolveType, RejectType>(std::move(promise));
@@ -459,20 +504,24 @@ class ManualPromiseResolver {
ManualPromiseResolver(
const Location& from_here,
- RejectPolicy reject_policy = RejectPolicy::kMustCatchRejection)
- : promise_(SequencedTaskRunnerHandle::Get(), from_here, reject_policy) {}
+ RejectPolicy reject_policy = RejectPolicy::kMustCatchRejection) {
+ promise_ = Promise<ResolveType, RejectType>(
+ internal::ConstructManualPromiseResolverPromise(
+ from_here, reject_policy,
+ /* can_resolve */ !std::is_same<ResolveType, NoResolve>::value,
+ /* can_reject */ !std::is_same<RejectType, NoReject>::value));
+ }
+
+ ~ManualPromiseResolver() = default;
- ~ManualPromiseResolver() {
- // If the promise wasn't resolved or rejected, then cancel it to make sure
- // we don't leak memory.
- if (!promise_.abstract_promise_->IsSettled())
- promise_.abstract_promise_->OnCanceled();
+ void Resolve(Promise<ResolveType, RejectType> promise) noexcept {
+ promise_.abstract_promise_->emplace(std::move(promise.abstract_promise_));
+ promise_.abstract_promise_->OnResolved();
}
template <typename... Args>
void Resolve(Args&&... arg) noexcept {
- DCHECK(!promise_.abstract_promise_->IsResolved());
- DCHECK(!promise_.abstract_promise_->IsRejected());
+ DCHECK(!promise_.abstract_promise_->IsSettled());
static_assert(!std::is_same<NoResolve, ResolveType>::value,
"Can't resolve a NoResolve promise.");
promise_.abstract_promise_->emplace(
@@ -482,8 +531,7 @@ class ManualPromiseResolver {
template <typename... Args>
void Reject(Args&&... arg) noexcept {
- DCHECK(!promise_.abstract_promise_->IsResolved());
- DCHECK(!promise_.abstract_promise_->IsRejected());
+ DCHECK(!promise_.abstract_promise_->IsSettled());
static_assert(!std::is_same<NoReject, RejectType>::value,
"Can't reject a NoReject promise.");
promise_.abstract_promise_->emplace(
@@ -585,22 +633,25 @@ class Promises {
std::tuple<internal::ToNonVoidT<Resolve>...>;
using ReturnedPromiseRejectT = Reject;
- std::vector<internal::AbstractPromise::AdjacencyListNode> prerequisite_list(
+ std::vector<internal::DependentList::Node> prerequisite_list(
sizeof...(promises));
int i = 0;
for (auto&& p : {promises.abstract_promise_...}) {
- prerequisite_list[i++].prerequisite = std::move(p);
+ prerequisite_list[i++].SetPrerequisite(p.get());
}
+
+ internal::PromiseExecutor::Data executor_data(
+ (in_place_type_t<internal::AllTuplePromiseExecutor<
+ ReturnedPromiseResolveT, ReturnedPromiseRejectT>>()));
+
return Promise<ReturnedPromiseResolveT, ReturnedPromiseRejectT>(
internal::AbstractPromise::Create(
nullptr, from_here,
std::make_unique<internal::AbstractPromise::AdjacencyList>(
std::move(prerequisite_list)),
RejectPolicy::kMustCatchRejection,
- internal::AbstractPromise::ConstructWith<
- internal::DependentList::ConstructUnresolved,
- internal::AllTuplePromiseExecutor<ReturnedPromiseResolveT,
- ReturnedPromiseRejectT>>()));
+ internal::DependentList::ConstructUnresolved(),
+ std::move(executor_data)));
}
template <typename Resolve, typename Reject>
diff --git a/chromium/base/task/promise/promise_executor.cc b/chromium/base/task/promise/promise_executor.cc
new file mode 100644
index 00000000000..5d8642f2cee
--- /dev/null
+++ b/chromium/base/task/promise/promise_executor.cc
@@ -0,0 +1,50 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/promise/promise_executor.h"
+
+namespace base {
+namespace internal {
+
+PromiseExecutor::~PromiseExecutor() {
+ if (data_.vtable_)
+ data_.vtable_->destructor(data_.storage_.array);
+ data_.vtable_ = nullptr;
+}
+
+PromiseExecutor::PrerequisitePolicy PromiseExecutor::GetPrerequisitePolicy()
+ const {
+ return data_.vtable_->get_prerequisite_policy(data_.storage_.array);
+}
+
+bool PromiseExecutor::IsCancelled() const {
+ return data_.vtable_->is_cancelled(data_.storage_.array);
+}
+
+#if DCHECK_IS_ON()
+PromiseExecutor::ArgumentPassingType
+PromiseExecutor::ResolveArgumentPassingType() const {
+ return data_.vtable_->resolve_argument_passing_type(data_.storage_.array);
+}
+
+PromiseExecutor::ArgumentPassingType
+PromiseExecutor::RejectArgumentPassingType() const {
+ return data_.vtable_->reject_argument_passing_type(data_.storage_.array);
+}
+
+bool PromiseExecutor::CanResolve() const {
+ return data_.vtable_->can_resolve(data_.storage_.array);
+}
+
+bool PromiseExecutor::CanReject() const {
+ return data_.vtable_->can_reject(data_.storage_.array);
+}
+#endif
+
+void PromiseExecutor::Execute(AbstractPromise* promise) {
+ return data_.vtable_->execute(data_.storage_.array, promise);
+}
+
+} // namespace internal
+} // namespace base
diff --git a/chromium/base/task/promise/promise_executor.h b/chromium/base/task/promise/promise_executor.h
new file mode 100644
index 00000000000..f893a2cefef
--- /dev/null
+++ b/chromium/base/task/promise/promise_executor.h
@@ -0,0 +1,222 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_PROMISE_PROMISE_EXECUTOR_H_
+#define BASE_TASK_PROMISE_PROMISE_EXECUTOR_H_
+
+#include "base/base_export.h"
+#include "base/containers/unique_any.h"
+#include "base/logging.h"
+
+namespace base {
+namespace internal {
+class AbstractPromise;
+
+// Unresolved promises have an executor which invokes one of the callbacks
+// associated with the promise. Once the callback has been invoked the
+// Executor is destroyed.
+//
+// Ideally Executor would be a pure virtual class, but we want to store these
+// inline to reduce the number of memory allocations (small object
+// optimization). The problem is even though placement new returns the same
+// address it was allocated at, you have to use the returned pointer. Casting
+// the buffer to the derived class is undefined behavior. STL implementations
+// usually store an extra pointer, but here we have opted for implementing
+// our own VTable to save a little bit of memory.
+class BASE_EXPORT PromiseExecutor {
+ private:
+ static constexpr size_t MaxSize = sizeof(void*) * 2;
+ struct VTable;
+
+ public:
+ // We could just construct Executor in place, but that means templates need
+ // to inline the AbstractPromise constructor which we'd like to avoid due to
+ // binary size concerns. Despite containing refcounted objects, Data is
+ // intended to be memcopied into the Executor and it deliberately does not
+ // have a destructor. The type erasure provided by Executor allows us to
+ // move the AbstractPromise construction out of line.
+ class Data {
+ public:
+ // Constructs |Derived| in place.
+ template <typename Derived, typename... Args>
+ explicit Data(in_place_type_t<Derived>, Args&&... args) {
+ static_assert(sizeof(Derived) <= MaxSize, "Derived is too big");
+ static_assert(sizeof(PromiseExecutor) <= sizeof(AnyInternal::InlineAlloc),
+ "Executor is too big");
+ vtable_ = &VTableHelper<Derived>::vtable_;
+ new (storage_.array) Derived(std::forward<Args>(args)...);
+ }
+
+ Data(Data&& other) noexcept
+ : vtable_(other.vtable_), storage_(other.storage_) {
+#if DCHECK_IS_ON()
+ other.vtable_ = nullptr;
+#endif
+ }
+
+ Data(const Data& other) = delete;
+
+ ~Data() { DCHECK_EQ(vtable_, nullptr); }
+
+ private:
+ friend class PromiseExecutor;
+
+ const VTable* vtable_;
+ struct {
+ char array[MaxSize];
+ } storage_;
+ };
+
+ // Caution it's an error to use |data| after this.
+ explicit PromiseExecutor(Data&& data) : data_(std::move(data)) {}
+
+ PromiseExecutor(PromiseExecutor&& other) noexcept
+ : data_(std::move(other.data_)) {
+ other.data_.vtable_ = nullptr;
+ }
+
+ PromiseExecutor(const PromiseExecutor& other) = delete;
+
+ ~PromiseExecutor();
+
+ PromiseExecutor& operator=(const PromiseExecutor& other) = delete;
+
+ // Controls whether or not a promise should wait for its prerequisites
+ // before becoming eligible for execution.
+ enum class PrerequisitePolicy : uint8_t {
+ // Wait for all prerequisites to resolve (or any to reject) before
+ // becoming eligible for execution. If any prerequisites are canceled it
+ // will be canceled too.
+ kAll,
+
+ // Wait for any prerequisite to resolve or reject before becoming eligible
+ // for execution. If all prerequisites are canceled it will be canceled
+ // too.
+ kAny,
+
+ // Never become eligible for execution. Cancellation is ignored.
+ kNever,
+ };
+
+ // Returns the associated PrerequisitePolicy.
+ PrerequisitePolicy GetPrerequisitePolicy() const;
+
+ // NB if there is both a resolve and a reject executor we require them to
+ // be both canceled at the same time.
+ bool IsCancelled() const;
+
+ // Describes an executor callback.
+ enum class ArgumentPassingType : uint8_t {
+ // No callback. E.g. the RejectArgumentPassingType in a promise with a
+ // resolve callback but no reject callback.
+ kNoCallback,
+
+ // Executor callback argument passed by value or by reference.
+ kNormal,
+
+ // Executor callback argument passed by r-value reference.
+ kMove,
+ };
+
+#if DCHECK_IS_ON()
+ // Returns details of the resolve and reject executor callbacks if any. This
+ // data is used to diagnose double moves and missing catches.
+ ArgumentPassingType ResolveArgumentPassingType() const;
+ ArgumentPassingType RejectArgumentPassingType() const;
+ bool CanResolve() const;
+ bool CanReject() const;
+#endif
+
+  // Invokes the associated callback for |promise|. If the callback was
+ // cancelled it should call |promise->OnCanceled()|. If the callback
+ // resolved it should store the resolve result via |promise->emplace()| and
+ // call |promise->OnResolved()|. If the callback was rejected it should
+ // store the reject result in |promise->state()| and call
+  // |promise->OnRejected()|.
+ // Caution the Executor will be destructed when |promise->state()| is
+ // written to.
+ void Execute(AbstractPromise* promise);
+
+ private:
+ struct VTable {
+ void (*destructor)(void* self);
+ PrerequisitePolicy (*get_prerequisite_policy)(const void* self);
+ bool (*is_cancelled)(const void* self);
+#if DCHECK_IS_ON()
+ ArgumentPassingType (*resolve_argument_passing_type)(const void* self);
+ ArgumentPassingType (*reject_argument_passing_type)(const void* self);
+ bool (*can_resolve)(const void* self);
+ bool (*can_reject)(const void* self);
+#endif
+ void (*execute)(void* self, AbstractPromise* promise);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(VTable);
+ };
+
+ template <typename DerivedType>
+ struct VTableHelper {
+ VTableHelper(const VTableHelper& other) = delete;
+ VTableHelper& operator=(const VTableHelper& other) = delete;
+
+ static void Destructor(void* self) {
+ static_cast<DerivedType*>(self)->~DerivedType();
+ }
+
+ static PrerequisitePolicy GetPrerequisitePolicy(const void* self) {
+ return static_cast<const DerivedType*>(self)->GetPrerequisitePolicy();
+ }
+
+ static bool IsCancelled(const void* self) {
+ return static_cast<const DerivedType*>(self)->IsCancelled();
+ }
+
+#if DCHECK_IS_ON()
+ static ArgumentPassingType ResolveArgumentPassingType(const void* self) {
+ return static_cast<const DerivedType*>(self)
+ ->ResolveArgumentPassingType();
+ }
+
+ static ArgumentPassingType RejectArgumentPassingType(const void* self) {
+ return static_cast<const DerivedType*>(self)->RejectArgumentPassingType();
+ }
+
+ static bool CanResolve(const void* self) {
+ return static_cast<const DerivedType*>(self)->CanResolve();
+ }
+
+ static bool CanReject(const void* self) {
+ return static_cast<const DerivedType*>(self)->CanReject();
+ }
+#endif
+
+ static void Execute(void* self, AbstractPromise* promise) {
+ return static_cast<DerivedType*>(self)->Execute(promise);
+ }
+
+ static constexpr VTable vtable_ = {
+ &VTableHelper::Destructor,
+ &VTableHelper::GetPrerequisitePolicy,
+ &VTableHelper::IsCancelled,
+#if DCHECK_IS_ON()
+ &VTableHelper::ResolveArgumentPassingType,
+ &VTableHelper::RejectArgumentPassingType,
+ &VTableHelper::CanResolve,
+ &VTableHelper::CanReject,
+#endif
+ &VTableHelper::Execute,
+ };
+ };
+
+ Data data_;
+};
+
+// static
+template <typename T>
+const PromiseExecutor::VTable PromiseExecutor::VTableHelper<T>::vtable_;
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_TASK_PROMISE_PROMISE_EXECUTOR_H_
diff --git a/chromium/base/task/promise/promise_unittest.cc b/chromium/base/task/promise/promise_unittest.cc
index 134720d91f6..a892f167f28 100644
--- a/chromium/base/task/promise/promise_unittest.cc
+++ b/chromium/base/task/promise/promise_unittest.cc
@@ -2,18 +2,22 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "base/task/promise/promise.h"
+
#include <memory>
#include <string>
#include "base/bind.h"
#include "base/run_loop.h"
-#include "base/task/promise/promise.h"
+#include "base/task/post_task.h"
#include "base/test/bind_test_util.h"
#include "base/test/do_nothing_promise.h"
#include "base/test/gtest_util.h"
#include "base/test/scoped_task_environment.h"
#include "base/test/test_mock_time_task_runner.h"
+#include "base/threading/sequenced_task_runner_handle.h"
#include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
#include "base/values.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -57,7 +61,7 @@ class MockObject {
struct DummyError {};
struct Cancelable {
- Cancelable() : weak_ptr_factory(this) {}
+ Cancelable() {}
void LogTask(std::vector<std::string>* log, std::string value) {
log->push_back(value);
@@ -65,7 +69,7 @@ struct Cancelable {
void NopTask() {}
- WeakPtrFactory<Cancelable> weak_ptr_factory;
+ WeakPtrFactory<Cancelable> weak_ptr_factory{this};
};
} // namespace
@@ -130,6 +134,10 @@ TEST_F(PromiseTest, GetResolveCallbackThenWithConstInt) {
TEST_F(PromiseTest, GetResolveCallbackMultipleArgs) {
ManualPromiseResolver<std::tuple<int, bool, float>> p(FROM_HERE);
+ static_assert(
+ std::is_same<OnceCallback<void(int, bool, float)>,
+ decltype(p.GetResolveCallback<int, bool, float>())>::value,
+ "");
p.GetResolveCallback<int, bool, float>().Run(123, true, 1.5f);
RunLoop run_loop;
@@ -144,6 +152,24 @@ TEST_F(PromiseTest, GetResolveCallbackMultipleArgs) {
run_loop.Run();
}
+TEST_F(PromiseTest, ManualPromiseResolverCallbackLifetimeCanOutliveParent) {
+ OnceCallback<void(int)> resolve_cb;
+
+ RunLoop run_loop;
+ {
+ ManualPromiseResolver<int> p(FROM_HERE);
+ resolve_cb = p.GetResolveCallback();
+
+ p.promise().ThenHere(FROM_HERE, BindLambdaForTesting([&](int result) {
+ EXPECT_EQ(123, result);
+ run_loop.Quit();
+ }));
+ }
+
+ std::move(resolve_cb).Run(123);
+ run_loop.Run();
+}
+
TEST_F(PromiseTest, ResolveWithTuple) {
ManualPromiseResolver<void> p(FROM_HERE);
p.Resolve();
@@ -1030,6 +1056,96 @@ TEST_F(PromiseTest, CurriedIntPromise) {
run_loop.Run();
}
+TEST_F(PromiseTest, CurriedIntPromiseChain) {
+ Promise<int> p = Promise<int>::CreateResolved(FROM_HERE, 1000);
+
+ ManualPromiseResolver<int> promise_resolver_1(FROM_HERE);
+ ManualPromiseResolver<int> promise_resolver_2(FROM_HERE);
+ promise_resolver_2.Resolve(promise_resolver_1.promise());
+ promise_resolver_1.Resolve(123);
+
+ RunLoop run_loop;
+ p.ThenHere(FROM_HERE, BindLambdaForTesting([&](int result) {
+ EXPECT_EQ(1000, result);
+ return promise_resolver_2.promise();
+ }))
+ .ThenHere(FROM_HERE, BindLambdaForTesting([&](int result) {
+ EXPECT_EQ(123, result);
+ run_loop.Quit();
+ }));
+
+ run_loop.Run();
+}
+
+TEST_F(PromiseTest, CurriedIntPromiseChain2) {
+ Promise<int> p1 = Promise<int>::CreateResolved(FROM_HERE, 1000);
+ Promise<int> p2 = Promise<int>::CreateResolved(FROM_HERE, 789);
+ Promise<int> then2;
+
+ {
+ Promise<int> then1 =
+ Promise<int>::CreateResolved(FROM_HERE, 789)
+ .ThenHere(FROM_HERE, BindLambdaForTesting([&]() { return p2; }));
+ then2 = Promise<int>::CreateResolved(FROM_HERE, 789)
+ .ThenHere(
+ FROM_HERE,
+ BindOnce([&](Promise<int> then1) { return then1; }, then1));
+ }
+
+ RunLoop run_loop;
+ p1.ThenHere(FROM_HERE, BindLambdaForTesting([&](int result) {
+ EXPECT_EQ(1000, result);
+ return then2;
+ }))
+ .ThenHere(FROM_HERE, BindLambdaForTesting([&](int result) {
+ EXPECT_EQ(789, result);
+ run_loop.Quit();
+ }));
+
+ run_loop.Run();
+}
+
+TEST_F(PromiseTest, CurriedIntPromiseChainThenAddedAfterInitialResolve) {
+ ManualPromiseResolver<int> promise_resolver_1(FROM_HERE);
+ ManualPromiseResolver<int> promise_resolver_2(FROM_HERE);
+ ManualPromiseResolver<int> promise_resolver_3(FROM_HERE);
+ promise_resolver_2.Resolve(promise_resolver_1.promise());
+ promise_resolver_3.Resolve(promise_resolver_2.promise());
+
+ RunLoop run_loop;
+ promise_resolver_3.promise().ThenHere(FROM_HERE,
+ BindLambdaForTesting([&](int result) {
+ EXPECT_EQ(123, result);
+ run_loop.Quit();
+ }));
+
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE,
+ BindLambdaForTesting([&]() { promise_resolver_1.Resolve(123); }));
+
+ run_loop.Run();
+}
+
+TEST_F(PromiseTest, CurriedVoidPromiseModified) {
+ for (size_t i = 0; i < 1000; ++i) {
+ Promise<void> p = Promise<void>::CreateResolved(FROM_HERE);
+ std::unique_ptr<ManualPromiseResolver<int>> promise_resolver =
+ std::make_unique<ManualPromiseResolver<int>>(FROM_HERE);
+ RunLoop run_loop;
+ p.ThenHere(FROM_HERE, BindOnce([](Promise<int> promise) { return promise; },
+ promise_resolver->promise()))
+ .ThenHere(FROM_HERE, base::BindOnce([](int v) { EXPECT_EQ(v, 42); }))
+ .ThenHere(FROM_HERE, run_loop.QuitClosure());
+ PostTaskWithTraits(FROM_HERE, {ThreadPool()},
+ base::BindLambdaForTesting([&]() {
+ promise_resolver->Resolve(42);
+ promise_resolver.reset();
+ }));
+ run_loop.Run();
+ scoped_task_environment_.RunUntilIdle();
+ }
+}
+
TEST_F(PromiseTest, PromiseResultReturningAPromise) {
Promise<int> p = Promise<int>::CreateResolved(FROM_HERE, 1000);
ManualPromiseResolver<int> promise_resolver(FROM_HERE);
@@ -1559,7 +1675,7 @@ TEST_F(PromiseTest, ManualPromiseResolverRepeatingResolveCallbackCalledTwice) {
#if DCHECK_IS_ON()
ManualPromiseResolver<void, void> promise_resolver(
FROM_HERE, RejectPolicy::kCatchNotRequired);
- RepeatingCallback<void(void)> resolve =
+ RepeatingCallback<void()> resolve =
promise_resolver.GetRepeatingResolveCallback();
resolve.Run();
@@ -1572,7 +1688,7 @@ TEST_F(PromiseTest, ManualPromiseResolverRepeatingRejectCallbackCalledTwice) {
#if DCHECK_IS_ON()
ManualPromiseResolver<void, void> promise_resolver(
FROM_HERE, RejectPolicy::kCatchNotRequired);
- RepeatingCallback<void(void)> resolve =
+ RepeatingCallback<void()> resolve =
promise_resolver.GetRepeatingRejectCallback();
resolve.Run();
@@ -1620,16 +1736,15 @@ TEST_F(MultiThreadedPromiseTest, SimpleThreadHopping) {
thread_c_->task_runner(), FROM_HERE, BindLambdaForTesting([&]() {
EXPECT_TRUE(thread_c_->task_runner()->RunsTasksInCurrentSequence());
}))
- .ThenHere(
- FROM_HERE, BindLambdaForTesting([&]() {
- EXPECT_FALSE(
- thread_a_->task_runner()->RunsTasksInCurrentSequence());
- EXPECT_FALSE(
- thread_b_->task_runner()->RunsTasksInCurrentSequence());
- EXPECT_FALSE(
- thread_c_->task_runner()->RunsTasksInCurrentSequence());
- run_loop.Quit();
- }));
+ .ThenHere(FROM_HERE, BindLambdaForTesting([&]() {
+ EXPECT_FALSE(
+ thread_a_->task_runner()->RunsTasksInCurrentSequence());
+ EXPECT_FALSE(
+ thread_b_->task_runner()->RunsTasksInCurrentSequence());
+ EXPECT_FALSE(
+ thread_c_->task_runner()->RunsTasksInCurrentSequence());
+ run_loop.Quit();
+ }));
promise_resolver.Resolve();
run_loop.Run();
@@ -1673,25 +1788,74 @@ TEST_F(MultiThreadedPromiseTest, CrossThreadThens) {
run_loop.Run();
}
+TEST_F(MultiThreadedPromiseTest, CrossThreadThensOrdering) {
+ constexpr int kNumThenTasks = 1000;
+ constexpr int kNumRepetitions = 25;
+ for (int repetition = 0; repetition < kNumRepetitions; ++repetition) {
+ RunLoop run_loop;
+
+ std::vector<int> order;
+ std::vector<OnceCallback<void()>> then_tasks;
+
+ for (int i = 0; i < kNumThenTasks; ++i) {
+ then_tasks.push_back(
+ BindOnce(BindLambdaForTesting([&order, &run_loop, i]() {
+ order.push_back(i);
+ if (i == (kNumThenTasks - 1)) {
+ run_loop.Quit();
+ }
+ })));
+ }
+
+ ManualPromiseResolver<void> promise_resolver(FROM_HERE);
+ auto resolve_callback = promise_resolver.GetResolveCallback();
+
+ thread_a_->task_runner()->PostTask(
+ FROM_HERE, BindLambdaForTesting([&]() {
+ // Post 500 thens.
+ for (int i = 0; i < kNumThenTasks / 2; ++i) {
+ promise_resolver.promise().ThenOn(
+ thread_c_->task_runner(), FROM_HERE, std::move(then_tasks[i]));
+ }
+
+ // Post a task onto |thread_b| to resolve |promise_resolver|.
+ // This should run at an undefined time yet all the thens should run.
+ thread_b_->task_runner()->PostTask(FROM_HERE,
+ std::move(resolve_callback));
+
+ // Post another 500 thens.
+ for (int i = kNumThenTasks / 2; i < kNumThenTasks; ++i) {
+ promise_resolver.promise().ThenOn(
+ thread_c_->task_runner(), FROM_HERE, std::move(then_tasks[i]));
+ }
+ }));
+
+ run_loop.Run();
+ for (int i = 0; i < kNumThenTasks; ++i) {
+ EXPECT_EQ(order[i], i);
+ }
+ }
+}
+
TEST_F(PromiseTest, ThreadPoolThenChain) {
ManualPromiseResolver<std::vector<size_t>> p(FROM_HERE);
auto main_sequence = SequencedTaskRunnerHandle::Get();
RunLoop run_loop;
p.promise()
- .ThenOn({TaskPriority::USER_BLOCKING}, FROM_HERE,
+ .ThenOn({ThreadPool(), TaskPriority::USER_BLOCKING}, FROM_HERE,
BindLambdaForTesting([&](std::vector<size_t> result) {
EXPECT_FALSE(main_sequence->RunsTasksInCurrentSequence());
result.push_back(1);
return result;
}))
- .ThenOn({TaskPriority::USER_BLOCKING}, FROM_HERE,
+ .ThenOn({ThreadPool(), TaskPriority::USER_BLOCKING}, FROM_HERE,
BindLambdaForTesting([&](std::vector<size_t> result) {
EXPECT_FALSE(main_sequence->RunsTasksInCurrentSequence());
result.push_back(2);
return result;
}))
- .ThenOn({TaskPriority::USER_BLOCKING}, FROM_HERE,
+ .ThenOn({ThreadPool(), TaskPriority::USER_BLOCKING}, FROM_HERE,
BindLambdaForTesting([&](std::vector<size_t> result) {
EXPECT_FALSE(main_sequence->RunsTasksInCurrentSequence());
result.push_back(3);
@@ -1921,17 +2085,20 @@ TEST_F(PromiseTest, AllVoidContainer) {
promises.push_back(mpr4.promise());
RunLoop run_loop;
- Promises::All(FROM_HERE, promises)
- .ThenHere(FROM_HERE, BindLambdaForTesting([&](std::vector<Void> result) {
- EXPECT_EQ(4u, result.size());
- run_loop.Quit();
- }));
+ Promise<void> result =
+ Promises::All(FROM_HERE, promises)
+ .ThenHere(FROM_HERE,
+ BindLambdaForTesting([&]() { run_loop.Quit(); }));
mpr1.Resolve();
mpr2.Resolve();
mpr3.Resolve();
+ RunLoop().RunUntilIdle();
+ EXPECT_FALSE(result.IsResolvedForTesting());
+
mpr4.Resolve();
run_loop.Run();
+ EXPECT_TRUE(result.IsResolvedForTesting());
}
TEST_F(PromiseTest, AllVoidIntContainerReject) {
@@ -1948,7 +2115,7 @@ TEST_F(PromiseTest, AllVoidIntContainerReject) {
RunLoop run_loop;
Promises::All(FROM_HERE, promises)
- .ThenHere(FROM_HERE, BindLambdaForTesting([&](std::vector<Void> result) {
+ .ThenHere(FROM_HERE, BindLambdaForTesting([&]() {
FAIL() << "We shouldn't get here, the promise was rejected!";
run_loop.Quit();
}),
@@ -2037,4 +2204,64 @@ TEST_F(PromiseTest, AllVoidContainerMultipleRejectsAfterExecute) {
mpr4.Reject();
}
+TEST_F(PromiseTest, TakeResolveValueForTesting) {
+ ManualPromiseResolver<void> p1(FROM_HERE);
+
+ Promise<int> p2 =
+ p1.promise().ThenHere(FROM_HERE, BindOnce([]() { return 123; }));
+
+ p1.Resolve();
+
+ EXPECT_EQ(123, p2.TakeResolveValueForTesting());
+}
+
+TEST_F(PromiseTest, TakeResolveValueForTestingMoveOnlyType) {
+ ManualPromiseResolver<void> p1(FROM_HERE);
+
+ Promise<std::unique_ptr<int>> p2 = p1.promise().ThenHere(
+ FROM_HERE, BindOnce([]() { return std::make_unique<int>(123); }));
+
+ p1.Resolve();
+
+ EXPECT_EQ(123, *p2.TakeResolveValueForTesting());
+}
+
+TEST_F(PromiseTest, TakeResolveValueForTestingNotResolved) {
+ ManualPromiseResolver<int, int> p1(FROM_HERE,
+ RejectPolicy::kCatchNotRequired);
+
+ p1.Reject(123);
+
+ EXPECT_DCHECK_DEATH({ p1.promise().TakeResolveValueForTesting(); });
+}
+
+TEST_F(PromiseTest, TakeRejectedValueForTesting) {
+ ManualPromiseResolver<void, void> p1(FROM_HERE);
+
+ Promise<int, int> p2 = p1.promise().ThenHere(
+ FROM_HERE, BindOnce([]() { return Resolved<int>(123); }),
+ BindOnce([]() { return Rejected<int>(456); }));
+
+ p1.Reject();
+
+ EXPECT_EQ(456, p2.TakeRejectValueForTesting());
+}
+
+TEST_F(PromiseTest, TakeRejectedValueForTestingMoveOnlyType) {
+ ManualPromiseResolver<void, std::unique_ptr<int>> p1(FROM_HERE);
+
+ p1.Reject(std::make_unique<int>(456));
+
+ EXPECT_EQ(456, *p1.promise().TakeRejectValueForTesting());
+}
+
+TEST_F(PromiseTest, TakeRejectedValueForTestingNotRejected) {
+ ManualPromiseResolver<int, int> p1(FROM_HERE,
+ RejectPolicy::kCatchNotRequired);
+
+ p1.Resolve(123);
+
+ EXPECT_DCHECK_DEATH({ p1.promise().TakeRejectValueForTesting(); });
+}
+
} // namespace base
diff --git a/chromium/base/task/promise/promise_unittest.nc b/chromium/base/task/promise/promise_unittest.nc
index 261a87f20a2..dca01e02fb0 100644
--- a/chromium/base/task/promise/promise_unittest.nc
+++ b/chromium/base/task/promise/promise_unittest.nc
@@ -5,6 +5,7 @@
// This is a "No Compile Test" suite.
// http://dev.chromium.org/developers/testing/no-compile-tests
+#include "base/task_runner.h"
#include "base/task/promise/promise.h"
#include "base/task/promise/promise_result.h"
@@ -23,7 +24,7 @@ void WontCompile() {
#elif defined(NCTEST_METHOD_RESOLVE_CALLBACK_TYPE_MISSMATCH) // [r"fatal error: static_assert failed .*\"|on_resolve| callback must accept Promise::ResolveType or void\."]
void WontCompile() {
Promise<int, void> p;
- p.ThenHere(FROM_HERE, BindOnce([](bool) { }));
+ p.ThenHere(FROM_HERE, BindOnce([](std::string) { }));
}
#elif defined(NCTEST_METHOD_REJECT_CALLBACK_TYPE_MISSMATCH) // [r"fatal error: static_assert failed .*\"|on_reject| callback must accept Promise::ResolveType or void\."]
void WontCompile() {
diff --git a/chromium/base/task/promise/then_and_catch_executor.cc b/chromium/base/task/promise/then_and_catch_executor.cc
index 003eb574bd9..b30f29cff25 100644
--- a/chromium/base/task/promise/then_and_catch_executor.cc
+++ b/chromium/base/task/promise/then_and_catch_executor.cc
@@ -7,14 +7,6 @@
namespace base {
namespace internal {
-ThenAndCatchExecutorCommon::ThenAndCatchExecutorCommon(
- internal::CallbackBase&& resolve_executor,
- internal::CallbackBase&& reject_executor)
- : resolve_callback_(std::move(resolve_executor)),
- reject_callback_(std::move(reject_executor)) {}
-
-ThenAndCatchExecutorCommon::~ThenAndCatchExecutorCommon() = default;
-
bool ThenAndCatchExecutorCommon::IsCancelled() const {
if (!resolve_callback_.is_null()) {
// If there is both a resolve and a reject executor they must be canceled
@@ -26,9 +18,9 @@ bool ThenAndCatchExecutorCommon::IsCancelled() const {
return reject_callback_.IsCancelled();
}
-AbstractPromise::Executor::PrerequisitePolicy
+PromiseExecutor::PrerequisitePolicy
ThenAndCatchExecutorCommon::GetPrerequisitePolicy() const {
- return AbstractPromise::Executor::PrerequisitePolicy::kAll;
+ return PromiseExecutor::PrerequisitePolicy::kAll;
}
void ThenAndCatchExecutorCommon::Execute(AbstractPromise* promise,
diff --git a/chromium/base/task/promise/then_and_catch_executor.h b/chromium/base/task/promise/then_and_catch_executor.h
index 6cc7bb56b63..da9faf9f882 100644
--- a/chromium/base/task/promise/then_and_catch_executor.h
+++ b/chromium/base/task/promise/then_and_catch_executor.h
@@ -17,14 +17,18 @@ namespace internal {
// Exists to reduce template bloat.
class BASE_EXPORT ThenAndCatchExecutorCommon {
public:
- ThenAndCatchExecutorCommon(CallbackBase&& resolve_callback,
- CallbackBase&& reject_callback);
+ ThenAndCatchExecutorCommon(internal::CallbackBase&& resolve_executor,
+ internal::CallbackBase&& reject_executor) noexcept
+ : resolve_callback_(std::move(resolve_executor)),
+ reject_callback_(std::move(reject_executor)) {
+ DCHECK(!resolve_callback_.is_null() || !reject_callback_.is_null());
+ }
- ~ThenAndCatchExecutorCommon();
+ ~ThenAndCatchExecutorCommon() = default;
- // AbstractPromise::Executor:
+ // PromiseExecutor:
bool IsCancelled() const;
- AbstractPromise::Executor::PrerequisitePolicy GetPrerequisitePolicy() const;
+ PromiseExecutor::PrerequisitePolicy GetPrerequisitePolicy() const;
using ExecuteCallback = void (*)(AbstractPromise* prerequisite,
AbstractPromise* promise,
@@ -47,28 +51,6 @@ class BASE_EXPORT ThenAndCatchExecutorCommon {
// Tag signals no callback which is used to eliminate dead code.
struct NoCallback {};
-struct CouldResolveOrReject {};
-struct CanOnlyResolve {};
-struct CanOnlyReject {};
-
-template <bool can_resolve, bool can_reject>
-struct CheckResultHelper;
-
-template <>
-struct CheckResultHelper<true, false> {
- using TagType = CanOnlyResolve;
-};
-
-template <>
-struct CheckResultHelper<true, true> {
- using TagType = CouldResolveOrReject;
-};
-
-template <>
-struct CheckResultHelper<false, true> {
- using TagType = CanOnlyReject;
-};
-
template <typename ResolveOnceCallback,
typename RejectOnceCallback,
typename ArgResolve,
@@ -86,20 +68,13 @@ class ThenAndCatchExecutor {
using PrerequisiteCouldReject =
std::integral_constant<bool, !std::is_same<ArgReject, NoCallback>::value>;
- ThenAndCatchExecutor(ResolveOnceCallback&& resolve_callback,
- RejectOnceCallback&& reject_callback)
- : common_(std::move(resolve_callback), std::move(reject_callback)) {
- static_assert(sizeof(CallbackBase) == sizeof(ResolveOnceCallback),
- "We assume it's possible to cast from CallbackBase to "
- "ResolveOnceCallback");
- static_assert(sizeof(CallbackBase) == sizeof(RejectOnceCallback),
- "We assume it's possible to cast from CallbackBase to "
- "RejectOnceCallback");
- }
+ ThenAndCatchExecutor(CallbackBase&& resolve_callback,
+ CallbackBase&& reject_callback) noexcept
+ : common_(std::move(resolve_callback), std::move(reject_callback)) {}
bool IsCancelled() const { return common_.IsCancelled(); }
- AbstractPromise::Executor::PrerequisitePolicy GetPrerequisitePolicy() const {
+ PromiseExecutor::PrerequisitePolicy GetPrerequisitePolicy() const {
return common_.GetPrerequisitePolicy();
}
@@ -110,17 +85,15 @@ class ThenAndCatchExecutor {
}
#if DCHECK_IS_ON()
- AbstractPromise::Executor::ArgumentPassingType ResolveArgumentPassingType()
- const {
+ PromiseExecutor::ArgumentPassingType ResolveArgumentPassingType() const {
return common_.resolve_callback_.is_null()
- ? AbstractPromise::Executor::ArgumentPassingType::kNoCallback
+ ? PromiseExecutor::ArgumentPassingType::kNoCallback
: CallbackTraits<ResolveOnceCallback>::argument_passing_type;
}
- AbstractPromise::Executor::ArgumentPassingType RejectArgumentPassingType()
- const {
+ PromiseExecutor::ArgumentPassingType RejectArgumentPassingType() const {
return common_.reject_callback_.is_null()
- ? AbstractPromise::Executor::ArgumentPassingType::kNoCallback
+ ? PromiseExecutor::ArgumentPassingType::kNoCallback
: CallbackTraits<RejectOnceCallback>::argument_passing_type;
}
@@ -164,9 +137,8 @@ class ThenAndCatchExecutor {
RejectStorage>::Run(std::move(*resolve_callback), prerequisite,
promise);
- using CheckResultTagType = typename CheckResultHelper<
- PromiseCallbackTraits<ResolveReturnT>::could_resolve,
- PromiseCallbackTraits<ResolveReturnT>::could_reject>::TagType;
+ using CheckResultTagType =
+ typename PromiseCallbackTraits<ResolveReturnT>::TagType;
CheckResultType(promise, CheckResultTagType());
}
@@ -188,9 +160,8 @@ class ThenAndCatchExecutor {
RejectStorage>::Run(std::move(*reject_callback), prerequisite,
promise);
- using CheckResultTagType = typename CheckResultHelper<
- PromiseCallbackTraits<RejectReturnT>::could_resolve,
- PromiseCallbackTraits<RejectReturnT>::could_reject>::TagType;
+ using CheckResultTagType =
+ typename PromiseCallbackTraits<RejectReturnT>::TagType;
CheckResultType(promise, CheckResultTagType());
}
diff --git a/chromium/base/task/sequence_manager/OWNERS b/chromium/base/task/sequence_manager/OWNERS
index 2ef3011d62b..b9ec8dfdc9b 100644
--- a/chromium/base/task/sequence_manager/OWNERS
+++ b/chromium/base/task/sequence_manager/OWNERS
@@ -1,5 +1,6 @@
altimin@chromium.org
alexclarke@chromium.org
+carlscab@google.com
skyostil@chromium.org
# TEAM: scheduler-dev@chromium.org
diff --git a/chromium/base/task/sequence_manager/enqueue_order.h b/chromium/base/task/sequence_manager/enqueue_order.h
index fac1d179b04..0fc491f156f 100644
--- a/chromium/base/task/sequence_manager/enqueue_order.h
+++ b/chromium/base/task/sequence_manager/enqueue_order.h
@@ -7,14 +7,12 @@
#include <stdint.h>
-#include <atomic>
-
-#include "base/base_export.h"
-#include "base/macros.h"
-
namespace base {
namespace sequence_manager {
+
namespace internal {
+class EnqueueOrderGenerator;
+}
// 64-bit number which is used to order tasks.
// SequenceManager assumes this number will never overflow.
@@ -34,25 +32,11 @@ class EnqueueOrder {
return EnqueueOrder(value);
}
- // EnqueueOrder can't be created from a raw number in non-test code.
- // Generator is used to create it with strictly monotonic guarantee.
- class BASE_EXPORT Generator {
- public:
- Generator();
- ~Generator();
-
- // Can be called from any thread.
- EnqueueOrder GenerateNext() {
- return EnqueueOrder(std::atomic_fetch_add_explicit(
- &counter_, uint64_t(1), std::memory_order_relaxed));
- }
-
- private:
- std::atomic<uint64_t> counter_;
- DISALLOW_COPY_AND_ASSIGN(Generator);
- };
-
private:
+ // EnqueueOrderGenerator is the only class allowed to create an EnqueueOrder
+ // with a non-default constructor.
+ friend class internal::EnqueueOrderGenerator;
+
explicit EnqueueOrder(uint64_t value) : value_(value) {}
enum SpecialValues : uint64_t {
@@ -64,7 +48,6 @@ class EnqueueOrder {
uint64_t value_;
};
-} // namespace internal
} // namespace sequence_manager
} // namespace base
diff --git a/chromium/base/task/sequence_manager/enqueue_order.cc b/chromium/base/task/sequence_manager/enqueue_order_generator.cc
index 066ef0382ee..50d37a3cf07 100644
--- a/chromium/base/task/sequence_manager/enqueue_order.cc
+++ b/chromium/base/task/sequence_manager/enqueue_order_generator.cc
@@ -2,15 +2,16 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "base/task/sequence_manager/enqueue_order.h"
+#include "base/task/sequence_manager/enqueue_order_generator.h"
namespace base {
namespace sequence_manager {
namespace internal {
-EnqueueOrder::Generator::Generator() : counter_(kFirst) {}
+EnqueueOrderGenerator::EnqueueOrderGenerator()
+ : counter_(EnqueueOrder::kFirst) {}
-EnqueueOrder::Generator::~Generator() = default;
+EnqueueOrderGenerator::~EnqueueOrderGenerator() = default;
} // namespace internal
} // namespace sequence_manager
diff --git a/chromium/base/task/sequence_manager/enqueue_order_generator.h b/chromium/base/task/sequence_manager/enqueue_order_generator.h
new file mode 100644
index 00000000000..36d19e2b156
--- /dev/null
+++ b/chromium/base/task/sequence_manager/enqueue_order_generator.h
@@ -0,0 +1,43 @@
+
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_ENQUEUE_ORDER_GENERATOR_H_
+#define BASE_TASK_SEQUENCE_MANAGER_ENQUEUE_ORDER_GENERATOR_H_
+
+#include <stdint.h>
+
+#include <atomic>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/task/sequence_manager/enqueue_order.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+// EnqueueOrder can't be created from a raw number in non-test code.
+// EnqueueOrderGenerator is used to create it with strictly monotonic guarantee.
+class BASE_EXPORT EnqueueOrderGenerator {
+ public:
+ EnqueueOrderGenerator();
+ ~EnqueueOrderGenerator();
+
+ // Can be called from any thread.
+ EnqueueOrder GenerateNext() {
+ return EnqueueOrder(std::atomic_fetch_add_explicit(
+ &counter_, uint64_t(1), std::memory_order_relaxed));
+ }
+
+ private:
+ std::atomic<uint64_t> counter_;
+ DISALLOW_COPY_AND_ASSIGN(EnqueueOrderGenerator);
+};
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_ENQUEUE_ORDER_GENERATOR_H_
diff --git a/chromium/base/task/sequence_manager/sequence_manager.cc b/chromium/base/task/sequence_manager/sequence_manager.cc
index b4fb7db8c27..a7fc2ff3f80 100644
--- a/chromium/base/task/sequence_manager/sequence_manager.cc
+++ b/chromium/base/task/sequence_manager/sequence_manager.cc
@@ -7,6 +7,8 @@
namespace base {
namespace sequence_manager {
+NativeWorkHandle::~NativeWorkHandle() = default;
+
SequenceManager::MetricRecordingSettings::MetricRecordingSettings(
double task_thread_time_sampling_rate)
: task_sampling_rate_for_recording_cpu_time(
@@ -48,6 +50,14 @@ SequenceManager::Settings::Builder::SetAddQueueTimeToTasks(
return *this;
}
+SequenceManager::Settings::Builder&
+SequenceManager::Settings::Builder::SetAntiStarvationLogicForPrioritiesDisabled(
+ bool anti_starvation_logic_for_priorities_disabled_val) {
+ settings_.anti_starvation_logic_for_priorities_disabled =
+ anti_starvation_logic_for_priorities_disabled_val;
+ return *this;
+}
+
#if DCHECK_IS_ON()
SequenceManager::Settings::Builder&
diff --git a/chromium/base/task/sequence_manager/sequence_manager.h b/chromium/base/task/sequence_manager/sequence_manager.h
index bdb8d5997e3..fa198528396 100644
--- a/chromium/base/task/sequence_manager/sequence_manager.h
+++ b/chromium/base/task/sequence_manager/sequence_manager.h
@@ -21,6 +21,18 @@ namespace sequence_manager {
class TimeDomain;
+// Represents outstanding work the sequence underlying a SequenceManager (e.g.,
+// a native system task for drawing the UI). As long as this handle is alive,
+// the work is considered to be pending.
+class NativeWorkHandle {
+ public:
+ virtual ~NativeWorkHandle();
+ NativeWorkHandle(const NativeWorkHandle&) = delete;
+
+ protected:
+ NativeWorkHandle() = default;
+};
+
// SequenceManager manages TaskQueues which have different properties
// (e.g. priority, common task type) multiplexing all posted tasks into
// a single backing sequence (currently bound to a single thread, which is
@@ -75,6 +87,12 @@ class BASE_EXPORT SequenceManager {
// If true, add the timestamp the task got queued to the task.
bool add_queue_time_to_tasks = false;
+ // If true, the scheduler will bypass the priority-based anti-starvation
+ // logic that prevents indefinite starvation of lower priority tasks in the
+ // presence of higher priority tasks by occasionally selecting lower
+ // priority task queues over higher priority task queues.
+ bool anti_starvation_logic_for_priorities_disabled = false;
+
#if DCHECK_IS_ON()
// TODO(alexclarke): Consider adding command line flags to control these.
enum class TaskLogging {
@@ -206,6 +224,17 @@ class BASE_EXPORT SequenceManager {
// Returns a JSON string which describes all pending tasks.
virtual std::string DescribeAllPendingTasks() const = 0;
+ // Indicates that the underlying sequence (e.g., the message pump) has pending
+ // work at priority |priority|. If the priority of the work in this
+ // SequenceManager is lower, it will yield to let the native work run. The
+ // native work is assumed to remain pending while the returned handle is
+ // valid.
+ //
+ // Must be called on the main thread, and the returned handle must also be
+ // deleted on the main thread.
+ virtual std::unique_ptr<NativeWorkHandle> OnNativeWorkPending(
+ TaskQueue::QueuePriority priority) = 0;
+
protected:
virtual std::unique_ptr<internal::TaskQueueImpl> CreateTaskQueueImpl(
const TaskQueue::Spec& spec) = 0;
@@ -227,6 +256,18 @@ class BASE_EXPORT SequenceManager::Settings::Builder {
// Whether or not queueing timestamp will be added to tasks.
Builder& SetAddQueueTimeToTasks(bool add_queue_time_to_tasks);
+ // Sets whether priority-based anti-starvation logic is disabled. By default,
+ // the scheduler uses priority-based anti-starvation logic that prevents
+ // indefinite starvation of lower priority tasks in the presence of higher
+ // priority tasks by occasionally selecting lower priority task queues over
+ // higher priority task queues.
+ //
+ // Note: this does not affect the anti-starvation logic that is in place for
+ // preventing delayed tasks from starving immediate tasks, which is always
+ // enabled.
+ Builder& SetAntiStarvationLogicForPrioritiesDisabled(
+ bool anti_starvation_logic_for_priorities_disabled);
+
#if DCHECK_IS_ON()
// Controls task execution logging.
Builder& SetTaskLogging(TaskLogging task_execution_logging);
diff --git a/chromium/base/task/sequence_manager/sequence_manager_impl.cc b/chromium/base/task/sequence_manager/sequence_manager_impl.cc
index 55d82917a4e..6bfe2aa4e5c 100644
--- a/chromium/base/task/sequence_manager/sequence_manager_impl.cc
+++ b/chromium/base/task/sequence_manager/sequence_manager_impl.cc
@@ -140,6 +140,40 @@ char* PrependHexAddress(char* output, const void* address) {
} // namespace
+class SequenceManagerImpl::NativeWorkHandleImpl : public NativeWorkHandle {
+ public:
+ NativeWorkHandleImpl(SequenceManagerImpl* sequence_manager,
+ TaskQueue::QueuePriority priority)
+ : sequence_manager_(sequence_manager->GetWeakPtr()), priority_(priority) {
+ TRACE_EVENT_ASYNC_BEGIN1("sequence_manager", "NativeWork", this, "priority",
+ TaskQueue::PriorityToString(priority_));
+ sequence_manager_->main_thread_only().pending_native_work.insert(priority_);
+ }
+
+ ~NativeWorkHandleImpl() final {
+ TRACE_EVENT_ASYNC_END0("sequence_manager", "NativeWork", this);
+ if (!sequence_manager_)
+ return;
+ TaskQueue::QueuePriority prev_priority = effective_priority();
+ sequence_manager_->main_thread_only().pending_native_work.erase(priority_);
+ // We should always have at least one instance of pending native work. By
+ // default it is of the lowest priority, which doesn't cause SequenceManager
+ // to yield.
+ DCHECK_GE(sequence_manager_->main_thread_only().pending_native_work.size(),
+ 1u);
+ if (prev_priority != effective_priority())
+ sequence_manager_->ScheduleWork();
+ }
+
+ TaskQueue::QueuePriority effective_priority() const {
+ return *sequence_manager_->main_thread_only().pending_native_work.begin();
+ }
+
+ private:
+ WeakPtr<SequenceManagerImpl> sequence_manager_;
+ const TaskQueue::QueuePriority priority_;
+};
+
// static
SequenceManagerImpl* SequenceManagerImpl::GetCurrent() {
return GetTLSSequenceManagerImpl()->Get();
@@ -157,8 +191,7 @@ SequenceManagerImpl::SequenceManagerImpl(
empty_queues_to_reload_(associated_thread_),
memory_corruption_sentinel_(kMemoryCorruptionSentinelValue),
- main_thread_only_(associated_thread_, settings_),
- weak_factory_(this) {
+ main_thread_only_(associated_thread_, settings_) {
TRACE_EVENT_OBJECT_CREATED_WITH_ID(
TRACE_DISABLED_BY_DEFAULT("sequence_manager"), "SequenceManager", this);
main_thread_only().selector.SetTaskQueueSelectorObserver(this);
@@ -455,8 +488,8 @@ const char* RunTaskTraceNameForPriority(TaskQueue::QueuePriority priority) {
} // namespace
-Optional<PendingTask> SequenceManagerImpl::TakeTask() {
- Optional<PendingTask> task = TakeTaskImpl();
+Optional<Task> SequenceManagerImpl::TakeTask() {
+ Optional<Task> task = TakeTaskImpl();
if (!task)
return base::nullopt;
@@ -520,7 +553,7 @@ void SequenceManagerImpl::LogTaskDebugInfo(
}
#endif // DCHECK_IS_ON() && !defined(OS_NACL)
-Optional<PendingTask> SequenceManagerImpl::TakeTaskImpl() {
+Optional<Task> SequenceManagerImpl::TakeTaskImpl() {
CHECK(Validate());
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
@@ -549,11 +582,12 @@ Optional<PendingTask> SequenceManagerImpl::TakeTaskImpl() {
return nullopt;
// If the head task was canceled, remove it and run the selector again.
- if (work_queue->RemoveAllCanceledTasksFromFront())
+ if (UNLIKELY(work_queue->RemoveAllCanceledTasksFromFront()))
continue;
- if (work_queue->GetFrontTask()->nestable == Nestable::kNonNestable &&
- main_thread_only().nesting_depth > 0) {
+ if (UNLIKELY(work_queue->GetFrontTask()->nestable ==
+ Nestable::kNonNestable &&
+ main_thread_only().nesting_depth > 0)) {
// Defer non-nestable work. NOTE these tasks can be arbitrarily delayed so
// the additional delay should not be a problem.
// Note because we don't delete queues while nested, it's perfectly OK to
@@ -566,6 +600,13 @@ Optional<PendingTask> SequenceManagerImpl::TakeTaskImpl() {
continue;
}
+ if (UNLIKELY(!ShouldRunTaskOfPriority(
+ work_queue->task_queue()->GetQueuePriority()))) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
+ "SequenceManager.YieldToNative");
+ return nullopt;
+ }
+
main_thread_only().task_execution_stack.emplace_back(
work_queue->TakeTaskFromWorkQueue(), work_queue->task_queue(),
InitializeTaskTiming(work_queue->task_queue()));
@@ -578,6 +619,11 @@ Optional<PendingTask> SequenceManagerImpl::TakeTaskImpl() {
}
}
+bool SequenceManagerImpl::ShouldRunTaskOfPriority(
+ TaskQueue::QueuePriority priority) const {
+ return priority <= *main_thread_only().pending_native_work.begin();
+}
+
void SequenceManagerImpl::DidRunTask() {
LazyNow lazy_now(controller_->GetClock());
ExecutingTask& executing_task =
@@ -597,22 +643,36 @@ void SequenceManagerImpl::DidRunTask() {
TimeDelta SequenceManagerImpl::DelayTillNextTask(LazyNow* lazy_now) const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
- // If the selector has non-empty queues we trivially know there is immediate
- // work to be done.
- if (!main_thread_only().selector.AllEnabledWorkQueuesAreEmpty())
+ if (auto priority = main_thread_only().selector.GetHighestPendingPriority()) {
+ // If the selector has non-empty queues we trivially know there is immediate
+ // work to be done. However we may want to yield to native work if it is
+ // more important.
+ if (UNLIKELY(!ShouldRunTaskOfPriority(*priority)))
+ return GetDelayTillNextDelayedTask(lazy_now);
return TimeDelta();
+ }
// There may be some incoming immediate work which we haven't accounted for.
// NB ReloadEmptyWorkQueues involves a memory barrier, so it's fastest to not
// do this always.
ReloadEmptyWorkQueues();
- if (!main_thread_only().selector.AllEnabledWorkQueuesAreEmpty())
+ if (auto priority = main_thread_only().selector.GetHighestPendingPriority()) {
+ if (UNLIKELY(!ShouldRunTaskOfPriority(*priority)))
+ return GetDelayTillNextDelayedTask(lazy_now);
return TimeDelta();
+ }
// Otherwise we need to find the shortest delay, if any. NB we don't need to
// call MoveReadyDelayedTasksToWorkQueues because it's assumed
// DelayTillNextTask will return TimeDelta>() if the delayed task is due to
// run now.
+ return GetDelayTillNextDelayedTask(lazy_now);
+}
+
+TimeDelta SequenceManagerImpl::GetDelayTillNextDelayedTask(
+ LazyNow* lazy_now) const {
+ DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
+
TimeDelta delay_till_next_task = TimeDelta::Max();
for (TimeDomain* time_domain : main_thread_only().time_domains) {
Optional<TimeDelta> delay = time_domain->DelayTillNextTask(lazy_now);
@@ -818,7 +878,7 @@ bool SequenceManagerImpl::GetAndClearSystemIsQuiescentBit() {
return !task_was_run;
}
-internal::EnqueueOrder SequenceManagerImpl::GetNextSequenceNumber() {
+EnqueueOrder SequenceManagerImpl::GetNextSequenceNumber() {
return enqueue_order_generator_.GenerateNext();
}
@@ -850,6 +910,9 @@ SequenceManagerImpl::AsValueWithSelectorResult(
selected_work_queue->task_queue()->GetName());
state->SetString("work_queue_name", selected_work_queue->name());
}
+ state->SetString("native_work_priority",
+ TaskQueue::PriorityToString(
+ *main_thread_only().pending_native_work.begin()));
state->BeginArray("time_domains");
for (auto* time_domain : main_thread_only().time_domains)
@@ -993,7 +1056,7 @@ void SequenceManagerImpl::AttachToMessagePump() {
bool SequenceManagerImpl::IsIdleForTesting() {
ReloadEmptyWorkQueues();
RemoveAllCanceledTasksFromFrontOfWorkQueues();
- return main_thread_only().selector.AllEnabledWorkQueuesAreEmpty();
+ return !main_thread_only().selector.GetHighestPendingPriority().has_value();
}
size_t SequenceManagerImpl::GetPendingTaskCountForTesting() const {
@@ -1014,6 +1077,11 @@ std::string SequenceManagerImpl::DescribeAllPendingTasks() const {
->ToString();
}
+std::unique_ptr<NativeWorkHandle> SequenceManagerImpl::OnNativeWorkPending(
+ TaskQueue::QueuePriority priority) {
+ return std::make_unique<NativeWorkHandleImpl>(this, priority);
+}
+
void SequenceManagerImpl::AddDestructionObserver(
MessageLoopCurrent::DestructionObserver* destruction_observer) {
main_thread_only().destruction_observers.AddObserver(destruction_observer);
diff --git a/chromium/base/task/sequence_manager/sequence_manager_impl.h b/chromium/base/task/sequence_manager/sequence_manager_impl.h
index eb80e6a0792..b70d1282051 100644
--- a/chromium/base/task/sequence_manager/sequence_manager_impl.h
+++ b/chromium/base/task/sequence_manager/sequence_manager_impl.h
@@ -29,6 +29,7 @@
#include "base/task/common/task_annotator.h"
#include "base/task/sequence_manager/associated_thread_id.h"
#include "base/task/sequence_manager/enqueue_order.h"
+#include "base/task/sequence_manager/enqueue_order_generator.h"
#include "base/task/sequence_manager/sequence_manager.h"
#include "base/task/sequence_manager/task_queue_impl.h"
#include "base/task/sequence_manager/task_queue_selector.h"
@@ -120,9 +121,11 @@ class BASE_EXPORT SequenceManagerImpl
scoped_refptr<TaskQueue> CreateTaskQueue(
const TaskQueue::Spec& spec) override;
std::string DescribeAllPendingTasks() const override;
+ std::unique_ptr<NativeWorkHandle> OnNativeWorkPending(
+ TaskQueue::QueuePriority priority) override;
// SequencedTaskSource implementation:
- Optional<PendingTask> TakeTask() override;
+ Optional<Task> TakeTask() override;
void DidRunTask() override;
TimeDelta DelayTillNextTask(LazyNow* lazy_now) const override;
bool HasPendingHighResolutionTasks() override;
@@ -203,6 +206,8 @@ class BASE_EXPORT SequenceManagerImpl
friend class ::base::sequence_manager::SequenceManagerForTest;
private:
+ class NativeWorkHandleImpl;
+
// Returns the SequenceManager running the
// current thread. It must only be used on the thread it was obtained.
// Only to be used by MessageLoopCurrent for the moment
@@ -304,6 +309,10 @@ class BASE_EXPORT SequenceManagerImpl
ObserverList<MessageLoopCurrent::DestructionObserver>::Unchecked
destruction_observers;
+
+ // By default native work is not prioritized at all.
+ std::multiset<TaskQueue::QueuePriority> pending_native_work{
+ TaskQueue::kBestEffortPriority};
};
void CompleteInitializationOnBoundThread();
@@ -327,7 +336,7 @@ class BASE_EXPORT SequenceManagerImpl
void NotifyWillProcessTask(ExecutingTask* task, LazyNow* time_before_task);
void NotifyDidProcessTask(ExecutingTask* task, LazyNow* time_after_task);
- internal::EnqueueOrder GetNextSequenceNumber();
+ EnqueueOrder GetNextSequenceNumber();
bool GetAddQueueTimeToTasks();
@@ -366,7 +375,14 @@ class BASE_EXPORT SequenceManagerImpl
// Helper to terminate all scoped trace events to allow starting new ones
// in TakeTask().
- Optional<PendingTask> TakeTaskImpl();
+ Optional<Task> TakeTaskImpl();
+
+ // Check if a task of priority |priority| should run given the pending set of
+ // native work.
+ bool ShouldRunTaskOfPriority(TaskQueue::QueuePriority priority) const;
+
+ // Ignores any immediate work.
+ TimeDelta GetDelayTillNextDelayedTask(LazyNow* lazy_now) const;
#if DCHECK_IS_ON()
void LogTaskDebugInfo(const ExecutingTask& executing_task);
@@ -379,7 +395,7 @@ class BASE_EXPORT SequenceManagerImpl
scoped_refptr<AssociatedThreadId> associated_thread_;
- internal::EnqueueOrder::Generator enqueue_order_generator_;
+ EnqueueOrderGenerator enqueue_order_generator_;
const std::unique_ptr<internal::ThreadController> controller_;
const Settings settings_;
@@ -407,7 +423,7 @@ class BASE_EXPORT SequenceManagerImpl
return main_thread_only_;
}
- WeakPtrFactory<SequenceManagerImpl> weak_factory_;
+ WeakPtrFactory<SequenceManagerImpl> weak_factory_{this};
DISALLOW_COPY_AND_ASSIGN(SequenceManagerImpl);
};
diff --git a/chromium/base/task/sequence_manager/sequence_manager_impl_unittest.cc b/chromium/base/task/sequence_manager/sequence_manager_impl_unittest.cc
index 22dedcb2c4d..f5103644d8b 100644
--- a/chromium/base/task/sequence_manager/sequence_manager_impl_unittest.cc
+++ b/chromium/base/task/sequence_manager/sequence_manager_impl_unittest.cc
@@ -10,6 +10,7 @@
#include "base/auto_reset.h"
#include "base/bind.h"
+#include "base/bind_helpers.h"
#include "base/callback.h"
#include "base/callback_helpers.h"
#include "base/cancelable_callback.h"
@@ -53,7 +54,7 @@
#include "build/build_config.h"
#include "testing/gmock/include/gmock/gmock.h"
-using base::sequence_manager::internal::EnqueueOrder;
+using base::sequence_manager::EnqueueOrder;
using testing::_;
using testing::AnyNumber;
using testing::Contains;
@@ -72,12 +73,19 @@ namespace internal {
// To avoid symbol collisions in jumbo builds.
namespace sequence_manager_impl_unittest {
+constexpr TimeDelta kDelay = TimeDelta::FromSeconds(42);
+
enum class TestType {
kMockTaskRunner,
kMessageLoop,
kMessagePump,
};
+enum class AntiStarvationLogic {
+ kEnabled,
+ kDisabled,
+};
+
std::string ToString(TestType type) {
switch (type) {
case TestType::kMockTaskRunner:
@@ -89,8 +97,21 @@ std::string ToString(TestType type) {
}
}
-std::string GetTestNameSuffix(const testing::TestParamInfo<TestType>& info) {
- return StrCat({"With", ToString(info.param).substr(1)});
+std::string ToString(AntiStarvationLogic type) {
+ switch (type) {
+ case AntiStarvationLogic::kEnabled:
+ return "AntiStarvationLogicEnabled";
+ case AntiStarvationLogic::kDisabled:
+ return "AntiStarvationLogicDisabled";
+ }
+}
+
+using SequenceManagerTestParams = std::pair<TestType, AntiStarvationLogic>;
+
+std::string GetTestNameSuffix(
+ const testing::TestParamInfo<SequenceManagerTestParams>& info) {
+ return StrCat({"With", ToString(info.param.first).substr(1), "And",
+ ToString(info.param.second)});
}
void PrintTo(const TestType type, std::ostream* os) {
@@ -147,6 +168,9 @@ class CallCountingTickClock : public TickClock {
class FixtureWithMockTaskRunner final : public Fixture {
public:
FixtureWithMockTaskRunner()
+ : FixtureWithMockTaskRunner(AntiStarvationLogic::kEnabled) {}
+
+ explicit FixtureWithMockTaskRunner(AntiStarvationLogic anti_starvation_logic)
: test_task_runner_(MakeRefCounted<TestMockTimeTaskRunner>(
TestMockTimeTaskRunner::Type::kBoundToThread)),
call_counting_clock_(BindRepeating(&TestMockTimeTaskRunner::NowTicks,
@@ -159,6 +183,8 @@ class FixtureWithMockTaskRunner final : public Fixture {
.SetMessagePumpType(MessagePump::Type::DEFAULT)
.SetRandomisedSamplingEnabled(false)
.SetTickClock(mock_tick_clock())
+ .SetAntiStarvationLogicForPrioritiesDisabled(
+ anti_starvation_logic == AntiStarvationLogic::kDisabled)
.Build())) {
// A null clock triggers some assertions.
AdvanceMockTickClock(TimeDelta::FromMilliseconds(1));
@@ -220,17 +246,21 @@ class FixtureWithMockTaskRunner final : public Fixture {
class FixtureWithMockMessagePump : public Fixture {
public:
- FixtureWithMockMessagePump() : call_counting_clock_(&mock_clock_) {
+ explicit FixtureWithMockMessagePump(AntiStarvationLogic anti_starvation_logic)
+ : call_counting_clock_(&mock_clock_) {
// A null clock triggers some assertions.
mock_clock_.Advance(TimeDelta::FromMilliseconds(1));
auto pump = std::make_unique<MockTimeMessagePump>(&mock_clock_);
pump_ = pump.get();
- auto settings = SequenceManager::Settings::Builder()
- .SetMessagePumpType(MessagePump::Type::DEFAULT)
- .SetRandomisedSamplingEnabled(false)
- .SetTickClock(mock_tick_clock())
- .Build();
+ auto settings =
+ SequenceManager::Settings::Builder()
+ .SetMessagePumpType(MessagePump::Type::DEFAULT)
+ .SetRandomisedSamplingEnabled(false)
+ .SetTickClock(mock_tick_clock())
+ .SetAntiStarvationLogicForPrioritiesDisabled(
+ anti_starvation_logic == AntiStarvationLogic::kDisabled)
+ .Build();
sequence_manager_ = SequenceManagerForTest::Create(
std::make_unique<ThreadControllerWithMessagePumpImpl>(std::move(pump),
settings),
@@ -299,7 +329,7 @@ class FixtureWithMockMessagePump : public Fixture {
class FixtureWithMessageLoop : public Fixture {
public:
- FixtureWithMessageLoop()
+ explicit FixtureWithMessageLoop(AntiStarvationLogic anti_starvation_logic)
: call_counting_clock_(&mock_clock_),
auto_reset_global_clock_(&global_clock_, &call_counting_clock_) {
// A null clock triggers some assertions.
@@ -317,6 +347,8 @@ class FixtureWithMessageLoop : public Fixture {
.SetMessagePumpType(MessagePump::Type::DEFAULT)
.SetRandomisedSamplingEnabled(false)
.SetTickClock(mock_tick_clock())
+ .SetAntiStarvationLogicForPrioritiesDisabled(
+ anti_starvation_logic == AntiStarvationLogic::kDisabled)
.Build());
// The SequenceManager constructor calls Now() once for setting up
@@ -391,19 +423,24 @@ TickClock* FixtureWithMessageLoop::global_clock_;
// Convenience wrapper around the fixtures so that we can use parametrized tests
// instead of templated ones. The latter would be more verbose as all method
// calls to the fixture would need to be like this->method()
-class SequenceManagerTest : public testing::TestWithParam<TestType>,
- public Fixture {
+class SequenceManagerTest
+ : public testing::TestWithParam<SequenceManagerTestParams>,
+ public Fixture {
public:
SequenceManagerTest() {
- switch (GetParam()) {
+ AntiStarvationLogic anti_starvation_logic = GetAntiStarvationLogicType();
+ switch (GetUnderlyingRunnerType()) {
case TestType::kMockTaskRunner:
- fixture_ = std::make_unique<FixtureWithMockTaskRunner>();
+ fixture_ =
+ std::make_unique<FixtureWithMockTaskRunner>(anti_starvation_logic);
break;
case TestType::kMessagePump:
- fixture_ = std::make_unique<FixtureWithMockMessagePump>();
+ fixture_ =
+ std::make_unique<FixtureWithMockMessagePump>(anti_starvation_logic);
break;
case TestType::kMessageLoop:
- fixture_ = std::make_unique<FixtureWithMessageLoop>();
+ fixture_ =
+ std::make_unique<FixtureWithMessageLoop>(anti_starvation_logic);
break;
default:
NOTREACHED();
@@ -474,16 +511,27 @@ class SequenceManagerTest : public testing::TestWithParam<TestType>,
return fixture_->GetNowTicksCallCount();
}
+ TestType GetUnderlyingRunnerType() { return GetParam().first; }
+
+ AntiStarvationLogic GetAntiStarvationLogicType() { return GetParam().second; }
+
private:
std::unique_ptr<Fixture> fixture_;
};
-INSTANTIATE_TEST_SUITE_P(,
- SequenceManagerTest,
- testing::Values(TestType::kMockTaskRunner,
- TestType::kMessageLoop,
- TestType::kMessagePump),
- GetTestNameSuffix);
+INSTANTIATE_TEST_SUITE_P(
+ ,
+ SequenceManagerTest,
+ testing::Values(
+ std::make_pair(TestType::kMockTaskRunner,
+ AntiStarvationLogic::kEnabled),
+ std::make_pair(TestType::kMockTaskRunner,
+ AntiStarvationLogic::kDisabled),
+ std::make_pair(TestType::kMessageLoop, AntiStarvationLogic::kEnabled),
+ std::make_pair(TestType::kMessageLoop, AntiStarvationLogic::kDisabled),
+ std::make_pair(TestType::kMessagePump, AntiStarvationLogic::kEnabled),
+ std::make_pair(TestType::kMessagePump, AntiStarvationLogic::kDisabled)),
+ GetTestNameSuffix);
void PostFromNestedRunloop(scoped_refptr<TestTaskQueue> runner,
std::vector<std::pair<OnceClosure, bool>>* tasks) {
@@ -655,7 +703,7 @@ TEST_P(SequenceManagerTest, NonNestableTaskExecutesInExpectedOrder) {
}
TEST_P(SequenceManagerTest, NonNestableTasksDoesntExecuteInNestedLoop) {
- if (GetParam() == TestType::kMockTaskRunner)
+ if (GetUnderlyingRunnerType() == TestType::kMockTaskRunner)
return;
auto queue = CreateTaskQueue();
@@ -701,7 +749,7 @@ void InsertFenceAndPostTestTask(int id,
} // namespace
TEST_P(SequenceManagerTest, TaskQueueDisabledFromNestedLoop) {
- if (GetParam() == TestType::kMockTaskRunner)
+ if (GetUnderlyingRunnerType() == TestType::kMockTaskRunner)
return;
auto queue = CreateTaskQueue();
std::vector<EnqueueOrder> run_order;
@@ -1445,7 +1493,7 @@ TEST_P(SequenceManagerTest, NoTasksAfterShutdown) {
DestroySequenceManager();
queue->task_runner()->PostTask(FROM_HERE, counter.WrapCallback(task.Get()));
- if (GetParam() != TestType::kMessagePump) {
+ if (GetUnderlyingRunnerType() != TestType::kMessagePump) {
RunLoop().RunUntilIdle();
}
@@ -1897,7 +1945,7 @@ void PostAndQuitFromNestedRunloop(RunLoop* run_loop,
}
TEST_P(SequenceManagerTest, QuitWhileNested) {
- if (GetParam() == TestType::kMockTaskRunner)
+ if (GetUnderlyingRunnerType() == TestType::kMockTaskRunner)
return;
// This test makes sure we don't continue running a work batch after a nested
// run loop has been exited in the middle of the batch.
@@ -2282,7 +2330,8 @@ class MockTaskQueueObserver : public TaskQueue::Observer {
public:
~MockTaskQueueObserver() override = default;
- MOCK_METHOD2(OnQueueNextWakeUpChanged, void(TaskQueue*, TimeTicks));
+ MOCK_METHOD2(OnPostTask, void(Location, TimeDelta));
+ MOCK_METHOD1(OnQueueNextWakeUpChanged, void(TimeTicks));
};
} // namespace
@@ -2293,14 +2342,17 @@ TEST_P(SequenceManagerTest, TaskQueueObserver_ImmediateTask) {
MockTaskQueueObserver observer;
queue->SetObserver(&observer);
- // We should get a notification when a task is posted on an empty queue.
- EXPECT_CALL(observer, OnQueueNextWakeUpChanged(queue.get(), _));
+ // We should get an OnQueueNextWakeUpChanged notification when a task is posted
+ // on an empty queue.
+ EXPECT_CALL(observer, OnPostTask(_, TimeDelta()));
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_));
queue->task_runner()->PostTask(FROM_HERE, BindOnce(&NopTask));
sequence_manager()->ReloadEmptyWorkQueues();
Mock::VerifyAndClearExpectations(&observer);
// But not subsequently.
- EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_, _)).Times(0);
+ EXPECT_CALL(observer, OnPostTask(_, TimeDelta()));
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_)).Times(0);
queue->task_runner()->PostTask(FROM_HERE, BindOnce(&NopTask));
sequence_manager()->ReloadEmptyWorkQueues();
Mock::VerifyAndClearExpectations(&observer);
@@ -2310,7 +2362,8 @@ TEST_P(SequenceManagerTest, TaskQueueObserver_ImmediateTask) {
sequence_manager()->DidRunTask();
sequence_manager()->TakeTask();
sequence_manager()->DidRunTask();
- EXPECT_CALL(observer, OnQueueNextWakeUpChanged(queue.get(), _));
+ EXPECT_CALL(observer, OnPostTask(_, TimeDelta()));
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_));
queue->task_runner()->PostTask(FROM_HERE, BindOnce(&NopTask));
sequence_manager()->ReloadEmptyWorkQueues();
Mock::VerifyAndClearExpectations(&observer);
@@ -2330,23 +2383,25 @@ TEST_P(SequenceManagerTest, TaskQueueObserver_DelayedTask) {
MockTaskQueueObserver observer;
queue->SetObserver(&observer);
- // We should get a notification when a delayed task is posted on an empty
- // queue.
- EXPECT_CALL(observer,
- OnQueueNextWakeUpChanged(queue.get(), start_time + delay10s));
+ // We should get an OnQueueNextWakeUpChanged notification when a delayed task
+ // is posted on an empty queue.
+ EXPECT_CALL(observer, OnPostTask(_, delay10s));
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(start_time + delay10s));
queue->task_runner()->PostDelayedTask(FROM_HERE, BindOnce(&NopTask),
delay10s);
Mock::VerifyAndClearExpectations(&observer);
- // We should not get a notification for a longer delay.
- EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_, _)).Times(0);
+ // We should not get an OnQueueNextWakeUpChanged notification for a longer
+ // delay.
+ EXPECT_CALL(observer, OnPostTask(_, delay100s));
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_)).Times(0);
queue->task_runner()->PostDelayedTask(FROM_HERE, BindOnce(&NopTask),
delay100s);
Mock::VerifyAndClearExpectations(&observer);
- // We should get a notification for a shorter delay.
- EXPECT_CALL(observer,
- OnQueueNextWakeUpChanged(queue.get(), start_time + delay1s));
+ // We should get an OnQueueNextWakeUpChanged notification for a shorter delay.
+ EXPECT_CALL(observer, OnPostTask(_, delay1s));
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(start_time + delay1s));
queue->task_runner()->PostDelayedTask(FROM_HERE, BindOnce(&NopTask), delay1s);
Mock::VerifyAndClearExpectations(&observer);
@@ -2357,8 +2412,8 @@ TEST_P(SequenceManagerTest, TaskQueueObserver_DelayedTask) {
// When a queue has been enabled, we may get a notification if the
// TimeDomain's next scheduled wake-up has changed.
- EXPECT_CALL(observer,
- OnQueueNextWakeUpChanged(queue.get(), start_time + delay1s));
+ EXPECT_CALL(observer, OnPostTask(_, _)).Times(0);
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(start_time + delay1s));
voter->SetVoteToEnable(true);
Mock::VerifyAndClearExpectations(&observer);
@@ -2369,25 +2424,27 @@ TEST_P(SequenceManagerTest, TaskQueueObserver_DelayedTask) {
TEST_P(SequenceManagerTest, TaskQueueObserver_DelayedTaskMultipleQueues) {
auto queues = CreateTaskQueues(2u);
- MockTaskQueueObserver observer;
- queues[0]->SetObserver(&observer);
- queues[1]->SetObserver(&observer);
+ MockTaskQueueObserver observer0;
+ MockTaskQueueObserver observer1;
+ queues[0]->SetObserver(&observer0);
+ queues[1]->SetObserver(&observer1);
TimeTicks start_time = sequence_manager()->NowTicks();
TimeDelta delay1s(TimeDelta::FromSeconds(1));
TimeDelta delay10s(TimeDelta::FromSeconds(10));
- EXPECT_CALL(observer,
- OnQueueNextWakeUpChanged(queues[0].get(), start_time + delay1s))
+ EXPECT_CALL(observer0, OnPostTask(_, delay1s));
+ EXPECT_CALL(observer0, OnQueueNextWakeUpChanged(start_time + delay1s))
.Times(1);
- EXPECT_CALL(observer,
- OnQueueNextWakeUpChanged(queues[1].get(), start_time + delay10s))
+ EXPECT_CALL(observer1, OnPostTask(_, delay10s));
+ EXPECT_CALL(observer1, OnQueueNextWakeUpChanged(start_time + delay10s))
.Times(1);
queues[0]->task_runner()->PostDelayedTask(FROM_HERE, BindOnce(&NopTask),
delay1s);
queues[1]->task_runner()->PostDelayedTask(FROM_HERE, BindOnce(&NopTask),
delay10s);
- testing::Mock::VerifyAndClearExpectations(&observer);
+ testing::Mock::VerifyAndClearExpectations(&observer0);
+ testing::Mock::VerifyAndClearExpectations(&observer1);
std::unique_ptr<TaskQueue::QueueEnabledVoter> voter0 =
queues[0]->CreateQueueEnabledVoter();
@@ -2395,29 +2452,33 @@ TEST_P(SequenceManagerTest, TaskQueueObserver_DelayedTaskMultipleQueues) {
queues[1]->CreateQueueEnabledVoter();
// Disabling a queue should not trigger a notification.
- EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_, _)).Times(0);
+ EXPECT_CALL(observer0, OnPostTask(_, _)).Times(0);
+ EXPECT_CALL(observer0, OnQueueNextWakeUpChanged(_)).Times(0);
voter0->SetVoteToEnable(false);
- Mock::VerifyAndClearExpectations(&observer);
+ Mock::VerifyAndClearExpectations(&observer0);
- // Re-enabling it should should also trigger a notification.
- EXPECT_CALL(observer,
- OnQueueNextWakeUpChanged(queues[0].get(), start_time + delay1s));
+ // But re-enabling it should trigger an OnQueueNextWakeUpChanged
+ // notification.
+ EXPECT_CALL(observer0, OnPostTask(_, _)).Times(0);
+ EXPECT_CALL(observer0, OnQueueNextWakeUpChanged(start_time + delay1s));
voter0->SetVoteToEnable(true);
- Mock::VerifyAndClearExpectations(&observer);
+ Mock::VerifyAndClearExpectations(&observer0);
// Disabling a queue should not trigger a notification.
- EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_, _)).Times(0);
+ EXPECT_CALL(observer1, OnPostTask(_, _)).Times(0);
+ EXPECT_CALL(observer1, OnQueueNextWakeUpChanged(_)).Times(0);
voter1->SetVoteToEnable(false);
- Mock::VerifyAndClearExpectations(&observer);
+ Mock::VerifyAndClearExpectations(&observer0);
- // Re-enabling it should should trigger a notification.
- EXPECT_CALL(observer,
- OnQueueNextWakeUpChanged(queues[1].get(), start_time + delay10s));
+ // But re-enabling it should trigger a notification.
+ EXPECT_CALL(observer1, OnPostTask(_, _)).Times(0);
+ EXPECT_CALL(observer1, OnQueueNextWakeUpChanged(start_time + delay10s));
voter1->SetVoteToEnable(true);
- Mock::VerifyAndClearExpectations(&observer);
+ Mock::VerifyAndClearExpectations(&observer1);
// Tidy up.
- EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_, _)).Times(AnyNumber());
+ EXPECT_CALL(observer0, OnQueueNextWakeUpChanged(_)).Times(AnyNumber());
+ EXPECT_CALL(observer1, OnQueueNextWakeUpChanged(_)).Times(AnyNumber());
queues[0]->ShutdownTaskQueue();
queues[1]->ShutdownTaskQueue();
}
@@ -2440,7 +2501,8 @@ TEST_P(SequenceManagerTest, TaskQueueObserver_DelayedWorkWhichCanRunNow) {
// We should get a notification when a delayed task is posted on an empty
// queue.
- EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_, _));
+ EXPECT_CALL(observer, OnPostTask(_, _));
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_));
queue->task_runner()->PostDelayedTask(FROM_HERE, BindOnce(&NopTask), delay1s);
Mock::VerifyAndClearExpectations(&observer);
@@ -2450,7 +2512,8 @@ TEST_P(SequenceManagerTest, TaskQueueObserver_DelayedWorkWhichCanRunNow) {
AdvanceMockTickClock(delay10s);
- EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_, _));
+ EXPECT_CALL(observer, OnPostTask(_, _)).Times(0);
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_));
queue->SetTimeDomain(mock_time_domain.get());
Mock::VerifyAndClearExpectations(&observer);
@@ -2460,15 +2523,14 @@ TEST_P(SequenceManagerTest, TaskQueueObserver_DelayedWorkWhichCanRunNow) {
class CancelableTask {
public:
- explicit CancelableTask(const TickClock* clock)
- : clock_(clock), weak_factory_(this) {}
+ explicit CancelableTask(const TickClock* clock) : clock_(clock) {}
void RecordTimeTask(std::vector<TimeTicks>* run_times) {
run_times->push_back(clock_->NowTicks());
}
const TickClock* clock_;
- WeakPtrFactory<CancelableTask> weak_factory_;
+ WeakPtrFactory<CancelableTask> weak_factory_{this};
};
TEST_P(SequenceManagerTest, TaskQueueObserver_SweepCanceledDelayedTasks) {
@@ -2481,9 +2543,8 @@ TEST_P(SequenceManagerTest, TaskQueueObserver_SweepCanceledDelayedTasks) {
TimeDelta delay1(TimeDelta::FromSeconds(5));
TimeDelta delay2(TimeDelta::FromSeconds(10));
- EXPECT_CALL(observer,
- OnQueueNextWakeUpChanged(queue.get(), start_time + delay1))
- .Times(1);
+ EXPECT_CALL(observer, OnPostTask(_, _)).Times(AnyNumber());
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(start_time + delay1)).Times(1);
CancelableTask task1(mock_tick_clock());
CancelableTask task2(mock_tick_clock());
@@ -2502,9 +2563,7 @@ TEST_P(SequenceManagerTest, TaskQueueObserver_SweepCanceledDelayedTasks) {
task1.weak_factory_.InvalidateWeakPtrs();
// Sweeping away canceled delayed tasks should trigger a notification.
- EXPECT_CALL(observer,
- OnQueueNextWakeUpChanged(queue.get(), start_time + delay2))
- .Times(1);
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(start_time + delay2)).Times(1);
sequence_manager()->ReclaimMemory();
}
@@ -2796,7 +2855,7 @@ TEST_P(SequenceManagerTest, CurrentlyExecutingTaskQueue_NestedLoop) {
}
TEST_P(SequenceManagerTest, BlameContextAttribution) {
- if (GetParam() == TestType::kMessagePump)
+ if (GetUnderlyingRunnerType() == TestType::kMessagePump)
return;
using trace_analyzer::Query;
@@ -3155,7 +3214,7 @@ void MessageLoopTaskWithDelayedQuit(Fixture* fixture,
} // namespace
TEST_P(SequenceManagerTest, DelayedTaskRunsInNestedMessageLoop) {
- if (GetParam() == TestType::kMockTaskRunner)
+ if (GetUnderlyingRunnerType() == TestType::kMockTaskRunner)
return;
auto queue = CreateTaskQueue();
RunLoop run_loop;
@@ -3179,7 +3238,7 @@ void MessageLoopTaskWithImmediateQuit(OnceClosure non_nested_quit_closure,
} // namespace
TEST_P(SequenceManagerTest, DelayedNestedMessageLoopDoesntPreventTasksRunning) {
- if (GetParam() == TestType::kMockTaskRunner)
+ if (GetUnderlyingRunnerType() == TestType::kMockTaskRunner)
return;
auto queue = CreateTaskQueue();
RunLoop run_loop;
@@ -3247,7 +3306,7 @@ TEST_P(SequenceManagerTest, DelayedDoWorkNotPostedForDisabledQueue) {
queue->CreateQueueEnabledVoter();
voter->SetVoteToEnable(false);
- switch (GetParam()) {
+ switch (GetUnderlyingRunnerType()) {
case TestType::kMessagePump:
EXPECT_EQ(TimeDelta::FromDays(1), NextPendingTaskDelay());
break;
@@ -3287,21 +3346,21 @@ TEST_P(SequenceManagerTest, DisablingQueuesChangesDelayTillNextDoWork) {
EXPECT_EQ(TimeDelta::FromMilliseconds(1), NextPendingTaskDelay());
voter0->SetVoteToEnable(false);
- if (GetParam() == TestType::kMessageLoop) {
+ if (GetUnderlyingRunnerType() == TestType::kMessageLoop) {
EXPECT_EQ(TimeDelta::FromMilliseconds(1), NextPendingTaskDelay());
} else {
EXPECT_EQ(TimeDelta::FromMilliseconds(10), NextPendingTaskDelay());
}
voter1->SetVoteToEnable(false);
- if (GetParam() == TestType::kMessageLoop) {
+ if (GetUnderlyingRunnerType() == TestType::kMessageLoop) {
EXPECT_EQ(TimeDelta::FromMilliseconds(1), NextPendingTaskDelay());
} else {
EXPECT_EQ(TimeDelta::FromMilliseconds(100), NextPendingTaskDelay());
}
voter2->SetVoteToEnable(false);
- switch (GetParam()) {
+ switch (GetUnderlyingRunnerType()) {
case TestType::kMessagePump:
EXPECT_EQ(TimeDelta::FromDays(1), NextPendingTaskDelay());
break;
@@ -3366,7 +3425,7 @@ TEST_P(SequenceManagerTest, SetTimeDomainForDisabledQueue) {
voter->SetVoteToEnable(false);
// We should not get a notification for a disabled queue.
- EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_, _)).Times(0);
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_)).Times(0);
std::unique_ptr<MockTimeDomain> domain =
std::make_unique<MockTimeDomain>(sequence_manager()->NowTicks());
@@ -3485,7 +3544,8 @@ TEST_P(SequenceManagerTest, ObserverNotFiredAfterTaskQueueDestructed) {
main_tq->SetObserver(&observer);
// We don't expect the observer to fire if the TaskQueue gets destructed.
- EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_, _)).Times(0);
+ EXPECT_CALL(observer, OnPostTask(_, _)).Times(0);
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_)).Times(0);
auto task_runner = main_tq->task_runner();
main_tq = nullptr;
task_runner->PostTask(FROM_HERE, BindOnce(&NopTask));
@@ -3493,7 +3553,8 @@ TEST_P(SequenceManagerTest, ObserverNotFiredAfterTaskQueueDestructed) {
FastForwardUntilNoTasksRemain();
}
-TEST_P(SequenceManagerTest, ObserverNotFiredForDisabledQueuePostTask) {
+TEST_P(SequenceManagerTest,
+ OnQueueNextWakeUpChangedNotFiredForDisabledQueuePostTask) {
scoped_refptr<TestTaskQueue> main_tq = CreateTaskQueue();
auto task_runner = main_tq->task_runner();
@@ -3504,8 +3565,11 @@ TEST_P(SequenceManagerTest, ObserverNotFiredForDisabledQueuePostTask) {
main_tq->CreateQueueEnabledVoter();
voter->SetVoteToEnable(false);
- // We don't expect the observer to fire if the TaskQueue gets disabled.
- EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_, _)).Times(0);
+ EXPECT_CALL(observer, OnPostTask(_, _));
+
+ // We don't expect the OnQueueNextWakeUpChanged to fire if the TaskQueue gets
+ // disabled.
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_)).Times(0);
// Should not fire the observer.
task_runner->PostTask(FROM_HERE, BindOnce(&NopTask));
@@ -3517,7 +3581,7 @@ TEST_P(SequenceManagerTest, ObserverNotFiredForDisabledQueuePostTask) {
}
TEST_P(SequenceManagerTest,
- ObserverNotFiredForCrossThreadDisabledQueuePostTask) {
+ OnQueueNextWakeUpChangedNotFiredForCrossThreadDisabledQueuePostTask) {
scoped_refptr<TestTaskQueue> main_tq = CreateTaskQueue();
auto task_runner = main_tq->task_runner();
@@ -3528,8 +3592,10 @@ TEST_P(SequenceManagerTest,
main_tq->CreateQueueEnabledVoter();
voter->SetVoteToEnable(false);
+ EXPECT_CALL(observer, OnPostTask(_, _));
+
// We don't expect the observer to fire if the TaskQueue gets blocked.
- EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_, _)).Times(0);
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_)).Times(0);
WaitableEvent done_event;
Thread thread("TestThread");
@@ -3629,8 +3695,8 @@ TEST_P(SequenceManagerTest, GracefulShutdown_ManagerDeletedInFlight) {
// thread.
DestroySequenceManager();
- if (GetParam() != TestType::kMessagePump &&
- GetParam() != TestType::kMessageLoop) {
+ if (GetUnderlyingRunnerType() != TestType::kMessagePump &&
+ GetUnderlyingRunnerType() != TestType::kMessageLoop) {
FastForwardUntilNoTasksRemain();
}
@@ -3673,8 +3739,8 @@ TEST_P(SequenceManagerTest,
// Ensure that all queues-to-gracefully-shutdown are properly unregistered.
DestroySequenceManager();
- if (GetParam() != TestType::kMessagePump &&
- GetParam() != TestType::kMessageLoop) {
+ if (GetUnderlyingRunnerType() != TestType::kMessagePump &&
+ GetUnderlyingRunnerType() != TestType::kMessageLoop) {
FastForwardUntilNoTasksRemain();
}
@@ -3808,7 +3874,7 @@ class RunOnDestructionHelper {
base::OnceClosure RunOnDestruction(base::OnceClosure task) {
return base::BindOnce(
[](std::unique_ptr<RunOnDestructionHelper>) {},
- base::Passed(std::make_unique<RunOnDestructionHelper>(std::move(task))));
+ std::make_unique<RunOnDestructionHelper>(std::move(task)));
}
base::OnceClosure PostOnDestruction(scoped_refptr<TestTaskQueue> task_queue,
@@ -4218,7 +4284,7 @@ TEST_P(SequenceManagerTest, DestructionObserverTest) {
}
TEST_P(SequenceManagerTest, GetMessagePump) {
- switch (GetParam()) {
+ switch (GetUnderlyingRunnerType()) {
default:
EXPECT_THAT(sequence_manager()->GetMessagePump(), testing::IsNull());
break;
@@ -4259,7 +4325,7 @@ class MockTimeDomain : public TimeDomain {
} // namespace
TEST_P(SequenceManagerTest, OnSystemIdleTimeDomainNotification) {
- if (GetParam() != TestType::kMessagePump)
+ if (GetUnderlyingRunnerType() != TestType::kMessagePump)
return;
auto queue = CreateTaskQueue();
@@ -4420,26 +4486,40 @@ TEST_P(SequenceManagerTest, TaskPriortyInterleaving) {
RunLoop().RunUntilIdle();
- EXPECT_EQ(order,
- "000000000000000000000000000000000000000000000000000000000000"
- "111121311214131215112314121131211151234112113121114123511211"
- "312411123115121341211131211145123111211314211352232423222322"
- "452322232423222352423222322423252322423222322452322232433353"
- "343333334353333433333345333334333354444445444444544444454444"
- "445444444544444454445555555555555555555555555555555555555555"
- "666666666666666666666666666666666666666666666666666666666666");
+ switch (GetAntiStarvationLogicType()) {
+ case AntiStarvationLogic::kDisabled:
+ EXPECT_EQ(order,
+ "000000000000000000000000000000000000000000000000000000000000"
+ "111111111111111111111111111111111111111111111111111111111111"
+ "222222222222222222222222222222222222222222222222222222222222"
+ "333333333333333333333333333333333333333333333333333333333333"
+ "444444444444444444444444444444444444444444444444444444444444"
+ "555555555555555555555555555555555555555555555555555555555555"
+ "666666666666666666666666666666666666666666666666666666666666");
+ break;
+ case AntiStarvationLogic::kEnabled:
+ EXPECT_EQ(order,
+ "000000000000000000000000000000000000000000000000000000000000"
+ "111121311214131215112314121131211151234112113121114123511211"
+ "312411123115121341211131211145123111211314211352232423222322"
+ "452322232423222352423222322423252322423222322452322232433353"
+ "343333334353333433333345333334333354444445444444544444454444"
+ "445444444544444454445555555555555555555555555555555555555555"
+ "666666666666666666666666666666666666666666666666666666666666");
+ break;
+ }
}
class CancelableTaskWithDestructionObserver {
public:
- CancelableTaskWithDestructionObserver() : weak_factory_(this) {}
+ CancelableTaskWithDestructionObserver() {}
void Task(std::unique_ptr<ScopedClosureRunner> destruction_observer) {
destruction_observer_ = std::move(destruction_observer);
}
std::unique_ptr<ScopedClosureRunner> destruction_observer_;
- WeakPtrFactory<CancelableTaskWithDestructionObserver> weak_factory_;
+ WeakPtrFactory<CancelableTaskWithDestructionObserver> weak_factory_{this};
};
TEST_P(SequenceManagerTest, PeriodicHousekeeping) {
@@ -4615,6 +4695,217 @@ TEST_P(SequenceManagerTest, ReclaimMemoryRemovesCorrectQueueFromSet) {
EXPECT_THAT(order, ElementsAre(1, 2, 3));
}
+TEST_P(SequenceManagerTest, OnNativeWorkPending) {
+ MockTask task;
+ auto queue = CreateTaskQueue();
+ queue->SetQueuePriority(TaskQueue::QueuePriority::kNormalPriority);
+
+ auto CheckPostedTaskRan = [&](bool should_have_run) {
+ EXPECT_CALL(task, Run).Times(should_have_run ? 1 : 0);
+ RunLoop().RunUntilIdle();
+ Mock::VerifyAndClearExpectations(&task);
+ };
+
+ // Scheduling native work with higher priority causes the posted task to be
+ // deferred.
+ auto native_work = sequence_manager()->OnNativeWorkPending(
+ TaskQueue::QueuePriority::kHighPriority);
+ queue->task_runner()->PostTask(FROM_HERE, task.Get());
+ CheckPostedTaskRan(false);
+
+ // Once the native work completes, the posted task is free to execute.
+ native_work.reset();
+ CheckPostedTaskRan(true);
+
+ // Lower priority native work doesn't preempt posted tasks.
+ native_work = sequence_manager()->OnNativeWorkPending(
+ TaskQueue::QueuePriority::kLowPriority);
+ queue->task_runner()->PostTask(FROM_HERE, task.Get());
+ CheckPostedTaskRan(true);
+
+ // Equal priority native work doesn't preempt posted tasks.
+ native_work = sequence_manager()->OnNativeWorkPending(
+ TaskQueue::QueuePriority::kNormalPriority);
+ queue->task_runner()->PostTask(FROM_HERE, task.Get());
+ CheckPostedTaskRan(true);
+
+ // When there are multiple priorities of native work, only the highest
+ // priority matters.
+ native_work = sequence_manager()->OnNativeWorkPending(
+ TaskQueue::QueuePriority::kNormalPriority);
+ auto native_work_high = sequence_manager()->OnNativeWorkPending(
+ TaskQueue::QueuePriority::kHighPriority);
+ auto native_work_low = sequence_manager()->OnNativeWorkPending(
+ TaskQueue::QueuePriority::kLowPriority);
+ queue->task_runner()->PostTask(FROM_HERE, task.Get());
+ CheckPostedTaskRan(false);
+ native_work.reset();
+ CheckPostedTaskRan(false);
+ native_work_high.reset();
+ CheckPostedTaskRan(true);
+}
+
+namespace {
+
+EnqueueOrder RunTaskAndCaptureEnqueueOrder(scoped_refptr<TestTaskQueue> queue) {
+ EnqueueOrder enqueue_order;
+ base::RunLoop run_loop;
+ queue->GetTaskQueueImpl()->SetOnTaskStartedHandler(base::BindLambdaForTesting(
+ [&](const Task& task, const TaskQueue::TaskTiming&) {
+ EXPECT_FALSE(enqueue_order);
+ enqueue_order = task.enqueue_order();
+ run_loop.Quit();
+ }));
+ run_loop.Run();
+ queue->GetTaskQueueImpl()->SetOnTaskStartedHandler({});
+ EXPECT_TRUE(enqueue_order);
+ return enqueue_order;
+}
+
+} // namespace
+
+// Post a task. Install a fence at the beginning of time and remove it. The
+// task's EnqueueOrder should be less than GetLastUnblockEnqueueOrder().
+TEST_P(SequenceManagerTest,
+ GetLastUnblockEnqueueOrder_PostInsertFenceBeginningOfTime) {
+ auto queue = CreateTaskQueue();
+ queue->task_runner()->PostTask(FROM_HERE, DoNothing());
+ queue->InsertFence(TaskQueue::InsertFencePosition::kBeginningOfTime);
+ queue->RemoveFence();
+ auto enqueue_order = RunTaskAndCaptureEnqueueOrder(queue);
+ EXPECT_LT(enqueue_order, queue->GetLastUnblockEnqueueOrder());
+}
+
+// Post a 1st task. Install a now fence. Post a 2nd task. Run the first task.
+// Remove the fence. The 2nd task's EnqueueOrder should be less than
+// GetLastUnblockEnqueueOrder().
+TEST_P(SequenceManagerTest, GetLastUnblockEnqueueOrder_PostInsertNowFencePost) {
+ auto queue = CreateTaskQueue();
+ queue->task_runner()->PostTask(FROM_HERE, DoNothing());
+ queue->InsertFence(TaskQueue::InsertFencePosition::kNow);
+ queue->task_runner()->PostTask(FROM_HERE, DoNothing());
+ RunTaskAndCaptureEnqueueOrder(queue);
+ EXPECT_FALSE(queue->GetLastUnblockEnqueueOrder());
+ queue->RemoveFence();
+ auto enqueue_order = RunTaskAndCaptureEnqueueOrder(queue);
+ EXPECT_LT(enqueue_order, queue->GetLastUnblockEnqueueOrder());
+}
+
+// Post a 1st task. Install a now fence. Post a 2nd task. Remove the fence.
+// GetLastUnblockEnqueueOrder() should indicate that the queue was never
+// blocked (front task could always run).
+TEST_P(SequenceManagerTest,
+ GetLastUnblockEnqueueOrder_PostInsertNowFencePost2) {
+ auto queue = CreateTaskQueue();
+ queue->task_runner()->PostTask(FROM_HERE, DoNothing());
+ queue->InsertFence(TaskQueue::InsertFencePosition::kNow);
+ queue->task_runner()->PostTask(FROM_HERE, DoNothing());
+ queue->RemoveFence();
+ RunTaskAndCaptureEnqueueOrder(queue);
+ EXPECT_FALSE(queue->GetLastUnblockEnqueueOrder());
+ RunTaskAndCaptureEnqueueOrder(queue);
+ EXPECT_FALSE(queue->GetLastUnblockEnqueueOrder());
+}
+
+// Post a 1st task. Install a now fence. Post a 2nd task. Install a now fence
+// (moves the previous fence). GetLastUnblockEnqueueOrder() should indicate
+// that the queue was never blocked (front task could always run).
+TEST_P(SequenceManagerTest,
+ GetLastUnblockEnqueueOrder_PostInsertNowFencePostInsertNowFence) {
+ auto queue = CreateTaskQueue();
+ queue->task_runner()->PostTask(FROM_HERE, DoNothing());
+ queue->InsertFence(TaskQueue::InsertFencePosition::kNow);
+ queue->task_runner()->PostTask(FROM_HERE, DoNothing());
+ queue->InsertFence(TaskQueue::InsertFencePosition::kNow);
+ RunTaskAndCaptureEnqueueOrder(queue);
+ EXPECT_FALSE(queue->GetLastUnblockEnqueueOrder());
+ RunTaskAndCaptureEnqueueOrder(queue);
+ EXPECT_FALSE(queue->GetLastUnblockEnqueueOrder());
+}
+
+// Post a 1st task. Install a delayed fence. Post a 2nd task that will run
+// after the fence. Run the first task. Remove the fence. The 2nd task's
+// EnqueueOrder should be less than GetLastUnblockEnqueueOrder().
+TEST_P(SequenceManagerTest,
+ GetLastUnblockEnqueueOrder_PostInsertDelayedFencePostAfterFence) {
+ const TimeTicks start_time = mock_tick_clock()->NowTicks();
+ auto queue =
+ CreateTaskQueue(TaskQueue::Spec("test").SetDelayedFencesAllowed(true));
+ queue->task_runner()->PostTask(FROM_HERE, DoNothing());
+ queue->InsertFenceAt(start_time + kDelay);
+ queue->task_runner()->PostDelayedTask(FROM_HERE, DoNothing(), 2 * kDelay);
+ RunTaskAndCaptureEnqueueOrder(queue);
+ EXPECT_FALSE(queue->GetLastUnblockEnqueueOrder());
+ FastForwardBy(2 * kDelay);
+ queue->RemoveFence();
+ auto enqueue_order = RunTaskAndCaptureEnqueueOrder(queue);
+ EXPECT_LT(enqueue_order, queue->GetLastUnblockEnqueueOrder());
+}
+
+// Post a 1st task. Install a delayed fence. Post a 2nd task that will run
+// before the fence. GetLastUnblockEnqueueOrder() should indicate that the
+// queue was never blocked (front task could always run).
+TEST_P(SequenceManagerTest,
+ GetLastUnblockEnqueueOrder_PostInsertDelayedFencePostBeforeFence) {
+ const TimeTicks start_time = mock_tick_clock()->NowTicks();
+ auto queue =
+ CreateTaskQueue(TaskQueue::Spec("test").SetDelayedFencesAllowed(true));
+ queue->task_runner()->PostTask(FROM_HERE, DoNothing());
+ queue->InsertFenceAt(start_time + 2 * kDelay);
+ queue->task_runner()->PostDelayedTask(FROM_HERE, DoNothing(), kDelay);
+ RunTaskAndCaptureEnqueueOrder(queue);
+ EXPECT_FALSE(queue->GetLastUnblockEnqueueOrder());
+ FastForwardBy(3 * kDelay);
+ EXPECT_FALSE(queue->GetLastUnblockEnqueueOrder());
+ queue->RemoveFence();
+}
+
+// Post a 1st task. Disable the queue and re-enable it. Post a 2nd task. The 1st
+// task's EnqueueOrder should be less than GetLastUnblockEnqueueOrder().
+TEST_P(SequenceManagerTest, GetLastUnblockEnqueueOrder_PostDisablePostEnable) {
+ auto queue = CreateTaskQueue();
+ queue->task_runner()->PostTask(FROM_HERE, DoNothing());
+ queue->GetTaskQueueImpl()->SetQueueEnabled(false);
+ queue->GetTaskQueueImpl()->SetQueueEnabled(true);
+ queue->task_runner()->PostTask(FROM_HERE, DoNothing());
+ auto first_enqueue_order = RunTaskAndCaptureEnqueueOrder(queue);
+ EXPECT_LT(first_enqueue_order, queue->GetLastUnblockEnqueueOrder());
+ auto second_enqueue_order = RunTaskAndCaptureEnqueueOrder(queue);
+ EXPECT_GT(second_enqueue_order, queue->GetLastUnblockEnqueueOrder());
+}
+
+// Disable the queue. Post a 1st task. Re-enable the queue. Post a 2nd task.
+// The 1st task's EnqueueOrder should be less than
+// GetLastUnblockEnqueueOrder().
+TEST_P(SequenceManagerTest, GetLastUnblockEnqueueOrder_DisablePostEnablePost) {
+ auto queue = CreateTaskQueue();
+ queue->GetTaskQueueImpl()->SetQueueEnabled(false);
+ queue->task_runner()->PostTask(FROM_HERE, DoNothing());
+ queue->GetTaskQueueImpl()->SetQueueEnabled(true);
+ queue->task_runner()->PostTask(FROM_HERE, DoNothing());
+ auto first_enqueue_order = RunTaskAndCaptureEnqueueOrder(queue);
+ EXPECT_LT(first_enqueue_order, queue->GetLastUnblockEnqueueOrder());
+ auto second_enqueue_order = RunTaskAndCaptureEnqueueOrder(queue);
+ EXPECT_GT(second_enqueue_order, queue->GetLastUnblockEnqueueOrder());
+}
+
+TEST_P(SequenceManagerTest, OnTaskReady) {
+ auto queue = CreateTaskQueue();
+ int task_ready_count = 0;
+
+ queue->GetTaskQueueImpl()->SetOnTaskReadyHandler(
+ BindLambdaForTesting([&](const Task&, LazyNow*) { ++task_ready_count; }));
+
+ EXPECT_EQ(0, task_ready_count);
+ queue->task_runner()->PostTask(FROM_HERE, DoNothing());
+ EXPECT_EQ(1, task_ready_count);
+ queue->task_runner()->PostDelayedTask(FROM_HERE, DoNothing(),
+ base::TimeDelta::FromHours(1));
+ EXPECT_EQ(1, task_ready_count);
+ FastForwardBy(base::TimeDelta::FromHours(1));
+ EXPECT_EQ(2, task_ready_count);
+}
+
} // namespace sequence_manager_impl_unittest
} // namespace internal
} // namespace sequence_manager
diff --git a/chromium/base/task/sequence_manager/sequence_manager_perftest.cc b/chromium/base/task/sequence_manager/sequence_manager_perftest.cc
index 9c9cba708e5..5673405ff77 100644
--- a/chromium/base/task/sequence_manager/sequence_manager_perftest.cc
+++ b/chromium/base/task/sequence_manager/sequence_manager_perftest.cc
@@ -262,8 +262,8 @@ class SingleThreadInThreadPoolPerfTestDelegate : public PerfTestDelegate {
bool MultipleQueuesSupported() const override { return false; }
scoped_refptr<TaskRunner> CreateTaskRunner() override {
- return CreateSingleThreadTaskRunnerWithTraits(
- {TaskPriority::USER_BLOCKING});
+ return CreateSingleThreadTaskRunner(
+ {ThreadPool(), TaskPriority::USER_BLOCKING});
}
void WaitUntilDone() override {
diff --git a/chromium/base/task/sequence_manager/sequenced_task_source.h b/chromium/base/task/sequence_manager/sequenced_task_source.h
index 271a6cc7f9e..b1153fb32e8 100644
--- a/chromium/base/task/sequence_manager/sequenced_task_source.h
+++ b/chromium/base/task/sequence_manager/sequenced_task_source.h
@@ -8,6 +8,7 @@
#include "base/optional.h"
#include "base/pending_task.h"
#include "base/task/sequence_manager/lazy_now.h"
+#include "base/task/sequence_manager/tasks.h"
namespace base {
namespace sequence_manager {
@@ -21,7 +22,7 @@ class SequencedTaskSource {
// Returns the next task to run from this source or nullopt if
// there're no more tasks ready to run. If a task is returned,
// DidRunTask() must be invoked before the next call to TakeTask().
- virtual Optional<PendingTask> TakeTask() = 0;
+ virtual Optional<Task> TakeTask() = 0;
// Notifies this source that the task previously obtained
// from TakeTask() has been completed.
diff --git a/chromium/base/task/sequence_manager/task_queue.cc b/chromium/base/task/sequence_manager/task_queue.cc
index dfae48957a8..3945d595f2a 100644
--- a/chromium/base/task/sequence_manager/task_queue.cc
+++ b/chromium/base/task/sequence_manager/task_queue.cc
@@ -12,6 +12,7 @@
#include "base/task/sequence_manager/sequence_manager_impl.h"
#include "base/task/sequence_manager/task_queue_impl.h"
#include "base/threading/thread_checker.h"
+#include "base/threading/thread_checker_impl.h"
#include "base/time/time.h"
namespace base {
@@ -35,11 +36,15 @@ class NullTaskRunner final : public SingleThreadTaskRunner {
return false;
}
- bool RunsTasksInCurrentSequence() const override { return false; }
+ bool RunsTasksInCurrentSequence() const override {
+ return thread_checker_.CalledOnValidThread();
+ }
private:
// Ref-counted
~NullTaskRunner() override = default;
+
+ ThreadCheckerImpl thread_checker_;
};
// TODO(kraynov): Move NullTaskRunner from //base/test to //base.
@@ -136,7 +141,7 @@ void TaskQueue::ShutdownTaskQueueGracefully() {
// If we've not been unregistered then this must occur on the main thread.
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
- impl_->SetOnNextWakeUpChangedCallback(RepeatingCallback<void(TimeTicks)>());
+ impl_->SetObserver(nullptr);
impl_->sequence_manager()->ShutdownTaskQueueGracefully(TakeTaskQueueImpl());
}
@@ -315,6 +320,13 @@ bool TaskQueue::BlockedByFence() const {
return impl_->BlockedByFence();
}
+EnqueueOrder TaskQueue::GetLastUnblockEnqueueOrder() const {
+ DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
+ if (!impl_)
+ return EnqueueOrder();
+ return impl_->GetLastUnblockEnqueueOrder();
+}
+
const char* TaskQueue::GetName() const {
return name_;
}
@@ -323,15 +335,14 @@ void TaskQueue::SetObserver(Observer* observer) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return;
- if (observer) {
- // Observer is guaranteed to outlive TaskQueue and TaskQueueImpl lifecycle
- // is controlled by |this|.
- impl_->SetOnNextWakeUpChangedCallback(
- BindRepeating(&TaskQueue::Observer::OnQueueNextWakeUpChanged,
- Unretained(observer), Unretained(this)));
- } else {
- impl_->SetOnNextWakeUpChangedCallback(RepeatingCallback<void(TimeTicks)>());
- }
+
+ // Observer is guaranteed to outlive TaskQueue and TaskQueueImpl lifecycle is
+ // controlled by |this|.
+ impl_->SetObserver(observer);
+}
+
+void TaskQueue::SetShouldReportPostedTasksWhenDisabled(bool should_report) {
+ impl_->SetShouldReportPostedTasksWhenDisabled(should_report);
}
bool TaskQueue::IsOnMainThread() const {
diff --git a/chromium/base/task/sequence_manager/task_queue.h b/chromium/base/task/sequence_manager/task_queue.h
index 673503b78a1..a62613ebff8 100644
--- a/chromium/base/task/sequence_manager/task_queue.h
+++ b/chromium/base/task/sequence_manager/task_queue.h
@@ -49,6 +49,10 @@ class BASE_EXPORT TaskQueue : public RefCountedThreadSafe<TaskQueue> {
public:
virtual ~Observer() = default;
+ // Notify observer that a task has been posted on the TaskQueue. Can be
+ // called on any thread.
+ virtual void OnPostTask(Location from_here, TimeDelta delay) = 0;
+
// Notify observer that the time at which this queue wants to run
// the next task has changed. |next_wakeup| can be in the past
// (e.g. TimeTicks() can be used to notify about immediate work).
@@ -58,8 +62,7 @@ class BASE_EXPORT TaskQueue : public RefCountedThreadSafe<TaskQueue> {
//
// TODO(altimin): Make it Optional<TimeTicks> to tell
// observer about cancellations.
- virtual void OnQueueNextWakeUpChanged(TaskQueue* queue,
- TimeTicks next_wake_up) = 0;
+ virtual void OnQueueNextWakeUpChanged(TimeTicks next_wake_up) = 0;
};
// Shuts down the queue. All tasks currently queued will be discarded.
@@ -320,8 +323,20 @@ class BASE_EXPORT TaskQueue : public RefCountedThreadSafe<TaskQueue> {
// Returns true if the queue has a fence which is blocking execution of tasks.
bool BlockedByFence() const;
+ // Returns an EnqueueOrder generated at the last transition to unblocked. A
+ // queue is unblocked when it is enabled and no fence prevents the front task
+ // from running. If the EnqueueOrder of a task is greater than this when it
+  // starts running, it means that it was never blocked.
+ EnqueueOrder GetLastUnblockEnqueueOrder() const;
+
void SetObserver(Observer* observer);
+ // Controls whether or not the queue will emit traces events when tasks are
+ // posted to it while disabled. This only applies for the current or next
+ // period during which the queue is disabled. When the queue is re-enabled
+ // this will revert back to the default value of false.
+ void SetShouldReportPostedTasksWhenDisabled(bool should_report);
+
// Create a task runner for this TaskQueue which will annotate all
// posted tasks with the given task type.
// May be called on any thread.
diff --git a/chromium/base/task/sequence_manager/task_queue_impl.cc b/chromium/base/task/sequence_manager/task_queue_impl.cc
index 50b18242870..b9a93a4ffe5 100644
--- a/chromium/base/task/sequence_manager/task_queue_impl.cc
+++ b/chromium/base/task/sequence_manager/task_queue_impl.cc
@@ -28,6 +28,8 @@ const char* TaskQueue::PriorityToString(TaskQueue::QueuePriority priority) {
return "control";
case kHighestPriority:
return "highest";
+ case kVeryHighPriority:
+ return "very_high";
case kHighPriority:
return "high";
case kNormalPriority:
@@ -134,6 +136,9 @@ TaskQueueImpl::AnyThread::AnyThread(TimeDomain* time_domain)
TaskQueueImpl::AnyThread::~AnyThread() = default;
+TaskQueueImpl::AnyThread::TracingOnly::TracingOnly() = default;
+TaskQueueImpl::AnyThread::TracingOnly::~TracingOnly() = default;
+
TaskQueueImpl::MainThreadOnly::MainThreadOnly(TaskQueueImpl* task_queue,
TimeDomain* time_domain)
: time_domain(time_domain),
@@ -141,10 +146,7 @@ TaskQueueImpl::MainThreadOnly::MainThreadOnly(TaskQueueImpl* task_queue,
new WorkQueue(task_queue, "delayed", WorkQueue::QueueType::kDelayed)),
immediate_work_queue(new WorkQueue(task_queue,
"immediate",
- WorkQueue::QueueType::kImmediate)),
- is_enabled(true),
- blame_context(nullptr),
- is_enabled_for_test(true) {}
+ WorkQueue::QueueType::kImmediate)) {}
TaskQueueImpl::MainThreadOnly::~MainThreadOnly() = default;
@@ -169,6 +171,7 @@ void TaskQueueImpl::UnregisterTaskQueue() {
any_thread_.unregistered = true;
any_thread_.time_domain = nullptr;
immediate_incoming_queue.swap(any_thread_.immediate_incoming_queue);
+ any_thread_.task_queue_observer = nullptr;
}
if (main_thread_only().time_domain)
@@ -176,8 +179,7 @@ void TaskQueueImpl::UnregisterTaskQueue() {
main_thread_only().on_task_completed_handler = OnTaskCompletedHandler();
main_thread_only().time_domain = nullptr;
- main_thread_only().on_next_wake_up_changed_callback =
- OnNextWakeUpChangedCallback();
+ main_thread_only().task_queue_observer = nullptr;
empty_queues_to_reload_handle_.ReleaseAtomicFlag();
// It is possible for a task to hold a scoped_refptr to this, which
@@ -261,13 +263,12 @@ void TaskQueueImpl::PostImmediateTaskImpl(PostedTask task,
// TODO(alexclarke): Maybe add a main thread only immediate_incoming_queue
// See https://crbug.com/901800
base::internal::CheckedAutoLock lock(any_thread_lock_);
- TimeTicks now;
+ LazyNow lazy_now = any_thread_.time_domain->CreateLazyNow();
+ if (any_thread_.task_queue_observer)
+ any_thread_.task_queue_observer->OnPostTask(task.location, TimeDelta());
bool add_queue_time_to_tasks = sequence_manager_->GetAddQueueTimeToTasks();
- if (delayed_fence_allowed_ || add_queue_time_to_tasks) {
- now = any_thread_.time_domain->Now();
- if (add_queue_time_to_tasks)
- task.queue_time = now;
- }
+ if (add_queue_time_to_tasks)
+ task.queue_time = lazy_now.Now();
// The sequence number must be incremented atomically with pushing onto the
// incoming queue. Otherwise if there are several threads posting task we
@@ -276,8 +277,18 @@ void TaskQueueImpl::PostImmediateTaskImpl(PostedTask task,
EnqueueOrder sequence_number = sequence_manager_->GetNextSequenceNumber();
bool was_immediate_incoming_queue_empty =
any_thread_.immediate_incoming_queue.empty();
- any_thread_.immediate_incoming_queue.push_back(
- Task(std::move(task), now, sequence_number, sequence_number));
+ base::TimeTicks desired_run_time;
+ // The desired run time is only required when delayed fence is allowed.
+ // Avoid evaluating it when not required.
+ if (delayed_fence_allowed_)
+ desired_run_time = lazy_now.Now();
+ any_thread_.immediate_incoming_queue.push_back(Task(
+ std::move(task), desired_run_time, sequence_number, sequence_number));
+
+ if (any_thread_.on_task_ready_handler) {
+ any_thread_.on_task_ready_handler.Run(
+ any_thread_.immediate_incoming_queue.back(), &lazy_now);
+ }
#if DCHECK_IS_ON()
any_thread_.immediate_incoming_queue.back().cross_thread_ =
@@ -286,6 +297,8 @@ void TaskQueueImpl::PostImmediateTaskImpl(PostedTask task,
sequence_manager_->WillQueueTask(
&any_thread_.immediate_incoming_queue.back(), name_);
+ MaybeReportIpcTaskQueuedFromAnyThreadLocked(
+ &any_thread_.immediate_incoming_queue.back(), name_);
// If this queue was completely empty, then the SequenceManager needs to be
// informed so it can reload the work queue and add us to the
@@ -339,9 +352,12 @@ void TaskQueueImpl::PostDelayedTaskImpl(PostedTask task,
TimeTicks time_domain_now = main_thread_only().time_domain->Now();
TimeTicks time_domain_delayed_run_time = time_domain_now + task.delay;
- if (sequence_manager_->GetAddQueueTimeToTasks()) {
- task.queue_time = time_domain_now;
+ if (main_thread_only().task_queue_observer) {
+ main_thread_only().task_queue_observer->OnPostTask(task.location,
+ task.delay);
}
+ if (sequence_manager_->GetAddQueueTimeToTasks())
+ task.queue_time = time_domain_now;
PushOntoDelayedIncomingQueueFromMainThread(
Task(std::move(task), time_domain_delayed_run_time, sequence_number,
@@ -358,11 +374,12 @@ void TaskQueueImpl::PostDelayedTaskImpl(PostedTask task,
{
base::internal::CheckedAutoLock lock(any_thread_lock_);
time_domain_now = any_thread_.time_domain->Now();
+ if (any_thread_.task_queue_observer)
+ any_thread_.task_queue_observer->OnPostTask(task.location, task.delay);
}
TimeTicks time_domain_delayed_run_time = time_domain_now + task.delay;
- if (sequence_manager_->GetAddQueueTimeToTasks()) {
+ if (sequence_manager_->GetAddQueueTimeToTasks())
task.queue_time = time_domain_now;
- }
PushOntoDelayedIncomingQueue(
Task(std::move(task), time_domain_delayed_run_time, sequence_number,
@@ -378,8 +395,10 @@ void TaskQueueImpl::PushOntoDelayedIncomingQueueFromMainThread(
pending_task.cross_thread_ = false;
#endif
- if (notify_task_annotator)
+ if (notify_task_annotator) {
sequence_manager_->WillQueueTask(&pending_task, name_);
+ MaybeReportIpcTaskQueuedFromMainThread(&pending_task, name_);
+ }
main_thread_only().delayed_incoming_queue.push(std::move(pending_task));
LazyNow lazy_now(now);
@@ -390,6 +409,7 @@ void TaskQueueImpl::PushOntoDelayedIncomingQueueFromMainThread(
void TaskQueueImpl::PushOntoDelayedIncomingQueue(Task pending_task) {
sequence_manager_->WillQueueTask(&pending_task, name_);
+ MaybeReportIpcTaskQueuedFromAnyThreadUnlocked(&pending_task, name_);
#if DCHECK_IS_ON()
pending_task.cross_thread_ = true;
@@ -431,9 +451,9 @@ void TaskQueueImpl::ReloadEmptyImmediateWorkQueue() {
DCHECK(main_thread_only().immediate_work_queue->Empty());
main_thread_only().immediate_work_queue->TakeImmediateIncomingQueueTasks();
- if (!main_thread_only().on_next_wake_up_changed_callback.is_null() &&
- IsQueueEnabled()) {
- main_thread_only().on_next_wake_up_changed_callback.Run(TimeTicks());
+ if (main_thread_only().task_queue_observer && IsQueueEnabled()) {
+ main_thread_only().task_queue_observer->OnQueueNextWakeUpChanged(
+ TimeTicks());
}
}
@@ -551,6 +571,10 @@ void TaskQueueImpl::MoveReadyDelayedTasksToWorkQueue(LazyNow* lazy_now) {
ActivateDelayedFenceIfNeeded(task->delayed_run_time);
DCHECK(!task->enqueue_order_set());
task->set_enqueue_order(sequence_manager_->GetNextSequenceNumber());
+
+ if (main_thread_only().on_task_ready_handler)
+ main_thread_only().on_task_ready_handler.Run(*task, lazy_now);
+
delayed_work_queue_task_pusher.Push(task);
main_thread_only().delayed_incoming_queue.pop();
}
@@ -744,28 +768,32 @@ void TaskQueueImpl::InsertFence(TaskQueue::InsertFencePosition position) {
// Tasks posted after this point will have a strictly higher enqueue order
// and will be blocked from running.
main_thread_only().current_fence = current_fence;
- bool task_unblocked =
+ bool front_task_unblocked =
main_thread_only().immediate_work_queue->InsertFence(current_fence);
- task_unblocked |=
+ front_task_unblocked |=
main_thread_only().delayed_work_queue->InsertFence(current_fence);
{
base::internal::CheckedAutoLock lock(any_thread_lock_);
- if (!task_unblocked && previous_fence && previous_fence < current_fence) {
+ if (!front_task_unblocked && previous_fence &&
+ previous_fence < current_fence) {
if (!any_thread_.immediate_incoming_queue.empty() &&
any_thread_.immediate_incoming_queue.front().enqueue_order() >
previous_fence &&
any_thread_.immediate_incoming_queue.front().enqueue_order() <
current_fence) {
- task_unblocked = true;
+ front_task_unblocked = true;
}
}
UpdateCrossThreadQueueStateLocked();
}
- if (IsQueueEnabled() && task_unblocked)
+ if (IsQueueEnabled() && front_task_unblocked) {
+ main_thread_only().last_unblocked_enqueue_order =
+ sequence_manager_->GetNextSequenceNumber();
sequence_manager_->ScheduleWork();
+ }
}
void TaskQueueImpl::InsertFenceAt(TimeTicks time) {
@@ -783,24 +811,28 @@ void TaskQueueImpl::RemoveFence() {
main_thread_only().current_fence = EnqueueOrder::none();
main_thread_only().delayed_fence = nullopt;
- bool task_unblocked = main_thread_only().immediate_work_queue->RemoveFence();
- task_unblocked |= main_thread_only().delayed_work_queue->RemoveFence();
+ bool front_task_unblocked =
+ main_thread_only().immediate_work_queue->RemoveFence();
+ front_task_unblocked |= main_thread_only().delayed_work_queue->RemoveFence();
{
base::internal::CheckedAutoLock lock(any_thread_lock_);
- if (!task_unblocked && previous_fence) {
+ if (!front_task_unblocked && previous_fence) {
if (!any_thread_.immediate_incoming_queue.empty() &&
any_thread_.immediate_incoming_queue.front().enqueue_order() >
previous_fence) {
- task_unblocked = true;
+ front_task_unblocked = true;
}
}
UpdateCrossThreadQueueStateLocked();
}
- if (IsQueueEnabled() && task_unblocked)
+ if (IsQueueEnabled() && front_task_unblocked) {
+ main_thread_only().last_unblocked_enqueue_order =
+ sequence_manager_->GetNextSequenceNumber();
sequence_manager_->ScheduleWork();
+ }
}
bool TaskQueueImpl::BlockedByFence() const {
@@ -829,6 +861,10 @@ bool TaskQueueImpl::HasActiveFence() {
return !!main_thread_only().current_fence;
}
+EnqueueOrder TaskQueueImpl::GetLastUnblockEnqueueOrder() const {
+ return main_thread_only().last_unblocked_enqueue_order;
+}
+
bool TaskQueueImpl::CouldTaskRun(EnqueueOrder enqueue_order) const {
if (!IsQueueEnabled())
return false;
@@ -872,51 +908,96 @@ bool TaskQueueImpl::IsQueueEnabled() const {
}
void TaskQueueImpl::SetQueueEnabled(bool enabled) {
- if (main_thread_only().is_enabled != enabled) {
- main_thread_only().is_enabled = enabled;
- EnableOrDisableWithSelector(enabled);
- }
-}
-
-void TaskQueueImpl::EnableOrDisableWithSelector(bool enable) {
- // |sequence_manager_| can be null in tests.
- if (!sequence_manager_)
+ if (main_thread_only().is_enabled == enabled)
return;
+ // Update the |main_thread_only_| struct.
+ main_thread_only().is_enabled = enabled;
+ main_thread_only().disabled_time = nullopt;
+ if (!enabled) {
+ bool tracing_enabled = false;
+ TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("lifecycles"),
+ &tracing_enabled);
+ main_thread_only().disabled_time = main_thread_only().time_domain->Now();
+ } else {
+ // Override reporting if the queue is becoming enabled again.
+ main_thread_only().should_report_posted_tasks_when_disabled = false;
+ }
+
LazyNow lazy_now = main_thread_only().time_domain->CreateLazyNow();
UpdateDelayedWakeUp(&lazy_now);
- bool has_pending_immediate_work;
+ bool has_pending_immediate_work = false;
{
base::internal::CheckedAutoLock lock(any_thread_lock_);
UpdateCrossThreadQueueStateLocked();
has_pending_immediate_work = HasPendingImmediateWorkLocked();
+
+ // Copy over the task-reporting related state.
+ any_thread_.tracing_only.is_enabled = enabled;
+ any_thread_.tracing_only.disabled_time = main_thread_only().disabled_time;
+ any_thread_.tracing_only.should_report_posted_tasks_when_disabled =
+ main_thread_only().should_report_posted_tasks_when_disabled;
}
- if (enable) {
- if (has_pending_immediate_work &&
- !main_thread_only().on_next_wake_up_changed_callback.is_null()) {
+ // |sequence_manager_| can be null in tests.
+ if (!sequence_manager_)
+ return;
+
+ // Finally, enable or disable the queue with the selector.
+ if (enabled) {
+ if (has_pending_immediate_work && main_thread_only().task_queue_observer) {
// Delayed work notification will be issued via time domain.
- main_thread_only().on_next_wake_up_changed_callback.Run(TimeTicks());
+ main_thread_only().task_queue_observer->OnQueueNextWakeUpChanged(
+ TimeTicks());
}
// Note the selector calls SequenceManager::OnTaskQueueEnabled which posts
// a DoWork if needed.
sequence_manager_->main_thread_only().selector.EnableQueue(this);
+
+ if (!BlockedByFence()) {
+ main_thread_only().last_unblocked_enqueue_order =
+ sequence_manager_->GetNextSequenceNumber();
+ }
} else {
sequence_manager_->main_thread_only().selector.DisableQueue(this);
}
}
+void TaskQueueImpl::SetShouldReportPostedTasksWhenDisabled(bool should_report) {
+ if (main_thread_only().should_report_posted_tasks_when_disabled ==
+ should_report)
+ return;
+
+ // Only observe transitions turning the reporting on if tracing is enabled.
+ if (should_report) {
+ bool tracing_enabled = false;
+ TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("lifecycles"),
+ &tracing_enabled);
+ if (!tracing_enabled)
+ return;
+ }
+
+ main_thread_only().should_report_posted_tasks_when_disabled = should_report;
+
+ // Mirror the state to the AnyThread struct as well.
+ {
+ base::internal::CheckedAutoLock lock(any_thread_lock_);
+ any_thread_.tracing_only.should_report_posted_tasks_when_disabled =
+ should_report;
+ }
+}
+
void TaskQueueImpl::UpdateCrossThreadQueueStateLocked() {
any_thread_.immediate_work_queue_empty =
main_thread_only().immediate_work_queue->Empty();
- if (main_thread_only().on_next_wake_up_changed_callback) {
- // If there's a callback we need a DoWork for the callback to be issued by
- // ReloadEmptyImmediateWorkQueue. The callback isn't
- // sent for disabled queues.
+ if (main_thread_only().task_queue_observer) {
+ // If there's an observer we need a DoWork for the callback to be issued by
+ // ReloadEmptyImmediateWorkQueue. The callback isn't sent for disabled
+ // queues.
any_thread_.post_immediate_task_should_schedule_work = IsQueueEnabled();
} else {
// Otherwise we need PostImmediateTaskImpl to ScheduleWork unless the queue
@@ -985,16 +1066,17 @@ void TaskQueueImpl::RequeueDeferredNonNestableTask(
}
}
-void TaskQueueImpl::SetOnNextWakeUpChangedCallback(
- TaskQueueImpl::OnNextWakeUpChangedCallback callback) {
-#if DCHECK_IS_ON()
- if (callback) {
- DCHECK(main_thread_only().on_next_wake_up_changed_callback.is_null())
+void TaskQueueImpl::SetObserver(TaskQueue::Observer* observer) {
+ if (observer) {
+ DCHECK(!main_thread_only().task_queue_observer)
<< "Can't assign two different observers to "
- "blink::scheduler::TaskQueue";
+ "base::sequence_manager:TaskQueue";
}
-#endif
- main_thread_only().on_next_wake_up_changed_callback = callback;
+
+ main_thread_only().task_queue_observer = observer;
+
+ base::internal::CheckedAutoLock lock(any_thread_lock_);
+ any_thread_.task_queue_observer = observer;
}
void TaskQueueImpl::UpdateDelayedWakeUp(LazyNow* lazy_now) {
@@ -1007,10 +1089,10 @@ void TaskQueueImpl::UpdateDelayedWakeUpImpl(LazyNow* lazy_now,
return;
main_thread_only().scheduled_wake_up = wake_up;
- if (wake_up &&
- !main_thread_only().on_next_wake_up_changed_callback.is_null() &&
+ if (wake_up && main_thread_only().task_queue_observer &&
!HasPendingImmediateWork()) {
- main_thread_only().on_next_wake_up_changed_callback.Run(wake_up->time);
+ main_thread_only().task_queue_observer->OnQueueNextWakeUpChanged(
+ wake_up->time);
}
WakeUpResolution resolution = has_pending_high_resolution_tasks()
@@ -1044,6 +1126,16 @@ bool TaskQueueImpl::HasPendingImmediateWorkLocked() {
!any_thread_.immediate_incoming_queue.empty();
}
+void TaskQueueImpl::SetOnTaskReadyHandler(
+ TaskQueueImpl::OnTaskReadyHandler handler) {
+ DCHECK(should_notify_observers_ || handler.is_null());
+ main_thread_only().on_task_ready_handler = handler;
+
+ base::internal::CheckedAutoLock lock(any_thread_lock_);
+ DCHECK_NE(!!any_thread_.on_task_ready_handler, !!handler);
+ any_thread_.on_task_ready_handler = std::move(handler);
+}
+
void TaskQueueImpl::SetOnTaskStartedHandler(
TaskQueueImpl::OnTaskStartedHandler handler) {
DCHECK(should_notify_observers_ || handler.is_null());
@@ -1130,6 +1222,107 @@ bool TaskQueueImpl::HasTasks() const {
return false;
}
+void TaskQueueImpl::MaybeReportIpcTaskQueuedFromMainThread(
+ Task* pending_task,
+ const char* task_queue_name) {
+ if (!pending_task->ipc_hash)
+ return;
+
+ // It's possible that tracing was just enabled and no disabled time has been
+ // stored. In that case, skip emitting the event.
+ if (!main_thread_only().disabled_time)
+ return;
+
+ bool tracing_enabled = false;
+ TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("lifecycles"),
+ &tracing_enabled);
+ if (!tracing_enabled)
+ return;
+
+ if (main_thread_only().is_enabled ||
+ !main_thread_only().should_report_posted_tasks_when_disabled) {
+ return;
+ }
+
+ base::TimeDelta time_since_disabled =
+ main_thread_only().time_domain->Now() -
+ main_thread_only().disabled_time.value();
+
+ ReportIpcTaskQueued(pending_task, task_queue_name, time_since_disabled);
+}
+
+bool TaskQueueImpl::ShouldReportIpcTaskQueuedFromAnyThreadLocked(
+    base::TimeDelta* time_since_disabled) {
+  // It's possible that tracing was just enabled and no disabled time has been
+  // stored. In that case, skip emitting the event.
+  if (!any_thread_.tracing_only.disabled_time)
+    return false;
+
+  // Only report when the queue is disabled AND reporting was requested —
+  // mirrors the main-thread check in MaybeReportIpcTaskQueuedFromMainThread.
+  if (any_thread_.tracing_only.is_enabled ||
+      !any_thread_.tracing_only.should_report_posted_tasks_when_disabled) {
+    return false;
+  }
+
+  *time_since_disabled = any_thread_.time_domain->Now() -
+                         any_thread_.tracing_only.disabled_time.value();
+  return true;
+}
+
+void TaskQueueImpl::MaybeReportIpcTaskQueuedFromAnyThreadLocked(
+ Task* pending_task,
+ const char* task_queue_name) {
+ if (!pending_task->ipc_hash)
+ return;
+
+ bool tracing_enabled = false;
+ TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("lifecycles"),
+ &tracing_enabled);
+ if (!tracing_enabled)
+ return;
+
+ base::TimeDelta time_since_disabled;
+ if (ShouldReportIpcTaskQueuedFromAnyThreadLocked(&time_since_disabled))
+ ReportIpcTaskQueued(pending_task, task_queue_name, time_since_disabled);
+}
+
+void TaskQueueImpl::MaybeReportIpcTaskQueuedFromAnyThreadUnlocked(
+    Task* pending_task,
+    const char* task_queue_name) {
+  if (!pending_task->ipc_hash)
+    return;
+
+  bool tracing_enabled = false;
+  TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("lifecycles"),
+                                     &tracing_enabled);
+  if (!tracing_enabled)
+    return;
+
+  base::TimeDelta time_since_disabled;
+  bool should_report = false;
+  {
+    base::internal::CheckedAutoLock lock(any_thread_lock_);
+    should_report =
+        ShouldReportIpcTaskQueuedFromAnyThreadLocked(&time_since_disabled);
+  }
+  // Only emit the event outside the lock, and only if the locked check passed.
+  if (should_report)
+    ReportIpcTaskQueued(pending_task, task_queue_name, time_since_disabled);
+}
+
+void TaskQueueImpl::ReportIpcTaskQueued(
+ Task* pending_task,
+ const char* task_queue_name,
+ const base::TimeDelta& time_since_disabled) {
+ // Use a begin/end event pair so we can get 4 fields in the event.
+ TRACE_EVENT_BEGIN2(TRACE_DISABLED_BY_DEFAULT("lifecycles"),
+ "task_posted_to_disabled_queue", "task_queue_name",
+ task_queue_name, "time_since_disabled_ms",
+ time_since_disabled.InMilliseconds());
+ TRACE_EVENT_END2(TRACE_DISABLED_BY_DEFAULT("lifecycles"),
+ "task_posted_to_disabled_queue", "ipc_hash",
+ pending_task->ipc_hash, "location",
+ pending_task->posted_from.program_counter());
+}
+
TaskQueueImpl::DelayedIncomingQueue::DelayedIncomingQueue() = default;
TaskQueueImpl::DelayedIncomingQueue::~DelayedIncomingQueue() = default;
diff --git a/chromium/base/task/sequence_manager/task_queue_impl.h b/chromium/base/task/sequence_manager/task_queue_impl.h
index 24ee81057b2..2fce76d4aa3 100644
--- a/chromium/base/task/sequence_manager/task_queue_impl.h
+++ b/chromium/base/task/sequence_manager/task_queue_impl.h
@@ -89,6 +89,7 @@ class BASE_EXPORT TaskQueueImpl {
};
using OnNextWakeUpChangedCallback = RepeatingCallback<void(TimeTicks)>;
+ using OnTaskReadyHandler = RepeatingCallback<void(const Task&, LazyNow*)>;
using OnTaskStartedHandler =
RepeatingCallback<void(const Task&, const TaskQueue::TaskTiming&)>;
using OnTaskCompletedHandler =
@@ -102,6 +103,7 @@ class BASE_EXPORT TaskQueueImpl {
const char* GetName() const;
bool IsQueueEnabled() const;
void SetQueueEnabled(bool enabled);
+ void SetShouldReportPostedTasksWhenDisabled(bool should_report);
bool IsEmpty() const;
size_t GetNumberOfPendingTasks() const;
bool HasTaskToRunImmediately() const;
@@ -119,9 +121,10 @@ class BASE_EXPORT TaskQueueImpl {
void RemoveFence();
bool HasActiveFence();
bool BlockedByFence() const;
+ EnqueueOrder GetLastUnblockEnqueueOrder() const;
// Implementation of TaskQueue::SetObserver.
- void SetOnNextWakeUpChangedCallback(OnNextWakeUpChangedCallback callback);
+ void SetObserver(TaskQueue::Observer* observer);
void UnregisterTaskQueue();
@@ -193,6 +196,11 @@ class BASE_EXPORT TaskQueueImpl {
// addition MaybeShrinkQueue is called on all internal queues.
void ReclaimMemory(TimeTicks now);
+ // Registers a handler to invoke when a task posted to this TaskQueueImpl is
+ // ready. For a non-delayed task, this is when the task is posted. For a
+ // delayed task, this is when the delay expires.
+ void SetOnTaskReadyHandler(OnTaskReadyHandler handler);
+
// Allows wrapping TaskQueue to set a handler to subscribe for notifications
// about started and completed tasks.
void SetOnTaskStartedHandler(OnTaskStartedHandler handler);
@@ -333,25 +341,32 @@ class BASE_EXPORT TaskQueueImpl {
// See description inside struct AnyThread for details.
TimeDomain* time_domain;
- // Callback corresponding to TaskQueue::Observer::OnQueueNextChanged.
- OnNextWakeUpChangedCallback on_next_wake_up_changed_callback;
+ TaskQueue::Observer* task_queue_observer = nullptr;
std::unique_ptr<WorkQueue> delayed_work_queue;
std::unique_ptr<WorkQueue> immediate_work_queue;
DelayedIncomingQueue delayed_incoming_queue;
ObserverList<TaskObserver>::Unchecked task_observers;
base::internal::HeapHandle heap_handle;
- bool is_enabled;
- trace_event::BlameContext* blame_context; // Not owned.
+ bool is_enabled = true;
+ trace_event::BlameContext* blame_context = nullptr; // Not owned.
EnqueueOrder current_fence;
Optional<TimeTicks> delayed_fence;
+ EnqueueOrder last_unblocked_enqueue_order;
+ OnTaskReadyHandler on_task_ready_handler;
OnTaskStartedHandler on_task_started_handler;
OnTaskCompletedHandler on_task_completed_handler;
// Last reported wake up, used only in UpdateWakeUp to avoid
// excessive calls.
Optional<DelayedWakeUp> scheduled_wake_up;
// If false, queue will be disabled. Used only for tests.
- bool is_enabled_for_test;
+ bool is_enabled_for_test = true;
+ // The time at which the task queue was disabled, if it is currently
+ // disabled.
+ Optional<TimeTicks> disabled_time;
+ // Whether or not the task queue should emit tracing events for tasks
+ // posted to this queue when it is disabled.
+ bool should_report_posted_tasks_when_disabled = false;
};
void PostTask(PostedTask task);
@@ -395,8 +410,6 @@ class BASE_EXPORT TaskQueueImpl {
TimeTicks now,
trace_event::TracedValue* state);
- void EnableOrDisableWithSelector(bool enable);
-
// Schedules delayed work on time domain and calls the observer.
void UpdateDelayedWakeUp(LazyNow* lazy_now);
void UpdateDelayedWakeUpImpl(LazyNow* lazy_now,
@@ -412,6 +425,23 @@ class BASE_EXPORT TaskQueueImpl {
void MaybeLogPostTask(PostedTask* task);
void MaybeAdjustTaskDelay(PostedTask* task, CurrentThread current_thread);
+ // Reports the task if it was due to IPC and was posted to a disabled queue.
+ // This should be called after WillQueueTask has been called for the task.
+ void MaybeReportIpcTaskQueuedFromMainThread(Task* pending_task,
+ const char* task_queue_name);
+ bool ShouldReportIpcTaskQueuedFromAnyThreadLocked(
+ base::TimeDelta* time_since_disabled)
+ EXCLUSIVE_LOCKS_REQUIRED(any_thread_lock_);
+ void MaybeReportIpcTaskQueuedFromAnyThreadLocked(Task* pending_task,
+ const char* task_queue_name)
+ EXCLUSIVE_LOCKS_REQUIRED(any_thread_lock_);
+ void MaybeReportIpcTaskQueuedFromAnyThreadUnlocked(
+ Task* pending_task,
+ const char* task_queue_name);
+ void ReportIpcTaskQueued(Task* pending_task,
+ const char* task_queue_name,
+ const base::TimeDelta& time_since_disabled);
+
const char* name_;
SequenceManagerImpl* const sequence_manager_;
@@ -422,6 +452,16 @@ class BASE_EXPORT TaskQueueImpl {
mutable base::internal::CheckedLock any_thread_lock_;
struct AnyThread {
+ // Mirrored from MainThreadOnly. These are only used for tracing.
+ struct TracingOnly {
+ TracingOnly();
+ ~TracingOnly();
+
+ bool is_enabled = true;
+ Optional<TimeTicks> disabled_time;
+ bool should_report_posted_tasks_when_disabled = false;
+ };
+
explicit AnyThread(TimeDomain* time_domain);
~AnyThread();
@@ -430,6 +470,8 @@ class BASE_EXPORT TaskQueueImpl {
// locked before accessing from other threads.
TimeDomain* time_domain;
+ TaskQueue::Observer* task_queue_observer = nullptr;
+
TaskDeque immediate_incoming_queue;
// True if main_thread_only().immediate_work_queue is empty.
@@ -439,13 +481,17 @@ class BASE_EXPORT TaskQueueImpl {
bool unregistered = false;
+ OnTaskReadyHandler on_task_ready_handler;
+
#if DCHECK_IS_ON()
- // A cached of |immediate_work_queue->work_queue_set_index()| which is used
+ // A cache of |immediate_work_queue->work_queue_set_index()| which is used
// to index into
// SequenceManager::Settings::per_priority_cross_thread_task_delay to apply
// a priority specific delay for debugging purposes.
int queue_set_index = 0;
#endif
+
+ TracingOnly tracing_only;
};
AnyThread any_thread_ GUARDED_BY(any_thread_lock_);
diff --git a/chromium/base/task/sequence_manager/task_queue_selector.cc b/chromium/base/task/sequence_manager/task_queue_selector.cc
index d898f715e7a..5e3a14a8e80 100644
--- a/chromium/base/task/sequence_manager/task_queue_selector.cc
+++ b/chromium/base/task/sequence_manager/task_queue_selector.cc
@@ -26,6 +26,8 @@ TaskQueueSelector::TaskQueueSelector(
#if DCHECK_IS_ON()
random_task_selection_(settings.random_task_selection_seed != 0),
#endif
+ anti_starvation_logic_for_priorities_disabled_(
+ settings.anti_starvation_logic_for_priorities_disabled),
delayed_work_queue_sets_("delayed", this, settings),
immediate_work_queue_sets_("immediate", this, settings) {
}
@@ -128,6 +130,8 @@ int64_t TaskQueueSelector::GetSortKeyForPriority(
return std::numeric_limits<int64_t>::max();
default:
+ if (anti_starvation_logic_for_priorities_disabled_)
+ return per_priority_starvation_tolerance_[priority];
return selection_count_ + per_priority_starvation_tolerance_[priority];
}
}
@@ -219,9 +223,12 @@ void TaskQueueSelector::SetTaskQueueSelectorObserver(Observer* observer) {
task_queue_selector_observer_ = observer;
}
-bool TaskQueueSelector::AllEnabledWorkQueuesAreEmpty() const {
+Optional<TaskQueue::QueuePriority>
+TaskQueueSelector::GetHighestPendingPriority() const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
- return active_priorities_.empty();
+ if (active_priorities_.empty())
+ return nullopt;
+ return active_priorities_.min_id();
}
void TaskQueueSelector::SetImmediateStarvationCountForTest(
diff --git a/chromium/base/task/sequence_manager/task_queue_selector.h b/chromium/base/task/sequence_manager/task_queue_selector.h
index 2946d1cf475..2ae9b52ef05 100644
--- a/chromium/base/task/sequence_manager/task_queue_selector.h
+++ b/chromium/base/task/sequence_manager/task_queue_selector.h
@@ -68,9 +68,9 @@ class BASE_EXPORT TaskQueueSelector : public WorkQueueSets::Observer {
// on the main thread. If |observer| is null, then no callbacks will occur.
void SetTaskQueueSelectorObserver(Observer* observer);
- // Returns true if all the enabled work queues are empty. Returns false
- // otherwise.
- bool AllEnabledWorkQueuesAreEmpty() const;
+ // Returns the priority of the most important pending task if one exists.
+ // O(1).
+ Optional<TaskQueue::QueuePriority> GetHighestPendingPriority() const;
// WorkQueueSets::Observer implementation:
void WorkQueueSetBecameEmpty(size_t set_index) override;
@@ -251,6 +251,16 @@ class BASE_EXPORT TaskQueueSelector : public WorkQueueSets::Observer {
const bool random_task_selection_ = false;
#endif
+ // If true, the scheduler will bypass the priority-based anti-starvation logic
+ // that prevents indefinite starvation of lower priority tasks in the presence
+ // of higher priority tasks by occasionally selecting lower priority task
+ // queues over higher priority task queues.
+ //
+ // Note: this does not affect the anti-starvation logic that is in place for
+ // preventing delayed tasks from starving immediate tasks, which is always
+ // enabled.
+ const bool anti_starvation_logic_for_priorities_disabled_;
+
// Count of the number of sets (delayed or immediate) for each priority.
// Should only contain 0, 1 or 2.
std::array<int, TaskQueue::kQueuePriorityCount> non_empty_set_counts_ = {{0}};
diff --git a/chromium/base/task/sequence_manager/task_queue_selector_unittest.cc b/chromium/base/task/sequence_manager/task_queue_selector_unittest.cc
index d63860fe532..c99366471ba 100644
--- a/chromium/base/task/sequence_manager/task_queue_selector_unittest.cc
+++ b/chromium/base/task/sequence_manager/task_queue_selector_unittest.cc
@@ -16,6 +16,7 @@
#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "base/pending_task.h"
+#include "base/task/sequence_manager/enqueue_order_generator.h"
#include "base/task/sequence_manager/task_queue_impl.h"
#include "base/task/sequence_manager/test/mock_time_domain.h"
#include "base/task/sequence_manager/work_queue.h"
@@ -56,20 +57,21 @@ class TaskQueueSelectorForTest : public TaskQueueSelector {
using TaskQueueSelector::SetOperationOldest;
using TaskQueueSelector::SmallPriorityQueue;
- TaskQueueSelectorForTest(scoped_refptr<AssociatedThreadId> associated_thread)
- : TaskQueueSelector(associated_thread, SequenceManager::Settings()) {}
+ TaskQueueSelectorForTest(scoped_refptr<AssociatedThreadId> associated_thread,
+ const SequenceManager::Settings& settings)
+ : TaskQueueSelector(associated_thread, settings) {}
};
-class TaskQueueSelectorTest : public testing::Test {
+class TaskQueueSelectorTestBase : public testing::Test {
public:
- TaskQueueSelectorTest()
- : test_closure_(BindRepeating(&TaskQueueSelectorTest::TestFunction)),
+ explicit TaskQueueSelectorTestBase(const SequenceManager::Settings& settings)
+ : test_closure_(BindRepeating(&TaskQueueSelectorTestBase::TestFunction)),
associated_thread_(AssociatedThreadId::CreateBound()),
- selector_(associated_thread_) {}
- ~TaskQueueSelectorTest() override = default;
+ selector_(associated_thread_, settings) {}
+ ~TaskQueueSelectorTestBase() override = default;
void PushTasks(const size_t queue_indices[], size_t num_tasks) {
- EnqueueOrder::Generator enqueue_order_generator;
+ EnqueueOrderGenerator enqueue_order_generator;
for (size_t i = 0; i < num_tasks; i++) {
task_queues_[queue_indices[i]]->immediate_work_queue()->Push(
Task(PostedTask(test_closure_, FROM_HERE), TimeTicks(),
@@ -151,6 +153,15 @@ class TaskQueueSelectorTest : public testing::Test {
std::map<TaskQueueImpl*, size_t> queue_to_index_map_;
};
+class TaskQueueSelectorTest : public TaskQueueSelectorTestBase {
+ public:
+ TaskQueueSelectorTest()
+ : TaskQueueSelectorTestBase(
+ SequenceManager::Settings::Builder()
+ .SetAntiStarvationLogicForPrioritiesDisabled(false)
+ .Build()) {}
+};
+
TEST_F(TaskQueueSelectorTest, TestDefaultPriority) {
size_t queue_order[] = {4, 3, 2, 1, 0};
PushTasks(queue_order, 5);
@@ -626,24 +637,21 @@ TEST_F(TaskQueueSelectorTest,
EXPECT_NE(chosen_work_queue->task_queue(), task_queues_[3].get());
}
-TEST_F(TaskQueueSelectorTest, AllEnabledWorkQueuesAreEmpty) {
- EXPECT_TRUE(selector_.AllEnabledWorkQueuesAreEmpty());
+TEST_F(TaskQueueSelectorTest, GetHighestPendingPriority) {
+ EXPECT_FALSE(selector_.GetHighestPendingPriority().has_value());
size_t queue_order[] = {0, 1};
PushTasks(queue_order, 2);
- EXPECT_FALSE(selector_.AllEnabledWorkQueuesAreEmpty());
+ selector_.SetQueuePriority(task_queues_[1].get(), TaskQueue::kHighPriority);
+
+ EXPECT_EQ(TaskQueue::kHighPriority, *selector_.GetHighestPendingPriority());
PopTasksAndReturnQueueIndices();
- EXPECT_TRUE(selector_.AllEnabledWorkQueuesAreEmpty());
-}
+ EXPECT_FALSE(selector_.GetHighestPendingPriority().has_value());
-TEST_F(TaskQueueSelectorTest, AllEnabledWorkQueuesAreEmpty_ControlPriority) {
- size_t queue_order[] = {0};
PushTasks(queue_order, 1);
-
- selector_.SetQueuePriority(task_queues_[0].get(),
- TaskQueue::kControlPriority);
-
- EXPECT_FALSE(selector_.AllEnabledWorkQueuesAreEmpty());
+ EXPECT_EQ(TaskQueue::kNormalPriority, *selector_.GetHighestPendingPriority());
+ PopTasksAndReturnQueueIndices();
+ EXPECT_FALSE(selector_.GetHighestPendingPriority().has_value());
}
TEST_F(TaskQueueSelectorTest, ChooseWithPriority_Empty) {
@@ -685,7 +693,8 @@ TEST_F(TaskQueueSelectorTest, ChooseWithPriority_OnlyImmediate) {
}
TEST_F(TaskQueueSelectorTest, TestObserverWithOneBlockedQueue) {
- TaskQueueSelectorForTest selector(associated_thread_);
+ TaskQueueSelectorForTest selector(associated_thread_,
+ SequenceManager::Settings());
MockObserver mock_observer;
selector.SetTaskQueueSelectorObserver(&mock_observer);
@@ -710,7 +719,8 @@ TEST_F(TaskQueueSelectorTest, TestObserverWithOneBlockedQueue) {
}
TEST_F(TaskQueueSelectorTest, TestObserverWithTwoBlockedQueues) {
- TaskQueueSelectorForTest selector(associated_thread_);
+ TaskQueueSelectorForTest selector(associated_thread_,
+ SequenceManager::Settings());
MockObserver mock_observer;
selector.SetTaskQueueSelectorObserver(&mock_observer);
@@ -752,6 +762,62 @@ TEST_F(TaskQueueSelectorTest, TestObserverWithTwoBlockedQueues) {
task_queue2->UnregisterTaskQueue();
}
+class DisabledAntiStarvationLogicTaskQueueSelectorTest
+ : public TaskQueueSelectorTestBase,
+ public testing::WithParamInterface<TaskQueue::QueuePriority> {
+ public:
+ DisabledAntiStarvationLogicTaskQueueSelectorTest()
+ : TaskQueueSelectorTestBase(
+ SequenceManager::Settings::Builder()
+ .SetAntiStarvationLogicForPrioritiesDisabled(true)
+ .Build()) {}
+};
+
+TEST_P(DisabledAntiStarvationLogicTaskQueueSelectorTest,
+ TestStarvedByHigherPriorities) {
+ TaskQueue::QueuePriority priority_to_test = GetParam();
+ size_t queue_order[] = {0, 1};
+ PushTasks(queue_order, 2);
+
+ // Setting the queue priority to its current value causes a check to fail.
+ if (task_queues_[0]->GetQueuePriority() != priority_to_test) {
+ selector_.SetQueuePriority(task_queues_[0].get(), priority_to_test);
+ }
+
+ // Test that |priority_to_test| is starved by all higher priorities.
+ for (int higher_priority = static_cast<int>(priority_to_test) - 1;
+ higher_priority >= 0; higher_priority--) {
+ // Setting the queue priority to its current value causes a check to fail.
+ if (task_queues_[1]->GetQueuePriority() != higher_priority) {
+ selector_.SetQueuePriority(
+ task_queues_[1].get(),
+ static_cast<TaskQueue::QueuePriority>(higher_priority));
+ }
+
+ for (int i = 0; i < 100; i++) {
+ WorkQueue* chosen_work_queue = selector_.SelectWorkQueueToService();
+ ASSERT_THAT(chosen_work_queue, NotNull());
+ EXPECT_EQ(task_queues_[1].get(), chosen_work_queue->task_queue());
+ // Don't remove task from queue to simulate all queues still being full.
+ }
+ }
+}
+
+std::string GetPriorityTestNameSuffix(
+ const testing::TestParamInfo<TaskQueue::QueuePriority>& info) {
+ return TaskQueue::PriorityToString(info.param);
+}
+
+INSTANTIATE_TEST_SUITE_P(,
+ DisabledAntiStarvationLogicTaskQueueSelectorTest,
+ testing::Values(TaskQueue::kHighestPriority,
+ TaskQueue::kVeryHighPriority,
+ TaskQueue::kHighPriority,
+ TaskQueue::kNormalPriority,
+ TaskQueue::kLowPriority,
+ TaskQueue::kBestEffortPriority),
+ GetPriorityTestNameSuffix);
+
struct ChooseWithPriorityTestParam {
int delayed_task_enqueue_order;
int immediate_task_enqueue_order;
diff --git a/chromium/base/task/sequence_manager/tasks.cc b/chromium/base/task/sequence_manager/tasks.cc
index 10067628001..14bd306b947 100644
--- a/chromium/base/task/sequence_manager/tasks.cc
+++ b/chromium/base/task/sequence_manager/tasks.cc
@@ -9,8 +9,8 @@ namespace sequence_manager {
Task::Task(internal::PostedTask posted_task,
TimeTicks desired_run_time,
- internal::EnqueueOrder sequence_order,
- internal::EnqueueOrder enqueue_order,
+ EnqueueOrder sequence_order,
+ EnqueueOrder enqueue_order,
internal::WakeUpResolution resolution)
: PendingTask(posted_task.location,
std::move(posted_task.callback),
diff --git a/chromium/base/task/sequence_manager/tasks.h b/chromium/base/task/sequence_manager/tasks.h
index f9658a7a6d7..0c886c4b9ca 100644
--- a/chromium/base/task/sequence_manager/tasks.h
+++ b/chromium/base/task/sequence_manager/tasks.h
@@ -72,8 +72,8 @@ struct DelayedWakeUp {
struct BASE_EXPORT Task : public PendingTask {
Task(internal::PostedTask posted_task,
TimeTicks desired_run_time,
- internal::EnqueueOrder sequence_order,
- internal::EnqueueOrder enqueue_order = internal::EnqueueOrder(),
+ EnqueueOrder sequence_order,
+ EnqueueOrder enqueue_order = EnqueueOrder(),
internal::WakeUpResolution wake_up_resolution =
internal::WakeUpResolution::kLow);
@@ -83,12 +83,12 @@ struct BASE_EXPORT Task : public PendingTask {
// SequenceManager is particularly sensitive to enqueue order,
// so we have accessors for safety.
- internal::EnqueueOrder enqueue_order() const {
+ EnqueueOrder enqueue_order() const {
DCHECK(enqueue_order_);
return enqueue_order_;
}
- void set_enqueue_order(internal::EnqueueOrder enqueue_order) {
+ void set_enqueue_order(EnqueueOrder enqueue_order) {
DCHECK(!enqueue_order_);
enqueue_order_ = enqueue_order;
}
@@ -107,7 +107,7 @@ struct BASE_EXPORT Task : public PendingTask {
// is set when posted, but for delayed tasks it's not defined until they are
// enqueued. This is because otherwise delayed tasks could run before
// an immediate task posted after the delayed task.
- internal::EnqueueOrder enqueue_order_;
+ EnqueueOrder enqueue_order_;
};
} // namespace sequence_manager
diff --git a/chromium/base/task/sequence_manager/thread_controller_impl.cc b/chromium/base/task/sequence_manager/thread_controller_impl.cc
index 8921dfc879c..22bffdb8a5b 100644
--- a/chromium/base/task/sequence_manager/thread_controller_impl.cc
+++ b/chromium/base/task/sequence_manager/thread_controller_impl.cc
@@ -32,8 +32,7 @@ ThreadControllerImpl::ThreadControllerImpl(
? funneled_sequence_manager->GetTaskRunner()
: nullptr),
time_source_(time_source),
- work_deduplicator_(associated_thread_),
- weak_factory_(this) {
+ work_deduplicator_(associated_thread_) {
if (task_runner_ || funneled_sequence_manager_)
work_deduplicator_.BindToCurrentThread();
immediate_do_work_closure_ =
@@ -164,7 +163,8 @@ void ThreadControllerImpl::WillQueueTask(PendingTask* pending_task,
}
void ThreadControllerImpl::DoWork(WorkType work_type) {
- TRACE_EVENT0("sequence_manager", "ThreadControllerImpl::DoWork");
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
+ "ThreadControllerImpl::DoWork");
DCHECK_CALLED_ON_VALID_SEQUENCE(associated_thread_->sequence_checker);
DCHECK(sequence_);
diff --git a/chromium/base/task/sequence_manager/thread_controller_impl.h b/chromium/base/task/sequence_manager/thread_controller_impl.h
index 9a7967bf077..cb16595c1ae 100644
--- a/chromium/base/task/sequence_manager/thread_controller_impl.h
+++ b/chromium/base/task/sequence_manager/thread_controller_impl.h
@@ -127,7 +127,7 @@ class BASE_EXPORT ThreadControllerImpl : public ThreadController,
bool default_task_runner_set_ = false;
#endif
- WeakPtrFactory<ThreadControllerImpl> weak_factory_;
+ WeakPtrFactory<ThreadControllerImpl> weak_factory_{this};
DISALLOW_COPY_AND_ASSIGN(ThreadControllerImpl);
};
diff --git a/chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl.cc b/chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl.cc
index 1886b5c82af..deeda0e4260 100644
--- a/chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl.cc
+++ b/chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl.cc
@@ -330,22 +330,19 @@ bool ThreadControllerWithMessagePumpImpl::DoDelayedWork(
TimeDelta ThreadControllerWithMessagePumpImpl::DoWorkImpl(
LazyNow* continuation_lazy_now,
bool* ran_task) {
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
+ "ThreadControllerImpl::DoWork");
+
if (!main_thread_only().task_execution_allowed) {
if (main_thread_only().quit_runloop_after == TimeTicks::Max())
return TimeDelta::Max();
return main_thread_only().quit_runloop_after - continuation_lazy_now->Now();
}
- // Keep this in-sync with
- // third_party/catapult/tracing/tracing/extras/chrome/event_finder_utils.html
- // TODO(alexclarke): Rename this event to whatever we end up calling this
- // after the DoWork / DoDelayed work merge.
- TRACE_EVENT0("toplevel", "ThreadControllerImpl::RunTask");
-
DCHECK(main_thread_only().task_source);
for (int i = 0; i < main_thread_only().work_batch_size; i++) {
- Optional<PendingTask> task = main_thread_only().task_source->TakeTask();
+ Optional<Task> task = main_thread_only().task_source->TakeTask();
if (!task)
break;
@@ -364,8 +361,8 @@ TimeDelta ThreadControllerWithMessagePumpImpl::DoWorkImpl(
{
// Trace events should finish before we call DidRunTask to ensure that
// SequenceManager trace events do not interfere with them.
- TRACE_TASK_EXECUTION("ThreadController::Task", *task);
- task_annotator_.RunTask("ThreadController::Task", &*task);
+ TRACE_TASK_EXECUTION("ThreadControllerImpl::RunTask", *task);
+ task_annotator_.RunTask("SequenceManager RunTask", &*task);
}
#if DCHECK_IS_ON()
diff --git a/chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl_unittest.cc b/chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl_unittest.cc
index 3b8bf47fa1a..64eb8f7e74b 100644
--- a/chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl_unittest.cc
+++ b/chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl_unittest.cc
@@ -79,12 +79,12 @@ class FakeSequencedTaskSource : public internal::SequencedTaskSource {
explicit FakeSequencedTaskSource(TickClock* clock) : clock_(clock) {}
~FakeSequencedTaskSource() override = default;
- Optional<PendingTask> TakeTask() override {
+ Optional<Task> TakeTask() override {
if (tasks_.empty())
return nullopt;
if (tasks_.front().delayed_run_time > clock_->NowTicks())
return nullopt;
- PendingTask task = std::move(tasks_.front());
+ Task task = std::move(tasks_.front());
tasks_.pop();
return task;
}
@@ -101,10 +101,13 @@ class FakeSequencedTaskSource : public internal::SequencedTaskSource {
return tasks_.front().delayed_run_time - lazy_now->Now();
}
- void AddTask(PendingTask task) {
- DCHECK(tasks_.empty() || task.delayed_run_time.is_null() ||
- tasks_.back().delayed_run_time < task.delayed_run_time);
- tasks_.push(std::move(task));
+ void AddTask(Location posted_from,
+ OnceClosure task,
+ TimeTicks delayed_run_time) {
+ DCHECK(tasks_.empty() || delayed_run_time.is_null() ||
+ tasks_.back().delayed_run_time < delayed_run_time);
+ tasks_.push(Task(internal::PostedTask(std::move(task), posted_from),
+ delayed_run_time, EnqueueOrder::FromIntForTesting(13)));
}
bool HasPendingHighResolutionTasks() override { return false; }
@@ -113,7 +116,7 @@ class FakeSequencedTaskSource : public internal::SequencedTaskSource {
private:
TickClock* clock_;
- std::queue<PendingTask> tasks_;
+ std::queue<Task> tasks_;
};
TimeTicks Seconds(int seconds) {
@@ -151,11 +154,11 @@ TEST_F(ThreadControllerWithMessagePumpTest, ScheduleDelayedWork) {
TimeTicks next_run_time;
MockCallback<OnceClosure> task1;
- task_source_.AddTask(PendingTask(FROM_HERE, task1.Get(), Seconds(10)));
+ task_source_.AddTask(FROM_HERE, task1.Get(), Seconds(10));
MockCallback<OnceClosure> task2;
- task_source_.AddTask(PendingTask(FROM_HERE, task2.Get(), TimeTicks()));
+ task_source_.AddTask(FROM_HERE, task2.Get(), TimeTicks());
MockCallback<OnceClosure> task3;
- task_source_.AddTask(PendingTask(FROM_HERE, task3.Get(), Seconds(20)));
+ task_source_.AddTask(FROM_HERE, task3.Get(), Seconds(20));
// Call a no-op DoWork. Expect that it doesn't do any work.
clock_.SetNowTicks(Seconds(5));
@@ -219,7 +222,7 @@ TEST_F(ThreadControllerWithMessagePumpTest, SetNextDelayedDoWork_CapAtOneDay) {
TEST_F(ThreadControllerWithMessagePumpTest, DelayedWork_CapAtOneDay) {
MockCallback<OnceClosure> task1;
- task_source_.AddTask(PendingTask(FROM_HERE, task1.Get(), Days(10)));
+ task_source_.AddTask(FROM_HERE, task1.Get(), Days(10));
TimeTicks next_run_time;
EXPECT_FALSE(thread_controller_.DoDelayedWork(&next_run_time));
@@ -228,7 +231,7 @@ TEST_F(ThreadControllerWithMessagePumpTest, DelayedWork_CapAtOneDay) {
TEST_F(ThreadControllerWithMessagePumpTest, DoWorkDoesntScheduleDelayedWork) {
MockCallback<OnceClosure> task1;
- task_source_.AddTask(PendingTask(FROM_HERE, task1.Get(), Seconds(10)));
+ task_source_.AddTask(FROM_HERE, task1.Get(), Seconds(10));
EXPECT_CALL(*message_pump_, ScheduleDelayedWork(_)).Times(0);
EXPECT_FALSE(thread_controller_.DoWork());
@@ -260,37 +263,34 @@ TEST_F(ThreadControllerWithMessagePumpTest, NestedExecution) {
log.push_back("exiting nested runloop");
}));
- task_source_.AddTask(
- PendingTask(FROM_HERE,
- base::BindOnce(
- [](std::vector<std::string>* log,
- ThreadControllerForTest* controller) {
- EXPECT_FALSE(controller->IsTaskExecutionAllowed());
- log->push_back("task1");
- RunLoop().Run();
- },
- &log, &thread_controller_),
- TimeTicks()));
- task_source_.AddTask(
- PendingTask(FROM_HERE,
- base::BindOnce(
- [](std::vector<std::string>* log,
- ThreadControllerForTest* controller) {
- EXPECT_FALSE(controller->IsTaskExecutionAllowed());
- log->push_back("task2");
- },
- &log, &thread_controller_),
- TimeTicks()));
- task_source_.AddTask(
- PendingTask(FROM_HERE,
- base::BindOnce(
- [](std::vector<std::string>* log,
- ThreadControllerForTest* controller) {
- EXPECT_FALSE(controller->IsTaskExecutionAllowed());
- log->push_back("task3");
- },
- &log, &thread_controller_),
- TimeTicks()));
+ task_source_.AddTask(FROM_HERE,
+ base::BindOnce(
+ [](std::vector<std::string>* log,
+ ThreadControllerForTest* controller) {
+ EXPECT_FALSE(controller->IsTaskExecutionAllowed());
+ log->push_back("task1");
+ RunLoop().Run();
+ },
+ &log, &thread_controller_),
+ TimeTicks());
+ task_source_.AddTask(FROM_HERE,
+ base::BindOnce(
+ [](std::vector<std::string>* log,
+ ThreadControllerForTest* controller) {
+ EXPECT_FALSE(controller->IsTaskExecutionAllowed());
+ log->push_back("task2");
+ },
+ &log, &thread_controller_),
+ TimeTicks());
+ task_source_.AddTask(FROM_HERE,
+ base::BindOnce(
+ [](std::vector<std::string>* log,
+ ThreadControllerForTest* controller) {
+ EXPECT_FALSE(controller->IsTaskExecutionAllowed());
+ log->push_back("task3");
+ },
+ &log, &thread_controller_),
+ TimeTicks());
EXPECT_TRUE(thread_controller_.IsTaskExecutionAllowed());
RunLoop().Run();
@@ -330,36 +330,34 @@ TEST_F(ThreadControllerWithMessagePumpTest,
}));
task_source_.AddTask(
- PendingTask(FROM_HERE,
- base::BindOnce(
- [](std::vector<std::string>* log,
- ThreadControllerForTest* controller) {
- EXPECT_FALSE(controller->IsTaskExecutionAllowed());
- log->push_back("task1");
- RunLoop(RunLoop::Type::kNestableTasksAllowed).Run();
- },
- &log, &thread_controller_),
- TimeTicks()));
- task_source_.AddTask(
- PendingTask(FROM_HERE,
- base::BindOnce(
- [](std::vector<std::string>* log,
- ThreadControllerForTest* controller) {
- EXPECT_FALSE(controller->IsTaskExecutionAllowed());
- log->push_back("task2");
- },
- &log, &thread_controller_),
- TimeTicks()));
- task_source_.AddTask(
- PendingTask(FROM_HERE,
- base::BindOnce(
- [](std::vector<std::string>* log,
- ThreadControllerForTest* controller) {
- EXPECT_FALSE(controller->IsTaskExecutionAllowed());
- log->push_back("task3");
- },
- &log, &thread_controller_),
- TimeTicks()));
+ FROM_HERE,
+ base::BindOnce(
+ [](std::vector<std::string>* log,
+ ThreadControllerForTest* controller) {
+ EXPECT_FALSE(controller->IsTaskExecutionAllowed());
+ log->push_back("task1");
+ RunLoop(RunLoop::Type::kNestableTasksAllowed).Run();
+ },
+ &log, &thread_controller_),
+ TimeTicks());
+ task_source_.AddTask(FROM_HERE,
+ base::BindOnce(
+ [](std::vector<std::string>* log,
+ ThreadControllerForTest* controller) {
+ EXPECT_FALSE(controller->IsTaskExecutionAllowed());
+ log->push_back("task2");
+ },
+ &log, &thread_controller_),
+ TimeTicks());
+ task_source_.AddTask(FROM_HERE,
+ base::BindOnce(
+ [](std::vector<std::string>* log,
+ ThreadControllerForTest* controller) {
+ EXPECT_FALSE(controller->IsTaskExecutionAllowed());
+ log->push_back("task3");
+ },
+ &log, &thread_controller_),
+ TimeTicks());
EXPECT_TRUE(thread_controller_.IsTaskExecutionAllowed());
RunLoop().Run();
@@ -381,12 +379,13 @@ TEST_F(ThreadControllerWithMessagePumpTest, ScheduleWorkFromDelayedTask) {
}));
EXPECT_CALL(*message_pump_, ScheduleWork());
- task_source_.AddTask(PendingTask(FROM_HERE, base::BindLambdaForTesting([&]() {
- // Triggers a ScheduleWork call.
- task_source_.AddTask(PendingTask(
- FROM_HERE, base::BindOnce([]() {})));
- }),
- TimeTicks()));
+ task_source_.AddTask(FROM_HERE, base::BindLambdaForTesting([&]() {
+ // Triggers a ScheduleWork call.
+ task_source_.AddTask(FROM_HERE,
+ base::BindOnce([]() {}),
+ base::TimeTicks());
+ }),
+ TimeTicks());
RunLoop().Run();
testing::Mock::VerifyAndClearExpectations(message_pump_);
@@ -406,7 +405,7 @@ TEST_F(ThreadControllerWithMessagePumpTest, SetDefaultTaskRunner) {
}
TEST_F(ThreadControllerWithMessagePumpTest, EnsureWorkScheduled) {
- task_source_.AddTask(PendingTask(FROM_HERE, DoNothing(), TimeTicks()));
+ task_source_.AddTask(FROM_HERE, DoNothing(), TimeTicks());
// Ensure that the first ScheduleWork() call results in the pump being called.
EXPECT_CALL(*message_pump_, ScheduleWork());
@@ -446,8 +445,8 @@ TEST_F(ThreadControllerWithMessagePumpTest, WorkBatching) {
}));
for (int i = 0; i < kBatchSize; i++) {
- task_source_.AddTask(PendingTask(
- FROM_HERE, BindLambdaForTesting([&] { task_count++; }), TimeTicks()));
+ task_source_.AddTask(FROM_HERE, BindLambdaForTesting([&] { task_count++; }),
+ TimeTicks());
}
RunLoop run_loop;
@@ -483,11 +482,11 @@ TEST_F(ThreadControllerWithMessagePumpTest, QuitInterruptsBatch) {
RunLoop run_loop;
for (int i = 0; i < kBatchSize; i++) {
- task_source_.AddTask(PendingTask(FROM_HERE, BindLambdaForTesting([&] {
- if (!task_count++)
- run_loop.Quit();
- }),
- TimeTicks()));
+ task_source_.AddTask(FROM_HERE, BindLambdaForTesting([&] {
+ if (!task_count++)
+ run_loop.Quit();
+ }),
+ TimeTicks());
}
run_loop.Run();
@@ -517,16 +516,16 @@ TEST_F(ThreadControllerWithMessagePumpTest, EarlyQuit) {
RunLoop run_loop;
- task_source_.AddTask(PendingTask(
+ task_source_.AddTask(
FROM_HERE,
base::BindOnce(
[](std::vector<std::string>* log) { log->push_back("task1"); }, &log),
- TimeTicks()));
- task_source_.AddTask(PendingTask(
+ TimeTicks());
+ task_source_.AddTask(
FROM_HERE,
base::BindOnce(
[](std::vector<std::string>* log) { log->push_back("task2"); }, &log),
- TimeTicks()));
+ TimeTicks());
run_loop.RunUntilIdle();
@@ -536,7 +535,7 @@ TEST_F(ThreadControllerWithMessagePumpTest, EarlyQuit) {
TEST_F(ThreadControllerWithMessagePumpTest, NativeNestedMessageLoop) {
bool did_run = false;
- task_source_.AddTask(PendingTask(
+ task_source_.AddTask(
FROM_HERE, BindLambdaForTesting([&] {
// Clear expectation set for the non-nested PostTask.
testing::Mock::VerifyAndClearExpectations(message_pump_);
@@ -555,7 +554,7 @@ TEST_F(ThreadControllerWithMessagePumpTest, NativeNestedMessageLoop) {
// Simulate a native callback which posts a task, this
// should now ask the pump to ScheduleWork();
- task_source_.AddTask(PendingTask(FROM_HERE, DoNothing(), TimeTicks()));
+ task_source_.AddTask(FROM_HERE, DoNothing(), TimeTicks());
EXPECT_CALL(*message_pump_, ScheduleWork());
thread_controller_.ScheduleWork();
testing::Mock::VerifyAndClearExpectations(message_pump_);
@@ -566,13 +565,13 @@ TEST_F(ThreadControllerWithMessagePumpTest, NativeNestedMessageLoop) {
// we've left the native loop. This should not ScheduleWork
// on the pump because the ThreadController will do that
// after this task finishes.
- task_source_.AddTask(PendingTask(FROM_HERE, DoNothing(), TimeTicks()));
+ task_source_.AddTask(FROM_HERE, DoNothing(), TimeTicks());
EXPECT_CALL(*message_pump_, ScheduleWork()).Times(0);
thread_controller_.ScheduleWork();
did_run = true;
}),
- TimeTicks()));
+ TimeTicks());
// Simulate a PostTask that enters a native nested message loop.
EXPECT_CALL(*message_pump_, ScheduleWork());
@@ -583,11 +582,11 @@ TEST_F(ThreadControllerWithMessagePumpTest, NativeNestedMessageLoop) {
TEST_F(ThreadControllerWithMessagePumpTest, RunWithTimeout) {
MockCallback<OnceClosure> task1;
- task_source_.AddTask(PendingTask(FROM_HERE, task1.Get(), Seconds(5)));
+ task_source_.AddTask(FROM_HERE, task1.Get(), Seconds(5));
MockCallback<OnceClosure> task2;
- task_source_.AddTask(PendingTask(FROM_HERE, task2.Get(), Seconds(10)));
+ task_source_.AddTask(FROM_HERE, task2.Get(), Seconds(10));
MockCallback<OnceClosure> task3;
- task_source_.AddTask(PendingTask(FROM_HERE, task3.Get(), Seconds(20)));
+ task_source_.AddTask(FROM_HERE, task3.Get(), Seconds(20));
EXPECT_CALL(*message_pump_, Run(_))
.WillOnce(Invoke([&](MessagePump::Delegate*) {
diff --git a/chromium/base/task/sequence_manager/time_domain.h b/chromium/base/task/sequence_manager/time_domain.h
index 152ebedc050..cf4ca662ecc 100644
--- a/chromium/base/task/sequence_manager/time_domain.h
+++ b/chromium/base/task/sequence_manager/time_domain.h
@@ -47,10 +47,10 @@ class BASE_EXPORT TimeDomain {
// TODO(alexclarke): Make this main thread only.
virtual TimeTicks Now() const = 0;
- // Computes the delay until the time when TimeDomain needs to wake up
- // some TaskQueue. Specific time domains (e.g. virtual or throttled) may
- // return TimeDelata() if TaskQueues have any delayed tasks they deem
- // eligible to run. It's also allowed to advance time domains's internal
+ // Computes the delay until the time when TimeDomain needs to wake up some
+ // TaskQueue on the main thread. Specific time domains (e.g. virtual or
+ // throttled) may return TimeDelta() if TaskQueues have any delayed tasks they
+ // deem eligible to run. It's also allowed to advance time domains's internal
// clock when this method is called.
// Can be called from main thread only.
// NOTE: |lazy_now| and the return value are in the SequenceManager's time.
diff --git a/chromium/base/task/sequence_manager/work_queue_unittest.cc b/chromium/base/task/sequence_manager/work_queue_unittest.cc
index 5dba2b510e1..721f60fda7d 100644
--- a/chromium/base/task/sequence_manager/work_queue_unittest.cc
+++ b/chromium/base/task/sequence_manager/work_queue_unittest.cc
@@ -31,11 +31,11 @@ class MockObserver : public WorkQueueSets::Observer {
void NopTask() {}
struct Cancelable {
- Cancelable() : weak_ptr_factory(this) {}
+ Cancelable() {}
void NopTask() {}
- WeakPtrFactory<Cancelable> weak_ptr_factory;
+ WeakPtrFactory<Cancelable> weak_ptr_factory{this};
};
class RealTimeDomainFake : public RealTimeDomain {
diff --git a/chromium/base/task/single_thread_task_executor.cc b/chromium/base/task/single_thread_task_executor.cc
index c94b0804258..2c524706fc6 100644
--- a/chromium/base/task/single_thread_task_executor.cc
+++ b/chromium/base/task/single_thread_task_executor.cc
@@ -6,20 +6,29 @@
#include "base/message_loop/message_pump.h"
#include "base/task/sequence_manager/sequence_manager.h"
+#include "base/task/sequence_manager/sequence_manager_impl.h"
+#include "build/build_config.h"
namespace base {
SingleThreadTaskExecutor::SingleThreadTaskExecutor(MessagePump::Type type)
- : sequence_manager_(
- sequence_manager::CreateSequenceManagerOnCurrentThreadWithPump(
- MessagePump::Create(type),
- sequence_manager::SequenceManager::Settings::Builder()
- .SetMessagePumpType(type)
- .Build())),
+ : sequence_manager_(sequence_manager::CreateUnboundSequenceManager(
+ sequence_manager::SequenceManager::Settings::Builder()
+ .SetMessagePumpType(type)
+ .Build())),
default_task_queue_(sequence_manager_->CreateTaskQueue(
sequence_manager::TaskQueue::Spec("default_tq"))),
type_(type) {
sequence_manager_->SetDefaultTaskRunner(default_task_queue_->task_runner());
+ sequence_manager_->BindToMessagePump(MessagePump::Create(type));
+
+#if defined(OS_IOS)
+ if (type == MessagePump::Type::UI) {
+ static_cast<sequence_manager::internal::SequenceManagerImpl*>(
+ sequence_manager_.get())
+ ->AttachToMessagePump();
+ }
+#endif
}
SingleThreadTaskExecutor::~SingleThreadTaskExecutor() = default;
diff --git a/chromium/base/task/task_executor.h b/chromium/base/task/task_executor.h
index 70d8abca0b2..b4e79e14c9a 100644
--- a/chromium/base/task/task_executor.h
+++ b/chromium/base/task/task_executor.h
@@ -30,27 +30,26 @@ class BASE_EXPORT TaskExecutor {
// Posts |task| with a |delay| and specific |traits|. |delay| can be zero. For
// one off tasks that don't require a TaskRunner. Returns false if the task
// definitely won't run because of current shutdown state.
- virtual bool PostDelayedTaskWithTraits(const Location& from_here,
- const TaskTraits& traits,
- OnceClosure task,
- TimeDelta delay) = 0;
+ virtual bool PostDelayedTask(const Location& from_here,
+ const TaskTraits& traits,
+ OnceClosure task,
+ TimeDelta delay) = 0;
// Returns a TaskRunner whose PostTask invocations result in scheduling tasks
// using |traits|. Tasks may run in any order and in parallel.
- virtual scoped_refptr<TaskRunner> CreateTaskRunnerWithTraits(
+ virtual scoped_refptr<TaskRunner> CreateTaskRunner(
const TaskTraits& traits) = 0;
// Returns a SequencedTaskRunner whose PostTask invocations result in
// scheduling tasks using |traits|. Tasks run one at a time in posting order.
- virtual scoped_refptr<SequencedTaskRunner>
- CreateSequencedTaskRunnerWithTraits(const TaskTraits& traits) = 0;
+ virtual scoped_refptr<SequencedTaskRunner> CreateSequencedTaskRunner(
+ const TaskTraits& traits) = 0;
// Returns a SingleThreadTaskRunner whose PostTask invocations result in
// scheduling tasks using |traits|. Tasks run on a single thread in posting
// order. If |traits| identifies an existing thread,
// SingleThreadTaskRunnerThreadMode::SHARED must be used.
- virtual scoped_refptr<SingleThreadTaskRunner>
- CreateSingleThreadTaskRunnerWithTraits(
+ virtual scoped_refptr<SingleThreadTaskRunner> CreateSingleThreadTaskRunner(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode) = 0;
@@ -60,8 +59,7 @@ class BASE_EXPORT TaskExecutor {
// run in the same Single-Threaded Apartment in posting order for the returned
// SingleThreadTaskRunner. If |traits| identifies an existing thread,
// SingleThreadTaskRunnerThreadMode::SHARED must be used.
- virtual scoped_refptr<SingleThreadTaskRunner>
- CreateCOMSTATaskRunnerWithTraits(
+ virtual scoped_refptr<SingleThreadTaskRunner> CreateCOMSTATaskRunner(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode) = 0;
#endif // defined(OS_WIN)
diff --git a/chromium/base/task/task_features.cc b/chromium/base/task/task_features.cc
index 97428587e83..55bba7e3515 100644
--- a/chromium/base/task/task_features.cc
+++ b/chromium/base/task/task_features.cc
@@ -28,4 +28,7 @@ const Feature kUseNativeThreadPool = {"UseNativeThreadPool",
base::FEATURE_DISABLED_BY_DEFAULT};
#endif
+const Feature kUseFiveMinutesThreadReclaimTime = {
+ "UseFiveMinutesThreadReclaimTime", base::FEATURE_DISABLED_BY_DEFAULT};
+
} // namespace base
diff --git a/chromium/base/task/task_features.h b/chromium/base/task/task_features.h
index ac1ae975f9e..be1f70a5808 100644
--- a/chromium/base/task/task_features.h
+++ b/chromium/base/task/task_features.h
@@ -37,6 +37,10 @@ extern const BASE_EXPORT Feature kMayBlockWithoutDelay;
extern const BASE_EXPORT Feature kUseNativeThreadPool;
#endif
+// Whether threads in the ThreadPool should be reclaimed after being idle for 5
+// minutes, instead of 30 seconds.
+extern const BASE_EXPORT Feature kUseFiveMinutesThreadReclaimTime;
+
} // namespace base
#endif // BASE_TASK_TASK_FEATURES_H_
diff --git a/chromium/base/task/task_traits.h b/chromium/base/task/task_traits.h
index 3a11711ad8d..cd3f601b4a6 100644
--- a/chromium/base/task/task_traits.h
+++ b/chromium/base/task/task_traits.h
@@ -41,15 +41,17 @@ enum class TaskPriority : uint8_t {
// - Reporting metrics.
// - Persisting data to disk.
// - Loading data that is required for a potential future user interaction
- // (Note: Use CreateUpdateableSequencedTaskRunnerWithTraits() to increase
+ // (Note: Use CreateUpdateableSequencedTaskRunner() to increase
// the priority when that user interactions happens).
BEST_EFFORT = LOWEST,
- // This task affects UI but it is not an immediate response to a user
+ // The result of this task is visible to the user (in the UI or as a
+ // side-effect on the system) but it is not an immediate response to a user
// interaction.
//
// Examples:
// - Updating the UI to reflect progress on a long task.
+ // - Downloading a file requested by the user.
// - Loading an image that is displayed in the UI but is non-critical.
USER_VISIBLE,
@@ -64,6 +66,8 @@ enum class TaskPriority : uint8_t {
HIGHEST = USER_BLOCKING
};
+using TaskPriorityType = std::underlying_type<TaskPriority>::type;
+
// Valid shutdown behaviors supported by the thread pool.
enum class TaskShutdownBehavior : uint8_t {
// Tasks posted with this mode which have not started executing before
@@ -225,24 +229,26 @@ class BASE_EXPORT TaskTraits {
trait_helpers::AreValidTraits<ValidTrait, ArgTypes...>{},
args...)),
priority_(
- trait_helpers::GetEnum<TaskPriority, TaskPriority::USER_BLOCKING>(
- args...)),
+ static_cast<uint8_t>(
+ trait_helpers::GetEnum<TaskPriority,
+ TaskPriority::USER_BLOCKING>(args...)) |
+ (trait_helpers::HasTrait<TaskPriority>(args...) ? kIsExplicitFlag
+ : 0)),
shutdown_behavior_(
- trait_helpers::HasTrait<TaskShutdownBehavior>(args...)
- ? static_cast<uint8_t>(
- trait_helpers::GetEnum<
- TaskShutdownBehavior,
- TaskShutdownBehavior::SKIP_ON_SHUTDOWN>(args...))
- : kUnspecified),
+ static_cast<uint8_t>(
+ trait_helpers::GetEnum<TaskShutdownBehavior,
+ TaskShutdownBehavior::SKIP_ON_SHUTDOWN>(
+ args...)) |
+ (trait_helpers::HasTrait<TaskShutdownBehavior>(args...)
+ ? kIsExplicitFlag
+ : 0)),
thread_policy_(
- trait_helpers::HasTrait<ThreadPolicy>(args...)
- ? static_cast<uint8_t>(
- trait_helpers::GetEnum<ThreadPolicy,
- ThreadPolicy::PREFER_BACKGROUND>(
- args...))
- : kUnspecified),
- priority_set_explicitly_(
- trait_helpers::HasTrait<TaskPriority>(args...)),
+ static_cast<uint8_t>(
+ trait_helpers::GetEnum<ThreadPolicy,
+ ThreadPolicy::PREFER_BACKGROUND>(
+ args...)) |
+ (trait_helpers::HasTrait<ThreadPolicy>(args...) ? kIsExplicitFlag
+ : 0)),
may_block_(trait_helpers::HasTrait<MayBlock>(args...)),
with_base_sync_primitives_(
trait_helpers::HasTrait<WithBaseSyncPrimitives>(args...)),
@@ -253,12 +259,11 @@ class BASE_EXPORT TaskTraits {
// TODO(eseckler): Default the comparison operator once C++20 arrives.
bool operator==(const TaskTraits& other) const {
- static_assert(sizeof(TaskTraits) == 16,
+ static_assert(sizeof(TaskTraits) == 15,
"Update comparison operator when TaskTraits change");
return extension_ == other.extension_ && priority_ == other.priority_ &&
shutdown_behavior_ == other.shutdown_behavior_ &&
thread_policy_ == other.thread_policy_ &&
- priority_set_explicitly_ == other.priority_set_explicitly_ &&
may_block_ == other.may_block_ &&
with_base_sync_primitives_ == other.with_base_sync_primitives_ &&
use_thread_pool_ == other.use_thread_pool_;
@@ -266,47 +271,45 @@ class BASE_EXPORT TaskTraits {
// Sets the priority of tasks with these traits to |priority|.
void UpdatePriority(TaskPriority priority) {
- priority_ = priority;
- priority_set_explicitly_ = true;
+ priority_ = static_cast<uint8_t>(priority) | kIsExplicitFlag;
}
// Sets the priority to |priority| if it wasn't explicitly set before.
void InheritPriority(TaskPriority priority) {
- if (priority_set_explicitly_)
+ if (priority_set_explicitly())
return;
- priority_ = priority;
+ priority_ = static_cast<uint8_t>(priority);
}
// Returns true if the priority was set explicitly.
constexpr bool priority_set_explicitly() const {
- return priority_set_explicitly_;
+ return priority_ & kIsExplicitFlag;
}
// Returns the priority of tasks with these traits.
- constexpr TaskPriority priority() const { return priority_; }
+ constexpr TaskPriority priority() const {
+ return static_cast<TaskPriority>(priority_ & ~kIsExplicitFlag);
+ }
// Returns true if the shutdown behavior was set explicitly.
constexpr bool shutdown_behavior_set_explicitly() const {
- return shutdown_behavior_ != kUnspecified;
+ return shutdown_behavior_ & kIsExplicitFlag;
}
// Returns the shutdown behavior of tasks with these traits.
constexpr TaskShutdownBehavior shutdown_behavior() const {
- return shutdown_behavior_set_explicitly()
- ? static_cast<TaskShutdownBehavior>(shutdown_behavior_)
- : TaskShutdownBehavior::SKIP_ON_SHUTDOWN;
+ return static_cast<TaskShutdownBehavior>(shutdown_behavior_ &
+ ~kIsExplicitFlag);
}
// Returns true if the thread policy was set explicitly.
constexpr bool thread_policy_set_explicitly() const {
- return thread_policy_ != kUnspecified;
+ return thread_policy_ & kIsExplicitFlag;
}
// Returns the thread policy of tasks with these traits.
constexpr ThreadPolicy thread_policy() const {
- return thread_policy_set_explicitly()
- ? static_cast<ThreadPolicy>(thread_policy_)
- : ThreadPolicy::PREFER_BACKGROUND;
+ return static_cast<ThreadPolicy>(thread_policy_ & ~kIsExplicitFlag);
}
// Returns true if tasks with these traits may block.
@@ -340,24 +343,26 @@ class BASE_EXPORT TaskTraits {
bool use_thread_pool,
TaskTraitsExtensionStorage extension)
: extension_(extension),
- priority_(priority),
- shutdown_behavior_(kUnspecified),
- thread_policy_(kUnspecified),
- priority_set_explicitly_(priority_set_explicitly),
+ priority_(static_cast<uint8_t>(priority) |
+ (priority_set_explicitly ? kIsExplicitFlag : 0)),
+ shutdown_behavior_(
+ static_cast<uint8_t>(TaskShutdownBehavior::SKIP_ON_SHUTDOWN)),
+ thread_policy_(static_cast<uint8_t>(ThreadPolicy::PREFER_BACKGROUND)),
may_block_(may_block),
with_base_sync_primitives_(false),
use_thread_pool_(use_thread_pool) {
- static_assert(sizeof(TaskTraits) == 16, "Keep this constructor up to date");
+ static_assert(sizeof(TaskTraits) == 15, "Keep this constructor up to date");
}
- static constexpr uint8_t kUnspecified = 0xFF;
+ // This bit is set in |priority_|, |shutdown_behavior_| and |thread_policy_|
+ // when the value was set explicitly.
+ static constexpr uint8_t kIsExplicitFlag = 0x80;
// Ordered for packing.
TaskTraitsExtensionStorage extension_;
- TaskPriority priority_;
+ uint8_t priority_;
uint8_t shutdown_behavior_;
uint8_t thread_policy_;
- bool priority_set_explicitly_;
bool may_block_;
bool with_base_sync_primitives_;
bool use_thread_pool_;
diff --git a/chromium/base/task/task_traits_extension.h b/chromium/base/task/task_traits_extension.h
index 80ed4c23545..c731758d3e3 100644
--- a/chromium/base/task/task_traits_extension.h
+++ b/chromium/base/task/task_traits_extension.h
@@ -120,7 +120,7 @@ namespace base {
// my_embedder::MyExtensionTrait::kValueA};
//
// // Extension traits can also be specified directly when posting a task.
-// base::PostTaskWithTraits(FROM_HERE,
+// base::PostTask(FROM_HERE,
// {my_embedder::MyExtensionTrait::kValueB},
// base::BindOnce(...));
diff --git a/chromium/base/task/task_traits_extension_unittest.cc b/chromium/base/task/task_traits_extension_unittest.cc
index 12de5c6c2f2..ac54144c839 100644
--- a/chromium/base/task/task_traits_extension_unittest.cc
+++ b/chromium/base/task/task_traits_extension_unittest.cc
@@ -10,14 +10,14 @@
namespace base {
TEST(TaskTraitsExtensionTest, NoExtension) {
- constexpr TaskTraits traits = {};
+ constexpr TaskTraits traits = {ThreadPool()};
EXPECT_EQ(traits.extension_id(),
TaskTraitsExtensionStorage::kInvalidExtensionId);
}
TEST(TaskTraitsExtensionTest, CreateWithOneExtensionTrait) {
- constexpr TaskTraits traits = {TestExtensionEnumTrait::kB};
+ constexpr TaskTraits traits = {ThreadPool(), TestExtensionEnumTrait::kB};
EXPECT_EQ(traits.GetExtension<TestTaskTraitsExtension>().enum_trait(),
TestExtensionEnumTrait::kB);
@@ -25,7 +25,7 @@ TEST(TaskTraitsExtensionTest, CreateWithOneExtensionTrait) {
}
TEST(TaskTraitsExtensionTest, CreateWithMultipleExtensionTraits) {
- constexpr TaskTraits traits = {TestExtensionEnumTrait::kB,
+ constexpr TaskTraits traits = {ThreadPool(), TestExtensionEnumTrait::kB,
TestExtensionBoolTrait()};
EXPECT_EQ(traits.GetExtension<TestTaskTraitsExtension>().enum_trait(),
@@ -34,7 +34,7 @@ TEST(TaskTraitsExtensionTest, CreateWithMultipleExtensionTraits) {
}
TEST(TaskTraitsExtensionTest, CreateWithBaseAndExtensionTraits) {
- constexpr TaskTraits traits = {TaskPriority::USER_BLOCKING,
+ constexpr TaskTraits traits = {ThreadPool(), TaskPriority::USER_BLOCKING,
TestExtensionEnumTrait::kC,
TestExtensionBoolTrait()};
diff --git a/chromium/base/task/task_traits_unittest.cc b/chromium/base/task/task_traits_unittest.cc
index 83c3cad69dc..5b59de8d535 100644
--- a/chromium/base/task/task_traits_unittest.cc
+++ b/chromium/base/task/task_traits_unittest.cc
@@ -9,7 +9,7 @@
namespace base {
TEST(TaskTraitsTest, Default) {
- constexpr TaskTraits traits = {};
+ constexpr TaskTraits traits = {ThreadPool()};
EXPECT_FALSE(traits.priority_set_explicitly());
EXPECT_EQ(TaskPriority::USER_BLOCKING, traits.priority());
EXPECT_FALSE(traits.shutdown_behavior_set_explicitly());
@@ -21,7 +21,7 @@ TEST(TaskTraitsTest, Default) {
}
TEST(TaskTraitsTest, TaskPriority) {
- constexpr TaskTraits traits = {TaskPriority::BEST_EFFORT};
+ constexpr TaskTraits traits = {ThreadPool(), TaskPriority::BEST_EFFORT};
EXPECT_TRUE(traits.priority_set_explicitly());
EXPECT_EQ(TaskPriority::BEST_EFFORT, traits.priority());
EXPECT_FALSE(traits.shutdown_behavior_set_explicitly());
@@ -33,7 +33,8 @@ TEST(TaskTraitsTest, TaskPriority) {
}
TEST(TaskTraitsTest, TaskShutdownBehavior) {
- constexpr TaskTraits traits = {TaskShutdownBehavior::BLOCK_SHUTDOWN};
+ constexpr TaskTraits traits = {ThreadPool(),
+ TaskShutdownBehavior::BLOCK_SHUTDOWN};
EXPECT_FALSE(traits.priority_set_explicitly());
EXPECT_EQ(TaskPriority::USER_BLOCKING, traits.priority());
EXPECT_TRUE(traits.shutdown_behavior_set_explicitly());
@@ -45,7 +46,8 @@ TEST(TaskTraitsTest, TaskShutdownBehavior) {
}
TEST(TaskTraitsTest, ThreadPolicy) {
- constexpr TaskTraits traits = {ThreadPolicy::MUST_USE_FOREGROUND};
+ constexpr TaskTraits traits = {ThreadPool(),
+ ThreadPolicy::MUST_USE_FOREGROUND};
EXPECT_FALSE(traits.priority_set_explicitly());
EXPECT_EQ(TaskPriority::USER_BLOCKING, traits.priority());
EXPECT_FALSE(traits.shutdown_behavior_set_explicitly());
@@ -57,7 +59,7 @@ TEST(TaskTraitsTest, ThreadPolicy) {
}
TEST(TaskTraitsTest, MayBlock) {
- constexpr TaskTraits traits = {MayBlock()};
+ constexpr TaskTraits traits = {ThreadPool(), MayBlock()};
EXPECT_FALSE(traits.priority_set_explicitly());
EXPECT_EQ(TaskPriority::USER_BLOCKING, traits.priority());
EXPECT_FALSE(traits.shutdown_behavior_set_explicitly());
@@ -69,7 +71,7 @@ TEST(TaskTraitsTest, MayBlock) {
}
TEST(TaskTraitsTest, WithBaseSyncPrimitives) {
- constexpr TaskTraits traits = {WithBaseSyncPrimitives()};
+ constexpr TaskTraits traits = {ThreadPool(), WithBaseSyncPrimitives()};
EXPECT_FALSE(traits.priority_set_explicitly());
EXPECT_EQ(TaskPriority::USER_BLOCKING, traits.priority());
EXPECT_FALSE(traits.shutdown_behavior_set_explicitly());
@@ -80,10 +82,47 @@ TEST(TaskTraitsTest, WithBaseSyncPrimitives) {
EXPECT_TRUE(traits.with_base_sync_primitives());
}
+TEST(TaskTraitsTest, UpdatePriority) {
+ {
+ TaskTraits traits = {ThreadPool()};
+ EXPECT_FALSE(traits.priority_set_explicitly());
+ traits.UpdatePriority(TaskPriority::BEST_EFFORT);
+ EXPECT_EQ(TaskPriority::BEST_EFFORT, traits.priority());
+ EXPECT_TRUE(traits.priority_set_explicitly());
+ }
+
+ {
+ TaskTraits traits = {ThreadPool(), TaskPriority::USER_VISIBLE};
+ EXPECT_TRUE(traits.priority_set_explicitly());
+ traits.UpdatePriority(TaskPriority::BEST_EFFORT);
+ EXPECT_EQ(TaskPriority::BEST_EFFORT, traits.priority());
+ EXPECT_TRUE(traits.priority_set_explicitly());
+ }
+}
+
+TEST(TaskTraitsTest, InheritPriority) {
+ {
+ TaskTraits traits = {ThreadPool()};
+ traits.InheritPriority(TaskPriority::BEST_EFFORT);
+ EXPECT_EQ(TaskPriority::BEST_EFFORT, traits.priority());
+ EXPECT_FALSE(traits.priority_set_explicitly());
+ }
+
+ {
+ TaskTraits traits = {ThreadPool(), TaskPriority::USER_VISIBLE};
+ traits.InheritPriority(TaskPriority::BEST_EFFORT);
+ EXPECT_EQ(TaskPriority::USER_VISIBLE, traits.priority());
+ EXPECT_TRUE(traits.priority_set_explicitly());
+ }
+}
+
TEST(TaskTraitsTest, MultipleTraits) {
- constexpr TaskTraits traits = {
- TaskPriority::BEST_EFFORT, TaskShutdownBehavior::BLOCK_SHUTDOWN,
- ThreadPolicy::MUST_USE_FOREGROUND, MayBlock(), WithBaseSyncPrimitives()};
+ constexpr TaskTraits traits = {ThreadPool(),
+ TaskPriority::BEST_EFFORT,
+ TaskShutdownBehavior::BLOCK_SHUTDOWN,
+ ThreadPolicy::MUST_USE_FOREGROUND,
+ MayBlock(),
+ WithBaseSyncPrimitives()};
EXPECT_TRUE(traits.priority_set_explicitly());
EXPECT_EQ(TaskPriority::BEST_EFFORT, traits.priority());
EXPECT_TRUE(traits.shutdown_behavior_set_explicitly());
@@ -95,9 +134,12 @@ TEST(TaskTraitsTest, MultipleTraits) {
}
TEST(TaskTraitsTest, Copy) {
- constexpr TaskTraits traits = {
- TaskPriority::BEST_EFFORT, TaskShutdownBehavior::BLOCK_SHUTDOWN,
- ThreadPolicy::MUST_USE_FOREGROUND, MayBlock(), WithBaseSyncPrimitives()};
+ constexpr TaskTraits traits = {ThreadPool(),
+ TaskPriority::BEST_EFFORT,
+ TaskShutdownBehavior::BLOCK_SHUTDOWN,
+ ThreadPolicy::MUST_USE_FOREGROUND,
+ MayBlock(),
+ WithBaseSyncPrimitives()};
constexpr TaskTraits traits_copy(traits);
EXPECT_EQ(traits, traits_copy);
diff --git a/chromium/base/task/thread_pool/delayed_task_manager.cc b/chromium/base/task/thread_pool/delayed_task_manager.cc
index 9807d572012..a2e92a20124 100644
--- a/chromium/base/task/thread_pool/delayed_task_manager.cc
+++ b/chromium/base/task/thread_pool/delayed_task_manager.cc
@@ -10,6 +10,7 @@
#include "base/logging.h"
#include "base/task/post_task.h"
#include "base/task/thread_pool/task.h"
+#include "base/task/thread_pool/thread_pool_clock.h"
#include "base/task_runner.h"
namespace base {
@@ -46,14 +47,10 @@ void DelayedTaskManager::DelayedTask::SetScheduled() {
scheduled_ = true;
}
-DelayedTaskManager::DelayedTaskManager(
- std::unique_ptr<const TickClock> tick_clock)
+DelayedTaskManager::DelayedTaskManager()
: process_ripe_tasks_closure_(
BindRepeating(&DelayedTaskManager::ProcessRipeTasks,
- Unretained(this))),
- tick_clock_(std::move(tick_clock)) {
- DCHECK(tick_clock_);
-}
+ Unretained(this))) {}
DelayedTaskManager::~DelayedTaskManager() = default;
@@ -101,7 +98,7 @@ void DelayedTaskManager::ProcessRipeTasks() {
{
CheckedAutoLock auto_lock(queue_lock_);
- const TimeTicks now = tick_clock_->NowTicks();
+ const TimeTicks now = ThreadPoolClock::Now();
while (!delayed_task_queue_.empty() &&
delayed_task_queue_.Min().task.delayed_run_time <= now) {
// The const_cast on top is okay since the DelayedTask is
@@ -120,6 +117,13 @@ void DelayedTaskManager::ProcessRipeTasks() {
}
}
+Optional<TimeTicks> DelayedTaskManager::NextScheduledRunTime() const {
+ CheckedAutoLock auto_lock(queue_lock_);
+ if (delayed_task_queue_.empty())
+ return nullopt;
+ return delayed_task_queue_.Min().task.delayed_run_time;
+}
+
TimeTicks DelayedTaskManager::GetTimeToScheduleProcessRipeTasksLockRequired() {
queue_lock_.AssertAcquired();
if (delayed_task_queue_.empty())
@@ -139,7 +143,7 @@ void DelayedTaskManager::ScheduleProcessRipeTasksOnServiceThread(
DCHECK(!next_delayed_task_run_time.is_null());
if (next_delayed_task_run_time.is_max())
return;
- const TimeTicks now = tick_clock_->NowTicks();
+ const TimeTicks now = ThreadPoolClock::Now();
TimeDelta delay = std::max(TimeDelta(), next_delayed_task_run_time - now);
service_thread_task_runner_->PostDelayedTask(
FROM_HERE, process_ripe_tasks_closure_, delay);
diff --git a/chromium/base/task/thread_pool/delayed_task_manager.h b/chromium/base/task/thread_pool/delayed_task_manager.h
index c5cb475cc78..f829068b146 100644
--- a/chromium/base/task/thread_pool/delayed_task_manager.h
+++ b/chromium/base/task/thread_pool/delayed_task_manager.h
@@ -13,12 +13,12 @@
#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "base/memory/ref_counted.h"
+#include "base/optional.h"
#include "base/synchronization/atomic_flag.h"
#include "base/task/common/checked_lock.h"
#include "base/task/common/intrusive_heap.h"
#include "base/task/thread_pool/task.h"
-#include "base/time/default_tick_clock.h"
-#include "base/time/tick_clock.h"
+#include "base/thread_annotations.h"
namespace base {
@@ -34,9 +34,7 @@ class BASE_EXPORT DelayedTaskManager {
// Posts |task| for execution immediately.
using PostTaskNowCallback = OnceCallback<void(Task task)>;
- // |tick_clock| can be specified for testing.
- DelayedTaskManager(std::unique_ptr<const TickClock> tick_clock =
- std::make_unique<DefaultTickClock>());
+ DelayedTaskManager();
~DelayedTaskManager();
// Starts the delayed task manager, allowing past and future tasks to be
@@ -52,6 +50,12 @@ class BASE_EXPORT DelayedTaskManager {
PostTaskNowCallback post_task_now_callback,
scoped_refptr<TaskRunner> task_runner);
+ // Pop and post all the ripe tasks in the delayed task queue.
+ void ProcessRipeTasks();
+
+ // Returns the |delayed_run_time| of the next scheduled task, if any.
+ Optional<TimeTicks> NextScheduledRunTime() const;
+
private:
struct DelayedTask {
DelayedTask();
@@ -89,13 +93,11 @@ class BASE_EXPORT DelayedTaskManager {
DISALLOW_COPY_AND_ASSIGN(DelayedTask);
};
- // Pop and post all the ripe tasks in the delayed task queue.
- void ProcessRipeTasks();
-
// Get the time at which to schedule the next |ProcessRipeTasks()| execution,
// or TimeTicks::Max() if none needs to be scheduled (i.e. no task, or next
// task already scheduled).
- TimeTicks GetTimeToScheduleProcessRipeTasksLockRequired();
+ TimeTicks GetTimeToScheduleProcessRipeTasksLockRequired()
+ EXCLUSIVE_LOCKS_REQUIRED(queue_lock_);
// Schedule |ProcessRipeTasks()| on the service thread to be executed at the
// given |process_ripe_tasks_time|, provided the given time is not
@@ -105,18 +107,16 @@ class BASE_EXPORT DelayedTaskManager {
const RepeatingClosure process_ripe_tasks_closure_;
- const std::unique_ptr<const TickClock> tick_clock_;
-
- scoped_refptr<TaskRunner> service_thread_task_runner_;
-
- IntrusiveHeap<DelayedTask> delayed_task_queue_;
-
// Synchronizes access to |delayed_task_queue_| and the setting of
- // |service_thread_task_runner|. Once |service_thread_task_runner_| is set,
+ // |service_thread_task_runner_|. Once |service_thread_task_runner_| is set,
// it is never modified. It is therefore safe to access
// |service_thread_task_runner_| without synchronization once it is observed
// that it is non-null.
- CheckedLock queue_lock_;
+ mutable CheckedLock queue_lock_;
+
+ scoped_refptr<TaskRunner> service_thread_task_runner_;
+
+ IntrusiveHeap<DelayedTask> delayed_task_queue_ GUARDED_BY(queue_lock_);
DISALLOW_COPY_AND_ASSIGN(DelayedTaskManager);
};
diff --git a/chromium/base/task/thread_pool/delayed_task_manager_unittest.cc b/chromium/base/task/thread_pool/delayed_task_manager_unittest.cc
index e2f10f08f7b..af29027099c 100644
--- a/chromium/base/task/thread_pool/delayed_task_manager_unittest.cc
+++ b/chromium/base/task/thread_pool/delayed_task_manager_unittest.cc
@@ -12,6 +12,7 @@
#include "base/memory/ref_counted.h"
#include "base/synchronization/waitable_event.h"
#include "base/task/thread_pool/task.h"
+#include "base/task/thread_pool/thread_pool_clock.h"
#include "base/test/bind_test_util.h"
#include "base/test/test_mock_time_task_runner.h"
#include "base/threading/thread.h"
@@ -47,20 +48,17 @@ Task ConstructMockedTask(testing::StrictMock<MockTask>& mock_task,
class ThreadPoolDelayedTaskManagerTest : public testing::Test {
protected:
- ThreadPoolDelayedTaskManagerTest()
- : delayed_task_manager_(
- service_thread_task_runner_->DeprecatedGetMockTickClock()),
- task_(ConstructMockedTask(
- mock_task_,
- service_thread_task_runner_->GetMockTickClock()->NowTicks(),
- kLongDelay)) {}
+ ThreadPoolDelayedTaskManagerTest() = default;
~ThreadPoolDelayedTaskManagerTest() override = default;
const scoped_refptr<TestMockTimeTaskRunner> service_thread_task_runner_ =
MakeRefCounted<TestMockTimeTaskRunner>();
+ ThreadPoolClock thread_pool_clock_{
+ service_thread_task_runner_->GetMockTickClock()};
DelayedTaskManager delayed_task_manager_;
testing::StrictMock<MockTask> mock_task_;
- Task task_;
+ Task task_{
+ ConstructMockedTask(mock_task_, ThreadPoolClock::Now(), kLongDelay)};
private:
DISALLOW_COPY_AND_ASSIGN(ThreadPoolDelayedTaskManagerTest);
@@ -156,19 +154,16 @@ TEST_F(ThreadPoolDelayedTaskManagerTest, DelayedTasksRunAfterDelay) {
delayed_task_manager_.Start(service_thread_task_runner_);
testing::StrictMock<MockTask> mock_task_a;
- Task task_a = ConstructMockedTask(
- mock_task_a, service_thread_task_runner_->GetMockTickClock()->NowTicks(),
- TimeDelta::FromHours(1));
+ Task task_a = ConstructMockedTask(mock_task_a, ThreadPoolClock::Now(),
+ TimeDelta::FromHours(1));
testing::StrictMock<MockTask> mock_task_b;
- Task task_b = ConstructMockedTask(
- mock_task_b, service_thread_task_runner_->GetMockTickClock()->NowTicks(),
- TimeDelta::FromHours(2));
+ Task task_b = ConstructMockedTask(mock_task_b, ThreadPoolClock::Now(),
+ TimeDelta::FromHours(2));
testing::StrictMock<MockTask> mock_task_c;
- Task task_c = ConstructMockedTask(
- mock_task_c, service_thread_task_runner_->GetMockTickClock()->NowTicks(),
- TimeDelta::FromHours(1));
+ Task task_c = ConstructMockedTask(mock_task_c, ThreadPoolClock::Now(),
+ TimeDelta::FromHours(1));
// Send tasks to the DelayedTaskManager.
delayed_task_manager_.AddDelayedTask(std::move(task_a), BindOnce(&RunTask),
diff --git a/chromium/base/task/thread_pool/job_task_source.cc b/chromium/base/task/thread_pool/job_task_source.cc
new file mode 100644
index 00000000000..571b77550d5
--- /dev/null
+++ b/chromium/base/task/thread_pool/job_task_source.cc
@@ -0,0 +1,90 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/thread_pool/job_task_source.h"
+
+#include <utility>
+
+#include "base/logging.h"
+#include "base/memory/ptr_util.h"
+#include "base/task/task_features.h"
+#include "base/task/thread_pool/thread_pool_clock.h"
+#include "base/time/time.h"
+
+namespace base {
+namespace internal {
+
+JobTaskSource::JobTaskSource(const Location& from_here,
+ base::RepeatingClosure worker_task,
+ const TaskTraits& traits)
+ : TaskSource(traits, nullptr, TaskSourceExecutionMode::kJob),
+ from_here_(from_here),
+ worker_task_(std::move(worker_task)),
+ queue_time_(ThreadPoolClock::Now()) {}
+
+JobTaskSource::~JobTaskSource() = default;
+
+ExecutionEnvironment JobTaskSource::GetExecutionEnvironment() {
+ return {SequenceToken::Create(), nullptr};
+}
+
+TaskSource::RunIntent JobTaskSource::WillRunTask() {
+ const size_t max_concurrency = GetMaxConcurrency();
+ const size_t worker_count_initial =
+ worker_count_.load(std::memory_order_relaxed);
+ // Don't allow this worker to run the task if either:
+ // A) |worker_count_| is already at |max_concurrency|.
+ // B) |max_concurrency| was lowered below or to |worker_count_|.
+ if (worker_count_initial >= max_concurrency) {
+ // The caller receives an invalid RunIntent and should skip this TaskSource.
+ return RunIntent();
+ }
+ const size_t worker_count_before_add =
+ worker_count_.fetch_add(1, std::memory_order_relaxed);
+ // WillRunTask() has external synchronization to prevent concurrent calls and
+ // it is the only place where |worker_count_| is incremented. Therefore, the
+ // second reading of |worker_count_| from WillRunTask() cannot be greater than
+ // the first reading. However, since DidProcessTask() can decrement
+ // |worker_count_| concurrently with WillRunTask(), the second reading can be
+ // lower than the first reading.
+ DCHECK_LE(worker_count_before_add, worker_count_initial);
+ DCHECK_LT(worker_count_before_add, max_concurrency);
+ return MakeRunIntent(max_concurrency == worker_count_before_add + 1
+ ? Saturated::kYes
+ : Saturated::kNo);
+}
+
+size_t JobTaskSource::GetRemainingConcurrency() const {
+ return GetMaxConcurrency() - worker_count_.load(std::memory_order_relaxed);
+}
+
+Optional<Task> JobTaskSource::TakeTask() {
+ DCHECK_GT(worker_count_.load(std::memory_order_relaxed), 0U);
+ DCHECK(worker_task_);
+ return base::make_optional<Task>(from_here_, worker_task_, TimeDelta());
+}
+
+bool JobTaskSource::DidProcessTask(RunResult run_result) {
+ size_t worker_count_before_sub =
+ worker_count_.fetch_sub(1, std::memory_order_relaxed);
+ DCHECK_GT(worker_count_before_sub, 0U);
+ // Re-enqueue the TaskSource if the task ran and the worker count is below the
+ // max concurrency.
+ const bool must_be_queued =
+ run_result == RunResult::kSkippedAtShutdown
+ ? false
+ : worker_count_before_sub <= GetMaxConcurrency();
+ return must_be_queued;
+}
+
+SequenceSortKey JobTaskSource::GetSortKey() const {
+ return SequenceSortKey(traits_.priority(), queue_time_);
+}
+
+void JobTaskSource::Clear() {
+ worker_task_.Reset();
+}
+
+} // namespace internal
+} // namespace base
diff --git a/chromium/base/task/thread_pool/job_task_source.h b/chromium/base/task/thread_pool/job_task_source.h
new file mode 100644
index 00000000000..7e110a7ea98
--- /dev/null
+++ b/chromium/base/task/thread_pool/job_task_source.h
@@ -0,0 +1,69 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_THREAD_POOL_JOB_TASK_SOURCE_H_
+#define BASE_TASK_THREAD_POOL_JOB_TASK_SOURCE_H_
+
+#include <stddef.h>
+
+#include <atomic>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/optional.h"
+#include "base/task/task_traits.h"
+#include "base/task/thread_pool/sequence_sort_key.h"
+#include "base/task/thread_pool/task.h"
+#include "base/task/thread_pool/task_source.h"
+
+namespace base {
+namespace internal {
+
+// A JobTaskSource generates many Tasks from a single RepeatingClosure.
+//
+// Derived classes control the intended concurrency with GetMaxConcurrency().
+// Increase in concurrency is not supported and should never happen.
+// TODO(etiennep): Support concurrency increase.
+class BASE_EXPORT JobTaskSource : public TaskSource {
+ public:
+ JobTaskSource(const Location& from_here,
+ base::RepeatingClosure task,
+ const TaskTraits& traits);
+
+ // TaskSource:
+ RunIntent WillRunTask() override;
+ ExecutionEnvironment GetExecutionEnvironment() override;
+ size_t GetRemainingConcurrency() const override;
+
+ protected:
+ ~JobTaskSource() override;
+
+ // Returns the maximum number of tasks from this TaskSource that can run
+ // concurrently. The implementation can only return values lower than or equal
+ // to previously returned values.
+ virtual size_t GetMaxConcurrency() const = 0;
+
+ private:
+ // TaskSource:
+ Optional<Task> TakeTask() override;
+ bool DidProcessTask(RunResult run_result) override;
+ SequenceSortKey GetSortKey() const override;
+ void Clear() override;
+
+ // The current number of workers concurrently running tasks from this
+ // TaskSource. "memory_order_relaxed" is sufficient to access this variable as
+ // no other state is synchronized with it.
+ std::atomic_size_t worker_count_{0U};
+
+ const Location from_here_;
+ base::RepeatingClosure worker_task_;
+ const TimeTicks queue_time_;
+
+ DISALLOW_COPY_AND_ASSIGN(JobTaskSource);
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_TASK_THREAD_POOL_JOB_TASK_SOURCE_H_
diff --git a/chromium/base/task/thread_pool/job_task_source_unittest.cc b/chromium/base/task/thread_pool/job_task_source_unittest.cc
new file mode 100644
index 00000000000..a68657cf26b
--- /dev/null
+++ b/chromium/base/task/thread_pool/job_task_source_unittest.cc
@@ -0,0 +1,146 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/thread_pool/job_task_source.h"
+
+#include <utility>
+
+#include "base/bind_helpers.h"
+#include "base/memory/ptr_util.h"
+#include "base/task/thread_pool/test_utils.h"
+#include "base/test/gtest_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace internal {
+
+// Verifies the normal flow of running 2 tasks in series.
+TEST(ThreadPoolJobTaskSourceTest, RunTasks) {
+ scoped_refptr<test::MockJobTaskSource> task_source =
+ MakeRefCounted<test::MockJobTaskSource>(
+ FROM_HERE, DoNothing(), TaskTraits(TaskPriority::BEST_EFFORT),
+ /* num_tasks_to_run */ 2, /* max_concurrency */ 1);
+
+ TaskSource::Transaction task_source_transaction(
+ task_source->BeginTransaction());
+
+ {
+ auto run_intent = task_source->WillRunTask();
+ EXPECT_TRUE(run_intent);
+ EXPECT_TRUE(run_intent.IsSaturated());
+
+ // An attempt to run an additional task is not allowed until this task
+ // is processed.
+ EXPECT_FALSE(task_source->WillRunTask());
+
+ auto task = task_source_transaction.TakeTask(&run_intent);
+
+ EXPECT_FALSE(task_source->WillRunTask());
+
+ std::move(task->task).Run();
+ EXPECT_TRUE(task_source_transaction.DidProcessTask(std::move(run_intent)));
+ }
+ {
+ auto run_intent = task_source->WillRunTask();
+ EXPECT_TRUE(run_intent);
+ EXPECT_TRUE(run_intent.IsSaturated());
+ auto task = task_source_transaction.TakeTask(&run_intent);
+ std::move(task->task).Run();
+ EXPECT_FALSE(task_source_transaction.DidProcessTask(std::move(run_intent)));
+ }
+}
+
+// Verifies that a job task source doesn't get reenqueued when a task is not
+// run.
+TEST(ThreadPoolJobTaskSourceTest, SkipTask) {
+ scoped_refptr<test::MockJobTaskSource> task_source =
+ MakeRefCounted<test::MockJobTaskSource>(
+ FROM_HERE, DoNothing(), TaskTraits(TaskPriority::BEST_EFFORT),
+ /* num_tasks_to_run */ 2, /* max_concurrency */ 1);
+
+ TaskSource::Transaction task_source_transaction(
+ task_source->BeginTransaction());
+
+ auto run_intent = task_source->WillRunTask();
+ EXPECT_TRUE(run_intent);
+ EXPECT_TRUE(run_intent.IsSaturated());
+ auto task = task_source_transaction.TakeTask(&run_intent);
+ EXPECT_FALSE(task_source_transaction.DidProcessTask(
+ std::move(run_intent), TaskSource::RunResult::kSkippedAtShutdown));
+}
+
+// Verifies that multiple tasks can run in parallel up to |max_concurrency|.
+TEST(ThreadPoolJobTaskSourceTest, RunTasksInParallel) {
+ scoped_refptr<test::MockJobTaskSource> task_source =
+ MakeRefCounted<test::MockJobTaskSource>(
+ FROM_HERE, DoNothing(), TaskTraits(TaskPriority::BEST_EFFORT),
+ /* num_tasks_to_run */ 3, /* max_concurrency */ 2);
+
+ TaskSource::Transaction task_source_transaction(
+ task_source->BeginTransaction());
+
+ auto run_intent_a = task_source->WillRunTask();
+ EXPECT_TRUE(run_intent_a);
+ EXPECT_FALSE(run_intent_a.IsSaturated());
+ auto task_a = task_source_transaction.TakeTask(&run_intent_a);
+
+ auto run_intent_b = task_source->WillRunTask();
+ EXPECT_TRUE(run_intent_b);
+ EXPECT_TRUE(run_intent_b.IsSaturated());
+ auto task_b = task_source_transaction.TakeTask(&run_intent_b);
+
+ // WillRunTask() should return a null RunIntent once the max concurrency is
+ // reached.
+ EXPECT_FALSE(task_source->WillRunTask());
+
+ std::move(task_a->task).Run();
+ EXPECT_TRUE(task_source_transaction.DidProcessTask(std::move(run_intent_a)));
+
+ std::move(task_b->task).Run();
+ EXPECT_TRUE(task_source_transaction.DidProcessTask(std::move(run_intent_b)));
+
+ auto run_intent_c = task_source->WillRunTask();
+ EXPECT_TRUE(run_intent_c);
+ EXPECT_TRUE(run_intent_c.IsSaturated());
+ auto task_c = task_source_transaction.TakeTask(&run_intent_c);
+
+ std::move(task_c->task).Run();
+ EXPECT_FALSE(task_source_transaction.DidProcessTask(std::move(run_intent_c)));
+}
+
+TEST(ThreadPoolJobTaskSourceTest, InvalidTakeTask) {
+ scoped_refptr<test::MockJobTaskSource> task_source =
+ MakeRefCounted<test::MockJobTaskSource>(
+ FROM_HERE, DoNothing(), TaskTraits(TaskPriority::BEST_EFFORT),
+ /* num_tasks_to_run */ 1, /* max_concurrency */ 1);
+ TaskSource::Transaction task_source_transaction(
+ task_source->BeginTransaction());
+
+ auto run_intent_a = task_source->WillRunTask();
+ auto run_intent_b = task_source->WillRunTask();
+ EXPECT_FALSE(run_intent_b);
+ // Can not be called with an invalid RunIntent.
+ EXPECT_DCHECK_DEATH(
+ { auto task = task_source_transaction.TakeTask(&run_intent_b); });
+ run_intent_a.ReleaseForTesting();
+}
+
+TEST(ThreadPoolJobTaskSourceTest, InvalidDidProcessTask) {
+ scoped_refptr<test::MockJobTaskSource> task_source =
+ MakeRefCounted<test::MockJobTaskSource>(
+ FROM_HERE, DoNothing(), TaskTraits(TaskPriority::BEST_EFFORT),
+ /* num_tasks_to_run */ 1, /* max_concurrency */ 1);
+ TaskSource::Transaction task_source_transaction(
+ task_source->BeginTransaction());
+
+ auto run_intent = task_source->WillRunTask();
+ EXPECT_TRUE(run_intent);
+ // Can not be called before TakeTask().
+ EXPECT_DCHECK_DEATH(
+ task_source_transaction.DidProcessTask(std::move(run_intent)));
+ run_intent.ReleaseForTesting();
+}
+
+} // namespace internal
+} // namespace base
diff --git a/chromium/base/task/thread_pool/pooled_single_thread_task_runner_manager.cc b/chromium/base/task/thread_pool/pooled_single_thread_task_runner_manager.cc
index 5aebe0bdf0f..5b680bd6b1b 100644
--- a/chromium/base/task/thread_pool/pooled_single_thread_task_runner_manager.cc
+++ b/chromium/base/task/thread_pool/pooled_single_thread_task_runner_manager.cc
@@ -110,20 +110,23 @@ class WorkerThreadDelegate : public WorkerThread::Delegate {
PlatformThread::SetName(thread_name_);
}
- RegisteredTaskSource GetWork(WorkerThread* worker) override {
+ RunIntentWithRegisteredTaskSource GetWork(WorkerThread* worker) override {
CheckedAutoLock auto_lock(lock_);
DCHECK(worker_awake_);
auto task_source = GetWorkLockRequired(worker);
if (!task_source) {
// The worker will sleep after this returns nullptr.
worker_awake_ = false;
+ return nullptr;
}
- return task_source;
+ auto run_intent = task_source->WillRunTask();
+ DCHECK(run_intent);
+ return {std::move(task_source), std::move(run_intent)};
}
- void DidRunTask(RegisteredTaskSource task_source) override {
+ void DidProcessTask(RegisteredTaskSource task_source) override {
if (task_source) {
- EnqueueTaskSource(RegisteredTaskSourceAndTransaction::FromTaskSource(
+ EnqueueTaskSource(TransactionWithRegisteredTaskSource::FromTaskSource(
std::move(task_source)));
}
}
@@ -143,6 +146,7 @@ class WorkerThreadDelegate : public WorkerThread::Delegate {
auto registered_task_source = task_tracker_->WillQueueTaskSource(sequence);
if (!registered_task_source)
return false;
+ task_tracker_->WillPostTaskNow(task, transaction.traits().priority());
transaction.PushTask(std::move(task));
bool should_wakeup = EnqueueTaskSource(
{std::move(registered_task_source), std::move(transaction)});
@@ -198,9 +202,9 @@ class WorkerThreadDelegate : public WorkerThread::Delegate {
// Returns true iff the worker must wakeup, i.e. task source is allowed to run
// and the worker was not awake.
bool EnqueueTaskSource(
- RegisteredTaskSourceAndTransaction task_source_and_transaction) {
+ TransactionWithRegisteredTaskSource transaction_with_task_source) {
CheckedAutoLock auto_lock(lock_);
- priority_queue_.Push(std::move(task_source_and_transaction));
+ priority_queue_.Push(std::move(transaction_with_task_source));
if (!worker_awake_ && CanRunNextTaskSource()) {
worker_awake_ = true;
return true;
@@ -249,7 +253,7 @@ class WorkerThreadCOMDelegate : public WorkerThreadDelegate {
scoped_com_initializer_ = std::make_unique<win::ScopedCOMInitializer>();
}
- RegisteredTaskSource GetWork(WorkerThread* worker) override {
+ RunIntentWithRegisteredTaskSource GetWork(WorkerThread* worker) override {
// This scheme below allows us to cover the following scenarios:
// * Only WorkerThreadDelegate::GetWork() has work:
// Always return the task source from GetWork().
@@ -304,8 +308,11 @@ class WorkerThreadCOMDelegate : public WorkerThreadDelegate {
if (!task_source) {
// The worker will sleep after this returns nullptr.
worker_awake_ = false;
+ return nullptr;
}
- return task_source;
+ auto run_intent = task_source->WillRunTask();
+ DCHECK(run_intent);
+ return {std::move(task_source), std::move(run_intent)};
}
void OnMainExit(WorkerThread* /* worker */) override {
@@ -534,20 +541,18 @@ void PooledSingleThreadTaskRunnerManager::DidUpdateCanRunPolicy() {
}
scoped_refptr<SingleThreadTaskRunner>
-PooledSingleThreadTaskRunnerManager::CreateSingleThreadTaskRunnerWithTraits(
+PooledSingleThreadTaskRunnerManager::CreateSingleThreadTaskRunner(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode) {
- return CreateTaskRunnerWithTraitsImpl<WorkerThreadDelegate>(traits,
- thread_mode);
+ return CreateTaskRunnerImpl<WorkerThreadDelegate>(traits, thread_mode);
}
#if defined(OS_WIN)
scoped_refptr<SingleThreadTaskRunner>
-PooledSingleThreadTaskRunnerManager::CreateCOMSTATaskRunnerWithTraits(
+PooledSingleThreadTaskRunnerManager::CreateCOMSTATaskRunner(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode) {
- return CreateTaskRunnerWithTraitsImpl<WorkerThreadCOMDelegate>(traits,
- thread_mode);
+ return CreateTaskRunnerImpl<WorkerThreadCOMDelegate>(traits, thread_mode);
}
#endif // defined(OS_WIN)
@@ -562,7 +567,7 @@ PooledSingleThreadTaskRunnerManager::TraitsToContinueOnShutdown(
template <typename DelegateType>
scoped_refptr<PooledSingleThreadTaskRunnerManager::PooledSingleThreadTaskRunner>
-PooledSingleThreadTaskRunnerManager::CreateTaskRunnerWithTraitsImpl(
+PooledSingleThreadTaskRunnerManager::CreateTaskRunnerImpl(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode) {
DCHECK(thread_mode != SingleThreadTaskRunnerThreadMode::SHARED ||
diff --git a/chromium/base/task/thread_pool/pooled_single_thread_task_runner_manager.h b/chromium/base/task/thread_pool/pooled_single_thread_task_runner_manager.h
index 17c8348d414..2b69a290098 100644
--- a/chromium/base/task/thread_pool/pooled_single_thread_task_runner_manager.h
+++ b/chromium/base/task/thread_pool/pooled_single_thread_task_runner_manager.h
@@ -70,7 +70,7 @@ class BASE_EXPORT PooledSingleThreadTaskRunnerManager final {
// named "ThreadPoolSingleThread[Shared]" +
// kEnvironmentParams[GetEnvironmentIndexForTraits(traits)].name_suffix +
// index.
- scoped_refptr<SingleThreadTaskRunner> CreateSingleThreadTaskRunnerWithTraits(
+ scoped_refptr<SingleThreadTaskRunner> CreateSingleThreadTaskRunner(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode);
@@ -79,7 +79,7 @@ class BASE_EXPORT PooledSingleThreadTaskRunnerManager final {
// STA thread named "ThreadPoolSingleThreadCOMSTA[Shared]" +
// kEnvironmentParams[GetEnvironmentIndexForTraits(traits)].name_suffix +
// index.
- scoped_refptr<SingleThreadTaskRunner> CreateCOMSTATaskRunnerWithTraits(
+ scoped_refptr<SingleThreadTaskRunner> CreateCOMSTATaskRunner(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode);
#endif // defined(OS_WIN)
@@ -99,7 +99,7 @@ class BASE_EXPORT PooledSingleThreadTaskRunnerManager final {
const TaskTraits& traits);
template <typename DelegateType>
- scoped_refptr<PooledSingleThreadTaskRunner> CreateTaskRunnerWithTraitsImpl(
+ scoped_refptr<PooledSingleThreadTaskRunner> CreateTaskRunnerImpl(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode);
diff --git a/chromium/base/task/thread_pool/pooled_single_thread_task_runner_manager_unittest.cc b/chromium/base/task/thread_pool/pooled_single_thread_task_runner_manager_unittest.cc
index c11dc45d8f7..1559b65435f 100644
--- a/chromium/base/task/thread_pool/pooled_single_thread_task_runner_manager_unittest.cc
+++ b/chromium/base/task/thread_pool/pooled_single_thread_task_runner_manager_unittest.cc
@@ -69,7 +69,7 @@ class PooledSingleThreadTaskRunnerManagerTest : public testing::Test {
}
Thread service_thread_;
- TaskTracker task_tracker_ = {"Test"};
+ TaskTracker task_tracker_{"Test"};
DelayedTaskManager delayed_task_manager_;
std::unique_ptr<PooledSingleThreadTaskRunnerManager>
single_thread_task_runner_manager_;
@@ -91,15 +91,13 @@ void ShouldNotRun() {
TEST_F(PooledSingleThreadTaskRunnerManagerTest, DifferentThreadsUsed) {
scoped_refptr<SingleThreadTaskRunner> task_runner_1 =
- single_thread_task_runner_manager_
- ->CreateSingleThreadTaskRunnerWithTraits(
- {TaskShutdownBehavior::BLOCK_SHUTDOWN},
- SingleThreadTaskRunnerThreadMode::DEDICATED);
+ single_thread_task_runner_manager_->CreateSingleThreadTaskRunner(
+ {ThreadPool(), TaskShutdownBehavior::BLOCK_SHUTDOWN},
+ SingleThreadTaskRunnerThreadMode::DEDICATED);
scoped_refptr<SingleThreadTaskRunner> task_runner_2 =
- single_thread_task_runner_manager_
- ->CreateSingleThreadTaskRunnerWithTraits(
- {TaskShutdownBehavior::BLOCK_SHUTDOWN},
- SingleThreadTaskRunnerThreadMode::DEDICATED);
+ single_thread_task_runner_manager_->CreateSingleThreadTaskRunner(
+ {ThreadPool(), TaskShutdownBehavior::BLOCK_SHUTDOWN},
+ SingleThreadTaskRunnerThreadMode::DEDICATED);
PlatformThreadRef thread_ref_1;
task_runner_1->PostTask(FROM_HERE,
@@ -117,15 +115,13 @@ TEST_F(PooledSingleThreadTaskRunnerManagerTest, DifferentThreadsUsed) {
TEST_F(PooledSingleThreadTaskRunnerManagerTest, SameThreadUsed) {
scoped_refptr<SingleThreadTaskRunner> task_runner_1 =
- single_thread_task_runner_manager_
- ->CreateSingleThreadTaskRunnerWithTraits(
- {TaskShutdownBehavior::BLOCK_SHUTDOWN},
- SingleThreadTaskRunnerThreadMode::SHARED);
+ single_thread_task_runner_manager_->CreateSingleThreadTaskRunner(
+ {ThreadPool(), TaskShutdownBehavior::BLOCK_SHUTDOWN},
+ SingleThreadTaskRunnerThreadMode::SHARED);
scoped_refptr<SingleThreadTaskRunner> task_runner_2 =
- single_thread_task_runner_manager_
- ->CreateSingleThreadTaskRunnerWithTraits(
- {TaskShutdownBehavior::BLOCK_SHUTDOWN},
- SingleThreadTaskRunnerThreadMode::SHARED);
+ single_thread_task_runner_manager_->CreateSingleThreadTaskRunner(
+ {ThreadPool(), TaskShutdownBehavior::BLOCK_SHUTDOWN},
+ SingleThreadTaskRunnerThreadMode::SHARED);
PlatformThreadRef thread_ref_1;
task_runner_1->PostTask(FROM_HERE,
@@ -143,15 +139,13 @@ TEST_F(PooledSingleThreadTaskRunnerManagerTest, SameThreadUsed) {
TEST_F(PooledSingleThreadTaskRunnerManagerTest, RunsTasksInCurrentSequence) {
scoped_refptr<SingleThreadTaskRunner> task_runner_1 =
- single_thread_task_runner_manager_
- ->CreateSingleThreadTaskRunnerWithTraits(
- {TaskShutdownBehavior::BLOCK_SHUTDOWN},
- SingleThreadTaskRunnerThreadMode::DEDICATED);
+ single_thread_task_runner_manager_->CreateSingleThreadTaskRunner(
+ {ThreadPool(), TaskShutdownBehavior::BLOCK_SHUTDOWN},
+ SingleThreadTaskRunnerThreadMode::DEDICATED);
scoped_refptr<SingleThreadTaskRunner> task_runner_2 =
- single_thread_task_runner_manager_
- ->CreateSingleThreadTaskRunnerWithTraits(
- {TaskShutdownBehavior::BLOCK_SHUTDOWN},
- SingleThreadTaskRunnerThreadMode::DEDICATED);
+ single_thread_task_runner_manager_->CreateSingleThreadTaskRunner(
+ {ThreadPool(), TaskShutdownBehavior::BLOCK_SHUTDOWN},
+ SingleThreadTaskRunnerThreadMode::DEDICATED);
EXPECT_FALSE(task_runner_1->RunsTasksInCurrentSequence());
EXPECT_FALSE(task_runner_2->RunsTasksInCurrentSequence());
@@ -183,8 +177,9 @@ TEST_F(PooledSingleThreadTaskRunnerManagerTest,
SharedWithBaseSyncPrimitivesDCHECKs) {
testing::GTEST_FLAG(death_test_style) = "threadsafe";
EXPECT_DCHECK_DEATH({
- single_thread_task_runner_manager_->CreateSingleThreadTaskRunnerWithTraits(
- {WithBaseSyncPrimitives()}, SingleThreadTaskRunnerThreadMode::SHARED);
+ single_thread_task_runner_manager_->CreateSingleThreadTaskRunner(
+ {ThreadPool(), WithBaseSyncPrimitives()},
+ SingleThreadTaskRunnerThreadMode::SHARED);
});
}
@@ -197,8 +192,8 @@ TEST_F(PooledSingleThreadTaskRunnerManagerTest,
// Post a CONTINUE_ON_SHUTDOWN task that waits on
// |task_can_continue| to a shared SingleThreadTaskRunner.
single_thread_task_runner_manager_
- ->CreateSingleThreadTaskRunnerWithTraits(
- {TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN},
+ ->CreateSingleThreadTaskRunner(
+ {ThreadPool(), TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN},
SingleThreadTaskRunnerThreadMode::SHARED)
->PostTask(FROM_HERE, base::BindOnce(
[](WaitableEvent* task_has_started,
@@ -215,8 +210,8 @@ TEST_F(PooledSingleThreadTaskRunnerManagerTest,
// Post a BLOCK_SHUTDOWN task to a shared SingleThreadTaskRunner.
single_thread_task_runner_manager_
- ->CreateSingleThreadTaskRunnerWithTraits(
- {TaskShutdownBehavior::BLOCK_SHUTDOWN},
+ ->CreateSingleThreadTaskRunner(
+ {ThreadPool(), TaskShutdownBehavior::BLOCK_SHUTDOWN},
SingleThreadTaskRunnerThreadMode::SHARED)
->PostTask(FROM_HERE, DoNothing());
@@ -239,10 +234,10 @@ class PooledSingleThreadTaskRunnerManagerCommonTest
public:
PooledSingleThreadTaskRunnerManagerCommonTest() = default;
- scoped_refptr<SingleThreadTaskRunner> CreateTaskRunner(
- TaskTraits traits = TaskTraits()) {
- return single_thread_task_runner_manager_
- ->CreateSingleThreadTaskRunnerWithTraits(traits, GetParam());
+ scoped_refptr<SingleThreadTaskRunner> CreateTaskRunner(TaskTraits traits = {
+ ThreadPool()}) {
+ return single_thread_task_runner_manager_->CreateSingleThreadTaskRunner(
+ traits, GetParam());
}
private:
@@ -256,25 +251,29 @@ TEST_P(PooledSingleThreadTaskRunnerManagerCommonTest, PrioritySetCorrectly) {
TaskTraits traits;
ThreadPriority expected_thread_priority;
} test_cases[] = {
- {TaskTraits(TaskPriority::BEST_EFFORT),
+ {{ThreadPool(), TaskPriority::BEST_EFFORT},
CanUseBackgroundPriorityForWorkerThread() ? ThreadPriority::BACKGROUND
: ThreadPriority::NORMAL},
- {TaskTraits(TaskPriority::BEST_EFFORT, ThreadPolicy::PREFER_BACKGROUND),
+ {{ThreadPool(), TaskPriority::BEST_EFFORT,
+ ThreadPolicy::PREFER_BACKGROUND},
CanUseBackgroundPriorityForWorkerThread() ? ThreadPriority::BACKGROUND
: ThreadPriority::NORMAL},
- {TaskTraits(TaskPriority::BEST_EFFORT, ThreadPolicy::MUST_USE_FOREGROUND),
+ {{ThreadPool(), TaskPriority::BEST_EFFORT,
+ ThreadPolicy::MUST_USE_FOREGROUND},
ThreadPriority::NORMAL},
- {TaskTraits(TaskPriority::USER_VISIBLE), ThreadPriority::NORMAL},
- {TaskTraits(TaskPriority::USER_VISIBLE, ThreadPolicy::PREFER_BACKGROUND),
+ {{ThreadPool(), TaskPriority::USER_VISIBLE}, ThreadPriority::NORMAL},
+ {{ThreadPool(), TaskPriority::USER_VISIBLE,
+ ThreadPolicy::PREFER_BACKGROUND},
ThreadPriority::NORMAL},
- {TaskTraits(TaskPriority::USER_VISIBLE,
- ThreadPolicy::MUST_USE_FOREGROUND),
+ {{ThreadPool(), TaskPriority::USER_VISIBLE,
+ ThreadPolicy::MUST_USE_FOREGROUND},
ThreadPriority::NORMAL},
- {TaskTraits(TaskPriority::USER_BLOCKING), ThreadPriority::NORMAL},
- {TaskTraits(TaskPriority::USER_BLOCKING, ThreadPolicy::PREFER_BACKGROUND),
+ {{ThreadPool(), TaskPriority::USER_BLOCKING}, ThreadPriority::NORMAL},
+ {{ThreadPool(), TaskPriority::USER_BLOCKING,
+ ThreadPolicy::PREFER_BACKGROUND},
ThreadPriority::NORMAL},
- {TaskTraits(TaskPriority::USER_BLOCKING,
- ThreadPolicy::MUST_USE_FOREGROUND),
+ {{ThreadPool(), TaskPriority::USER_BLOCKING,
+ ThreadPolicy::MUST_USE_FOREGROUND},
ThreadPriority::NORMAL}};
// Why are events used here instead of the task tracker?
@@ -310,51 +309,56 @@ TEST_P(PooledSingleThreadTaskRunnerManagerCommonTest, ThreadNamesSet) {
std::string expected_thread_name;
} test_cases[] = {
// Non-MayBlock()
- {TaskTraits(TaskPriority::BEST_EFFORT),
+ {{ThreadPool(), TaskPriority::BEST_EFFORT},
CanUseBackgroundPriorityForWorkerThread() ? background : foreground},
- {TaskTraits(TaskPriority::BEST_EFFORT, ThreadPolicy::PREFER_BACKGROUND),
+ {{ThreadPool(), TaskPriority::BEST_EFFORT,
+ ThreadPolicy::PREFER_BACKGROUND},
CanUseBackgroundPriorityForWorkerThread() ? background : foreground},
- {TaskTraits(TaskPriority::BEST_EFFORT, ThreadPolicy::MUST_USE_FOREGROUND),
+ {{ThreadPool(), TaskPriority::BEST_EFFORT,
+ ThreadPolicy::MUST_USE_FOREGROUND},
foreground},
- {TaskTraits(TaskPriority::USER_VISIBLE), foreground},
- {TaskTraits(TaskPriority::USER_VISIBLE, ThreadPolicy::PREFER_BACKGROUND),
+ {{ThreadPool(), TaskPriority::USER_VISIBLE}, foreground},
+ {{ThreadPool(), TaskPriority::USER_VISIBLE,
+ ThreadPolicy::PREFER_BACKGROUND},
foreground},
- {TaskTraits(TaskPriority::USER_VISIBLE,
- ThreadPolicy::MUST_USE_FOREGROUND),
+ {{ThreadPool(), TaskPriority::USER_VISIBLE,
+ ThreadPolicy::MUST_USE_FOREGROUND},
foreground},
- {TaskTraits(TaskPriority::USER_BLOCKING), foreground},
- {TaskTraits(TaskPriority::USER_BLOCKING, ThreadPolicy::PREFER_BACKGROUND),
+ {{ThreadPool(), TaskPriority::USER_BLOCKING}, foreground},
+ {{ThreadPool(), TaskPriority::USER_BLOCKING,
+ ThreadPolicy::PREFER_BACKGROUND},
foreground},
- {TaskTraits(TaskPriority::USER_BLOCKING,
- ThreadPolicy::MUST_USE_FOREGROUND),
+ {{ThreadPool(), TaskPriority::USER_BLOCKING,
+ ThreadPolicy::MUST_USE_FOREGROUND},
foreground},
// MayBlock()
- {TaskTraits(TaskPriority::BEST_EFFORT, MayBlock()),
+ {{ThreadPool(), TaskPriority::BEST_EFFORT, MayBlock()},
CanUseBackgroundPriorityForWorkerThread() ? background_blocking
: foreground_blocking},
- {TaskTraits(TaskPriority::BEST_EFFORT, ThreadPolicy::PREFER_BACKGROUND,
- MayBlock()),
+ {{ThreadPool(), TaskPriority::BEST_EFFORT,
+ ThreadPolicy::PREFER_BACKGROUND, MayBlock()},
CanUseBackgroundPriorityForWorkerThread() ? background_blocking
: foreground_blocking},
- {TaskTraits(TaskPriority::BEST_EFFORT, ThreadPolicy::MUST_USE_FOREGROUND,
- MayBlock()),
+ {{ThreadPool(), TaskPriority::BEST_EFFORT,
+ ThreadPolicy::MUST_USE_FOREGROUND, MayBlock()},
+ foreground_blocking},
+ {{ThreadPool(), TaskPriority::USER_VISIBLE, MayBlock()},
foreground_blocking},
- {TaskTraits(TaskPriority::USER_VISIBLE, MayBlock()), foreground_blocking},
- {TaskTraits(TaskPriority::USER_VISIBLE, ThreadPolicy::PREFER_BACKGROUND,
- MayBlock()),
+ {{ThreadPool(), TaskPriority::USER_VISIBLE,
+ ThreadPolicy::PREFER_BACKGROUND, MayBlock()},
foreground_blocking},
- {TaskTraits(TaskPriority::USER_VISIBLE, ThreadPolicy::MUST_USE_FOREGROUND,
- MayBlock()),
+ {{ThreadPool(), TaskPriority::USER_VISIBLE,
+ ThreadPolicy::MUST_USE_FOREGROUND, MayBlock()},
foreground_blocking},
- {TaskTraits(TaskPriority::USER_BLOCKING, MayBlock()),
+ {{ThreadPool(), TaskPriority::USER_BLOCKING, MayBlock()},
foreground_blocking},
- {TaskTraits(TaskPriority::USER_BLOCKING, ThreadPolicy::PREFER_BACKGROUND,
- MayBlock()),
+ {{ThreadPool(), TaskPriority::USER_BLOCKING,
+ ThreadPolicy::PREFER_BACKGROUND, MayBlock()},
foreground_blocking},
- {TaskTraits(TaskPriority::USER_BLOCKING,
- ThreadPolicy::MUST_USE_FOREGROUND, MayBlock()),
+ {{ThreadPool(), TaskPriority::USER_BLOCKING,
+ ThreadPolicy::MUST_USE_FOREGROUND, MayBlock()},
foreground_blocking}};
for (auto& test_case : test_cases) {
@@ -421,7 +425,9 @@ TEST_P(PooledSingleThreadTaskRunnerManagerCommonTest, PostTaskAfterDestroy) {
TEST_P(PooledSingleThreadTaskRunnerManagerCommonTest, CanRunPolicyBasic) {
test::TestCanRunPolicyBasic(
single_thread_task_runner_manager_.get(),
- [this](TaskPriority priority) { return CreateTaskRunner({priority}); },
+ [this](TaskPriority priority) {
+ return CreateTaskRunner({ThreadPool(), priority});
+ },
&task_tracker_);
}
@@ -429,14 +435,18 @@ TEST_P(PooledSingleThreadTaskRunnerManagerCommonTest,
CanRunPolicyUpdatedBeforeRun) {
test::TestCanRunPolicyChangedBeforeRun(
single_thread_task_runner_manager_.get(),
- [this](TaskPriority priority) { return CreateTaskRunner({priority}); },
+ [this](TaskPriority priority) {
+ return CreateTaskRunner({ThreadPool(), priority});
+ },
&task_tracker_);
}
TEST_P(PooledSingleThreadTaskRunnerManagerCommonTest, CanRunPolicyLoad) {
test::TestCanRunPolicyLoad(
single_thread_task_runner_manager_.get(),
- [this](TaskPriority priority) { return CreateTaskRunner({priority}); },
+ [this](TaskPriority priority) {
+ return CreateTaskRunner({ThreadPool(), priority});
+ },
&task_tracker_);
}
@@ -496,10 +506,10 @@ TEST_F(PooledSingleThreadTaskRunnerManagerJoinTest, ConcurrentJoin) {
WaitableEvent task_blocking;
{
- auto task_runner = single_thread_task_runner_manager_
- ->CreateSingleThreadTaskRunnerWithTraits(
- {WithBaseSyncPrimitives()},
- SingleThreadTaskRunnerThreadMode::DEDICATED);
+ auto task_runner =
+ single_thread_task_runner_manager_->CreateSingleThreadTaskRunner(
+ {ThreadPool(), WithBaseSyncPrimitives()},
+ SingleThreadTaskRunnerThreadMode::DEDICATED);
EXPECT_TRUE(task_runner->PostTask(
FROM_HERE,
BindOnce(&WaitableEvent::Signal, Unretained(&task_running))));
@@ -524,10 +534,10 @@ TEST_F(PooledSingleThreadTaskRunnerManagerJoinTest,
WaitableEvent task_blocking;
{
- auto task_runner = single_thread_task_runner_manager_
- ->CreateSingleThreadTaskRunnerWithTraits(
- {WithBaseSyncPrimitives()},
- SingleThreadTaskRunnerThreadMode::DEDICATED);
+ auto task_runner =
+ single_thread_task_runner_manager_->CreateSingleThreadTaskRunner(
+ {ThreadPool(), WithBaseSyncPrimitives()},
+ SingleThreadTaskRunnerThreadMode::DEDICATED);
EXPECT_TRUE(task_runner->PostTask(
FROM_HERE,
BindOnce(&WaitableEvent::Signal, Unretained(&task_running))));
@@ -549,7 +559,7 @@ TEST_F(PooledSingleThreadTaskRunnerManagerJoinTest,
TEST_P(PooledSingleThreadTaskRunnerManagerCommonTest, COMSTAInitialized) {
scoped_refptr<SingleThreadTaskRunner> com_task_runner =
- single_thread_task_runner_manager_->CreateCOMSTATaskRunnerWithTraits(
+ single_thread_task_runner_manager_->CreateCOMSTATaskRunner(
{TaskShutdownBehavior::BLOCK_SHUTDOWN}, GetParam());
com_task_runner->PostTask(FROM_HERE, BindOnce(&win::AssertComApartmentType,
@@ -560,11 +570,11 @@ TEST_P(PooledSingleThreadTaskRunnerManagerCommonTest, COMSTAInitialized) {
TEST_F(PooledSingleThreadTaskRunnerManagerTest, COMSTASameThreadUsed) {
scoped_refptr<SingleThreadTaskRunner> task_runner_1 =
- single_thread_task_runner_manager_->CreateCOMSTATaskRunnerWithTraits(
+ single_thread_task_runner_manager_->CreateCOMSTATaskRunner(
{TaskShutdownBehavior::BLOCK_SHUTDOWN},
SingleThreadTaskRunnerThreadMode::SHARED);
scoped_refptr<SingleThreadTaskRunner> task_runner_2 =
- single_thread_task_runner_manager_->CreateCOMSTATaskRunnerWithTraits(
+ single_thread_task_runner_manager_->CreateCOMSTATaskRunner(
{TaskShutdownBehavior::BLOCK_SHUTDOWN},
SingleThreadTaskRunnerThreadMode::SHARED);
@@ -629,7 +639,7 @@ class PooledSingleThreadTaskRunnerManagerTestWin
TEST_F(PooledSingleThreadTaskRunnerManagerTestWin, PumpsMessages) {
scoped_refptr<SingleThreadTaskRunner> com_task_runner =
- single_thread_task_runner_manager_->CreateCOMSTATaskRunnerWithTraits(
+ single_thread_task_runner_manager_->CreateCOMSTATaskRunner(
{TaskShutdownBehavior::BLOCK_SHUTDOWN},
SingleThreadTaskRunnerThreadMode::DEDICATED);
HWND hwnd = nullptr;
@@ -679,8 +689,8 @@ TEST_F(PooledSingleThreadTaskRunnerManagerStartTest, PostTaskBeforeStart) {
AtomicFlag manager_started;
WaitableEvent task_finished;
single_thread_task_runner_manager_
- ->CreateSingleThreadTaskRunnerWithTraits(
- TaskTraits(), SingleThreadTaskRunnerThreadMode::DEDICATED)
+ ->CreateSingleThreadTaskRunner(
+ {ThreadPool()}, SingleThreadTaskRunnerThreadMode::DEDICATED)
->PostTask(
FROM_HERE,
BindOnce(
diff --git a/chromium/base/task/thread_pool/pooled_task_runner_delegate.h b/chromium/base/task/thread_pool/pooled_task_runner_delegate.h
index 8feec1ff549..46dbe39d4ab 100644
--- a/chromium/base/task/thread_pool/pooled_task_runner_delegate.h
+++ b/chromium/base/task/thread_pool/pooled_task_runner_delegate.h
@@ -7,6 +7,7 @@
#include "base/base_export.h"
#include "base/task/task_traits.h"
+#include "base/task/thread_pool/job_task_source.h"
#include "base/task/thread_pool/sequence.h"
#include "base/task/thread_pool/task.h"
#include "base/task/thread_pool/task_source.h"
@@ -33,6 +34,12 @@ class BASE_EXPORT PooledTaskRunnerDelegate {
virtual bool PostTaskWithSequence(Task task,
scoped_refptr<Sequence> sequence) = 0;
+ // Invoked when a task is posted as a Job. The implementation must add
+ // |task_source| to the appropriate priority queue, depending on |task_source|
+ // traits. Returns true if task source was successfully enqueued.
+ virtual bool EnqueueJobTaskSource(
+ scoped_refptr<JobTaskSource> task_source) = 0;
+
// Invoked when RunsTasksInCurrentSequence() is called on a
// PooledParallelTaskRunner. Returns true if the current thread is part of the
// ThreadGroup associated with |traits|.
diff --git a/chromium/base/task/thread_pool/priority_queue.cc b/chromium/base/task/thread_pool/priority_queue.cc
index 3e92d7a0d44..1670dca7248 100644
--- a/chromium/base/task/thread_pool/priority_queue.cc
+++ b/chromium/base/task/thread_pool/priority_queue.cc
@@ -65,6 +65,7 @@ class PriorityQueue::TaskSourceAndSortKey {
}
const TaskSource* task_source() const { return task_source_.get(); }
+ TaskSource* task_source() { return task_source_.get(); }
const SequenceSortKey& sort_key() const { return sort_key_; }
@@ -88,10 +89,10 @@ PriorityQueue::~PriorityQueue() {
PriorityQueue& PriorityQueue::operator=(PriorityQueue&& other) = default;
void PriorityQueue::Push(
- RegisteredTaskSourceAndTransaction task_source_and_transaction) {
- auto sequence_sort_key = task_source_and_transaction.transaction.GetSortKey();
+ TransactionWithRegisteredTaskSource transaction_with_task_source) {
+ auto sequence_sort_key = transaction_with_task_source.GetSortKey();
container_.insert(TaskSourceAndSortKey(
- std::move(task_source_and_transaction.task_source), sequence_sort_key));
+ transaction_with_task_source.take_task_source(), sequence_sort_key));
IncrementNumTaskSourcesForPriority(sequence_sort_key.priority());
}
@@ -100,6 +101,16 @@ const SequenceSortKey& PriorityQueue::PeekSortKey() const {
return container_.Min().sort_key();
}
+TaskSource* PriorityQueue::PeekTaskSource() const {
+ DCHECK(!IsEmpty());
+
+ // The const_cast on Min() is okay since modifying the TaskSource cannot alter
+ // the sort order of TaskSourceAndSortKey.
+ auto& task_source_and_sort_key =
+ const_cast<PriorityQueue::TaskSourceAndSortKey&>(container_.Min());
+ return task_source_and_sort_key.task_source();
+}
+
RegisteredTaskSource PriorityQueue::PopTaskSource() {
DCHECK(!IsEmpty());
@@ -107,7 +118,7 @@ RegisteredTaskSource PriorityQueue::PopTaskSource() {
// transactionally being popped from |container_| right after and taking its
// TaskSource does not alter its sort order.
auto& task_source_and_sort_key =
- const_cast<PriorityQueue::TaskSourceAndSortKey&>(container_.Min());
+ const_cast<TaskSourceAndSortKey&>(container_.Min());
DecrementNumTaskSourcesForPriority(
task_source_and_sort_key.sort_key().priority());
RegisteredTaskSource task_source =
@@ -141,19 +152,19 @@ RegisteredTaskSource PriorityQueue::RemoveTaskSource(
}
void PriorityQueue::UpdateSortKey(
- TaskSourceAndTransaction task_source_and_transaction) {
- DCHECK(task_source_and_transaction.task_source);
+ TransactionWithOwnedTaskSource transaction_with_task_source) {
+ DCHECK(transaction_with_task_source);
if (IsEmpty())
return;
const HeapHandle heap_handle =
- task_source_and_transaction.task_source->heap_handle();
+ transaction_with_task_source.task_source()->heap_handle();
if (!heap_handle.IsValid())
return;
auto old_sort_key = container_.at(heap_handle).sort_key();
- auto new_sort_key = task_source_and_transaction.transaction.GetSortKey();
+ auto new_sort_key = transaction_with_task_source.GetSortKey();
auto registered_task_source =
const_cast<PriorityQueue::TaskSourceAndSortKey&>(
container_.at(heap_handle))
diff --git a/chromium/base/task/thread_pool/priority_queue.h b/chromium/base/task/thread_pool/priority_queue.h
index 395f49bdc2d..943572f3b90 100644
--- a/chromium/base/task/thread_pool/priority_queue.h
+++ b/chromium/base/task/thread_pool/priority_queue.h
@@ -28,7 +28,7 @@ class BASE_EXPORT PriorityQueue {
PriorityQueue& operator=(PriorityQueue&& other);
// Inserts |task_source| in the PriorityQueue with |sequence_sort_key|.
- void Push(RegisteredTaskSourceAndTransaction task_source_and_transaction);
+ void Push(TransactionWithRegisteredTaskSource transaction_with_task_source);
// Returns a reference to the SequenceSortKey representing the priority of
// the highest pending task in this PriorityQueue. The reference becomes
@@ -36,6 +36,11 @@ class BASE_EXPORT PriorityQueue {
// Cannot be called on an empty PriorityQueue.
const SequenceSortKey& PeekSortKey() const;
+ // Returns a reference to the highest priority TaskSource in this
+ // PriorityQueue. Cannot be called on an empty PriorityQueue. The returned
+ // task source may be modified as long as its sort key isn't affected.
+ TaskSource* PeekTaskSource() const;
+
// Removes and returns the highest priority TaskSource in this PriorityQueue.
// Cannot be called on an empty PriorityQueue.
RegisteredTaskSource PopTaskSource();
@@ -46,10 +51,11 @@ class BASE_EXPORT PriorityQueue {
// empty.
RegisteredTaskSource RemoveTaskSource(scoped_refptr<TaskSource> task_source);
- // Updates the sort key of the TaskSource in |task_source_and_transaction| to
+ // Updates the sort key of the TaskSource in |transaction_with_task_source| to
// match its current traits. No-ops if the TaskSource is not in the
// PriorityQueue or the PriorityQueue is empty.
- void UpdateSortKey(TaskSourceAndTransaction task_source_and_transaction);
+ void UpdateSortKey(
+ TransactionWithOwnedTaskSource transaction_with_task_source);
// Returns true if the PriorityQueue is empty.
bool IsEmpty() const;
diff --git a/chromium/base/task/thread_pool/priority_queue_unittest.cc b/chromium/base/task/thread_pool/priority_queue_unittest.cc
index 569fcb94c38..e0a2d64fce9 100644
--- a/chromium/base/task/thread_pool/priority_queue_unittest.cc
+++ b/chromium/base/task/thread_pool/priority_queue_unittest.cc
@@ -45,20 +45,20 @@ class PriorityQueueWithSequencesTest : public testing::Test {
num_user_blocking);
}
- scoped_refptr<TaskSource> sequence_a =
- MakeSequenceWithTraitsAndTask(TaskTraits(TaskPriority::USER_VISIBLE));
+ scoped_refptr<TaskSource> sequence_a = MakeSequenceWithTraitsAndTask(
+ TaskTraits(ThreadPool(), TaskPriority::USER_VISIBLE));
SequenceSortKey sort_key_a = sequence_a->BeginTransaction().GetSortKey();
- scoped_refptr<TaskSource> sequence_b =
- MakeSequenceWithTraitsAndTask(TaskTraits(TaskPriority::USER_BLOCKING));
+ scoped_refptr<TaskSource> sequence_b = MakeSequenceWithTraitsAndTask(
+ TaskTraits(ThreadPool(), TaskPriority::USER_BLOCKING));
SequenceSortKey sort_key_b = sequence_b->BeginTransaction().GetSortKey();
- scoped_refptr<TaskSource> sequence_c =
- MakeSequenceWithTraitsAndTask(TaskTraits(TaskPriority::USER_BLOCKING));
+ scoped_refptr<TaskSource> sequence_c = MakeSequenceWithTraitsAndTask(
+ TaskTraits(ThreadPool(), TaskPriority::USER_BLOCKING));
SequenceSortKey sort_key_c = sequence_c->BeginTransaction().GetSortKey();
- scoped_refptr<TaskSource> sequence_d =
- MakeSequenceWithTraitsAndTask(TaskTraits(TaskPriority::BEST_EFFORT));
+ scoped_refptr<TaskSource> sequence_d = MakeSequenceWithTraitsAndTask(
+ TaskTraits(ThreadPool(), TaskPriority::BEST_EFFORT));
SequenceSortKey sort_key_d = sequence_d->BeginTransaction().GetSortKey();
PriorityQueue pq;
@@ -72,28 +72,28 @@ TEST_F(PriorityQueueWithSequencesTest, PushPopPeek) {
// Push |sequence_a| in the PriorityQueue. It becomes the sequence with the
// highest priority.
- pq.Push(RegisteredTaskSourceAndTransaction::FromTaskSource(
+ pq.Push(TransactionWithRegisteredTaskSource::FromTaskSource(
RegisteredTaskSource::CreateForTesting(sequence_a)));
EXPECT_EQ(sort_key_a, pq.PeekSortKey());
ExpectNumSequences(0U, 1U, 0U);
// Push |sequence_b| in the PriorityQueue. It becomes the sequence with the
// highest priority.
- pq.Push(RegisteredTaskSourceAndTransaction::FromTaskSource(
+ pq.Push(TransactionWithRegisteredTaskSource::FromTaskSource(
RegisteredTaskSource::CreateForTesting(sequence_b)));
EXPECT_EQ(sort_key_b, pq.PeekSortKey());
ExpectNumSequences(0U, 1U, 1U);
// Push |sequence_c| in the PriorityQueue. |sequence_b| is still the sequence
// with the highest priority.
- pq.Push(RegisteredTaskSourceAndTransaction::FromTaskSource(
+ pq.Push(TransactionWithRegisteredTaskSource::FromTaskSource(
RegisteredTaskSource::CreateForTesting(sequence_c)));
EXPECT_EQ(sort_key_b, pq.PeekSortKey());
ExpectNumSequences(0U, 1U, 2U);
// Push |sequence_d| in the PriorityQueue. |sequence_b| is still the sequence
// with the highest priority.
- pq.Push(RegisteredTaskSourceAndTransaction::FromTaskSource(
+ pq.Push(TransactionWithRegisteredTaskSource::FromTaskSource(
RegisteredTaskSource::CreateForTesting(sequence_d)));
EXPECT_EQ(sort_key_b, pq.PeekSortKey());
ExpectNumSequences(1U, 1U, 2U);
@@ -127,13 +127,13 @@ TEST_F(PriorityQueueWithSequencesTest, RemoveSequence) {
// Push all test Sequences into the PriorityQueue. |sequence_b|
// will be the sequence with the highest priority.
- pq.Push(RegisteredTaskSourceAndTransaction::FromTaskSource(
+ pq.Push(TransactionWithRegisteredTaskSource::FromTaskSource(
RegisteredTaskSource::CreateForTesting(sequence_a)));
- pq.Push(RegisteredTaskSourceAndTransaction::FromTaskSource(
+ pq.Push(TransactionWithRegisteredTaskSource::FromTaskSource(
RegisteredTaskSource::CreateForTesting(sequence_b)));
- pq.Push(RegisteredTaskSourceAndTransaction::FromTaskSource(
+ pq.Push(TransactionWithRegisteredTaskSource::FromTaskSource(
RegisteredTaskSource::CreateForTesting(sequence_c)));
- pq.Push(RegisteredTaskSourceAndTransaction::FromTaskSource(
+ pq.Push(TransactionWithRegisteredTaskSource::FromTaskSource(
RegisteredTaskSource::CreateForTesting(sequence_d)));
EXPECT_EQ(sort_key_b, pq.PeekSortKey());
ExpectNumSequences(1U, 1U, 2U);
@@ -176,13 +176,13 @@ TEST_F(PriorityQueueWithSequencesTest, UpdateSortKey) {
// Push all test Sequences into the PriorityQueue. |sequence_b| becomes the
// sequence with the highest priority.
- pq.Push(RegisteredTaskSourceAndTransaction::FromTaskSource(
+ pq.Push(TransactionWithRegisteredTaskSource::FromTaskSource(
RegisteredTaskSource::CreateForTesting(sequence_a)));
- pq.Push(RegisteredTaskSourceAndTransaction::FromTaskSource(
+ pq.Push(TransactionWithRegisteredTaskSource::FromTaskSource(
RegisteredTaskSource::CreateForTesting(sequence_b)));
- pq.Push(RegisteredTaskSourceAndTransaction::FromTaskSource(
+ pq.Push(TransactionWithRegisteredTaskSource::FromTaskSource(
RegisteredTaskSource::CreateForTesting(sequence_c)));
- pq.Push(RegisteredTaskSourceAndTransaction::FromTaskSource(
+ pq.Push(TransactionWithRegisteredTaskSource::FromTaskSource(
RegisteredTaskSource::CreateForTesting(sequence_d)));
EXPECT_EQ(sort_key_b, pq.PeekSortKey());
ExpectNumSequences(1U, 1U, 2U);
@@ -191,9 +191,8 @@ TEST_F(PriorityQueueWithSequencesTest, UpdateSortKey) {
// Downgrade |sequence_b| from USER_BLOCKING to BEST_EFFORT. |sequence_c|
// (USER_BLOCKING priority) becomes the sequence with the highest priority.
auto sequence_b_and_transaction =
- TaskSourceAndTransaction::FromTaskSource(sequence_b);
- sequence_b_and_transaction.transaction.UpdatePriority(
- TaskPriority::BEST_EFFORT);
+ TransactionWithOwnedTaskSource::FromTaskSource(sequence_b);
+ sequence_b_and_transaction.UpdatePriority(TaskPriority::BEST_EFFORT);
pq.UpdateSortKey(std::move(sequence_b_and_transaction));
EXPECT_EQ(sort_key_c, pq.PeekSortKey());
@@ -205,9 +204,8 @@ TEST_F(PriorityQueueWithSequencesTest, UpdateSortKey) {
// |sequence_c| (USER_BLOCKING priority) is still the sequence with the
// highest priority.
auto sequence_c_and_transaction =
- TaskSourceAndTransaction::FromTaskSource(sequence_c);
- sequence_c_and_transaction.transaction.UpdatePriority(
- TaskPriority::USER_BLOCKING);
+ TransactionWithOwnedTaskSource::FromTaskSource(sequence_c);
+ sequence_c_and_transaction.UpdatePriority(TaskPriority::USER_BLOCKING);
pq.UpdateSortKey(std::move(sequence_c_and_transaction));
ExpectNumSequences(2U, 1U, 1U);
@@ -224,9 +222,8 @@ TEST_F(PriorityQueueWithSequencesTest, UpdateSortKey) {
// Upgrade |sequence_d| from BEST_EFFORT to USER_BLOCKING. |sequence_d|
// becomes the sequence with the highest priority.
auto sequence_d_and_transaction =
- TaskSourceAndTransaction::FromTaskSource(sequence_d);
- sequence_d_and_transaction.transaction.UpdatePriority(
- TaskPriority::USER_BLOCKING);
+ TransactionWithOwnedTaskSource::FromTaskSource(sequence_d);
+ sequence_d_and_transaction.UpdatePriority(TaskPriority::USER_BLOCKING);
pq.UpdateSortKey(std::move(sequence_d_and_transaction));
ExpectNumSequences(1U, 1U, 1U);
@@ -241,7 +238,8 @@ TEST_F(PriorityQueueWithSequencesTest, UpdateSortKey) {
}
{
- pq.UpdateSortKey(TaskSourceAndTransaction::FromTaskSource(sequence_d));
+ pq.UpdateSortKey(
+ TransactionWithOwnedTaskSource::FromTaskSource(sequence_d));
ExpectNumSequences(1U, 1U, 0U);
EXPECT_EQ(sequence_a, pq.PopTaskSource().Unregister());
ExpectNumSequences(1U, 0U, 0U);
@@ -251,7 +249,8 @@ TEST_F(PriorityQueueWithSequencesTest, UpdateSortKey) {
{
// No-op if UpdateSortKey() is called on an empty PriorityQueue.
- pq.UpdateSortKey(TaskSourceAndTransaction::FromTaskSource(sequence_b));
+ pq.UpdateSortKey(
+ TransactionWithOwnedTaskSource::FromTaskSource(sequence_b));
EXPECT_TRUE(pq.IsEmpty());
ExpectNumSequences(0U, 0U, 0U);
}
diff --git a/chromium/base/task/thread_pool/sequence.cc b/chromium/base/task/thread_pool/sequence.cc
index 324bf500c31..9e704519aaa 100644
--- a/chromium/base/task/thread_pool/sequence.cc
+++ b/chromium/base/task/thread_pool/sequence.cc
@@ -11,6 +11,7 @@
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/task/task_features.h"
+#include "base/task/thread_pool/thread_pool_clock.h"
#include "base/time/time.h"
namespace base {
@@ -40,7 +41,7 @@ void Sequence::Transaction::PushTask(Task task) {
DCHECK(task.queue_time.is_null());
bool should_be_queued = WillPushTask();
- task.queue_time = base::TimeTicks::Now();
+ task.queue_time = ThreadPoolClock::Now();
task.task = sequence()->traits_.shutdown_behavior() ==
TaskShutdownBehavior::BLOCK_SHUTDOWN
@@ -50,29 +51,50 @@ void Sequence::Transaction::PushTask(Task task) {
sequence()->queue_.push(std::move(task));
// AddRef() matched by manual Release() when the sequence has no more tasks
- // to run (in DidRunTask() or Clear()).
+ // to run (in DidProcessTask() or Clear()).
if (should_be_queued && sequence()->task_runner())
sequence()->task_runner()->AddRef();
}
-Optional<Task> Sequence::TakeTask() {
+TaskSource::RunIntent Sequence::WillRunTask() {
+ // There should never be a second call to WillRunTask() before DidProcessTask
+ // since the RunIntent is always marked a saturated.
DCHECK(!has_worker_);
+
+ // It's ok to access |has_worker_| outside of a Transaction since
+ // WillRunTask() is externally synchronized, always called in sequence with
+ // TakeTask() and DidProcessTask() and only called if |!queue_.empty()|, which
+ // means it won't race with WillPushTask()/PushTask().
+ has_worker_ = true;
+ return MakeRunIntent(Saturated::kYes);
+}
+
+size_t Sequence::GetRemainingConcurrency() const {
+ return 1;
+}
+
+Optional<Task> Sequence::TakeTask() {
+ DCHECK(has_worker_);
DCHECK(!queue_.empty());
DCHECK(queue_.front().task);
- has_worker_ = true;
auto next_task = std::move(queue_.front());
queue_.pop();
return std::move(next_task);
}
-bool Sequence::DidRunTask() {
+bool Sequence::DidProcessTask(RunResult run_result) {
+ // There should never be a call to DidProcessTask without an associated
+ // WillRunTask().
DCHECK(has_worker_);
has_worker_ = false;
if (queue_.empty()) {
ReleaseTaskRunner();
return false;
}
+ // Let the caller re-enqueue this non-empty Sequence regardless of
+ // |run_result| so it can continue churning through this Sequence's tasks and
+ // skip/delete them in the proper scope.
return true;
}
diff --git a/chromium/base/task/thread_pool/sequence.h b/chromium/base/task/thread_pool/sequence.h
index 3a7444defdd..e1c3b998458 100644
--- a/chromium/base/task/thread_pool/sequence.h
+++ b/chromium/base/task/thread_pool/sequence.h
@@ -83,7 +83,10 @@ class BASE_EXPORT Sequence : public TaskSource {
// active Sequence::Transaction.
Transaction BeginTransaction() WARN_UNUSED_RESULT;
+ // TaskSource:
ExecutionEnvironment GetExecutionEnvironment() override;
+ RunIntent WillRunTask() override;
+ size_t GetRemainingConcurrency() const override;
// Returns a token that uniquely identifies this Sequence.
const SequenceToken& token() const { return token_; }
@@ -97,7 +100,7 @@ class BASE_EXPORT Sequence : public TaskSource {
// TaskSource:
Optional<Task> TakeTask() override WARN_UNUSED_RESULT;
- bool DidRunTask() override;
+ bool DidProcessTask(RunResult run_result) override;
SequenceSortKey GetSortKey() const override;
void Clear() override;
@@ -110,7 +113,7 @@ class BASE_EXPORT Sequence : public TaskSource {
// Queue of tasks to execute.
base::queue<Task> queue_;
- // True if a worker is currently running a Task from this Sequence.
+ // True if a worker is currently associated with a Task from this Sequence.
bool has_worker_ = false;
// Holds data stored through the SequenceLocalStorageSlot API.
diff --git a/chromium/base/task/thread_pool/sequence_unittest.cc b/chromium/base/task/thread_pool/sequence_unittest.cc
index 6de963fb5ad..7adc344a2b4 100644
--- a/chromium/base/task/thread_pool/sequence_unittest.cc
+++ b/chromium/base/task/thread_pool/sequence_unittest.cc
@@ -44,9 +44,9 @@ TEST(ThreadPoolSequenceTest, PushTakeRemove) {
testing::StrictMock<MockTask> mock_task_d;
testing::StrictMock<MockTask> mock_task_e;
- scoped_refptr<Sequence> sequence =
- MakeRefCounted<Sequence>(TaskTraits(TaskPriority::BEST_EFFORT), nullptr,
- TaskSourceExecutionMode::kParallel);
+ scoped_refptr<Sequence> sequence = MakeRefCounted<Sequence>(
+ TaskTraits(ThreadPool(), TaskPriority::BEST_EFFORT), nullptr,
+ TaskSourceExecutionMode::kParallel);
Sequence::Transaction sequence_transaction(sequence->BeginTransaction());
// Push task A in the sequence. PushTask() should return true since it's the
@@ -64,45 +64,52 @@ TEST(ThreadPoolSequenceTest, PushTakeRemove) {
sequence_transaction.PushTask(CreateTask(&mock_task_d));
// Take the task in front of the sequence. It should be task A.
- Optional<Task> task = sequence_transaction.TakeTask();
+ auto run_intent = sequence->WillRunTask();
+ Optional<Task> task = sequence_transaction.TakeTask(&run_intent);
ExpectMockTask(&mock_task_a, &task.value());
EXPECT_FALSE(task->queue_time.is_null());
// Remove the empty slot. Task B should now be in front.
- EXPECT_TRUE(sequence_transaction.DidRunTask());
+ EXPECT_TRUE(sequence_transaction.DidProcessTask(std::move(run_intent)));
+
EXPECT_FALSE(sequence_transaction.WillPushTask());
- task = sequence_transaction.TakeTask();
+ run_intent = sequence->WillRunTask();
+ task = sequence_transaction.TakeTask(&run_intent);
ExpectMockTask(&mock_task_b, &task.value());
EXPECT_FALSE(task->queue_time.is_null());
// Remove the empty slot. Task C should now be in front.
- EXPECT_TRUE(sequence_transaction.DidRunTask());
+ EXPECT_TRUE(sequence_transaction.DidProcessTask(std::move(run_intent)));
+
EXPECT_FALSE(sequence_transaction.WillPushTask());
- task = sequence_transaction.TakeTask();
+ run_intent = sequence->WillRunTask();
+ task = sequence_transaction.TakeTask(&run_intent);
ExpectMockTask(&mock_task_c, &task.value());
EXPECT_FALSE(task->queue_time.is_null());
// Remove the empty slot.
- EXPECT_TRUE(sequence_transaction.DidRunTask());
+ EXPECT_TRUE(sequence_transaction.DidProcessTask(std::move(run_intent)));
// Push task E in the sequence.
EXPECT_FALSE(sequence_transaction.WillPushTask());
sequence_transaction.PushTask(CreateTask(&mock_task_e));
// Task D should be in front.
- task = sequence_transaction.TakeTask();
+ run_intent = sequence->WillRunTask();
+ task = sequence_transaction.TakeTask(&run_intent);
ExpectMockTask(&mock_task_d, &task.value());
EXPECT_FALSE(task->queue_time.is_null());
// Remove the empty slot. Task E should now be in front.
- EXPECT_TRUE(sequence_transaction.DidRunTask());
+ EXPECT_TRUE(sequence_transaction.DidProcessTask(std::move(run_intent)));
EXPECT_FALSE(sequence_transaction.WillPushTask());
- task = sequence_transaction.TakeTask();
+ run_intent = sequence->WillRunTask();
+ task = sequence_transaction.TakeTask(&run_intent);
ExpectMockTask(&mock_task_e, &task.value());
EXPECT_FALSE(task->queue_time.is_null());
// Remove the empty slot. The sequence should now be empty.
- EXPECT_FALSE(sequence_transaction.DidRunTask());
+ EXPECT_FALSE(sequence_transaction.DidProcessTask(std::move(run_intent)));
EXPECT_TRUE(sequence_transaction.WillPushTask());
}
@@ -110,9 +117,9 @@ TEST(ThreadPoolSequenceTest, PushTakeRemove) {
TEST(ThreadPoolSequenceTest, GetSortKeyBestEffort) {
// Create a BEST_EFFORT sequence with a task.
Task best_effort_task(FROM_HERE, DoNothing(), TimeDelta());
- scoped_refptr<Sequence> best_effort_sequence =
- MakeRefCounted<Sequence>(TaskTraits(TaskPriority::BEST_EFFORT), nullptr,
- TaskSourceExecutionMode::kParallel);
+ scoped_refptr<Sequence> best_effort_sequence = MakeRefCounted<Sequence>(
+ TaskTraits(ThreadPool(), TaskPriority::BEST_EFFORT), nullptr,
+ TaskSourceExecutionMode::kParallel);
Sequence::Transaction best_effort_sequence_transaction(
best_effort_sequence->BeginTransaction());
best_effort_sequence_transaction.PushTask(std::move(best_effort_task));
@@ -123,15 +130,17 @@ TEST(ThreadPoolSequenceTest, GetSortKeyBestEffort) {
// Take the task from the sequence, so that its sequenced time is available
// for the check below.
- auto take_best_effort_task = best_effort_sequence_transaction.TakeTask();
+ auto run_intent = best_effort_sequence->WillRunTask();
+ auto take_best_effort_task =
+ best_effort_sequence_transaction.TakeTask(&run_intent);
// Verify the sort key.
EXPECT_EQ(TaskPriority::BEST_EFFORT, best_effort_sort_key.priority());
EXPECT_EQ(take_best_effort_task->queue_time,
best_effort_sort_key.next_task_sequenced_time());
- // DidRunTask for correctness.
- best_effort_sequence_transaction.DidRunTask();
+ // DidProcessTask for correctness.
+ best_effort_sequence_transaction.DidProcessTask(std::move(run_intent));
}
// Same as ThreadPoolSequenceTest.GetSortKeyBestEffort, but with a
@@ -139,9 +148,9 @@ TEST(ThreadPoolSequenceTest, GetSortKeyBestEffort) {
TEST(ThreadPoolSequenceTest, GetSortKeyForeground) {
// Create a USER_VISIBLE sequence with a task.
Task foreground_task(FROM_HERE, DoNothing(), TimeDelta());
- scoped_refptr<Sequence> foreground_sequence =
- MakeRefCounted<Sequence>(TaskTraits(TaskPriority::USER_VISIBLE), nullptr,
- TaskSourceExecutionMode::kParallel);
+ scoped_refptr<Sequence> foreground_sequence = MakeRefCounted<Sequence>(
+ TaskTraits(ThreadPool(), TaskPriority::USER_VISIBLE), nullptr,
+ TaskSourceExecutionMode::kParallel);
Sequence::Transaction foreground_sequence_transaction(
foreground_sequence->BeginTransaction());
foreground_sequence_transaction.PushTask(std::move(foreground_task));
@@ -152,46 +161,61 @@ TEST(ThreadPoolSequenceTest, GetSortKeyForeground) {
// Take the task from the sequence, so that its sequenced time is available
// for the check below.
- auto take_foreground_task = foreground_sequence_transaction.TakeTask();
+ auto run_intent = foreground_sequence->WillRunTask();
+ auto take_foreground_task =
+ foreground_sequence_transaction.TakeTask(&run_intent);
// Verify the sort key.
EXPECT_EQ(TaskPriority::USER_VISIBLE, foreground_sort_key.priority());
EXPECT_EQ(take_foreground_task->queue_time,
foreground_sort_key.next_task_sequenced_time());
- // DidRunTask for correctness.
- foreground_sequence_transaction.DidRunTask();
+ // DidProcessTask for correctness.
+ foreground_sequence_transaction.DidProcessTask(std::move(run_intent));
}
-// Verify that a DCHECK fires if DidRunTask() is called on a sequence which
+// Verify that a DCHECK fires if DidProcessTask() is called on a sequence which
// didn't return a Task.
-TEST(ThreadPoolSequenceTest, DidRunTaskWithoutTakeTask) {
+TEST(ThreadPoolSequenceTest, DidProcessTaskWithoutTakeTask) {
scoped_refptr<Sequence> sequence = MakeRefCounted<Sequence>(
- TaskTraits(), nullptr, TaskSourceExecutionMode::kParallel);
+ TaskTraits(ThreadPool()), nullptr, TaskSourceExecutionMode::kParallel);
Sequence::Transaction sequence_transaction(sequence->BeginTransaction());
sequence_transaction.PushTask(Task(FROM_HERE, DoNothing(), TimeDelta()));
- EXPECT_DCHECK_DEATH({ sequence_transaction.DidRunTask(); });
+ EXPECT_DCHECK_DEATH({
+ auto run_intent = sequence->WillRunTask();
+ sequence_transaction.DidProcessTask(std::move(run_intent));
+ });
}
// Verify that a DCHECK fires if TakeTask() is called on a sequence whose front
// slot is empty.
TEST(ThreadPoolSequenceTest, TakeEmptyFrontSlot) {
scoped_refptr<Sequence> sequence = MakeRefCounted<Sequence>(
- TaskTraits(), nullptr, TaskSourceExecutionMode::kParallel);
+ TaskTraits(ThreadPool()), nullptr, TaskSourceExecutionMode::kParallel);
Sequence::Transaction sequence_transaction(sequence->BeginTransaction());
sequence_transaction.PushTask(Task(FROM_HERE, DoNothing(), TimeDelta()));
- EXPECT_TRUE(sequence_transaction.TakeTask());
- EXPECT_DCHECK_DEATH({ sequence_transaction.TakeTask(); });
+ {
+ auto run_intent = sequence->WillRunTask();
+ EXPECT_TRUE(sequence_transaction.TakeTask(&run_intent));
+ run_intent.ReleaseForTesting();
+ }
+ EXPECT_DCHECK_DEATH({
+ auto run_intent = sequence->WillRunTask();
+ auto task = sequence_transaction.TakeTask(&run_intent);
+ });
}
// Verify that a DCHECK fires if TakeTask() is called on an empty sequence.
TEST(ThreadPoolSequenceTest, TakeEmptySequence) {
scoped_refptr<Sequence> sequence = MakeRefCounted<Sequence>(
- TaskTraits(), nullptr, TaskSourceExecutionMode::kParallel);
+ TaskTraits(ThreadPool()), nullptr, TaskSourceExecutionMode::kParallel);
Sequence::Transaction sequence_transaction(sequence->BeginTransaction());
- EXPECT_DCHECK_DEATH({ sequence_transaction.TakeTask(); });
+ auto run_intent = sequence->WillRunTask();
+ EXPECT_DCHECK_DEATH(
+ { auto task = sequence_transaction.TakeTask(&run_intent); });
+ run_intent.ReleaseForTesting();
}
} // namespace internal
diff --git a/chromium/base/task/thread_pool/service_thread.cc b/chromium/base/task/thread_pool/service_thread.cc
index 01800c5b1a8..849f2ef3c10 100644
--- a/chromium/base/task/thread_pool/service_thread.cc
+++ b/chromium/base/task/thread_pool/service_thread.cc
@@ -13,6 +13,7 @@
#include "base/task/task_traits.h"
#include "base/task/thread_pool/task_tracker.h"
#include "base/task/thread_pool/thread_pool.h"
+#include "base/task/thread_pool/thread_pool_clock.h"
namespace base {
namespace internal {
@@ -72,9 +73,12 @@ void ServiceThread::PerformHeartbeatLatencyReport() const {
return;
static constexpr TaskTraits kReportedTraits[] = {
- {TaskPriority::BEST_EFFORT}, {TaskPriority::BEST_EFFORT, MayBlock()},
- {TaskPriority::USER_VISIBLE}, {TaskPriority::USER_VISIBLE, MayBlock()},
- {TaskPriority::USER_BLOCKING}, {TaskPriority::USER_BLOCKING, MayBlock()}};
+ {ThreadPool(), TaskPriority::BEST_EFFORT},
+ {ThreadPool(), TaskPriority::BEST_EFFORT, MayBlock()},
+ {ThreadPool(), TaskPriority::USER_VISIBLE},
+ {ThreadPool(), TaskPriority::USER_VISIBLE, MayBlock()},
+ {ThreadPool(), TaskPriority::USER_BLOCKING},
+ {ThreadPool(), TaskPriority::USER_BLOCKING, MayBlock()}};
// Only record latency for one set of TaskTraits per report to avoid bias in
// the order in which tasks are posted (should we record all at once) as well
@@ -88,7 +92,7 @@ void ServiceThread::PerformHeartbeatLatencyReport() const {
kReportedTraits[RandInt(0, base::size(kReportedTraits) - 1)];
// Post through the static API to time the full stack. Use a new Now() for
- // every set of traits in case PostTaskWithTraits() itself is slow.
+ // every set of traits in case PostTask() itself is slow.
// Bonus: this approach also includes the overhead of BindOnce() in the
// reported latency.
// TODO(jessemckenna): pass |profiled_traits| directly to
@@ -96,12 +100,12 @@ void ServiceThread::PerformHeartbeatLatencyReport() const {
// error on NaCl is fixed
TaskPriority task_priority = profiled_traits.priority();
bool may_block = profiled_traits.may_block();
- base::PostTaskWithTraits(
+ base::PostTask(
FROM_HERE, profiled_traits,
BindOnce(
&TaskTracker::RecordHeartbeatLatencyAndTasksRunWhileQueuingHistograms,
- Unretained(task_tracker_), task_priority, may_block, TimeTicks::Now(),
- task_tracker_->GetNumTasksRun()));
+ Unretained(task_tracker_), task_priority, may_block,
+ ThreadPoolClock::Now(), task_tracker_->GetNumTasksRun()));
}
} // namespace internal
diff --git a/chromium/base/task/thread_pool/task.cc b/chromium/base/task/thread_pool/task.cc
index 91eea84063f..a3691a20cef 100644
--- a/chromium/base/task/thread_pool/task.cc
+++ b/chromium/base/task/thread_pool/task.cc
@@ -7,6 +7,7 @@
#include <utility>
#include "base/atomic_sequence_num.h"
+#include "base/task/thread_pool/thread_pool_clock.h"
namespace base {
namespace internal {
@@ -20,10 +21,11 @@ AtomicSequenceNumber g_sequence_nums_for_tracing;
Task::Task() = default;
Task::Task(const Location& posted_from, OnceClosure task, TimeDelta delay)
- : PendingTask(posted_from,
- std::move(task),
- delay.is_zero() ? TimeTicks() : TimeTicks::Now() + delay,
- Nestable::kNonNestable) {
+ : PendingTask(
+ posted_from,
+ std::move(task),
+ delay.is_zero() ? TimeTicks() : ThreadPoolClock::Now() + delay,
+ Nestable::kNonNestable) {
// ThreadPoolImpl doesn't use |sequence_num| but tracing (toplevel.flow)
// relies on it being unique. While this subtle dependency is a bit
// overreaching, ThreadPoolImpl is the only task system that doesn't use
diff --git a/chromium/base/task/thread_pool/task_source.cc b/chromium/base/task/thread_pool/task_source.cc
index e31b6249f19..6d39aebe7cb 100644
--- a/chromium/base/task/thread_pool/task_source.cc
+++ b/chromium/base/task/thread_pool/task_source.cc
@@ -15,6 +15,30 @@
namespace base {
namespace internal {
+TaskSource::RunIntent::RunIntent(RunIntent&& other) noexcept
+ : task_source_(other.task_source_),
+ run_step_(other.run_step_),
+ is_saturated_(other.is_saturated_) {
+ other.task_source_ = nullptr;
+}
+
+TaskSource::RunIntent::~RunIntent() {
+ DCHECK_EQ(task_source_, nullptr);
+}
+
+TaskSource::RunIntent& TaskSource::RunIntent::operator=(RunIntent&& other) {
+ DCHECK_EQ(task_source_, nullptr);
+ task_source_ = other.task_source_;
+ other.task_source_ = nullptr;
+ run_step_ = other.run_step_;
+ is_saturated_ = other.is_saturated_;
+ return *this;
+}
+
+TaskSource::RunIntent::RunIntent(const TaskSource* task_source,
+ Saturated is_saturated)
+ : task_source_(task_source), is_saturated_(is_saturated) {}
+
TaskSource::Transaction::Transaction(TaskSource* task_source)
: task_source_(task_source) {
task_source->lock_.Acquire();
@@ -32,12 +56,20 @@ TaskSource::Transaction::~Transaction() {
}
}
-Optional<Task> TaskSource::Transaction::TakeTask() {
+Optional<Task> TaskSource::Transaction::TakeTask(RunIntent* intent) {
+ DCHECK_EQ(intent->task_source_, task_source());
+ DCHECK_EQ(intent->run_step_, RunIntent::State::kInitial);
+ intent->run_step_ = RunIntent::State::kTaskAcquired;
return task_source_->TakeTask();
}
-bool TaskSource::Transaction::DidRunTask() {
- return task_source_->DidRunTask();
+bool TaskSource::Transaction::DidProcessTask(RunIntent intent,
+ RunResult run_result) {
+ DCHECK_EQ(intent.task_source_, task_source());
+ DCHECK_EQ(intent.run_step_, RunIntent::State::kTaskAcquired);
+ intent.run_step_ = RunIntent::State::kCompleted;
+ intent.Release();
+ return task_source_->DidProcessTask(run_result);
}
SequenceSortKey TaskSource::Transaction::GetSortKey() const {
@@ -54,6 +86,10 @@ void TaskSource::Transaction::UpdatePriority(TaskPriority priority) {
task_source_->traits_.UpdatePriority(priority);
}
+TaskSource::RunIntent TaskSource::MakeRunIntent(Saturated is_saturated) const {
+ return RunIntent(this, is_saturated);
+}
+
void TaskSource::SetHeapHandle(const HeapHandle& handle) {
heap_handle_ = handle;
}
@@ -68,7 +104,9 @@ TaskSource::TaskSource(const TaskTraits& traits,
: traits_(traits),
task_runner_(task_runner),
execution_mode_(execution_mode) {
- DCHECK(task_runner_ || execution_mode_ == TaskSourceExecutionMode::kParallel);
+ DCHECK(task_runner_ ||
+ execution_mode_ == TaskSourceExecutionMode::kParallel ||
+ execution_mode_ == TaskSourceExecutionMode::kJob);
}
TaskSource::~TaskSource() = default;
@@ -82,8 +120,8 @@ RegisteredTaskSource::RegisteredTaskSource() = default;
RegisteredTaskSource::RegisteredTaskSource(std::nullptr_t)
: RegisteredTaskSource() {}
-RegisteredTaskSource::RegisteredTaskSource(RegisteredTaskSource&& other) =
- default;
+RegisteredTaskSource::RegisteredTaskSource(
+ RegisteredTaskSource&& other) noexcept = default;
RegisteredTaskSource::~RegisteredTaskSource() {
Unregister();
@@ -114,7 +152,7 @@ RegisteredTaskSource& RegisteredTaskSource::operator=(
RegisteredTaskSource::RegisteredTaskSource(
scoped_refptr<TaskSource> task_source,
TaskTracker* task_tracker)
- : task_source_(task_source), task_tracker_(task_tracker) {}
+ : task_source_(std::move(task_source)), task_tracker_(task_tracker) {}
} // namespace internal
} // namespace base
diff --git a/chromium/base/task/thread_pool/task_source.h b/chromium/base/task/thread_pool/task_source.h
index fea3165798a..bb9deaa0e29 100644
--- a/chromium/base/task/thread_pool/task_source.h
+++ b/chromium/base/task/thread_pool/task_source.h
@@ -29,7 +29,8 @@ enum class TaskSourceExecutionMode {
kParallel,
kSequenced,
kSingleThread,
- kMax = kSingleThread,
+ kJob,
+ kMax = kJob,
};
struct BASE_EXPORT ExecutionEnvironment {
@@ -40,26 +41,90 @@ struct BASE_EXPORT ExecutionEnvironment {
// A TaskSource is a virtual class that provides a series of Tasks that must be
// executed.
//
-// In order to execute a task from this TaskSource, TakeTask() can be called to
-// access the next Task, and DidRunTask() must be called after the task executed
-// and before accessing any subsequent Tasks. This ensure that the number of
-// workers concurrently running tasks never go over the intended concurrency.
+// In order to execute a task from this TaskSource, a worker should first make
+// sure that a task can run with WillRunTask() which returns a RunIntent.
+// TakeTask() can then be called to access the next Task, and DidProcessTask()
+// must be called after the task was processed. Many overlapping chains of
+// WillRunTask(), TakeTask(), run and DidProcessTask() can run concurrently, as
+// permitted by WillRunTask(). This ensures that the number of workers
+// concurrently running tasks never goes over the intended concurrency.
//
// In comments below, an "empty TaskSource" is a TaskSource with no Task.
//
// Note: there is a known refcounted-ownership cycle in the Scheduler
-// architecture: TaskSource -> TaskRunner -> TaskSource -> ... This is
-// okay so long as the other owners of TaskSource (PriorityQueue and
-// WorkerThread in alternation and
-// ThreadGroupImpl::WorkerThreadDelegateImpl::GetWork() temporarily) keep
-// running it (and taking Tasks from it as a result). A dangling reference cycle
-// would only occur should they release their reference to it while it's not
-// empty. In other words, it is only correct for them to release it when
-// DidRunTask() returns false.
+// architecture: TaskSource -> TaskRunner -> TaskSource -> ... This is okay so
+// long as the other owners of TaskSource (PriorityQueue and WorkerThread in
+// alternation and ThreadGroupImpl::WorkerThreadDelegateImpl::GetWork()
+// temporarily) keep running it (and taking Tasks from it as a result). A
+// dangling reference cycle would only occur should they release their reference
+// to it while it's not empty. In other words, it is only correct for them to
+// release it when DidProcessTask() returns false.
//
// This class is thread-safe.
class BASE_EXPORT TaskSource : public RefCountedThreadSafe<TaskSource> {
+ protected:
+ // Indicates whether a TaskSource has reached its maximum intended concurrency
+ // and may not run any additional tasks.
+ enum class Saturated {
+ kYes,
+ kNo,
+ };
+
public:
+ // Indicates if a task was run or skipped as a result of shutdown.
+ enum class RunResult {
+ kDidRun,
+ kSkippedAtShutdown,
+ };
+
+ // Result of WillRunTask(). A single task associated with a RunIntent may be
+ // accessed with TakeTask() and run iff this evaluates to true.
+ class BASE_EXPORT RunIntent {
+ public:
+ RunIntent() = default;
+ RunIntent(RunIntent&&) noexcept;
+ ~RunIntent();
+
+ RunIntent& operator=(RunIntent&&);
+
+ operator bool() const { return !!task_source_; }
+
+ // Returns true iff the TaskSource from which this RunIntent was obtained
+ // may not run any additional tasks beyond this RunIntent as it has reached
+ // its maximum concurrency. This indicates that the TaskSource no longer
+ // needs to be queued.
+ bool IsSaturated() const { return is_saturated_ == Saturated::kYes; }
+
+ const TaskSource* task_source() const { return task_source_; }
+
+ void ReleaseForTesting() {
+ DCHECK(task_source_);
+ task_source_ = nullptr;
+ }
+
+ private:
+ friend class TaskSource;
+
+ // Indicates the step of a run intent chain.
+ enum class State {
+ kInitial, // After WillRunTask().
+ kTaskAcquired, // After TakeTask().
+ kCompleted, // After DidProcessTask().
+ };
+
+ RunIntent(const TaskSource* task_source, Saturated is_saturated);
+
+ void Release() {
+ DCHECK_EQ(run_step_, State::kCompleted);
+ DCHECK(task_source_);
+ task_source_ = nullptr;
+ }
+
+ const TaskSource* task_source_ = nullptr;
+ State run_step_ = State::kInitial;
+ Saturated is_saturated_ = Saturated::kYes;
+ };
+
// A Transaction can perform multiple operations atomically on a
// TaskSource. While a Transaction is alive, it is guaranteed that nothing
// else will access the TaskSource; the TaskSource's lock is held for the
@@ -69,20 +134,22 @@ class BASE_EXPORT TaskSource : public RefCountedThreadSafe<TaskSource> {
Transaction(Transaction&& other);
~Transaction();
+ operator bool() const { return !!task_source_; }
+
// Returns the next task to run from this TaskSource. This should be called
- // only if NeedsWorker returns true. Cannot be called on an empty
- // TaskSource.
+ // only with a valid |intent|. Cannot be called on an empty TaskSource.
//
// Because this method cannot be called on an empty TaskSource, the returned
// Optional<Task> is never nullptr. An Optional is used in preparation for
// the merge between ThreadPool and TaskQueueManager (in Blink).
// https://crbug.com/783309
- Optional<Task> TakeTask();
+ Optional<Task> TakeTask(RunIntent* intent) WARN_UNUSED_RESULT;
- // Must be called once the task was executed. Cannot be called on an empty
- // TaskSource. Returns true if the TaskSource should be queued after this
- // operation.
- bool DidRunTask();
+ // Must be called once the task was run or skipped. |run_result| indicates
+ // if the task executed. Cannot be called on an empty TaskSource. Returns
+ // true if the TaskSource should be queued after this operation.
+ bool DidProcessTask(RunIntent intent,
+ RunResult run_result = RunResult::kDidRun);
// Returns a SequenceSortKey representing the priority of the TaskSource.
// Cannot be called on an empty TaskSource.
@@ -125,6 +192,18 @@ class BASE_EXPORT TaskSource : public RefCountedThreadSafe<TaskSource> {
virtual ExecutionEnvironment GetExecutionEnvironment() = 0;
+ // Informs this TaskSource that an additional Task could be run. Returns a
+ // RunIntent that evaluates to true if this operation is allowed (TakeTask()
+ // can be called), or false otherwise. This function is not thread safe and
+ // must be externally synchronized (e.g. by the lock of the PriorityQueue
+ // holding the TaskSource).
+ virtual RunIntent WillRunTask() = 0;
+
+ // Thread-safe but the returned value may immediately be obsolete. As such
+ // this should only be used as a best-effort guess of how many more workers
+ // are needed.
+ virtual size_t GetRemainingConcurrency() const = 0;
+
// Support for IntrusiveHeap.
void SetHeapHandle(const HeapHandle& handle);
void ClearHeapHandle();
@@ -137,7 +216,7 @@ class BASE_EXPORT TaskSource : public RefCountedThreadSafe<TaskSource> {
}
// A reference to TaskRunner is only retained between PushTask() and when
- // DidRunTask() returns false, guaranteeing it is safe to dereference this
+ // DidProcessTask() returns false, guaranteeing it is safe to dereference this
// pointer. Otherwise, the caller should guarantee such TaskRunner still
// exists before dereferencing.
TaskRunner* task_runner() const { return task_runner_; }
@@ -149,9 +228,10 @@ class BASE_EXPORT TaskSource : public RefCountedThreadSafe<TaskSource> {
virtual Optional<Task> TakeTask() = 0;
- // Returns true if the TaskSource should be queued after this
- // operation.
- virtual bool DidRunTask() = 0;
+  // Informs this TaskSource that a task was processed. |run_result| indicates
+ // whether the task executed or not. Returns true if the TaskSource
+ // should be queued after this operation.
+ virtual bool DidProcessTask(RunResult run_result) = 0;
virtual SequenceSortKey GetSortKey() const = 0;
@@ -160,6 +240,10 @@ class BASE_EXPORT TaskSource : public RefCountedThreadSafe<TaskSource> {
// Sets TaskSource priority to |priority|.
void UpdatePriority(TaskPriority priority);
+ // Constructs and returns a RunIntent, where |is_saturated| indicates that the
+ // TaskSource has reached its maximum concurrency.
+ RunIntent MakeRunIntent(Saturated is_saturated) const;
+
// The TaskTraits of all Tasks in the TaskSource.
TaskTraits traits_;
@@ -176,7 +260,7 @@ class BASE_EXPORT TaskSource : public RefCountedThreadSafe<TaskSource> {
// A pointer to the TaskRunner that posts to this TaskSource, if any. The
// derived class is responsible for calling AddRef() when a TaskSource from
// which no Task is executing becomes non-empty and Release() when
- // DidRunTask() returns false.
+ // DidProcessTask() returns false.
TaskRunner* task_runner_;
TaskSourceExecutionMode execution_mode_;
@@ -190,7 +274,7 @@ class BASE_EXPORT RegisteredTaskSource {
public:
RegisteredTaskSource();
RegisteredTaskSource(std::nullptr_t);
- RegisteredTaskSource(RegisteredTaskSource&& other);
+ RegisteredTaskSource(RegisteredTaskSource&& other) noexcept;
~RegisteredTaskSource();
RegisteredTaskSource& operator=(RegisteredTaskSource&& other);
@@ -208,7 +292,6 @@ class BASE_EXPORT RegisteredTaskSource {
private:
friend class TaskTracker;
-
RegisteredTaskSource(scoped_refptr<TaskSource> task_source,
TaskTracker* task_tracker);
@@ -218,32 +301,58 @@ class BASE_EXPORT RegisteredTaskSource {
DISALLOW_COPY_AND_ASSIGN(RegisteredTaskSource);
};
+// Base implementation for TransactionWith[Owned/Registered]TaskSource (with
+// Transaction as the decorator) and RunIntentWithRegisteredTaskSource (with
+// RunIntent as the decorator).
+template <class Decorator, class T>
+class BASE_EXPORT DecoratorWithTaskSource : public Decorator {
+ public:
+ DecoratorWithTaskSource() = default;
+ DecoratorWithTaskSource(std::nullptr_t) : DecoratorWithTaskSource() {}
+ DecoratorWithTaskSource(T task_source_in, Decorator decorator)
+ : Decorator(std::move(decorator)),
+ task_source_(std::move(task_source_in)) {
+ DCHECK_EQ(task_source_.get(), this->task_source());
+ }
+ DecoratorWithTaskSource(DecoratorWithTaskSource&& other) = default;
+ ~DecoratorWithTaskSource() = default;
+
+ DecoratorWithTaskSource& operator=(DecoratorWithTaskSource&&) = default;
+
+ T take_task_source() { return std::move(task_source_); }
+
+ protected:
+ T task_source_;
+
+ DISALLOW_COPY_AND_ASSIGN(DecoratorWithTaskSource);
+};
+
+// A RunIntent with an additional RegisteredTaskSource member.
+using RunIntentWithRegisteredTaskSource =
+ DecoratorWithTaskSource<TaskSource::RunIntent, RegisteredTaskSource>;
+
template <class T>
-struct BASE_EXPORT BasicTaskSourceAndTransaction {
- T task_source;
- TaskSource::Transaction transaction;
+struct BASE_EXPORT BasicTransactionWithTaskSource
+ : public DecoratorWithTaskSource<TaskSource::Transaction, T> {
+ using DecoratorWithTaskSource<TaskSource::Transaction,
+ T>::DecoratorWithTaskSource;
- static BasicTaskSourceAndTransaction FromTaskSource(T task_source) {
+ static BasicTransactionWithTaskSource FromTaskSource(T task_source) {
auto transaction = task_source->BeginTransaction();
- return BasicTaskSourceAndTransaction(std::move(task_source),
- std::move(transaction));
+ return BasicTransactionWithTaskSource(std::move(task_source),
+ std::move(transaction));
}
-
- BasicTaskSourceAndTransaction(T task_source_in,
- TaskSource::Transaction transaction_in)
- : task_source(std::move(task_source_in)),
- transaction(std::move(transaction_in)) {}
- BasicTaskSourceAndTransaction(BasicTaskSourceAndTransaction&& other) =
- default;
- ~BasicTaskSourceAndTransaction() = default;
-
- DISALLOW_COPY_AND_ASSIGN(BasicTaskSourceAndTransaction);
};
-using TaskSourceAndTransaction =
- BasicTaskSourceAndTransaction<scoped_refptr<TaskSource>>;
-using RegisteredTaskSourceAndTransaction =
- BasicTaskSourceAndTransaction<RegisteredTaskSource>;
+// A Transaction with an additional scoped_refptr<TaskSource> member. Useful to
+// carry ownership of a TaskSource with an associated Transaction.
+using TransactionWithOwnedTaskSource =
+ BasicTransactionWithTaskSource<scoped_refptr<TaskSource>>;
+
+// A Transaction with an additional RegisteredTaskSource member. Useful to carry
+// a RegisteredTaskSource with an associated Transaction.
+using TransactionWithRegisteredTaskSource =
+ BasicTransactionWithTaskSource<RegisteredTaskSource>;
} // namespace internal
} // namespace base
diff --git a/chromium/base/task/thread_pool/task_tracker.cc b/chromium/base/task/thread_pool/task_tracker.cc
index daa8f42ae41..6b9282fbd6d 100644
--- a/chromium/base/task/thread_pool/task_tracker.cc
+++ b/chromium/base/task/thread_pool/task_tracker.cc
@@ -8,7 +8,9 @@
#include <string>
#include <vector>
+#include "base/base_switches.h"
#include "base/callback.h"
+#include "base/command_line.h"
#include "base/compiler_specific.h"
#include "base/debug/alias.h"
#include "base/json/json_writer.h"
@@ -18,6 +20,7 @@
#include "base/sequence_token.h"
#include "base/synchronization/condition_variable.h"
#include "base/task/scoped_set_task_priority_for_current_thread.h"
+#include "base/task/thread_pool/thread_pool_clock.h"
#include "base/threading/sequence_local_storage_map.h"
#include "base/threading/sequenced_task_runner_handle.h"
#include "base/threading/thread_restrictions.h"
@@ -32,7 +35,7 @@ namespace internal {
namespace {
constexpr const char* kExecutionModeString[] = {"parallel", "sequenced",
- "single thread"};
+ "single thread", "job"};
static_assert(
size(kExecutionModeString) ==
static_cast<size_t>(TaskSourceExecutionMode::kMax) + 1,
@@ -129,15 +132,23 @@ HistogramBase* GetHistogramForTaskTraits(
// Returns shutdown behavior based on |traits|; returns SKIP_ON_SHUTDOWN if
// shutdown behavior is BLOCK_SHUTDOWN and |is_delayed|, because delayed tasks
// are not allowed to block shutdown.
-TaskShutdownBehavior GetEffectiveShutdownBehavior(const TaskTraits& traits,
- bool is_delayed) {
- const TaskShutdownBehavior shutdown_behavior = traits.shutdown_behavior();
+TaskShutdownBehavior GetEffectiveShutdownBehavior(
+ TaskShutdownBehavior shutdown_behavior,
+ bool is_delayed) {
if (shutdown_behavior == TaskShutdownBehavior::BLOCK_SHUTDOWN && is_delayed) {
return TaskShutdownBehavior::SKIP_ON_SHUTDOWN;
}
return shutdown_behavior;
}
+bool HasLogBestEffortTasksSwitch() {
+ // The CommandLine might not be initialized if ThreadPool is initialized in a
+ // dynamic library which doesn't have access to argc/argv.
+ return CommandLine::InitializedForCurrentProcess() &&
+ CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kLogBestEffortTasks);
+}
+
} // namespace
// Atomic internal state used by TaskTracker to track items that are blocking
@@ -232,7 +243,8 @@ class TaskTracker::State {
// TODO(jessemckenna): Write a helper function to avoid code duplication below.
TaskTracker::TaskTracker(StringPiece histogram_label)
- : state_(new State),
+ : has_log_best_effort_tasks_switch_(HasLogBestEffortTasksSwitch()),
+ state_(new State),
can_run_policy_(CanRunPolicy::kAll),
flush_cv_(flush_lock_.CreateConditionVariable()),
shutdown_lock_(&flush_lock_),
@@ -295,9 +307,11 @@ TaskTracker::TaskTracker(StringPiece histogram_label)
"UserBlockingTaskPriority_MayBlock")}},
tracked_ref_factory_(this) {
// Confirm that all |task_latency_histograms_| have been initialized above.
- DCHECK(*(&task_latency_histograms_[static_cast<int>(TaskPriority::HIGHEST) +
- 1][0] -
- 1));
+ for (TaskPriorityType i = 0; i < kNumTaskPriorities; ++i) {
+ for (TaskPriorityType j = 0; j < kNumBlockingModes; ++j) {
+ DCHECK(task_latency_histograms_[i][j]);
+ }
+ }
}
TaskTracker::~TaskTracker() = default;
@@ -327,12 +341,10 @@ void TaskTracker::StartShutdown() {
}
}
-// TODO(gab): Figure out why TS_UNCHECKED_READ is insufficient to make thread
-// analysis of |shutdown_event_| happy on POSIX.
-void TaskTracker::CompleteShutdown() NO_THREAD_SAFETY_ANALYSIS {
+void TaskTracker::CompleteShutdown() {
// It is safe to access |shutdown_event_| without holding |lock_| because the
- // pointer never changes after being set by StartShutdown(), which must be
- // called before this.
+ // pointer never changes after being set by StartShutdown(), which must
+  // happen-before this.
DCHECK(TS_UNCHECKED_READ(shutdown_event_));
{
base::ScopedAllowBaseSyncPrimitives allow_wait;
@@ -393,13 +405,7 @@ bool TaskTracker::WillPostTask(Task* task,
// ordering bug. This aims to catch those early.
CheckedAutoLock auto_lock(shutdown_lock_);
DCHECK(shutdown_event_);
- // TODO(http://crbug.com/698140): Atomically shutdown the service thread
- // to prevent racily posting BLOCK_SHUTDOWN tasks in response to a
- // FileDescriptorWatcher (and/or make such notifications never be
- // BLOCK_SHUTDOWN). Then, enable this DCHECK, until then, skip the task.
- // DCHECK(!shutdown_event_->IsSignaled());
- if (shutdown_event_->IsSignaled())
- return false;
+ DCHECK(!shutdown_event_->IsSignaled());
}
// TODO(scheduler-dev): Record the task traits here.
@@ -408,6 +414,14 @@ bool TaskTracker::WillPostTask(Task* task,
return true;
}
+void TaskTracker::WillPostTaskNow(const Task& task, TaskPriority priority) {
+ if (has_log_best_effort_tasks_switch_ &&
+ priority == TaskPriority::BEST_EFFORT) {
+ // A TaskPriority::BEST_EFFORT task is being posted.
+ LOG(INFO) << task.posted_from.ToString();
+ }
+}
+
RegisteredTaskSource TaskTracker::WillQueueTaskSource(
scoped_refptr<TaskSource> task_source) {
DCHECK(task_source);
@@ -435,16 +449,17 @@ bool TaskTracker::CanRunPriority(TaskPriority priority) const {
}
RegisteredTaskSource TaskTracker::RunAndPopNextTask(
- RegisteredTaskSource task_source) {
- DCHECK(task_source);
+ RunIntentWithRegisteredTaskSource run_intent_with_task_source) {
+ DCHECK(run_intent_with_task_source);
+ auto task_source = run_intent_with_task_source.take_task_source();
// Run the next task in |task_source|.
Optional<Task> task;
- TaskTraits traits;
+ TaskTraits traits{ThreadPool()};
{
TaskSource::Transaction task_source_transaction(
task_source->BeginTransaction());
- task = task_source_transaction.TakeTask();
+ task = task_source_transaction.TakeTask(&run_intent_with_task_source);
traits = task_source_transaction.traits();
}
@@ -459,14 +474,17 @@ RegisteredTaskSource TaskTracker::RunAndPopNextTask(
can_run_task);
const bool task_source_must_be_queued =
- task_source->BeginTransaction().DidRunTask();
+ task_source->BeginTransaction().DidProcessTask(
+ std::move(run_intent_with_task_source),
+ can_run_task ? TaskSource::RunResult::kDidRun
+ : TaskSource::RunResult::kSkippedAtShutdown);
if (can_run_task) {
IncrementNumTasksRun();
AfterRunTask(effective_shutdown_behavior);
}
- // |task_source| should be reenqueued iff requested by DidRunTask().
+ // |task_source| should be reenqueued iff requested by DidProcessTask().
if (task_source_must_be_queued) {
return task_source;
}
@@ -488,7 +506,7 @@ void TaskTracker::RecordLatencyHistogram(
LatencyHistogramType latency_histogram_type,
TaskTraits task_traits,
TimeTicks posted_time) const {
- const TimeDelta task_latency = TimeTicks::Now() - posted_time;
+ const TimeDelta task_latency = ThreadPoolClock::Now() - posted_time;
DCHECK(latency_histogram_type == LatencyHistogramType::TASK_LATENCY ||
latency_histogram_type == LatencyHistogramType::HEARTBEAT_LATENCY);
@@ -505,11 +523,11 @@ void TaskTracker::RecordHeartbeatLatencyAndTasksRunWhileQueuingHistograms(
bool may_block,
TimeTicks posted_time,
int num_tasks_run_when_posted) const {
- TaskTraits task_traits;
+ TaskTraits task_traits{ThreadPool()};
if (may_block)
- task_traits = TaskTraits(task_priority, MayBlock());
+ task_traits = TaskTraits(ThreadPool(), task_priority, MayBlock());
else
- task_traits = TaskTraits(task_priority);
+ task_traits = TaskTraits(ThreadPool(), task_priority);
RecordLatencyHistogram(LatencyHistogramType::HEARTBEAT_LATENCY, task_traits,
posted_time);
GetHistogramForTaskTraits(task_traits,
@@ -566,6 +584,7 @@ void TaskTracker::RunOrSkipTask(Task task,
Optional<SequencedTaskRunnerHandle> sequenced_task_runner_handle;
Optional<ThreadTaskRunnerHandle> single_thread_task_runner_handle;
switch (task_source->execution_mode()) {
+ case TaskSourceExecutionMode::kJob:
case TaskSourceExecutionMode::kParallel:
break;
case TaskSourceExecutionMode::kSequenced:
@@ -618,20 +637,11 @@ bool TaskTracker::BeforeQueueTaskSource(
const bool shutdown_started = state_->IncrementNumItemsBlockingShutdown();
if (shutdown_started) {
-
// A BLOCK_SHUTDOWN task posted after shutdown has completed is an
// ordering bug. This aims to catch those early.
CheckedAutoLock auto_lock(shutdown_lock_);
DCHECK(shutdown_event_);
- // TODO(http://crbug.com/698140): Atomically shutdown the service thread
- // to prevent racily posting BLOCK_SHUTDOWN tasks in response to a
- // FileDescriptorWatcher (and/or make such notifications never be
- // BLOCK_SHUTDOWN). Then, enable this DCHECK, until then, skip the task.
- // DCHECK(!shutdown_event_->IsSignaled());
- if (shutdown_event_->IsSignaled()) {
- state_->DecrementNumItemsBlockingShutdown();
- return false;
- }
+ DCHECK(!shutdown_event_->IsSignaled());
}
return true;
diff --git a/chromium/base/task/thread_pool/task_tracker.h b/chromium/base/task/thread_pool/task_tracker.h
index 8857ba586b4..4a7f28b6c8a 100644
--- a/chromium/base/task/thread_pool/task_tracker.h
+++ b/chromium/base/task/thread_pool/task_tracker.h
@@ -93,11 +93,16 @@ class BASE_EXPORT TaskTracker {
void SetCanRunPolicy(CanRunPolicy can_run_policy);
// Informs this TaskTracker that |task| with |shutdown_behavior| is about to
- // be posted to a task source. Returns true if this operation is allowed
- // (|task| should be pushed into its task source if-and-only-if it is). This
- // method may also modify metadata on |task| if desired.
+ // be pushed to a task source (if non-delayed) or be added to the
+ // DelayedTaskManager (if delayed). Returns true if this operation is allowed
+ // (the operation should be performed if-and-only-if it is). This method may
+ // also modify metadata on |task| if desired.
bool WillPostTask(Task* task, TaskShutdownBehavior shutdown_behavior);
+ // Informs this TaskTracker that |task| is about to be pushed to a task
+ // source with |priority|.
+ void WillPostTaskNow(const Task& task, TaskPriority priority);
+
// Informs this TaskTracker that |task_source| is about to be queued. Returns
// a RegisteredTaskSource that should be queued if-and-only-if it evaluates to
// true.
@@ -113,7 +118,8 @@ class BASE_EXPORT TaskTracker {
// (which indicates that it should be reenqueued). WillPostTask() must have
// allowed the task in front of |task_source| to be posted before this is
// called.
- RegisteredTaskSource RunAndPopNextTask(RegisteredTaskSource task_source);
+ RegisteredTaskSource RunAndPopNextTask(
+ RunIntentWithRegisteredTaskSource task_source);
// Returns true once shutdown has started (StartShutdown() was called).
// Note: sequential consistency with the thread calling StartShutdown() isn't
@@ -154,6 +160,11 @@ class BASE_EXPORT TaskTracker {
return tracked_ref_factory_.GetTrackedRef();
}
+ // Returns true if there are task sources that haven't completed their
+ // execution (still queued or in progress). If it returns false: the side-
+ // effects of all completed tasks are guaranteed to be visible to the caller.
+ bool HasIncompleteTaskSourcesForTesting() const;
+
protected:
// Runs and deletes |task| if |can_run_task| is true. Otherwise, just deletes
// |task|. |task| is always deleted in the environment where it runs or would
@@ -166,11 +177,6 @@ class BASE_EXPORT TaskTracker {
const TaskTraits& traits,
bool can_run_task);
- // Returns true if there are task sources that haven't completed their
- // execution (still queued or in progress). If it returns false: the side-
- // effects of all completed tasks are guaranteed to be visible to the caller.
- bool HasIncompleteTaskSourcesForTesting() const;
-
private:
friend class RegisteredTaskSource;
class State;
@@ -225,8 +231,15 @@ class BASE_EXPORT TaskTracker {
TaskAnnotator task_annotator_;
+ // Indicates whether logging information about TaskPriority::BEST_EFFORT tasks
+ // was enabled with a command line switch.
+ const bool has_log_best_effort_tasks_switch_;
+
// Number of tasks blocking shutdown and boolean indicating whether shutdown
- // has started.
+ // has started. |shutdown_lock_| should be held to access |shutdown_event_|
+ // when this indicates that shutdown has started because State doesn't provide
+ // memory barriers. It intentionally trades having to use a Lock on shutdown
+ // with not needing memory barriers at runtime.
const std::unique_ptr<State> state_;
// Number of task sources that haven't completed their execution. Is
@@ -272,12 +285,16 @@ class BASE_EXPORT TaskTracker {
// blocking tasks. Intentionally leaked.
// TODO(scheduler-dev): Consider using STATIC_HISTOGRAM_POINTER_GROUP for
// these.
- static constexpr int kNumTaskPriorities =
- static_cast<int>(TaskPriority::HIGHEST) + 1;
- HistogramBase* const task_latency_histograms_[kNumTaskPriorities][2];
- HistogramBase* const heartbeat_latency_histograms_[kNumTaskPriorities][2];
+ static constexpr auto kNumTaskPriorities =
+ static_cast<TaskPriorityType>(TaskPriority::HIGHEST) + 1;
+ static constexpr TaskPriorityType kNumBlockingModes = 2;
+ HistogramBase* const task_latency_histograms_[kNumTaskPriorities]
+ [kNumBlockingModes];
+ HistogramBase* const heartbeat_latency_histograms_[kNumTaskPriorities]
+ [kNumBlockingModes];
HistogramBase* const
- num_tasks_run_while_queuing_histograms_[kNumTaskPriorities][2];
+ num_tasks_run_while_queuing_histograms_[kNumTaskPriorities]
+ [kNumBlockingModes];
// Ensures all state (e.g. dangling cleaned up workers) is coalesced before
// destroying the TaskTracker (e.g. in test environments).
diff --git a/chromium/base/task/thread_pool/task_tracker_posix_unittest.cc b/chromium/base/task/thread_pool/task_tracker_posix_unittest.cc
index fbcaf2acd5f..2bf9e114d4e 100644
--- a/chromium/base/task/thread_pool/task_tracker_posix_unittest.cc
+++ b/chromium/base/task/thread_pool/task_tracker_posix_unittest.cc
@@ -42,7 +42,7 @@ class ThreadPoolTaskTrackerPosixTest : public testing::Test {
protected:
Thread service_thread_;
- TaskTrackerPosix tracker_ = {"Test"};
+ TaskTrackerPosix tracker_{"Test"};
private:
DISALLOW_COPY_AND_ASSIGN(ThreadPoolTaskTrackerPosixTest);
@@ -57,7 +57,7 @@ TEST_F(ThreadPoolTaskTrackerPosixTest, RunTask) {
FROM_HERE,
BindOnce([](bool* did_run) { *did_run = true; }, Unretained(&did_run)),
TimeDelta());
- constexpr TaskTraits default_traits = {};
+ constexpr TaskTraits default_traits = {ThreadPool()};
EXPECT_TRUE(tracker_.WillPostTask(&task, default_traits.shutdown_behavior()));
@@ -78,7 +78,7 @@ TEST_F(ThreadPoolTaskTrackerPosixTest, FileDescriptorWatcher) {
BindOnce(IgnoreResult(&FileDescriptorWatcher::WatchReadable),
fds[0], DoNothing()),
TimeDelta());
- constexpr TaskTraits default_traits = {};
+ constexpr TaskTraits default_traits = {ThreadPool()};
EXPECT_TRUE(tracker_.WillPostTask(&task, default_traits.shutdown_behavior()));
diff --git a/chromium/base/task/thread_pool/task_tracker_unittest.cc b/chromium/base/task/thread_pool/task_tracker_unittest.cc
index 545a664e535..00ac240c44c 100644
--- a/chromium/base/task/thread_pool/task_tracker_unittest.cc
+++ b/chromium/base/task/thread_pool/task_tracker_unittest.cc
@@ -127,10 +127,12 @@ class ThreadPostingAndRunningTask : public SimpleThread {
if (post_and_queue_succeeded &&
(action_ == Action::RUN || action_ == Action::WILL_POST_AND_RUN)) {
EXPECT_TRUE(task_source_);
+ auto run_intent = task_source_->WillRunTask();
// Expect RunAndPopNextTask to return nullptr since |sequence| is empty
// after popping a task from it.
- EXPECT_FALSE(tracker_->RunAndPopNextTask(std::move(task_source_)));
+ EXPECT_FALSE(tracker_->RunAndPopNextTask(
+ {std::move(task_source_), std::move(run_intent)}));
}
}
@@ -178,6 +180,11 @@ class ThreadPoolTaskTrackerTest
auto sequence = test::CreateSequenceWithTask(std::move(task), traits);
return tracker_.WillQueueTaskSource(std::move(sequence));
}
+ RegisteredTaskSource RunAndPopNextTask(RegisteredTaskSource task_source) {
+ auto run_intent = task_source->WillRunTask();
+ return tracker_.RunAndPopNextTask(
+ {std::move(task_source), std::move(run_intent)});
+ }
// Calls tracker_->CompleteShutdown() on a new thread and expects it to block.
void ExpectAsyncCompleteShutdownBlocks() {
@@ -228,7 +235,7 @@ class ThreadPoolTaskTrackerTest
return num_tasks_executed_;
}
- TaskTracker tracker_ = {"Test"};
+ TaskTracker tracker_{"Test"};
private:
void RunTaskCallback() {
@@ -283,7 +290,8 @@ TEST_P(ThreadPoolTaskTrackerTest, WillPostAndRunBeforeShutdown) {
EXPECT_EQ(0U, NumTasksExecuted());
test::QueueAndRunTaskSource(
- &tracker_, test::CreateSequenceWithTask(std::move(task), GetParam()));
+ &tracker_, test::CreateSequenceWithTask(std::move(task),
+ {ThreadPool(), GetParam()}));
EXPECT_EQ(1U, NumTasksExecuted());
// Shutdown() shouldn't block.
@@ -308,8 +316,8 @@ TEST_P(ThreadPoolTaskTrackerTest, WillPostAndRunLongTaskBeforeShutdown) {
TimeDelta());
// Inform |task_tracker_| that |blocked_task| will be posted.
- auto sequence =
- WillPostTaskAndQueueTaskSource(std::move(blocked_task), {GetParam()});
+ auto sequence = WillPostTaskAndQueueTaskSource(std::move(blocked_task),
+ {ThreadPool(), GetParam()});
EXPECT_TRUE(sequence);
// Create a thread to run the task. Wait until the task starts running.
@@ -344,12 +352,13 @@ TEST_P(ThreadPoolTaskTrackerTest, WillPostBeforeShutdownQueueDuringShutdown) {
// Simulate posting a undelayed task.
Task task{CreateTask()};
EXPECT_TRUE(tracker_.WillPostTask(&task, GetParam()));
- auto sequence = test::CreateSequenceWithTask(std::move(task), GetParam());
+ auto sequence =
+ test::CreateSequenceWithTask(std::move(task), {ThreadPool(), GetParam()});
// Inform |task_tracker_| that a BLOCK_SHUTDOWN task will be posted just to
// block shutdown.
auto block_shutdown_sequence = WillPostTaskAndQueueTaskSource(
- CreateTask(), TaskShutdownBehavior::BLOCK_SHUTDOWN);
+ CreateTask(), {ThreadPool(), TaskShutdownBehavior::BLOCK_SHUTDOWN});
EXPECT_TRUE(block_shutdown_sequence);
// Start shutdown and try to complete it asynchronously.
@@ -358,10 +367,7 @@ TEST_P(ThreadPoolTaskTrackerTest, WillPostBeforeShutdownQueueDuringShutdown) {
const bool should_run = GetParam() == TaskShutdownBehavior::BLOCK_SHUTDOWN;
if (should_run) {
- auto registered_task_source =
- tracker_.WillQueueTaskSource(std::move(sequence));
- EXPECT_TRUE(registered_task_source);
- tracker_.RunAndPopNextTask(std::move(registered_task_source));
+ test::QueueAndRunTaskSource(&tracker_, std::move(sequence));
EXPECT_EQ(1U, NumTasksExecuted());
VERIFY_ASYNC_SHUTDOWN_IN_PROGRESS();
} else {
@@ -369,20 +375,21 @@ TEST_P(ThreadPoolTaskTrackerTest, WillPostBeforeShutdownQueueDuringShutdown) {
}
// Unblock shutdown by running the remaining BLOCK_SHUTDOWN task.
- tracker_.RunAndPopNextTask(std::move(block_shutdown_sequence));
+ RunAndPopNextTask(std::move(block_shutdown_sequence));
EXPECT_EQ(should_run ? 2U : 1U, NumTasksExecuted());
WAIT_FOR_ASYNC_SHUTDOWN_COMPLETED();
}
TEST_P(ThreadPoolTaskTrackerTest, WillPostBeforeShutdownRunDuringShutdown) {
// Inform |task_tracker_| that a task will be posted.
- auto sequence = WillPostTaskAndQueueTaskSource(CreateTask(), GetParam());
+ auto sequence =
+ WillPostTaskAndQueueTaskSource(CreateTask(), {ThreadPool(), GetParam()});
EXPECT_TRUE(sequence);
// Inform |task_tracker_| that a BLOCK_SHUTDOWN task will be posted just to
// block shutdown.
auto block_shutdown_sequence = WillPostTaskAndQueueTaskSource(
- CreateTask(), TaskShutdownBehavior::BLOCK_SHUTDOWN);
+ CreateTask(), {ThreadPool(), TaskShutdownBehavior::BLOCK_SHUTDOWN});
EXPECT_TRUE(block_shutdown_sequence);
// Start shutdown and try to complete it asynchronously.
@@ -394,19 +401,20 @@ TEST_P(ThreadPoolTaskTrackerTest, WillPostBeforeShutdownRunDuringShutdown) {
EXPECT_EQ(0U, NumTasksExecuted());
const bool should_run = GetParam() == TaskShutdownBehavior::BLOCK_SHUTDOWN;
- tracker_.RunAndPopNextTask(std::move(sequence));
+ RunAndPopNextTask(std::move(sequence));
EXPECT_EQ(should_run ? 1U : 0U, NumTasksExecuted());
VERIFY_ASYNC_SHUTDOWN_IN_PROGRESS();
// Unblock shutdown by running the remaining BLOCK_SHUTDOWN task.
- tracker_.RunAndPopNextTask(std::move(block_shutdown_sequence));
+ RunAndPopNextTask(std::move(block_shutdown_sequence));
EXPECT_EQ(should_run ? 2U : 1U, NumTasksExecuted());
WAIT_FOR_ASYNC_SHUTDOWN_COMPLETED();
}
TEST_P(ThreadPoolTaskTrackerTest, WillPostBeforeShutdownRunAfterShutdown) {
// Inform |task_tracker_| that a task will be posted.
- auto sequence = WillPostTaskAndQueueTaskSource(CreateTask(), GetParam());
+ auto sequence =
+ WillPostTaskAndQueueTaskSource(CreateTask(), {ThreadPool(), GetParam()});
EXPECT_TRUE(sequence);
// Start shutdown.
@@ -418,7 +426,7 @@ TEST_P(ThreadPoolTaskTrackerTest, WillPostBeforeShutdownRunAfterShutdown) {
ExpectAsyncCompleteShutdownBlocks();
// Run the task to unblock shutdown.
- tracker_.RunAndPopNextTask(std::move(sequence));
+ RunAndPopNextTask(std::move(sequence));
EXPECT_EQ(1U, NumTasksExecuted());
WAIT_FOR_ASYNC_SHUTDOWN_COMPLETED();
@@ -429,7 +437,7 @@ TEST_P(ThreadPoolTaskTrackerTest, WillPostBeforeShutdownRunAfterShutdown) {
tracker_.CompleteShutdown();
// The task shouldn't be allowed to run after shutdown.
- tracker_.RunAndPopNextTask(std::move(sequence));
+ RunAndPopNextTask(std::move(sequence));
EXPECT_EQ(0U, NumTasksExecuted());
}
}
@@ -438,7 +446,7 @@ TEST_P(ThreadPoolTaskTrackerTest, WillPostAndRunDuringShutdown) {
// Inform |task_tracker_| that a BLOCK_SHUTDOWN task will be posted just to
// block shutdown.
auto block_shutdown_sequence = WillPostTaskAndQueueTaskSource(
- CreateTask(), TaskShutdownBehavior::BLOCK_SHUTDOWN);
+ CreateTask(), {ThreadPool(), TaskShutdownBehavior::BLOCK_SHUTDOWN});
EXPECT_TRUE(block_shutdown_sequence);
// Start shutdown.
@@ -446,16 +454,18 @@ TEST_P(ThreadPoolTaskTrackerTest, WillPostAndRunDuringShutdown) {
if (GetParam() == TaskShutdownBehavior::BLOCK_SHUTDOWN) {
// Inform |task_tracker_| that a BLOCK_SHUTDOWN task will be posted.
- auto sequence = WillPostTaskAndQueueTaskSource(CreateTask(), GetParam());
+ auto sequence = WillPostTaskAndQueueTaskSource(CreateTask(),
+ {ThreadPool(), GetParam()});
EXPECT_TRUE(sequence);
// Run the BLOCK_SHUTDOWN task.
EXPECT_EQ(0U, NumTasksExecuted());
- tracker_.RunAndPopNextTask(std::move(sequence));
+ RunAndPopNextTask(std::move(sequence));
EXPECT_EQ(1U, NumTasksExecuted());
} else {
// It shouldn't be allowed to post a non BLOCK_SHUTDOWN task.
- auto sequence = WillPostTaskAndQueueTaskSource(CreateTask(), GetParam());
+ auto sequence = WillPostTaskAndQueueTaskSource(CreateTask(),
+ {ThreadPool(), GetParam()});
EXPECT_FALSE(sequence);
// Don't try to run the task, because it wasn't allowed to be posted.
@@ -465,7 +475,7 @@ TEST_P(ThreadPoolTaskTrackerTest, WillPostAndRunDuringShutdown) {
ExpectAsyncCompleteShutdownBlocks();
// Unblock shutdown by running |block_shutdown_task|.
- tracker_.RunAndPopNextTask(std::move(block_shutdown_sequence));
+ RunAndPopNextTask(std::move(block_shutdown_sequence));
EXPECT_EQ(GetParam() == TaskShutdownBehavior::BLOCK_SHUTDOWN ? 2U : 1U,
NumTasksExecuted());
WAIT_FOR_ASYNC_SHUTDOWN_COMPLETED();
@@ -477,7 +487,11 @@ TEST_P(ThreadPoolTaskTrackerTest, WillPostAfterShutdown) {
Task task(CreateTask());
// |task_tracker_| shouldn't allow a task to be posted after shutdown.
- EXPECT_FALSE(tracker_.WillPostTask(&task, GetParam()));
+ if (GetParam() == TaskShutdownBehavior::BLOCK_SHUTDOWN) {
+ EXPECT_DCHECK_DEATH(tracker_.WillPostTask(&task, GetParam()));
+ } else {
+ EXPECT_FALSE(tracker_.WillPostTask(&task, GetParam()));
+ }
}
// Verify that BLOCK_SHUTDOWN and SKIP_ON_SHUTDOWN tasks can
@@ -488,7 +502,8 @@ TEST_P(ThreadPoolTaskTrackerTest, SingletonAllowed) {
Task task(FROM_HERE, BindOnce(&ThreadRestrictions::AssertSingletonAllowed),
TimeDelta());
- auto sequence = WillPostTaskAndQueueTaskSource(std::move(task), GetParam());
+ auto sequence = WillPostTaskAndQueueTaskSource(std::move(task),
+ {ThreadPool(), GetParam()});
EXPECT_TRUE(sequence);
// Set the singleton allowed bit to the opposite of what it is expected to be
@@ -498,9 +513,9 @@ TEST_P(ThreadPoolTaskTrackerTest, SingletonAllowed) {
// Running the task should fail iff the task isn't allowed to use singletons.
if (can_use_singletons) {
- EXPECT_FALSE(tracker_.RunAndPopNextTask(std::move(sequence)));
+ EXPECT_FALSE(RunAndPopNextTask(std::move(sequence)));
} else {
- EXPECT_DCHECK_DEATH({ tracker_.RunAndPopNextTask(std::move(sequence)); });
+ EXPECT_DCHECK_DEATH({ RunAndPopNextTask(std::move(sequence)); });
}
}
@@ -515,11 +530,11 @@ TEST_P(ThreadPoolTaskTrackerTest, IOAllowed) {
FROM_HERE, BlockingType::WILL_BLOCK);
}),
TimeDelta());
- TaskTraits traits_with_may_block = TaskTraits(MayBlock(), GetParam());
+ TaskTraits traits_with_may_block{ThreadPool(), MayBlock(), GetParam()};
auto sequence_with_may_block = WillPostTaskAndQueueTaskSource(
std::move(task_with_may_block), traits_with_may_block);
EXPECT_TRUE(sequence_with_may_block);
- tracker_.RunAndPopNextTask(std::move(sequence_with_may_block));
+ RunAndPopNextTask(std::move(sequence_with_may_block));
// Set the IO allowed bit. Expect TaskTracker to unset it before running a
// task without the MayBlock() trait.
@@ -531,11 +546,11 @@ TEST_P(ThreadPoolTaskTrackerTest, IOAllowed) {
});
}),
TimeDelta());
- TaskTraits traits_without_may_block = TaskTraits(GetParam());
+ TaskTraits traits_without_may_block = TaskTraits(ThreadPool(), GetParam());
auto sequence_without_may_block = WillPostTaskAndQueueTaskSource(
std::move(task_without_may_block), traits_without_may_block);
EXPECT_TRUE(sequence_without_may_block);
- tracker_.RunAndPopNextTask(std::move(sequence_without_may_block));
+ RunAndPopNextTask(std::move(sequence_without_may_block));
}
static void RunTaskRunnerHandleVerificationTask(
@@ -572,9 +587,9 @@ TEST_P(ThreadPoolTaskTrackerTest, TaskRunnerHandleIsNotSetOnParallel) {
// scope per no TaskRunner ref being set to it.
Task verify_task(FROM_HERE, BindOnce(&VerifyNoTaskRunnerHandle), TimeDelta());
- RunTaskRunnerHandleVerificationTask(&tracker_, std::move(verify_task),
- TaskTraits(GetParam()), nullptr,
- TaskSourceExecutionMode::kParallel);
+ RunTaskRunnerHandleVerificationTask(
+ &tracker_, std::move(verify_task), TaskTraits(ThreadPool(), GetParam()),
+ nullptr, TaskSourceExecutionMode::kParallel);
}
static void VerifySequencedTaskRunnerHandle(
@@ -596,7 +611,7 @@ TEST_P(ThreadPoolTaskTrackerTest, SequencedTaskRunnerHandleIsSetOnSequenced) {
TimeDelta());
RunTaskRunnerHandleVerificationTask(
- &tracker_, std::move(verify_task), TaskTraits(GetParam()),
+ &tracker_, std::move(verify_task), TaskTraits(ThreadPool(), GetParam()),
std::move(test_task_runner), TaskSourceExecutionMode::kSequenced);
}
@@ -621,7 +636,7 @@ TEST_P(ThreadPoolTaskTrackerTest, ThreadTaskRunnerHandleIsSetOnSingleThreaded) {
TimeDelta());
RunTaskRunnerHandleVerificationTask(
- &tracker_, std::move(verify_task), TaskTraits(GetParam()),
+ &tracker_, std::move(verify_task), TaskTraits(ThreadPool(), GetParam()),
std::move(test_task_runner), TaskSourceExecutionMode::kSingleThread);
}
@@ -645,8 +660,8 @@ TEST_P(ThreadPoolTaskTrackerTest, FlushAsyncForTestingPendingDelayedTask) {
TEST_P(ThreadPoolTaskTrackerTest, FlushPendingUndelayedTask) {
Task undelayed_task(FROM_HERE, DoNothing(), TimeDelta());
- auto undelayed_sequence =
- WillPostTaskAndQueueTaskSource(std::move(undelayed_task), GetParam());
+ auto undelayed_sequence = WillPostTaskAndQueueTaskSource(
+ std::move(undelayed_task), {ThreadPool(), GetParam()});
// FlushForTesting() shouldn't return before the undelayed task runs.
CallFlushFromAnotherThread();
@@ -654,14 +669,14 @@ TEST_P(ThreadPoolTaskTrackerTest, FlushPendingUndelayedTask) {
VERIFY_ASYNC_FLUSH_IN_PROGRESS();
// FlushForTesting() should return after the undelayed task runs.
- tracker_.RunAndPopNextTask(std::move(undelayed_sequence));
+ RunAndPopNextTask(std::move(undelayed_sequence));
WAIT_FOR_ASYNC_FLUSH_RETURNED();
}
TEST_P(ThreadPoolTaskTrackerTest, FlushAsyncForTestingPendingUndelayedTask) {
Task undelayed_task(FROM_HERE, DoNothing(), TimeDelta());
- auto undelayed_sequence =
- WillPostTaskAndQueueTaskSource(std::move(undelayed_task), GetParam());
+ auto undelayed_sequence = WillPostTaskAndQueueTaskSource(
+ std::move(undelayed_task), {ThreadPool(), GetParam()});
// FlushAsyncForTesting() shouldn't callback before the undelayed task runs.
WaitableEvent event;
@@ -671,14 +686,14 @@ TEST_P(ThreadPoolTaskTrackerTest, FlushAsyncForTestingPendingUndelayedTask) {
EXPECT_FALSE(event.IsSignaled());
// FlushAsyncForTesting() should callback after the undelayed task runs.
- tracker_.RunAndPopNextTask(std::move(undelayed_sequence));
+ RunAndPopNextTask(std::move(undelayed_sequence));
event.Wait();
}
TEST_P(ThreadPoolTaskTrackerTest, PostTaskDuringFlush) {
Task undelayed_task(FROM_HERE, DoNothing(), TimeDelta());
- auto undelayed_sequence =
- WillPostTaskAndQueueTaskSource(std::move(undelayed_task), GetParam());
+ auto undelayed_sequence = WillPostTaskAndQueueTaskSource(
+ std::move(undelayed_task), {ThreadPool(), GetParam()});
// FlushForTesting() shouldn't return before the undelayed task runs.
CallFlushFromAnotherThread();
@@ -688,24 +703,24 @@ TEST_P(ThreadPoolTaskTrackerTest, PostTaskDuringFlush) {
// Simulate posting another undelayed task.
Task other_undelayed_task(FROM_HERE, DoNothing(), TimeDelta());
auto other_undelayed_sequence = WillPostTaskAndQueueTaskSource(
- std::move(other_undelayed_task), GetParam());
+ std::move(other_undelayed_task), {ThreadPool(), GetParam()});
// Run the first undelayed task.
- tracker_.RunAndPopNextTask(std::move(undelayed_sequence));
+ RunAndPopNextTask(std::move(undelayed_sequence));
// FlushForTesting() shouldn't return before the second undelayed task runs.
PlatformThread::Sleep(TestTimeouts::tiny_timeout());
VERIFY_ASYNC_FLUSH_IN_PROGRESS();
// FlushForTesting() should return after the second undelayed task runs.
- tracker_.RunAndPopNextTask(std::move(other_undelayed_sequence));
+ RunAndPopNextTask(std::move(other_undelayed_sequence));
WAIT_FOR_ASYNC_FLUSH_RETURNED();
}
TEST_P(ThreadPoolTaskTrackerTest, PostTaskDuringFlushAsyncForTesting) {
Task undelayed_task(FROM_HERE, DoNothing(), TimeDelta());
- auto undelayed_sequence =
- WillPostTaskAndQueueTaskSource(std::move(undelayed_task), GetParam());
+ auto undelayed_sequence = WillPostTaskAndQueueTaskSource(
+ std::move(undelayed_task), {ThreadPool(), GetParam()});
// FlushAsyncForTesting() shouldn't callback before the undelayed task runs.
WaitableEvent event;
@@ -717,10 +732,10 @@ TEST_P(ThreadPoolTaskTrackerTest, PostTaskDuringFlushAsyncForTesting) {
// Simulate posting another undelayed task.
Task other_undelayed_task(FROM_HERE, DoNothing(), TimeDelta());
auto other_undelayed_sequence = WillPostTaskAndQueueTaskSource(
- std::move(other_undelayed_task), GetParam());
+ std::move(other_undelayed_task), {ThreadPool(), GetParam()});
// Run the first undelayed task.
- tracker_.RunAndPopNextTask(std::move(undelayed_sequence));
+ RunAndPopNextTask(std::move(undelayed_sequence));
// FlushAsyncForTesting() shouldn't callback before the second undelayed task
// runs.
@@ -729,18 +744,18 @@ TEST_P(ThreadPoolTaskTrackerTest, PostTaskDuringFlushAsyncForTesting) {
// FlushAsyncForTesting() should callback after the second undelayed task
// runs.
- tracker_.RunAndPopNextTask(std::move(other_undelayed_sequence));
+ RunAndPopNextTask(std::move(other_undelayed_sequence));
event.Wait();
}
TEST_P(ThreadPoolTaskTrackerTest, RunDelayedTaskDuringFlush) {
// Simulate posting a delayed and an undelayed task.
Task delayed_task(FROM_HERE, DoNothing(), TimeDelta::FromDays(1));
- auto delayed_sequence =
- WillPostTaskAndQueueTaskSource(std::move(delayed_task), GetParam());
+ auto delayed_sequence = WillPostTaskAndQueueTaskSource(
+ std::move(delayed_task), {ThreadPool(), GetParam()});
Task undelayed_task(FROM_HERE, DoNothing(), TimeDelta());
- auto undelayed_sequence =
- WillPostTaskAndQueueTaskSource(std::move(undelayed_task), GetParam());
+ auto undelayed_sequence = WillPostTaskAndQueueTaskSource(
+ std::move(undelayed_task), {ThreadPool(), GetParam()});
// FlushForTesting() shouldn't return before the undelayed task runs.
CallFlushFromAnotherThread();
@@ -748,7 +763,7 @@ TEST_P(ThreadPoolTaskTrackerTest, RunDelayedTaskDuringFlush) {
VERIFY_ASYNC_FLUSH_IN_PROGRESS();
// Run the delayed task.
- tracker_.RunAndPopNextTask(std::move(delayed_sequence));
+ RunAndPopNextTask(std::move(delayed_sequence));
// FlushForTesting() shouldn't return since there is still a pending undelayed
// task.
@@ -756,7 +771,7 @@ TEST_P(ThreadPoolTaskTrackerTest, RunDelayedTaskDuringFlush) {
VERIFY_ASYNC_FLUSH_IN_PROGRESS();
// Run the undelayed task.
- tracker_.RunAndPopNextTask(std::move(undelayed_sequence));
+ RunAndPopNextTask(std::move(undelayed_sequence));
// FlushForTesting() should now return.
WAIT_FOR_ASYNC_FLUSH_RETURNED();
@@ -765,11 +780,11 @@ TEST_P(ThreadPoolTaskTrackerTest, RunDelayedTaskDuringFlush) {
TEST_P(ThreadPoolTaskTrackerTest, RunDelayedTaskDuringFlushAsyncForTesting) {
// Simulate posting a delayed and an undelayed task.
Task delayed_task(FROM_HERE, DoNothing(), TimeDelta::FromDays(1));
- auto delayed_sequence =
- WillPostTaskAndQueueTaskSource(std::move(delayed_task), GetParam());
+ auto delayed_sequence = WillPostTaskAndQueueTaskSource(
+ std::move(delayed_task), {ThreadPool(), GetParam()});
Task undelayed_task(FROM_HERE, DoNothing(), TimeDelta());
- auto undelayed_sequence =
- WillPostTaskAndQueueTaskSource(std::move(undelayed_task), GetParam());
+ auto undelayed_sequence = WillPostTaskAndQueueTaskSource(
+ std::move(undelayed_task), {ThreadPool(), GetParam()});
// FlushAsyncForTesting() shouldn't callback before the undelayed task runs.
WaitableEvent event;
@@ -779,7 +794,7 @@ TEST_P(ThreadPoolTaskTrackerTest, RunDelayedTaskDuringFlushAsyncForTesting) {
EXPECT_FALSE(event.IsSignaled());
// Run the delayed task.
- tracker_.RunAndPopNextTask(std::move(delayed_sequence));
+ RunAndPopNextTask(std::move(delayed_sequence));
// FlushAsyncForTesting() shouldn't callback since there is still a pending
// undelayed task.
@@ -787,7 +802,7 @@ TEST_P(ThreadPoolTaskTrackerTest, RunDelayedTaskDuringFlushAsyncForTesting) {
EXPECT_FALSE(event.IsSignaled());
// Run the undelayed task.
- tracker_.RunAndPopNextTask(std::move(undelayed_sequence));
+ RunAndPopNextTask(std::move(undelayed_sequence));
// FlushAsyncForTesting() should now callback.
event.Wait();
@@ -837,8 +852,8 @@ TEST_P(ThreadPoolTaskTrackerTest, ShutdownDuringFlush) {
// Simulate posting a task.
Task undelayed_task(FROM_HERE, DoNothing(), TimeDelta());
- auto undelayed_sequence =
- WillPostTaskAndQueueTaskSource(std::move(undelayed_task), GetParam());
+ auto undelayed_sequence = WillPostTaskAndQueueTaskSource(
+ std::move(undelayed_task), {ThreadPool(), GetParam()});
// FlushForTesting() shouldn't return before the undelayed task runs or
// shutdown completes.
@@ -860,8 +875,8 @@ TEST_P(ThreadPoolTaskTrackerTest, ShutdownDuringFlushAsyncForTesting) {
// Simulate posting a task.
Task undelayed_task(FROM_HERE, DoNothing(), TimeDelta());
- auto undelayed_sequence =
- WillPostTaskAndQueueTaskSource(std::move(undelayed_task), GetParam());
+ auto undelayed_sequence = WillPostTaskAndQueueTaskSource(
+ std::move(undelayed_task), {ThreadPool(), GetParam()});
// FlushAsyncForTesting() shouldn't callback before the undelayed task runs or
// shutdown completes.
@@ -882,8 +897,8 @@ TEST_P(ThreadPoolTaskTrackerTest, ShutdownDuringFlushAsyncForTesting) {
TEST_P(ThreadPoolTaskTrackerTest, DoublePendingFlushAsyncForTestingFails) {
Task undelayed_task(FROM_HERE, DoNothing(), TimeDelta());
- auto undelayed_sequence =
- WillPostTaskAndQueueTaskSource(std::move(undelayed_task), GetParam());
+ auto undelayed_sequence = WillPostTaskAndQueueTaskSource(
+ std::move(undelayed_task), {ThreadPool(), GetParam()});
// FlushAsyncForTesting() shouldn't callback before the undelayed task runs.
bool called_back = false;
@@ -909,11 +924,11 @@ TEST_P(ThreadPoolTaskTrackerTest, PostTasksDoNotBlockShutdown) {
TEST_P(ThreadPoolTaskTrackerTest, DelayedRunTasks) {
// Simulate posting a delayed task.
Task delayed_task(FROM_HERE, DoNothing(), TimeDelta::FromDays(1));
- auto sequence =
- WillPostTaskAndQueueTaskSource(std::move(delayed_task), GetParam());
+ auto sequence = WillPostTaskAndQueueTaskSource(std::move(delayed_task),
+ {ThreadPool(), GetParam()});
EXPECT_TRUE(sequence);
- tracker_.RunAndPopNextTask(std::move(sequence));
+ RunAndPopNextTask(std::move(sequence));
// Since the delayed task doesn't block shutdown, a call to Shutdown() should
// not hang.
@@ -945,7 +960,7 @@ void ExpectSequenceToken(SequenceToken sequence_token) {
// when a Task runs.
TEST_F(ThreadPoolTaskTrackerTest, CurrentSequenceToken) {
scoped_refptr<Sequence> sequence = MakeRefCounted<Sequence>(
- TaskTraits(), nullptr, TaskSourceExecutionMode::kParallel);
+ TaskTraits(ThreadPool()), nullptr, TaskSourceExecutionMode::kParallel);
const SequenceToken sequence_token = sequence->token();
Task task(FROM_HERE, BindOnce(&ExpectSequenceToken, sequence_token),
@@ -970,24 +985,28 @@ TEST_F(ThreadPoolTaskTrackerTest, LoadWillPostAndRunBeforeShutdown) {
for (size_t i = 0; i < kLoadTestNumIterations; ++i) {
threads.push_back(std::make_unique<ThreadPostingAndRunningTask>(
&tracker_,
- MakeRefCounted<Sequence>(TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN,
- nullptr, TaskSourceExecutionMode::kParallel),
+ MakeRefCounted<Sequence>(
+ TaskTraits{ThreadPool(),
+ TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN},
+ nullptr, TaskSourceExecutionMode::kParallel),
ThreadPostingAndRunningTask::Action::WILL_POST_AND_RUN, true,
CreateTask()));
threads.back()->Start();
threads.push_back(std::make_unique<ThreadPostingAndRunningTask>(
&tracker_,
- MakeRefCounted<Sequence>(TaskShutdownBehavior::SKIP_ON_SHUTDOWN,
- nullptr, TaskSourceExecutionMode::kParallel),
+ MakeRefCounted<Sequence>(
+ TaskTraits{ThreadPool(), TaskShutdownBehavior::SKIP_ON_SHUTDOWN},
+ nullptr, TaskSourceExecutionMode::kParallel),
ThreadPostingAndRunningTask::Action::WILL_POST_AND_RUN, true,
CreateTask()));
threads.back()->Start();
threads.push_back(std::make_unique<ThreadPostingAndRunningTask>(
&tracker_,
- MakeRefCounted<Sequence>(TaskShutdownBehavior::BLOCK_SHUTDOWN, nullptr,
- TaskSourceExecutionMode::kParallel),
+ MakeRefCounted<Sequence>(
+ TaskTraits{ThreadPool(), TaskShutdownBehavior::BLOCK_SHUTDOWN},
+ nullptr, TaskSourceExecutionMode::kParallel),
ThreadPostingAndRunningTask::Action::WILL_POST_AND_RUN, true,
CreateTask()));
threads.back()->Start();
@@ -1006,11 +1025,11 @@ TEST_F(ThreadPoolTaskTrackerTest, LoadWillPostAndRunBeforeShutdown) {
TEST_F(ThreadPoolTaskTrackerTest,
LoadWillPostBeforeShutdownAndRunDuringShutdown) {
constexpr TaskTraits traits_continue_on_shutdown =
- TaskTraits(TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN);
+ TaskTraits(ThreadPool(), TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN);
constexpr TaskTraits traits_skip_on_shutdown =
- TaskTraits(TaskShutdownBehavior::SKIP_ON_SHUTDOWN);
+ TaskTraits(ThreadPool(), TaskShutdownBehavior::SKIP_ON_SHUTDOWN);
constexpr TaskTraits traits_block_shutdown =
- TaskTraits(TaskShutdownBehavior::BLOCK_SHUTDOWN);
+ TaskTraits(ThreadPool(), TaskShutdownBehavior::BLOCK_SHUTDOWN);
// Post tasks asynchronously.
std::vector<std::unique_ptr<ThreadPostingAndRunningTask>> post_threads;
@@ -1083,7 +1102,7 @@ TEST_F(ThreadPoolTaskTrackerTest, LoadWillPostAndRunDuringShutdown) {
// Inform |task_tracker_| that a BLOCK_SHUTDOWN task will be posted just to
// block shutdown.
auto block_shutdown_sequence = WillPostTaskAndQueueTaskSource(
- CreateTask(), TaskShutdownBehavior::BLOCK_SHUTDOWN);
+ CreateTask(), {ThreadPool(), TaskShutdownBehavior::BLOCK_SHUTDOWN});
EXPECT_TRUE(block_shutdown_sequence);
// Start shutdown and try to complete it asynchronously.
@@ -1096,24 +1115,28 @@ TEST_F(ThreadPoolTaskTrackerTest, LoadWillPostAndRunDuringShutdown) {
for (size_t i = 0; i < kLoadTestNumIterations; ++i) {
threads.push_back(std::make_unique<ThreadPostingAndRunningTask>(
&tracker_,
- MakeRefCounted<Sequence>(TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN,
- nullptr, TaskSourceExecutionMode::kParallel),
+ MakeRefCounted<Sequence>(
+ TaskTraits{ThreadPool(),
+ TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN},
+ nullptr, TaskSourceExecutionMode::kParallel),
ThreadPostingAndRunningTask::Action::WILL_POST_AND_RUN, false,
CreateTask()));
threads.back()->Start();
threads.push_back(std::make_unique<ThreadPostingAndRunningTask>(
&tracker_,
- MakeRefCounted<Sequence>(TaskShutdownBehavior::SKIP_ON_SHUTDOWN,
- nullptr, TaskSourceExecutionMode::kParallel),
+ MakeRefCounted<Sequence>(
+ TaskTraits{ThreadPool(), TaskShutdownBehavior::SKIP_ON_SHUTDOWN},
+ nullptr, TaskSourceExecutionMode::kParallel),
ThreadPostingAndRunningTask::Action::WILL_POST_AND_RUN, false,
CreateTask()));
threads.back()->Start();
threads.push_back(std::make_unique<ThreadPostingAndRunningTask>(
&tracker_,
- MakeRefCounted<Sequence>(TaskShutdownBehavior::BLOCK_SHUTDOWN, nullptr,
- TaskSourceExecutionMode::kParallel),
+ MakeRefCounted<Sequence>(
+ TaskTraits{ThreadPool(), TaskShutdownBehavior::BLOCK_SHUTDOWN},
+ nullptr, TaskSourceExecutionMode::kParallel),
ThreadPostingAndRunningTask::Action::WILL_POST_AND_RUN, true,
CreateTask()));
threads.back()->Start();
@@ -1129,7 +1152,7 @@ TEST_F(ThreadPoolTaskTrackerTest, LoadWillPostAndRunDuringShutdown) {
VERIFY_ASYNC_SHUTDOWN_IN_PROGRESS();
// Unblock shutdown by running |block_shutdown_task|.
- tracker_.RunAndPopNextTask(std::move(block_shutdown_sequence));
+ RunAndPopNextTask(std::move(block_shutdown_sequence));
EXPECT_EQ(kLoadTestNumIterations + 1, NumTasksExecuted());
WAIT_FOR_ASYNC_SHUTDOWN_COMPLETED();
}
@@ -1138,7 +1161,7 @@ TEST_F(ThreadPoolTaskTrackerTest, LoadWillPostAndRunDuringShutdown) {
// when it can be rescheduled.
TEST_F(ThreadPoolTaskTrackerTest,
RunAndPopNextTaskReturnsSequenceToReschedule) {
- TaskTraits default_traits = {};
+ TaskTraits default_traits = {ThreadPool()};
Task task_1(FROM_HERE, DoNothing(), TimeDelta());
EXPECT_TRUE(
tracker_.WillPostTask(&task_1, default_traits.shutdown_behavior()));
@@ -1171,7 +1194,7 @@ class WaitAllowedTestThread : public SimpleThread {
EXPECT_DCHECK_DEATH({ internal::AssertBaseSyncPrimitivesAllowed(); });
}),
TimeDelta());
- TaskTraits default_traits = {};
+ TaskTraits default_traits = {ThreadPool()};
EXPECT_TRUE(task_tracker->WillPostTask(&task_without_sync_primitives,
default_traits.shutdown_behavior()));
auto sequence_without_sync_primitives = test::CreateSequenceWithTask(
@@ -1189,7 +1212,7 @@ class WaitAllowedTestThread : public SimpleThread {
}),
TimeDelta());
TaskTraits traits_with_sync_primitives =
- TaskTraits(WithBaseSyncPrimitives());
+ TaskTraits(ThreadPool(), WithBaseSyncPrimitives());
EXPECT_TRUE(task_tracker->WillPostTask(
&task_with_sync_primitives,
traits_with_sync_primitives.shutdown_behavior()));
@@ -1231,31 +1254,31 @@ TEST(ThreadPoolTaskTrackerHistogramTest, TaskLatency) {
const TaskTraits traits;
const char* const expected_histogram;
} static constexpr kTests[] = {
- {{TaskPriority::BEST_EFFORT},
+ {{ThreadPool(), TaskPriority::BEST_EFFORT},
"ThreadPool.TaskLatencyMicroseconds.Test."
"BackgroundTaskPriority"},
- {{MayBlock(), TaskPriority::BEST_EFFORT},
+ {{ThreadPool(), MayBlock(), TaskPriority::BEST_EFFORT},
"ThreadPool.TaskLatencyMicroseconds.Test."
"BackgroundTaskPriority_MayBlock"},
- {{WithBaseSyncPrimitives(), TaskPriority::BEST_EFFORT},
+ {{ThreadPool(), WithBaseSyncPrimitives(), TaskPriority::BEST_EFFORT},
"ThreadPool.TaskLatencyMicroseconds.Test."
"BackgroundTaskPriority_MayBlock"},
- {{TaskPriority::USER_VISIBLE},
+ {{ThreadPool(), TaskPriority::USER_VISIBLE},
"ThreadPool.TaskLatencyMicroseconds.Test."
"UserVisibleTaskPriority"},
- {{MayBlock(), TaskPriority::USER_VISIBLE},
+ {{ThreadPool(), MayBlock(), TaskPriority::USER_VISIBLE},
"ThreadPool.TaskLatencyMicroseconds.Test."
"UserVisibleTaskPriority_MayBlock"},
- {{WithBaseSyncPrimitives(), TaskPriority::USER_VISIBLE},
+ {{ThreadPool(), WithBaseSyncPrimitives(), TaskPriority::USER_VISIBLE},
"ThreadPool.TaskLatencyMicroseconds.Test."
"UserVisibleTaskPriority_MayBlock"},
- {{TaskPriority::USER_BLOCKING},
+ {{ThreadPool(), TaskPriority::USER_BLOCKING},
"ThreadPool.TaskLatencyMicroseconds.Test."
"UserBlockingTaskPriority"},
- {{MayBlock(), TaskPriority::USER_BLOCKING},
+ {{ThreadPool(), MayBlock(), TaskPriority::USER_BLOCKING},
"ThreadPool.TaskLatencyMicroseconds.Test."
"UserBlockingTaskPriority_MayBlock"},
- {{WithBaseSyncPrimitives(), TaskPriority::USER_BLOCKING},
+ {{ThreadPool(), WithBaseSyncPrimitives(), TaskPriority::USER_BLOCKING},
"ThreadPool.TaskLatencyMicroseconds.Test."
"UserBlockingTaskPriority_MayBlock"}};
diff --git a/chromium/base/task/thread_pool/test_task_factory.cc b/chromium/base/task/thread_pool/test_task_factory.cc
index ca29b1ca4e5..681014a7d99 100644
--- a/chromium/base/task/thread_pool/test_task_factory.cc
+++ b/chromium/base/task/thread_pool/test_task_factory.cc
@@ -57,6 +57,7 @@ void TestTaskFactory::RunTaskCallback(size_t task_index,
// Verify TaskRunnerHandles are set as expected in the task's scope.
switch (execution_mode_) {
+ case TaskSourceExecutionMode::kJob:
case TaskSourceExecutionMode::kParallel:
EXPECT_FALSE(ThreadTaskRunnerHandle::IsSet());
EXPECT_FALSE(SequencedTaskRunnerHandle::IsSet());
diff --git a/chromium/base/task/thread_pool/test_utils.cc b/chromium/base/task/thread_pool/test_utils.cc
index fc8d6ca8a1b..466866bf172 100644
--- a/chromium/base/task/thread_pool/test_utils.cc
+++ b/chromium/base/task/thread_pool/test_utils.cc
@@ -10,6 +10,7 @@
#include "base/synchronization/condition_variable.h"
#include "base/task/thread_pool/pooled_parallel_task_runner.h"
#include "base/task/thread_pool/pooled_sequenced_task_runner.h"
+#include "base/test/bind_test_util.h"
#include "base/threading/scoped_blocking_call.h"
#include "base/threading/thread_restrictions.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -18,6 +19,64 @@ namespace base {
namespace internal {
namespace test {
+namespace {
+
+// A task runner that posts each task as a MockJobTaskSource that runs a single
+// task. This is used to run ThreadGroupTests which require a TaskRunner with
+// kJob execution mode. Delayed tasks are not supported.
+class MockJobTaskRunner : public TaskRunner {
+ public:
+ MockJobTaskRunner(const TaskTraits& traits,
+ PooledTaskRunnerDelegate* pooled_task_runner_delegate)
+ : traits_(traits),
+ pooled_task_runner_delegate_(pooled_task_runner_delegate) {}
+
+ // TaskRunner:
+ bool PostDelayedTask(const Location& from_here,
+ OnceClosure closure,
+ TimeDelta delay) override;
+
+ bool RunsTasksInCurrentSequence() const override;
+
+ private:
+ ~MockJobTaskRunner() override;
+
+ const TaskTraits traits_;
+ PooledTaskRunnerDelegate* const pooled_task_runner_delegate_;
+
+ DISALLOW_COPY_AND_ASSIGN(MockJobTaskRunner);
+};
+
+bool MockJobTaskRunner::PostDelayedTask(const Location& from_here,
+ OnceClosure closure,
+ TimeDelta delay) {
+  DCHECK_EQ(delay, TimeDelta());  // Jobs don't support delayed tasks.
+
+ if (!PooledTaskRunnerDelegate::Exists())
+ return false;
+
+ scoped_refptr<MockJobTaskSource> task_source =
+ MakeRefCounted<test::MockJobTaskSource>(from_here, std::move(closure),
+ traits_);
+ return pooled_task_runner_delegate_->EnqueueJobTaskSource(
+ std::move(task_source));
+}
+
+bool MockJobTaskRunner::RunsTasksInCurrentSequence() const {
+ return pooled_task_runner_delegate_->IsRunningPoolWithTraits(traits_);
+}
+
+MockJobTaskRunner::~MockJobTaskRunner() = default;
+
+scoped_refptr<TaskRunner> CreateJobTaskRunner(
+ const TaskTraits& traits,
+ MockPooledTaskRunnerDelegate* mock_pooled_task_runner_delegate) {
+ return MakeRefCounted<MockJobTaskRunner>(traits,
+ mock_pooled_task_runner_delegate);
+}
+
+} // namespace
+
MockWorkerThreadObserver::MockWorkerThreadObserver()
: on_main_exit_cv_(lock_.CreateConditionVariable()) {}
@@ -62,11 +121,12 @@ scoped_refptr<TaskRunner> CreateTaskRunnerWithExecutionMode(
const TaskTraits& traits) {
switch (execution_mode) {
case TaskSourceExecutionMode::kParallel:
- return CreateTaskRunnerWithTraits(traits,
- mock_pooled_task_runner_delegate);
+ return CreateTaskRunner(traits, mock_pooled_task_runner_delegate);
case TaskSourceExecutionMode::kSequenced:
- return CreateSequencedTaskRunnerWithTraits(
- traits, mock_pooled_task_runner_delegate);
+ return CreateSequencedTaskRunner(traits,
+ mock_pooled_task_runner_delegate);
+ case TaskSourceExecutionMode::kJob:
+ return CreateJobTaskRunner(traits, mock_pooled_task_runner_delegate);
default:
// Fall through.
break;
@@ -75,14 +135,14 @@ scoped_refptr<TaskRunner> CreateTaskRunnerWithExecutionMode(
return nullptr;
}
-scoped_refptr<TaskRunner> CreateTaskRunnerWithTraits(
+scoped_refptr<TaskRunner> CreateTaskRunner(
const TaskTraits& traits,
MockPooledTaskRunnerDelegate* mock_pooled_task_runner_delegate) {
return MakeRefCounted<PooledParallelTaskRunner>(
traits, mock_pooled_task_runner_delegate);
}
-scoped_refptr<SequencedTaskRunner> CreateSequencedTaskRunnerWithTraits(
+scoped_refptr<SequencedTaskRunner> CreateSequencedTaskRunner(
const TaskTraits& traits,
MockPooledTaskRunnerDelegate* mock_pooled_task_runner_delegate) {
return MakeRefCounted<PooledSequencedTaskRunner>(
@@ -157,6 +217,23 @@ void MockPooledTaskRunnerDelegate::PostTaskWithSequenceNow(
}
}
+bool MockPooledTaskRunnerDelegate::EnqueueJobTaskSource(
+ scoped_refptr<JobTaskSource> task_source) {
+ // |thread_group_| must be initialized with SetThreadGroup() before
+ // proceeding.
+ DCHECK(thread_group_);
+ DCHECK(task_source);
+
+ auto registered_task_source =
+ task_tracker_->WillQueueTaskSource(std::move(task_source));
+ if (!registered_task_source)
+ return false;
+ auto transaction = registered_task_source->BeginTransaction();
+ thread_group_->PushTaskSourceAndWakeUpWorkers(
+ {std::move(registered_task_source), std::move(transaction)});
+ return true;
+}
+
bool MockPooledTaskRunnerDelegate::IsRunningPoolWithTraits(
const TaskTraits& traits) const {
// |thread_group_| must be initialized with SetThreadGroup() before
@@ -179,11 +256,54 @@ void MockPooledTaskRunnerDelegate::SetThreadGroup(ThreadGroup* thread_group) {
thread_group_ = thread_group;
}
+MockJobTaskSource::~MockJobTaskSource() = default;
+
+MockJobTaskSource::MockJobTaskSource(const Location& from_here,
+ base::RepeatingClosure worker_task,
+ const TaskTraits& traits,
+ size_t num_tasks_to_run,
+ size_t max_concurrency)
+ : JobTaskSource(FROM_HERE,
+ BindLambdaForTesting([this, worker_task]() {
+ worker_task.Run();
+ size_t before = remaining_num_tasks_to_run_.fetch_sub(1);
+ DCHECK_GT(before, 0U);
+ }),
+ traits),
+ remaining_num_tasks_to_run_(num_tasks_to_run),
+ max_concurrency_(max_concurrency) {}
+
+MockJobTaskSource::MockJobTaskSource(const Location& from_here,
+ base::OnceClosure worker_task,
+ const TaskTraits& traits)
+ : JobTaskSource(FROM_HERE,
+ base::BindRepeating(
+ [](MockJobTaskSource* self,
+ base::OnceClosure&& worker_task) mutable {
+ std::move(worker_task).Run();
+ size_t before =
+ self->remaining_num_tasks_to_run_.fetch_sub(1);
+ DCHECK_EQ(before, 1U);
+ },
+ Unretained(this),
+ base::Passed(std::move(worker_task))),
+ traits),
+ remaining_num_tasks_to_run_(1),
+ max_concurrency_(1) {}
+
+size_t MockJobTaskSource::GetMaxConcurrency() const {
+ return std::min(remaining_num_tasks_to_run_.load(), max_concurrency_);
+}
+
RegisteredTaskSource QueueAndRunTaskSource(
TaskTracker* task_tracker,
scoped_refptr<TaskSource> task_source) {
+ auto registered_task_source =
+ task_tracker->WillQueueTaskSource(std::move(task_source));
+ EXPECT_TRUE(registered_task_source);
+ auto run_intent = registered_task_source->WillRunTask();
return task_tracker->RunAndPopNextTask(
- task_tracker->WillQueueTaskSource(std::move(task_source)));
+ {std::move(registered_task_source), std::move(run_intent)});
}
void ShutdownTaskTracker(TaskTracker* task_tracker) {
diff --git a/chromium/base/task/thread_pool/test_utils.h b/chromium/base/task/thread_pool/test_utils.h
index b08b6862947..7d7f9020a01 100644
--- a/chromium/base/task/thread_pool/test_utils.h
+++ b/chromium/base/task/thread_pool/test_utils.h
@@ -5,10 +5,14 @@
#ifndef BASE_TASK_THREAD_POOL_TEST_UTILS_H_
#define BASE_TASK_THREAD_POOL_TEST_UTILS_H_
+#include <atomic>
+
+#include "base/callback.h"
#include "base/task/common/checked_lock.h"
#include "base/task/task_features.h"
#include "base/task/task_traits.h"
#include "base/task/thread_pool/delayed_task_manager.h"
+#include "base/task/thread_pool/job_task_source.h"
#include "base/task/thread_pool/pooled_task_runner_delegate.h"
#include "base/task/thread_pool/sequence.h"
#include "base/task/thread_pool/task_tracker.h"
@@ -57,6 +61,7 @@ class MockPooledTaskRunnerDelegate : public PooledTaskRunnerDelegate {
// PooledTaskRunnerDelegate:
bool PostTaskWithSequence(Task task,
scoped_refptr<Sequence> sequence) override;
+ bool EnqueueJobTaskSource(scoped_refptr<JobTaskSource> task_source) override;
bool IsRunningPoolWithTraits(const TaskTraits& traits) const override;
void UpdatePriority(scoped_refptr<TaskSource> task_source,
TaskPriority priority) override;
@@ -71,6 +76,33 @@ class MockPooledTaskRunnerDelegate : public PooledTaskRunnerDelegate {
ThreadGroup* thread_group_ = nullptr;
};
+// A simple JobTaskSource that will run |worker_task| a fixed number of times,
+// possibly in parallel.
+class MockJobTaskSource : public JobTaskSource {
+ public:
+  // Gives |worker_task| to requesting workers |num_tasks_to_run| times,
+  // allowing at most |max_concurrency| workers to be running |worker_task| in
+  // parallel.
+ MockJobTaskSource(const Location& from_here,
+ base::RepeatingClosure worker_task,
+ const TaskTraits& traits,
+ size_t num_tasks_to_run,
+ size_t max_concurrency);
+
+ // Gives |worker_task| to a single requesting worker.
+ MockJobTaskSource(const Location& from_here,
+ base::OnceClosure worker_task,
+ const TaskTraits& traits);
+
+ size_t GetMaxConcurrency() const override;
+
+ private:
+ ~MockJobTaskSource() override;
+
+ std::atomic_size_t remaining_num_tasks_to_run_;
+ const size_t max_concurrency_;
+};
+
// An enumeration of possible thread pool types. Used to parametrize relevant
// thread_pool tests.
enum class PoolType {
@@ -96,13 +128,13 @@ scoped_refptr<Sequence> CreateSequenceWithTask(
scoped_refptr<TaskRunner> CreateTaskRunnerWithExecutionMode(
TaskSourceExecutionMode execution_mode,
MockPooledTaskRunnerDelegate* mock_pooled_task_runner_delegate,
- const TaskTraits& traits = TaskTraits());
+ const TaskTraits& traits = {ThreadPool()});
-scoped_refptr<TaskRunner> CreateTaskRunnerWithTraits(
+scoped_refptr<TaskRunner> CreateTaskRunner(
const TaskTraits& traits,
MockPooledTaskRunnerDelegate* mock_pooled_task_runner_delegate);
-scoped_refptr<SequencedTaskRunner> CreateSequencedTaskRunnerWithTraits(
+scoped_refptr<SequencedTaskRunner> CreateSequencedTaskRunner(
const TaskTraits& traits,
MockPooledTaskRunnerDelegate* mock_pooled_task_runner_delegate);
diff --git a/chromium/base/task/thread_pool/thread_group.cc b/chromium/base/task/thread_pool/thread_group.cc
index bacdabb5899..b6f140662b7 100644
--- a/chromium/base/task/thread_pool/thread_group.cc
+++ b/chromium/base/task/thread_pool/thread_group.cc
@@ -34,23 +34,35 @@ const ThreadGroup* GetCurrentThreadGroup() {
} // namespace
+void ThreadGroup::BaseScopedWorkersExecutor::ScheduleReleaseTaskSource(
+ RegisteredTaskSource task_source) {
+ task_sources_to_release_.push_back(std::move(task_source));
+}
+
+ThreadGroup::BaseScopedWorkersExecutor::BaseScopedWorkersExecutor() = default;
+
+ThreadGroup::BaseScopedWorkersExecutor::~BaseScopedWorkersExecutor() {
+ CheckedLock::AssertNoLockHeldOnCurrentThread();
+}
+
ThreadGroup::ScopedReenqueueExecutor::ScopedReenqueueExecutor() = default;
ThreadGroup::ScopedReenqueueExecutor::~ScopedReenqueueExecutor() {
if (destination_thread_group_) {
destination_thread_group_->PushTaskSourceAndWakeUpWorkers(
- std::move(task_source_and_transaction_.value()));
+ std::move(transaction_with_task_source_.value()));
}
}
void ThreadGroup::ScopedReenqueueExecutor::
SchedulePushTaskSourceAndWakeUpWorkers(
- RegisteredTaskSourceAndTransaction task_source_and_transaction,
+ TransactionWithRegisteredTaskSource transaction_with_task_source,
ThreadGroup* destination_thread_group) {
DCHECK(destination_thread_group);
DCHECK(!destination_thread_group_);
- DCHECK(!task_source_and_transaction_);
- task_source_and_transaction_.emplace(std::move(task_source_and_transaction));
+ DCHECK(!transaction_with_task_source_);
+ transaction_with_task_source_.emplace(
+ std::move(transaction_with_task_source));
destination_thread_group_ = destination_thread_group;
}
@@ -80,17 +92,31 @@ bool ThreadGroup::IsBoundToCurrentThread() const {
return GetCurrentThreadGroup() == this;
}
-size_t ThreadGroup::GetNumQueuedCanRunBestEffortTaskSources() const {
+size_t
+ThreadGroup::GetNumAdditionalWorkersForBestEffortTaskSourcesLockRequired()
+ const {
+ // For simplicity, only 1 worker is assigned to each task source regardless of
+ // its max concurrency, with the exception of the top task source.
const size_t num_queued =
priority_queue_.GetNumTaskSourcesWithPriority(TaskPriority::BEST_EFFORT);
if (num_queued == 0 ||
!task_tracker_->CanRunPriority(TaskPriority::BEST_EFFORT)) {
return 0U;
}
+ if (priority_queue_.PeekSortKey().priority() == TaskPriority::BEST_EFFORT) {
+ // Assign the correct number of workers for the top TaskSource (-1 for the
+ // worker that is already accounted for in |num_queued|).
+ return num_queued +
+ priority_queue_.PeekTaskSource()->GetRemainingConcurrency() - 1;
+ }
return num_queued;
}
-size_t ThreadGroup::GetNumQueuedCanRunForegroundTaskSources() const {
+size_t
+ThreadGroup::GetNumAdditionalWorkersForForegroundTaskSourcesLockRequired()
+ const {
+ // For simplicity, only 1 worker is assigned to each task source regardless of
+ // its max concurrency, with the exception of the top task source.
const size_t num_queued = priority_queue_.GetNumTaskSourcesWithPriority(
TaskPriority::USER_VISIBLE) +
priority_queue_.GetNumTaskSourcesWithPriority(
@@ -99,6 +125,14 @@ size_t ThreadGroup::GetNumQueuedCanRunForegroundTaskSources() const {
!task_tracker_->CanRunPriority(TaskPriority::HIGHEST)) {
return 0U;
}
+ auto priority = priority_queue_.PeekSortKey().priority();
+ if (priority == TaskPriority::USER_VISIBLE ||
+ priority == TaskPriority::USER_BLOCKING) {
+ // Assign the correct number of workers for the top TaskSource (-1 for the
+ // worker that is already accounted for in |num_queued|).
+ return num_queued +
+ priority_queue_.PeekTaskSource()->GetRemainingConcurrency() - 1;
+ }
return num_queued;
}
@@ -111,37 +145,86 @@ RegisteredTaskSource ThreadGroup::RemoveTaskSource(
void ThreadGroup::ReEnqueueTaskSourceLockRequired(
BaseScopedWorkersExecutor* workers_executor,
ScopedReenqueueExecutor* reenqueue_executor,
- RegisteredTaskSourceAndTransaction task_source_and_transaction) {
+ TransactionWithRegisteredTaskSource transaction_with_task_source) {
// Decide in which thread group the TaskSource should be reenqueued.
- ThreadGroup* destination_thread_group = delegate_->GetThreadGroupForTraits(
- task_source_and_transaction.transaction.traits());
+ ThreadGroup* destination_thread_group =
+ delegate_->GetThreadGroupForTraits(transaction_with_task_source.traits());
if (destination_thread_group == this) {
+ // Another worker that was running a task from this task source may have
+ // reenqueued it already, in which case its heap_handle will be valid. It
+ // shouldn't be queued twice so the task source registration is released.
+ if (transaction_with_task_source.task_source()->heap_handle().IsValid()) {
+ workers_executor->ScheduleReleaseTaskSource(
+ transaction_with_task_source.take_task_source());
+ return;
+ }
// If the TaskSource should be reenqueued in the current thread group,
// reenqueue it inside the scope of the lock.
- priority_queue_.Push(std::move(task_source_and_transaction));
+ priority_queue_.Push(std::move(transaction_with_task_source));
EnsureEnoughWorkersLockRequired(workers_executor);
} else {
// Otherwise, schedule a reenqueue after releasing the lock.
reenqueue_executor->SchedulePushTaskSourceAndWakeUpWorkers(
- std::move(task_source_and_transaction), destination_thread_group);
+ std::move(transaction_with_task_source), destination_thread_group);
}
}
+RunIntentWithRegisteredTaskSource
+ThreadGroup::TakeRunIntentWithRegisteredTaskSource(
+ BaseScopedWorkersExecutor* executor) {
+ DCHECK(!priority_queue_.IsEmpty());
+
+ auto run_intent = priority_queue_.PeekTaskSource()->WillRunTask();
+
+ if (!run_intent) {
+ executor->ScheduleReleaseTaskSource(priority_queue_.PopTaskSource());
+ return nullptr;
+ }
+
+ if (run_intent.IsSaturated())
+ return {priority_queue_.PopTaskSource(), std::move(run_intent)};
+
+ // If the TaskSource isn't saturated, check whether TaskTracker allows it to
+ // remain in the PriorityQueue.
+ // The canonical way of doing this is to pop the task source to return, call
+ // WillQueueTaskSource() to get an additional RegisteredTaskSource, and
+ // reenqueue that task source if valid. Instead, it is cheaper and equivalent
+ // to peek the task source, call WillQueueTaskSource() to get an additional
+ // RegisteredTaskSource to return if valid, and only pop |priority_queue_|
+ // otherwise.
+ RegisteredTaskSource task_source =
+ task_tracker_->WillQueueTaskSource(priority_queue_.PeekTaskSource());
+ if (!task_source)
+ return {priority_queue_.PopTaskSource(), std::move(run_intent)};
+
+ return {std::move(task_source), std::move(run_intent)};
+}
+
void ThreadGroup::UpdateSortKeyImpl(
BaseScopedWorkersExecutor* executor,
- TaskSourceAndTransaction task_source_and_transaction) {
+ TransactionWithOwnedTaskSource transaction_with_task_source) {
CheckedAutoLock auto_lock(lock_);
- priority_queue_.UpdateSortKey(std::move(task_source_and_transaction));
+ priority_queue_.UpdateSortKey(std::move(transaction_with_task_source));
EnsureEnoughWorkersLockRequired(executor);
}
void ThreadGroup::PushTaskSourceAndWakeUpWorkersImpl(
BaseScopedWorkersExecutor* executor,
- RegisteredTaskSourceAndTransaction task_source_and_transaction) {
+ TransactionWithRegisteredTaskSource transaction_with_task_source) {
CheckedAutoLock auto_lock(lock_);
DCHECK(!replacement_thread_group_);
- priority_queue_.Push(std::move(task_source_and_transaction));
+ DCHECK_EQ(
+ delegate_->GetThreadGroupForTraits(transaction_with_task_source.traits()),
+ this);
+ if (transaction_with_task_source.task_source()->heap_handle().IsValid()) {
+ // If the task source changed group, it is possible that multiple concurrent
+ // workers try to enqueue it. Only the first enqueue should succeed.
+ executor->ScheduleReleaseTaskSource(
+ transaction_with_task_source.take_task_source());
+ return;
+ }
+ priority_queue_.Push(std::move(transaction_with_task_source));
EnsureEnoughWorkersLockRequired(executor);
}
@@ -154,6 +237,15 @@ void ThreadGroup::InvalidateAndHandoffAllTaskSourcesToOtherThreadGroup(
replacement_thread_group_ = destination_thread_group;
}
+bool ThreadGroup::ShouldYield(TaskPriority priority) const {
+ // It is safe to read |min_allowed_priority_| without a lock since this
+ // variable is atomic, keeping in mind that threads may not immediately see
+ // the new value when it is updated.
+ return !task_tracker_->CanRunPriority(priority) ||
+ priority < TS_UNCHECKED_READ(min_allowed_priority_)
+ .load(std::memory_order_relaxed);
+}
+
#if defined(OS_WIN)
// static
std::unique_ptr<win::ScopedWindowsThreadEnvironment>
diff --git a/chromium/base/task/thread_pool/thread_group.h b/chromium/base/task/thread_pool/thread_group.h
index 0c256505637..199e6d42aee 100644
--- a/chromium/base/task/thread_pool/thread_group.h
+++ b/chromium/base/task/thread_pool/thread_group.h
@@ -33,9 +33,9 @@ class BASE_EXPORT ThreadGroup {
public:
virtual ~Delegate() = default;
- // Invoked when the TaskSource in |task_source_and_transaction| is non-empty
- // after the ThreadGroup has run a task from it. The implementation must
- // return the thread group in which the TaskSource should be reenqueued.
+ // Invoked when a TaskSource with |traits| is non-empty after the
+ // ThreadGroup has run a task from it. The implementation must return the
+ // thread group in which the TaskSource should be reenqueued.
virtual ThreadGroup* GetThreadGroupForTraits(const TaskTraits& traits) = 0;
};
@@ -67,21 +67,21 @@ class BASE_EXPORT ThreadGroup {
// is running a task from it.
RegisteredTaskSource RemoveTaskSource(scoped_refptr<TaskSource> task_source);
- // Updates the position of the TaskSource in |task_source_and_transaction| in
+ // Updates the position of the TaskSource in |transaction_with_task_source| in
// this ThreadGroup's PriorityQueue based on the TaskSource's current traits.
//
// Implementations should instantiate a concrete ScopedWorkersExecutor and
// invoke UpdateSortKeyImpl().
virtual void UpdateSortKey(
- TaskSourceAndTransaction task_source_and_transaction) = 0;
+ TransactionWithOwnedTaskSource transaction_with_task_source) = 0;
- // Pushes the TaskSource in |task_source_and_transaction| into this
+ // Pushes the TaskSource in |transaction_with_task_source| into this
// ThreadGroup's PriorityQueue and wakes up workers as appropriate.
//
// Implementations should instantiate a concrete ScopedWorkersExecutor and
// invoke PushTaskSourceAndWakeUpWorkersImpl().
virtual void PushTaskSourceAndWakeUpWorkers(
- RegisteredTaskSourceAndTransaction task_source_and_transaction) = 0;
+ TransactionWithRegisteredTaskSource transaction_with_task_source) = 0;
// Removes all task sources from this ThreadGroup's PriorityQueue and enqueues
// them in another |destination_thread_group|. After this method is called,
@@ -93,6 +93,13 @@ class BASE_EXPORT ThreadGroup {
void InvalidateAndHandoffAllTaskSourcesToOtherThreadGroup(
ThreadGroup* destination_thread_group);
+ // Returns true if a task with |priority| running in this thread group should
+ // return ASAP, either because this priority is not allowed to run or because
+ // work of higher priority is pending. Thread-safe but may return an outdated
+ // result (if a task unnecessarily yields due to this, it will simply be
+ // re-scheduled).
+ bool ShouldYield(TaskPriority priority) const;
+
// Prevents new tasks from starting to run and waits for currently running
// tasks to complete their execution. It is guaranteed that no thread will do
// work on behalf of this ThreadGroup after this returns. It is
@@ -119,9 +126,16 @@ class BASE_EXPORT ThreadGroup {
// this to perform operations on workers at the end of a scope, when all locks
// have been released.
class BaseScopedWorkersExecutor {
+ public:
+ void ScheduleReleaseTaskSource(RegisteredTaskSource task_source);
+
protected:
- BaseScopedWorkersExecutor() = default;
- ~BaseScopedWorkersExecutor() = default;
+ BaseScopedWorkersExecutor();
+ ~BaseScopedWorkersExecutor();
+
+ private:
+ std::vector<RegisteredTaskSource> task_sources_to_release_;
+
DISALLOW_COPY_AND_ASSIGN(BaseScopedWorkersExecutor);
};
@@ -132,16 +146,16 @@ class BASE_EXPORT ThreadGroup {
ScopedReenqueueExecutor();
~ScopedReenqueueExecutor();
- // A RegisteredTaskSourceAndTransaction and the ThreadGroup in which it
+ // A TransactionWithRegisteredTaskSource and the ThreadGroup in which it
// should be enqueued.
void SchedulePushTaskSourceAndWakeUpWorkers(
- RegisteredTaskSourceAndTransaction task_source_and_transaction,
+ TransactionWithRegisteredTaskSource transaction_with_task_source,
ThreadGroup* destination_thread_group);
private:
- // A RegisteredTaskSourceAndTransaction and the thread group in which it
+ // A TransactionWithRegisteredTaskSource and the thread group in which it
// should be enqueued.
- Optional<RegisteredTaskSourceAndTransaction> task_source_and_transaction_;
+ Optional<TransactionWithRegisteredTaskSource> transaction_with_task_source_;
ThreadGroup* destination_thread_group_ = nullptr;
DISALLOW_COPY_AND_ASSIGN(ScopedReenqueueExecutor);
@@ -166,14 +180,15 @@ class BASE_EXPORT ThreadGroup {
const TrackedRef<TaskTracker> task_tracker_;
const TrackedRef<Delegate> delegate_;
- // Returns the number of queued BEST_EFFORT task sources allowed to run by the
- // current CanRunPolicy.
- size_t GetNumQueuedCanRunBestEffortTaskSources() const
+  // Returns the number of workers required to run all queued
+  // BEST_EFFORT task sources allowed to run by the current CanRunPolicy.
+ size_t GetNumAdditionalWorkersForBestEffortTaskSourcesLockRequired() const
EXCLUSIVE_LOCKS_REQUIRED(lock_);
- // Returns the number of queued USER_VISIBLE/USER_BLOCKING task sources
- // allowed to run by the current CanRunPolicy.
- size_t GetNumQueuedCanRunForegroundTaskSources() const
+ // Returns the number of workers required to run all queued
+ // USER_VISIBLE/USER_BLOCKING task sources allowed to run by the current
+ // CanRunPolicy.
+ size_t GetNumAdditionalWorkersForForegroundTaskSourcesLockRequired() const
EXCLUSIVE_LOCKS_REQUIRED(lock_);
// Ensures that there are enough workers to run queued task sources.
@@ -182,20 +197,28 @@ class BASE_EXPORT ThreadGroup {
virtual void EnsureEnoughWorkersLockRequired(
BaseScopedWorkersExecutor* executor) EXCLUSIVE_LOCKS_REQUIRED(lock_) = 0;
- // Reenqueues a |task_source_and_transaction| from which a Task just ran in
+ // Reenqueues a |transaction_with_task_source| from which a Task just ran in
// the current ThreadGroup into the appropriate ThreadGroup.
void ReEnqueueTaskSourceLockRequired(
BaseScopedWorkersExecutor* workers_executor,
ScopedReenqueueExecutor* reenqueue_executor,
- RegisteredTaskSourceAndTransaction task_source_and_transaction)
+ TransactionWithRegisteredTaskSource transaction_with_task_source)
EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ // Returns the next task source from |priority_queue_| if permitted to run and
+ // pops |priority_queue_| if the task source returned no longer needs to be
+ // queued (reached its maximum concurrency). Otherwise returns nullptr and
+ // pops |priority_queue_| so this can be called again.
+ RunIntentWithRegisteredTaskSource TakeRunIntentWithRegisteredTaskSource(
+ BaseScopedWorkersExecutor* executor) EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
// Must be invoked by implementations of the corresponding non-Impl() methods.
- void UpdateSortKeyImpl(BaseScopedWorkersExecutor* executor,
- TaskSourceAndTransaction task_source_and_transaction);
+ void UpdateSortKeyImpl(
+ BaseScopedWorkersExecutor* executor,
+ TransactionWithOwnedTaskSource transaction_with_task_source);
void PushTaskSourceAndWakeUpWorkersImpl(
BaseScopedWorkersExecutor* executor,
- RegisteredTaskSourceAndTransaction task_source_and_transaction);
+ TransactionWithRegisteredTaskSource transaction_with_task_source);
// Synchronizes accesses to all members of this class which are neither const,
// atomic, nor immutable after start. Since this lock is a bottleneck to post
@@ -206,6 +229,14 @@ class BASE_EXPORT ThreadGroup {
// PriorityQueue from which all threads of this ThreadGroup get work.
PriorityQueue priority_queue_ GUARDED_BY(lock_);
+ // Minimum priority allowed to run below which tasks should yield. This is
+ // expected to be always kept up-to-date by derived classes when |lock_| is
+ // released. It is annotated as GUARDED_BY(lock_) because it is always updated
+ // under the lock (to avoid races with other state during the update) but it
+ // is nonetheless always safe to read it without the lock (since it's atomic).
+ std::atomic<TaskPriority> min_allowed_priority_ GUARDED_BY(lock_){
+ TaskPriority::BEST_EFFORT};
+
// If |replacement_thread_group_| is non-null, this ThreadGroup is invalid and
// all task sources should be scheduled on |replacement_thread_group_|. Used
// to support the UseNativeThreadPool experiment.
diff --git a/chromium/base/task/thread_pool/thread_group_impl.cc b/chromium/base/task/thread_pool/thread_group_impl.cc
index 17dcc82efdd..511fa193d9d 100644
--- a/chromium/base/task/thread_pool/thread_group_impl.cc
+++ b/chromium/base/task/thread_pool/thread_group_impl.cc
@@ -20,6 +20,7 @@
#include "base/memory/ptr_util.h"
#include "base/metrics/histogram.h"
#include "base/numerics/clamped_math.h"
+#include "base/optional.h"
#include "base/sequence_token.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
@@ -30,6 +31,7 @@
#include "base/threading/scoped_blocking_call.h"
#include "base/threading/thread_checker.h"
#include "base/threading/thread_restrictions.h"
+#include "base/time/time_override.h"
#include "build/build_config.h"
#if defined(OS_WIN)
@@ -110,12 +112,7 @@ class ThreadGroupImpl::ScopedWorkersExecutor
workers_to_start_.AddWorker(std::move(worker));
}
- void Flush(CheckedLock* held_lock) {
- static_assert(std::is_pod<BaseScopedWorkersExecutor>::value &&
- sizeof(BaseScopedWorkersExecutor) == 1,
- "Must add BaseScopedWorkersExecutor::Flush() if it becomes "
- "non-trivial");
-
+ void FlushWorkerCreation(CheckedLock* held_lock) {
if (workers_to_wake_up_.empty() && workers_to_start_.empty())
return;
CheckedAutoUnlock auto_unlock(*held_lock);
@@ -208,8 +205,8 @@ class ThreadGroupImpl::WorkerThreadDelegateImpl : public WorkerThread::Delegate,
// WorkerThread::Delegate:
WorkerThread::ThreadLabel GetThreadLabel() const override;
void OnMainEntry(const WorkerThread* worker) override;
- RegisteredTaskSource GetWork(WorkerThread* worker) override;
- void DidRunTask(RegisteredTaskSource task_source) override;
+ RunIntentWithRegisteredTaskSource GetWork(WorkerThread* worker) override;
+ void DidProcessTask(RegisteredTaskSource task_source) override;
TimeDelta GetSleepTimeout() override;
void OnMainExit(WorkerThread* worker) override;
@@ -232,9 +229,9 @@ class ThreadGroupImpl::WorkerThreadDelegateImpl : public WorkerThread::Delegate,
bool MustIncrementMaxTasksLockRequired()
EXCLUSIVE_LOCKS_REQUIRED(outer_->lock_);
- bool is_running_best_effort_task_lock_required() const
+ TaskPriority current_task_priority_lock_required() const
EXCLUSIVE_LOCKS_REQUIRED(outer_->lock_) {
- return read_any().is_running_best_effort_task;
+ return *read_any().current_task_priority;
}
// Exposed for AnnotateCheckedLockAcquired in
@@ -270,7 +267,7 @@ class ThreadGroupImpl::WorkerThreadDelegateImpl : public WorkerThread::Delegate,
size_t num_tasks_since_last_detach = 0;
// Whether the worker is currently running a task (i.e. GetWork() has
- // returned a non-empty task source and DidRunTask() hasn't been called
+ // returned a non-empty task source and DidProcessTask() hasn't been called
// yet).
bool is_running_task = false;
@@ -282,8 +279,8 @@ class ThreadGroupImpl::WorkerThreadDelegateImpl : public WorkerThread::Delegate,
// Writes from the worker thread protected by |outer_->lock_|. Reads from any
// thread, protected by |outer_->lock_| when not on the worker thread.
struct WriteWorkerReadAny {
- // Whether the worker is currently running a TaskPriority::BEST_EFFORT task.
- bool is_running_best_effort_task = false;
+ // The priority of the task the worker is currently running if any.
+ base::Optional<TaskPriority> current_task_priority;
// Time when MayBlockScopeEntered() was last called. Reset when
// BlockingScopeExited() is called.
@@ -433,16 +430,16 @@ ThreadGroupImpl::~ThreadGroupImpl() {
}
void ThreadGroupImpl::UpdateSortKey(
- TaskSourceAndTransaction task_source_and_transaction) {
+ TransactionWithOwnedTaskSource transaction_with_task_source) {
ScopedWorkersExecutor executor(this);
- UpdateSortKeyImpl(&executor, std::move(task_source_and_transaction));
+ UpdateSortKeyImpl(&executor, std::move(transaction_with_task_source));
}
void ThreadGroupImpl::PushTaskSourceAndWakeUpWorkers(
- RegisteredTaskSourceAndTransaction task_source_and_transaction) {
+ TransactionWithRegisteredTaskSource transaction_with_task_source) {
ScopedWorkersExecutor executor(this);
PushTaskSourceAndWakeUpWorkersImpl(&executor,
- std::move(task_source_and_transaction));
+ std::move(transaction_with_task_source));
}
size_t ThreadGroupImpl::GetMaxConcurrentNonBlockedTasksDeprecated() const {
@@ -581,11 +578,10 @@ void ThreadGroupImpl::WorkerThreadDelegateImpl::OnMainEntry(
SetBlockingObserverForCurrentThread(this);
}
-RegisteredTaskSource ThreadGroupImpl::WorkerThreadDelegateImpl::GetWork(
- WorkerThread* worker) {
+RunIntentWithRegisteredTaskSource
+ThreadGroupImpl::WorkerThreadDelegateImpl::GetWork(WorkerThread* worker) {
DCHECK_CALLED_ON_VALID_THREAD(worker_thread_checker_);
DCHECK(!worker_only().is_running_task);
- DCHECK(!read_worker().is_running_best_effort_task);
ScopedWorkersExecutor executor(outer_.get());
CheckedAutoLock auto_lock(outer_->lock_);
@@ -596,48 +592,41 @@ RegisteredTaskSource ThreadGroupImpl::WorkerThreadDelegateImpl::GetWork(
// additional workers if needed (doing this here allows us to reduce
// potentially expensive create/wake directly on PostTask()).
outer_->EnsureEnoughWorkersLockRequired(&executor);
- executor.Flush(&outer_->lock_);
+ executor.FlushWorkerCreation(&outer_->lock_);
if (!CanGetWorkLockRequired(worker))
return nullptr;
- if (outer_->priority_queue_.IsEmpty()) {
- OnWorkerBecomesIdleLockRequired(worker);
- return nullptr;
- }
+ RunIntentWithRegisteredTaskSource task_source;
+ TaskPriority priority;
+ while (!task_source && !outer_->priority_queue_.IsEmpty()) {
+ // Enforce the CanRunPolicy and that no more than |max_best_effort_tasks_|
+ // BEST_EFFORT tasks run concurrently.
+ priority = outer_->priority_queue_.PeekSortKey().priority();
+ if (!outer_->task_tracker_->CanRunPriority(priority) ||
+ (priority == TaskPriority::BEST_EFFORT &&
+ outer_->num_running_best_effort_tasks_ >=
+ outer_->max_best_effort_tasks_)) {
+ break;
+ }
- // Enforce the CanRunPolicy and that no more than |max_best_effort_tasks_|
- // BEST_EFFORT tasks run concurrently.
- const TaskPriority priority =
- outer_->priority_queue_.PeekSortKey().priority();
- if (!outer_->task_tracker_->CanRunPriority(priority) ||
- (priority == TaskPriority::BEST_EFFORT &&
- outer_->num_running_best_effort_tasks_ >=
- outer_->max_best_effort_tasks_)) {
+ task_source = outer_->TakeRunIntentWithRegisteredTaskSource(&executor);
+ }
+ if (!task_source) {
OnWorkerBecomesIdleLockRequired(worker);
return nullptr;
}
// Running task bookkeeping.
worker_only().is_running_task = true;
- ++outer_->num_running_tasks_;
+ outer_->IncrementTasksRunningLockRequired(priority);
DCHECK(!outer_->idle_workers_stack_.Contains(worker));
- DCHECK_LE(outer_->num_running_tasks_, outer_->max_tasks_);
- DCHECK_LE(outer_->num_running_tasks_, kMaxNumberOfWorkers);
-
- // Running BEST_EFFORT task bookkeeping.
- if (priority == TaskPriority::BEST_EFFORT) {
- write_worker().is_running_best_effort_task = true;
- ++outer_->num_running_best_effort_tasks_;
- DCHECK_LE(outer_->num_running_best_effort_tasks_,
- outer_->max_best_effort_tasks_);
- }
+ write_worker().current_task_priority = priority;
- // Pop the TaskSource from which to run a task from the PriorityQueue.
- return outer_->priority_queue_.PopTaskSource();
+ return task_source;
}
-void ThreadGroupImpl::WorkerThreadDelegateImpl::DidRunTask(
+void ThreadGroupImpl::WorkerThreadDelegateImpl::DidProcessTask(
RegisteredTaskSource task_source) {
DCHECK_CALLED_ON_VALID_THREAD(worker_thread_checker_);
DCHECK(worker_only().is_running_task);
@@ -649,10 +638,10 @@ void ThreadGroupImpl::WorkerThreadDelegateImpl::DidRunTask(
// A transaction to the TaskSource to reenqueue, if any. Instantiated here as
// |TaskSource::lock_| is a UniversalPredecessor and must always be acquired
// prior to acquiring a second lock
- Optional<RegisteredTaskSourceAndTransaction> task_source_and_transaction;
+ Optional<TransactionWithRegisteredTaskSource> transaction_with_task_source;
if (task_source) {
- task_source_and_transaction.emplace(
- RegisteredTaskSourceAndTransaction::FromTaskSource(
+ transaction_with_task_source.emplace(
+ TransactionWithRegisteredTaskSource::FromTaskSource(
std::move(task_source)));
}
@@ -663,21 +652,14 @@ void ThreadGroupImpl::WorkerThreadDelegateImpl::DidRunTask(
DCHECK(!incremented_max_tasks_since_blocked_);
// Running task bookkeeping.
- DCHECK_GT(outer_->num_running_tasks_, 0U);
- --outer_->num_running_tasks_;
+ outer_->DecrementTasksRunningLockRequired(
+ *read_worker().current_task_priority);
worker_only().is_running_task = false;
- // Running BEST_EFFORT task bookkeeping.
- if (read_worker().is_running_best_effort_task) {
- DCHECK_GT(outer_->num_running_best_effort_tasks_, 0U);
- --outer_->num_running_best_effort_tasks_;
- write_worker().is_running_best_effort_task = false;
- }
-
- if (task_source_and_transaction) {
+ if (transaction_with_task_source) {
outer_->ReEnqueueTaskSourceLockRequired(
&workers_executor, &reenqueue_executor,
- std::move(task_source_and_transaction.value()));
+ std::move(transaction_with_task_source.value()));
}
}
@@ -721,7 +703,7 @@ bool ThreadGroupImpl::WorkerThreadDelegateImpl::CanCleanupLockRequired(
const TimeTicks last_used_time = worker->GetLastUsedTime();
return !last_used_time.is_null() &&
- TimeTicks::Now() - last_used_time >=
+ subtle::TimeTicksNowIgnoringOverride() - last_used_time >=
outer_->after_start().suggested_reclaim_time &&
(outer_->workers_.size() > outer_->after_start().initial_max_tasks ||
!FeatureList::IsEnabled(kNoDetachBelowInitialCapacity)) &&
@@ -734,7 +716,7 @@ void ThreadGroupImpl::WorkerThreadDelegateImpl::CleanupLockRequired(
outer_->num_tasks_before_detach_histogram_->Add(
worker_only().num_tasks_since_last_detach);
- outer_->cleanup_timestamps_.push(TimeTicks::Now());
+ outer_->cleanup_timestamps_.push(subtle::TimeTicksNowIgnoringOverride());
worker->Cleanup();
outer_->idle_workers_stack_.Remove(worker);
@@ -837,7 +819,7 @@ void ThreadGroupImpl::WorkerThreadDelegateImpl::BlockingTypeUpgraded() {
if (!read_worker().may_block_start_time.is_null()) {
write_worker().may_block_start_time = TimeTicks();
--outer_->num_unresolved_may_block_;
- if (read_worker().is_running_best_effort_task)
+ if (*read_worker().current_task_priority == TaskPriority::BEST_EFFORT)
--outer_->num_unresolved_best_effort_may_block_;
}
}
@@ -851,12 +833,11 @@ void ThreadGroupImpl::WorkerThreadDelegateImpl::BlockingEnded() {
CheckedAutoLock auto_lock(outer_->lock_);
if (incremented_max_tasks_since_blocked_) {
- outer_->DecrementMaxTasksLockRequired(
- read_worker().is_running_best_effort_task);
+ outer_->DecrementMaxTasksLockRequired(*read_worker().current_task_priority);
} else {
DCHECK(!read_worker().may_block_start_time.is_null());
--outer_->num_unresolved_may_block_;
- if (read_worker().is_running_best_effort_task)
+ if (*read_worker().current_task_priority == TaskPriority::BEST_EFFORT)
--outer_->num_unresolved_best_effort_may_block_;
}
@@ -873,9 +854,9 @@ void ThreadGroupImpl::WorkerThreadDelegateImpl::MayBlockEntered() {
DCHECK(!incremented_max_tasks_since_blocked_);
DCHECK(read_worker().may_block_start_time.is_null());
- write_worker().may_block_start_time = TimeTicks::Now();
+ write_worker().may_block_start_time = subtle::TimeTicksNowIgnoringOverride();
++outer_->num_unresolved_may_block_;
- if (read_worker().is_running_best_effort_task)
+ if (*read_worker().current_task_priority == TaskPriority::BEST_EFFORT)
++outer_->num_unresolved_best_effort_may_block_;
outer_->MaybeScheduleAdjustMaxTasksLockRequired(&executor);
@@ -891,8 +872,7 @@ void ThreadGroupImpl::WorkerThreadDelegateImpl::WillBlockEntered() {
DCHECK(!incremented_max_tasks_since_blocked_);
DCHECK(read_worker().may_block_start_time.is_null());
incremented_max_tasks_since_blocked_ = true;
- outer_->IncrementMaxTasksLockRequired(
- read_worker().is_running_best_effort_task);
+ outer_->IncrementMaxTasksLockRequired(*read_worker().current_task_priority);
outer_->EnsureEnoughWorkersLockRequired(&executor);
}
@@ -930,12 +910,13 @@ bool ThreadGroupImpl::WorkerThreadDelegateImpl::
MustIncrementMaxTasksLockRequired() {
if (!incremented_max_tasks_since_blocked_ &&
!read_any().may_block_start_time.is_null() &&
- TimeTicks::Now() - read_any().may_block_start_time >=
+ subtle::TimeTicksNowIgnoringOverride() -
+ read_any().may_block_start_time >=
outer_->after_start().may_block_threshold) {
incremented_max_tasks_since_blocked_ = true;
--outer_->num_unresolved_may_block_;
- if (read_any().is_running_best_effort_task)
+ if (*read_any().current_task_priority == TaskPriority::BEST_EFFORT)
--outer_->num_unresolved_best_effort_may_block_;
return true;
@@ -991,7 +972,7 @@ ThreadGroupImpl::CreateAndRegisterWorkerLockRequired(
DCHECK_LE(workers_.size(), max_tasks_);
if (!cleanup_timestamps_.empty()) {
- detach_duration_histogram_->AddTime(TimeTicks::Now() -
+ detach_duration_histogram_->AddTime(subtle::TimeTicksNowIgnoringOverride() -
cleanup_timestamps_.top());
cleanup_timestamps_.pop();
}
@@ -1011,7 +992,7 @@ size_t ThreadGroupImpl::GetDesiredNumAwakeWorkersLockRequired() const {
// to run by the CanRunPolicy.
const size_t num_running_or_queued_can_run_best_effort_task_sources =
num_running_best_effort_tasks_ +
- GetNumQueuedCanRunBestEffortTaskSources();
+ GetNumAdditionalWorkersForBestEffortTaskSourcesLockRequired();
const size_t workers_for_best_effort_task_sources =
std::max(std::min(num_running_or_queued_can_run_best_effort_task_sources,
@@ -1021,7 +1002,7 @@ size_t ThreadGroupImpl::GetDesiredNumAwakeWorkersLockRequired() const {
// Number of USER_{VISIBLE|BLOCKING} task sources that are running or queued.
const size_t num_running_or_queued_foreground_task_sources =
(num_running_tasks_ - num_running_best_effort_tasks_) +
- GetNumQueuedCanRunForegroundTaskSources();
+ GetNumAdditionalWorkersForForegroundTaskSourcesLockRequired();
const size_t workers_for_foreground_task_sources =
num_running_or_queued_foreground_task_sources;
@@ -1069,6 +1050,10 @@ void ThreadGroupImpl::EnsureEnoughWorkersLockRequired(
if (desired_num_awake_workers == num_awake_workers)
MaintainAtLeastOneIdleWorkerLockRequired(executor);
+ // This function is called every time a task source is (re-)enqueued,
+ // hence the minimum priority needs to be updated.
+ UpdateMinAllowedPriorityLockRequired();
+
// Ensure that the number of workers is periodically adjusted if needed.
MaybeScheduleAdjustMaxTasksLockRequired(executor);
}
@@ -1092,7 +1077,7 @@ void ThreadGroupImpl::AdjustMaxTasks() {
AnnotateAcquiredLockAlias annotate(lock_, delegate->lock());
if (delegate->MustIncrementMaxTasksLockRequired()) {
IncrementMaxTasksLockRequired(
- delegate->is_running_best_effort_task_lock_required());
+ delegate->current_task_priority_lock_required());
}
}
@@ -1134,31 +1119,68 @@ bool ThreadGroupImpl::ShouldPeriodicallyAdjustMaxTasksLockRequired() {
const size_t num_running_or_queued_best_effort_task_sources =
num_running_best_effort_tasks_ +
- priority_queue_.GetNumTaskSourcesWithPriority(TaskPriority::BEST_EFFORT);
+ GetNumAdditionalWorkersForBestEffortTaskSourcesLockRequired();
if (num_running_or_queued_best_effort_task_sources > max_best_effort_tasks_ &&
num_unresolved_best_effort_may_block_ > 0) {
return true;
}
const size_t num_running_or_queued_task_sources =
- num_running_tasks_ + priority_queue_.Size();
+ num_running_tasks_ +
+ GetNumAdditionalWorkersForBestEffortTaskSourcesLockRequired() +
+ GetNumAdditionalWorkersForForegroundTaskSourcesLockRequired();
constexpr size_t kIdleWorker = 1;
return num_running_or_queued_task_sources + kIdleWorker > max_tasks_ &&
num_unresolved_may_block_ > 0;
}
-void ThreadGroupImpl::DecrementMaxTasksLockRequired(
- bool is_running_best_effort_task) {
+void ThreadGroupImpl::UpdateMinAllowedPriorityLockRequired() {
+ if (priority_queue_.IsEmpty() || num_running_tasks_ < max_tasks_) {
+ min_allowed_priority_.store(TaskPriority::BEST_EFFORT,
+ std::memory_order_relaxed);
+ } else {
+ min_allowed_priority_.store(priority_queue_.PeekSortKey().priority(),
+ std::memory_order_relaxed);
+ }
+}
+
+void ThreadGroupImpl::DecrementTasksRunningLockRequired(TaskPriority priority) {
+ DCHECK_GT(num_running_tasks_, 0U);
+ --num_running_tasks_;
+ if (priority == TaskPriority::BEST_EFFORT) {
+ DCHECK_GT(num_running_best_effort_tasks_, 0U);
+ --num_running_best_effort_tasks_;
+ }
+ UpdateMinAllowedPriorityLockRequired();
+}
+
+void ThreadGroupImpl::IncrementTasksRunningLockRequired(TaskPriority priority) {
+ ++num_running_tasks_;
+ DCHECK_LE(num_running_tasks_, max_tasks_);
+ DCHECK_LE(num_running_tasks_, kMaxNumberOfWorkers);
+ if (priority == TaskPriority::BEST_EFFORT) {
+ ++num_running_best_effort_tasks_;
+ DCHECK_LE(num_running_best_effort_tasks_, num_running_tasks_);
+ DCHECK_LE(num_running_best_effort_tasks_, max_best_effort_tasks_);
+ }
+ UpdateMinAllowedPriorityLockRequired();
+}
+
+void ThreadGroupImpl::DecrementMaxTasksLockRequired(TaskPriority priority) {
+ DCHECK_GT(num_running_tasks_, 0U);
+ DCHECK_GT(max_tasks_, 0U);
--max_tasks_;
- if (is_running_best_effort_task)
+ if (priority == TaskPriority::BEST_EFFORT)
--max_best_effort_tasks_;
+ UpdateMinAllowedPriorityLockRequired();
}
-void ThreadGroupImpl::IncrementMaxTasksLockRequired(
- bool is_running_best_effort_task) {
+void ThreadGroupImpl::IncrementMaxTasksLockRequired(TaskPriority priority) {
+ DCHECK_GT(num_running_tasks_, 0U);
++max_tasks_;
- if (is_running_best_effort_task)
+ if (priority == TaskPriority::BEST_EFFORT)
++max_best_effort_tasks_;
+ UpdateMinAllowedPriorityLockRequired();
}
ThreadGroupImpl::InitializedInStart::InitializedInStart() = default;
diff --git a/chromium/base/task/thread_pool/thread_group_impl.h b/chromium/base/task/thread_pool/thread_group_impl.h
index 01c41fbf9c5..2205dca1423 100644
--- a/chromium/base/task/thread_pool/thread_group_impl.h
+++ b/chromium/base/task/thread_pool/thread_group_impl.h
@@ -147,9 +147,10 @@ class BASE_EXPORT ThreadGroupImpl : public ThreadGroup {
// ThreadGroup:
void UpdateSortKey(
- TaskSourceAndTransaction task_source_and_transaction) override;
+ TransactionWithOwnedTaskSource transaction_with_task_source) override;
void PushTaskSourceAndWakeUpWorkers(
- RegisteredTaskSourceAndTransaction task_source_and_transaction) override;
+ TransactionWithRegisteredTaskSource transaction_with_task_source)
+ override;
void EnsureEnoughWorkersLockRequired(BaseScopedWorkersExecutor* executor)
override EXCLUSIVE_LOCKS_REQUIRED(lock_);
@@ -205,12 +206,24 @@ class BASE_EXPORT ThreadGroupImpl : public ThreadGroup {
bool ShouldPeriodicallyAdjustMaxTasksLockRequired()
EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ // Updates the minimum priority allowed to run below which tasks should yield.
+ // This should be called whenever |num_running_tasks_| or |max_tasks_| changes,
+ // or when a new task is added to |priority_queue_|.
+ void UpdateMinAllowedPriorityLockRequired() EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
+ // Increments/decrements the number of tasks of |priority| that are currently
+ // running in this thread group. Must be invoked before/after running a task.
+ void DecrementTasksRunningLockRequired(TaskPriority priority)
+ EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ void IncrementTasksRunningLockRequired(TaskPriority priority)
+ EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
// Increments/decrements the number of tasks that can run in this thread
- // group. |is_running_best_effort_task| indicates whether the worker causing
- // the change is currently running a TaskPriority::BEST_EFFORT task.
- void DecrementMaxTasksLockRequired(bool is_running_best_effort_task)
+ // group. May only be called in a scope where a task is running with
+ // |priority|.
+ void DecrementMaxTasksLockRequired(TaskPriority priority)
EXCLUSIVE_LOCKS_REQUIRED(lock_);
- void IncrementMaxTasksLockRequired(bool is_running_best_effort_task)
+ void IncrementMaxTasksLockRequired(TaskPriority priority)
EXCLUSIVE_LOCKS_REQUIRED(lock_);
// Values set at Start() and never modified afterwards.
diff --git a/chromium/base/task/thread_pool/thread_group_impl_unittest.cc b/chromium/base/task/thread_pool/thread_group_impl_unittest.cc
index 87e63e71345..27bca7a36a1 100644
--- a/chromium/base/task/thread_pool/thread_group_impl_unittest.cc
+++ b/chromium/base/task/thread_pool/thread_group_impl_unittest.cc
@@ -119,7 +119,7 @@ class ThreadGroupImplImplTestBase : public ThreadGroup::Delegate {
}
Thread service_thread_;
- TaskTracker task_tracker_ = {"Test"};
+ TaskTracker task_tracker_{"Test"};
std::unique_ptr<ThreadGroupImpl> thread_group_;
DelayedTaskManager delayed_task_manager_;
TrackedRefFactory<ThreadGroup::Delegate> tracked_ref_factory_;
@@ -289,6 +289,80 @@ TEST_P(ThreadGroupImplImplTestParam, Saturate) {
thread_group_->WaitForAllWorkersIdleForTesting();
}
+// Verifies that ShouldYield() returns true for priorities lower than the
+// highest priority pending while the thread group is flooded with USER_VISIBLE
+// tasks.
+TEST_F(ThreadGroupImplImplTest, ShouldYieldFloodedUserVisible) {
+ WaitableEvent threads_running;
+ WaitableEvent threads_continue;
+
+ // Saturate workers with USER_VISIBLE tasks to ensure ShouldYield() returns
+ // true when a task of higher priority
+ // is posted.
+ RepeatingClosure threads_running_barrier = BarrierClosure(
+ kMaxTasks,
+ BindOnce(&WaitableEvent::Signal, Unretained(&threads_running)));
+
+ auto task_source = base::MakeRefCounted<test::MockJobTaskSource>(
+ FROM_HERE,
+ BindLambdaForTesting([&threads_running_barrier, &threads_continue]() {
+ threads_running_barrier.Run();
+ test::WaitWithoutBlockingObserver(&threads_continue);
+ }),
+ TaskPriority::USER_VISIBLE, /* num_tasks_to_run */ kMaxTasks,
+ /* max_concurrency */ kMaxTasks);
+ auto registered_task_source = task_tracker_.WillQueueTaskSource(task_source);
+ ASSERT_TRUE(registered_task_source);
+ static_cast<ThreadGroup*>(thread_group_.get())
+ ->PushTaskSourceAndWakeUpWorkers(
+ TransactionWithRegisteredTaskSource::FromTaskSource(
+ std::move(registered_task_source)));
+
+ threads_running.Wait();
+
+ // Posting a BEST_EFFORT task should not cause any other tasks to yield.
+ // Once this task gets to run, no other task needs to yield.
+ // Note: This is only true because this test is using a single ThreadGroup.
+ // Under the ThreadPool this wouldn't be racy because BEST_EFFORT tasks
+ // run in an independent ThreadGroup.
+ test::CreateTaskRunner(TaskPriority::BEST_EFFORT,
+ &mock_pooled_task_runner_delegate_)
+ ->PostTask(
+ FROM_HERE, BindLambdaForTesting([&]() {
+ EXPECT_FALSE(thread_group_->ShouldYield(TaskPriority::BEST_EFFORT));
+ }));
+ EXPECT_FALSE(thread_group_->ShouldYield(TaskPriority::BEST_EFFORT));
+ EXPECT_FALSE(thread_group_->ShouldYield(TaskPriority::USER_VISIBLE));
+ EXPECT_FALSE(thread_group_->ShouldYield(TaskPriority::USER_BLOCKING));
+
+ // Posting a USER_VISIBLE task should cause BEST_EFFORT tasks to yield.
+ test::CreateTaskRunner(TaskPriority::USER_VISIBLE,
+ &mock_pooled_task_runner_delegate_)
+ ->PostTask(FROM_HERE, BindLambdaForTesting([&]() {
+ EXPECT_FALSE(
+ thread_group_->ShouldYield(TaskPriority::USER_VISIBLE));
+ }));
+ EXPECT_TRUE(thread_group_->ShouldYield(TaskPriority::BEST_EFFORT));
+ EXPECT_FALSE(thread_group_->ShouldYield(TaskPriority::USER_VISIBLE));
+ EXPECT_FALSE(thread_group_->ShouldYield(TaskPriority::USER_BLOCKING));
+
+ // Posting a USER_BLOCKING task should cause BEST_EFFORT and USER_VISIBLE
+ // tasks to yield.
+ test::CreateTaskRunner(TaskPriority::USER_BLOCKING,
+ &mock_pooled_task_runner_delegate_)
+ ->PostTask(FROM_HERE, BindLambdaForTesting([&]() {
+ // Once this task got to start, no other task needs to yield.
+ EXPECT_FALSE(
+ thread_group_->ShouldYield(TaskPriority::USER_BLOCKING));
+ }));
+ EXPECT_TRUE(thread_group_->ShouldYield(TaskPriority::BEST_EFFORT));
+ EXPECT_TRUE(thread_group_->ShouldYield(TaskPriority::USER_VISIBLE));
+ EXPECT_FALSE(thread_group_->ShouldYield(TaskPriority::USER_BLOCKING));
+
+ threads_continue.Signal();
+ task_tracker_.FlushForTesting();
+}
+
INSTANTIATE_TEST_SUITE_P(Parallel,
ThreadGroupImplImplTestParam,
::testing::Values(TaskSourceExecutionMode::kParallel));
@@ -297,6 +371,10 @@ INSTANTIATE_TEST_SUITE_P(
ThreadGroupImplImplTestParam,
::testing::Values(TaskSourceExecutionMode::kSequenced));
+INSTANTIATE_TEST_SUITE_P(Job,
+ ThreadGroupImplImplTestParam,
+ ::testing::Values(TaskSourceExecutionMode::kJob));
+
namespace {
class ThreadGroupImplImplStartInBodyTest : public ThreadGroupImplImplTest {
@@ -331,14 +409,14 @@ TEST_F(ThreadGroupImplImplStartInBodyTest, PostTasksBeforeStart) {
// up.
WaitableEvent barrier;
- test::CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()},
- &mock_pooled_task_runner_delegate_)
+ test::CreateTaskRunner({ThreadPool(), WithBaseSyncPrimitives()},
+ &mock_pooled_task_runner_delegate_)
->PostTask(
FROM_HERE,
BindOnce(&TaskPostedBeforeStart, Unretained(&task_1_thread_ref),
Unretained(&task_1_running), Unretained(&barrier)));
- test::CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()},
- &mock_pooled_task_runner_delegate_)
+ test::CreateTaskRunner({ThreadPool(), WithBaseSyncPrimitives()},
+ &mock_pooled_task_runner_delegate_)
->PostTask(
FROM_HERE,
BindOnce(&TaskPostedBeforeStart, Unretained(&task_2_thread_ref),
@@ -366,8 +444,9 @@ TEST_F(ThreadGroupImplImplStartInBodyTest, PostTasksBeforeStart) {
// Verify that posting many tasks before Start will cause the number of workers
// to grow to |max_tasks_| after Start.
TEST_F(ThreadGroupImplImplStartInBodyTest, PostManyTasks) {
- scoped_refptr<TaskRunner> task_runner = test::CreateTaskRunnerWithTraits(
- {WithBaseSyncPrimitives()}, &mock_pooled_task_runner_delegate_);
+ scoped_refptr<TaskRunner> task_runner =
+ test::CreateTaskRunner({ThreadPool(), WithBaseSyncPrimitives()},
+ &mock_pooled_task_runner_delegate_);
constexpr size_t kNumTasksPosted = 2 * kMaxTasks;
WaitableEvent threads_running;
@@ -446,8 +525,8 @@ TEST_F(ThreadGroupImplCheckTlsReuse, CheckCleanupWorkers) {
std::vector<std::unique_ptr<test::TestTaskFactory>> factories;
for (size_t i = 0; i < kMaxTasks; ++i) {
factories.push_back(std::make_unique<test::TestTaskFactory>(
- test::CreateTaskRunnerWithTraits({WithBaseSyncPrimitives()},
- &mock_pooled_task_runner_delegate_),
+ test::CreateTaskRunner({ThreadPool(), WithBaseSyncPrimitives()},
+ &mock_pooled_task_runner_delegate_),
TaskSourceExecutionMode::kParallel));
ASSERT_TRUE(factories.back()->PostTask(
PostNestedTask::NO,
@@ -507,8 +586,9 @@ class ThreadGroupImplHistogramTest : public ThreadGroupImplImplTest {
void FloodPool(WaitableEvent* continue_event) {
ASSERT_FALSE(continue_event->IsSignaled());
- auto task_runner = test::CreateTaskRunnerWithTraits(
- {WithBaseSyncPrimitives()}, &mock_pooled_task_runner_delegate_);
+ auto task_runner =
+ test::CreateTaskRunner({ThreadPool(), WithBaseSyncPrimitives()},
+ &mock_pooled_task_runner_delegate_);
const auto max_tasks = thread_group_->GetMaxTasksForTesting();
@@ -541,8 +621,9 @@ class ThreadGroupImplHistogramTest : public ThreadGroupImplImplTest {
TEST_F(ThreadGroupImplHistogramTest, NumTasksBetweenWaits) {
WaitableEvent event;
CreateAndStartThreadGroup(TimeDelta::Max(), kMaxTasks);
- auto task_runner = test::CreateSequencedTaskRunnerWithTraits(
- {WithBaseSyncPrimitives()}, &mock_pooled_task_runner_delegate_);
+ auto task_runner =
+ test::CreateSequencedTaskRunner({ThreadPool(), WithBaseSyncPrimitives()},
+ &mock_pooled_task_runner_delegate_);
// Post a task.
task_runner->PostTask(FROM_HERE, BindOnce(&test::WaitWithoutBlockingObserver,
@@ -626,8 +707,8 @@ TEST_F(ThreadGroupImplHistogramTest,
TEST_F(ThreadGroupImplHistogramTest, NumTasksBeforeCleanup) {
CreateThreadGroup();
auto histogrammed_thread_task_runner =
- test::CreateSequencedTaskRunnerWithTraits(
- {WithBaseSyncPrimitives()}, &mock_pooled_task_runner_delegate_);
+ test::CreateSequencedTaskRunner({ThreadPool(), WithBaseSyncPrimitives()},
+ &mock_pooled_task_runner_delegate_);
// Post 3 tasks and hold the thread for idle thread stack ordering.
// This test assumes |histogrammed_thread_task_runner| gets assigned the same
@@ -692,8 +773,9 @@ TEST_F(ThreadGroupImplHistogramTest, NumTasksBeforeCleanup) {
// |histogrammed_thread_task_runner| to cleanup.
WaitableEvent top_idle_thread_running;
WaitableEvent top_idle_thread_continue;
- auto task_runner_for_top_idle = test::CreateSequencedTaskRunnerWithTraits(
- {WithBaseSyncPrimitives()}, &mock_pooled_task_runner_delegate_);
+ auto task_runner_for_top_idle =
+ test::CreateSequencedTaskRunner({ThreadPool(), WithBaseSyncPrimitives()},
+ &mock_pooled_task_runner_delegate_);
task_runner_for_top_idle->PostTask(
FROM_HERE,
BindOnce(
@@ -757,8 +839,9 @@ TEST_F(ThreadGroupImplStandbyPolicyTest, InitOne) {
// Verify that the ThreadGroupImpl keeps at least one idle standby
// thread, capacity permitting.
TEST_F(ThreadGroupImplStandbyPolicyTest, VerifyStandbyThread) {
- auto task_runner = test::CreateTaskRunnerWithTraits(
- {WithBaseSyncPrimitives()}, &mock_pooled_task_runner_delegate_);
+ auto task_runner =
+ test::CreateTaskRunner({ThreadPool(), WithBaseSyncPrimitives()},
+ &mock_pooled_task_runner_delegate_);
WaitableEvent thread_running(WaitableEvent::ResetPolicy::AUTOMATIC);
WaitableEvent threads_continue;
@@ -792,8 +875,8 @@ TEST_F(ThreadGroupImplStandbyPolicyTest, VerifyStandbyThread) {
// reclaimed even if not on top of the idle stack when reclaim timeout expires).
// Regression test for https://crbug.com/847501.
TEST_F(ThreadGroupImplStandbyPolicyTest, InAndOutStandbyThreadIsActive) {
- auto sequenced_task_runner = test::CreateSequencedTaskRunnerWithTraits(
- {}, &mock_pooled_task_runner_delegate_);
+ auto sequenced_task_runner = test::CreateSequencedTaskRunner(
+ {ThreadPool()}, &mock_pooled_task_runner_delegate_);
WaitableEvent timer_started;
@@ -828,8 +911,8 @@ TEST_F(ThreadGroupImplStandbyPolicyTest, InAndOutStandbyThreadIsActive) {
// Verify that being "the" idle thread counts as being active but isn't sticky.
// Regression test for https://crbug.com/847501.
TEST_F(ThreadGroupImplStandbyPolicyTest, OnlyKeepActiveStandbyThreads) {
- auto sequenced_task_runner = test::CreateSequencedTaskRunnerWithTraits(
- {}, &mock_pooled_task_runner_delegate_);
+ auto sequenced_task_runner = test::CreateSequencedTaskRunner(
+ {ThreadPool()}, &mock_pooled_task_runner_delegate_);
// Start this test like
// ThreadGroupImplStandbyPolicyTest.InAndOutStandbyThreadIsActive and
@@ -846,8 +929,9 @@ TEST_F(ThreadGroupImplStandbyPolicyTest, OnlyKeepActiveStandbyThreads) {
// Then also flood the thread group (cycling the top of the idle stack).
{
- auto task_runner = test::CreateTaskRunnerWithTraits(
- {WithBaseSyncPrimitives()}, &mock_pooled_task_runner_delegate_);
+ auto task_runner =
+ test::CreateTaskRunner({ThreadPool(), WithBaseSyncPrimitives()},
+ &mock_pooled_task_runner_delegate_);
WaitableEvent thread_running(WaitableEvent::ResetPolicy::AUTOMATIC);
WaitableEvent threads_continue;
@@ -1017,9 +1101,9 @@ class ThreadGroupImplBlockingTest
// Unblocks tasks posted by SaturateWithBusyTasks().
void UnblockBusyTasks() { busy_threads_continue_.Signal(); }
- const scoped_refptr<TaskRunner> task_runner_ =
- test::CreateTaskRunnerWithTraits({MayBlock(), WithBaseSyncPrimitives()},
- &mock_pooled_task_runner_delegate_);
+ const scoped_refptr<TaskRunner> task_runner_ = test::CreateTaskRunner(
+ {ThreadPool(), MayBlock(), WithBaseSyncPrimitives()},
+ &mock_pooled_task_runner_delegate_);
private:
WaitableEvent blocking_threads_continue_;
@@ -1070,8 +1154,8 @@ TEST_P(ThreadGroupImplBlockingTest, TooManyBestEffortTasks) {
kMaxBestEffortTasks + 1,
BindOnce(&WaitableEvent::Signal, Unretained(&threads_running)));
- const auto best_effort_task_runner = test::CreateTaskRunnerWithTraits(
- {TaskPriority::BEST_EFFORT, MayBlock()},
+ const auto best_effort_task_runner = test::CreateTaskRunner(
+ {ThreadPool(), TaskPriority::BEST_EFFORT, MayBlock()},
&mock_pooled_task_runner_delegate_);
for (size_t i = 0; i < kMaxBestEffortTasks + 1; ++i) {
best_effort_task_runner->PostTask(
@@ -1234,6 +1318,50 @@ TEST_P(ThreadGroupImplBlockingTest, WorkersIdleWhenOverCapacity) {
task_tracker_.FlushForTesting();
}
+// Verify that an increase of max tasks with SaturateWithBlockingTasks()
+// increases the number of tasks that can run before ShouldYield returns true.
+TEST_P(ThreadGroupImplBlockingTest, ThreadBlockedUnblockedShouldYield) {
+ CreateAndStartThreadGroup();
+
+ ASSERT_EQ(thread_group_->GetMaxTasksForTesting(), kMaxTasks);
+
+ EXPECT_FALSE(thread_group_->ShouldYield(TaskPriority::BEST_EFFORT));
+ SaturateWithBlockingTasks(GetParam());
+ EXPECT_FALSE(thread_group_->ShouldYield(TaskPriority::BEST_EFFORT));
+
+ // Forces |kMaxTasks| extra workers to be instantiated by posting tasks. This
+ // should not block forever.
+ SaturateWithBusyTasks();
+
+ // All tasks can run, hence ShouldYield returns false.
+ EXPECT_FALSE(thread_group_->ShouldYield(TaskPriority::BEST_EFFORT));
+
+ // Post a USER_VISIBLE task that can't run since workers are saturated. This
+ // should cause BEST_EFFORT tasks to yield.
+ test::CreateTaskRunner(TaskPriority::USER_VISIBLE,
+ &mock_pooled_task_runner_delegate_)
+ ->PostTask(
+ FROM_HERE, BindLambdaForTesting([&]() {
+ EXPECT_FALSE(thread_group_->ShouldYield(TaskPriority::BEST_EFFORT));
+ }));
+ EXPECT_TRUE(thread_group_->ShouldYield(TaskPriority::BEST_EFFORT));
+
+ // Post a USER_BLOCKING task that can't run since workers are saturated. This
+ // should cause USER_VISIBLE tasks to yield.
+ test::CreateTaskRunner(TaskPriority::USER_BLOCKING,
+ &mock_pooled_task_runner_delegate_)
+ ->PostTask(FROM_HERE, BindLambdaForTesting([&]() {
+ EXPECT_FALSE(
+ thread_group_->ShouldYield(TaskPriority::USER_VISIBLE));
+ }));
+ EXPECT_TRUE(thread_group_->ShouldYield(TaskPriority::USER_VISIBLE));
+
+ UnblockBusyTasks();
+ UnblockBlockingTasks();
+ task_tracker_.FlushForTesting();
+ EXPECT_EQ(thread_group_->GetMaxTasksForTesting(), kMaxTasks);
+}
+
INSTANTIATE_TEST_SUITE_P(
,
ThreadGroupImplBlockingTest,
@@ -1285,9 +1413,9 @@ TEST_F(ThreadGroupImplBlockingTest, MayBlockIncreaseCapacityNestedWillBlock) {
CreateAndStartThreadGroup();
ASSERT_EQ(thread_group_->GetMaxTasksForTesting(), kMaxTasks);
- auto task_runner =
- test::CreateTaskRunnerWithTraits({MayBlock(), WithBaseSyncPrimitives()},
- &mock_pooled_task_runner_delegate_);
+ auto task_runner = test::CreateTaskRunner(
+ {ThreadPool(), MayBlock(), WithBaseSyncPrimitives()},
+ &mock_pooled_task_runner_delegate_);
WaitableEvent can_return;
// Saturate the thread group so that a MAY_BLOCK ScopedBlockingCall would
@@ -1340,9 +1468,9 @@ class ThreadGroupImplOverCapacityTest : public ThreadGroupImplImplTestBase,
void SetUp() override {
CreateAndStartThreadGroup(kReclaimTimeForCleanupTests, kLocalMaxTasks);
- task_runner_ =
- test::CreateTaskRunnerWithTraits({MayBlock(), WithBaseSyncPrimitives()},
- &mock_pooled_task_runner_delegate_);
+ task_runner_ = test::CreateTaskRunner(
+ {ThreadPool(), MayBlock(), WithBaseSyncPrimitives()},
+ &mock_pooled_task_runner_delegate_);
}
void TearDown() override { ThreadGroupImplImplTestBase::CommonTearDown(); }
@@ -1548,12 +1676,11 @@ TEST_F(ThreadGroupImplImplStartInBodyTest, MaxBestEffortTasks) {
StartThreadGroup(TimeDelta::Max(), // |suggested_reclaim_time|
kMaxTasks, // |max_tasks|
kMaxBestEffortTasks); // |max_best_effort_tasks|
- const scoped_refptr<TaskRunner> foreground_runner =
- test::CreateTaskRunnerWithTraits({MayBlock()},
- &mock_pooled_task_runner_delegate_);
- const scoped_refptr<TaskRunner> background_runner =
- test::CreateTaskRunnerWithTraits({TaskPriority::BEST_EFFORT, MayBlock()},
- &mock_pooled_task_runner_delegate_);
+ const scoped_refptr<TaskRunner> foreground_runner = test::CreateTaskRunner(
+ {ThreadPool(), MayBlock()}, &mock_pooled_task_runner_delegate_);
+ const scoped_refptr<TaskRunner> background_runner = test::CreateTaskRunner(
+ {ThreadPool(), TaskPriority::BEST_EFFORT, MayBlock()},
+ &mock_pooled_task_runner_delegate_);
// It should be possible to have |kMaxBestEffortTasks|
// TaskPriority::BEST_EFFORT tasks running concurrently.
@@ -1607,9 +1734,9 @@ TEST_F(ThreadGroupImplImplStartInBodyTest,
kMaxTasks, // |max_tasks|
kMaxBestEffortTasks); // |max_best_effort_tasks|
- const scoped_refptr<TaskRunner> runner =
- test::CreateTaskRunnerWithTraits({TaskPriority::BEST_EFFORT, MayBlock()},
- &mock_pooled_task_runner_delegate_);
+ const scoped_refptr<TaskRunner> runner = test::CreateTaskRunner(
+ {ThreadPool(), TaskPriority::BEST_EFFORT, MayBlock()},
+ &mock_pooled_task_runner_delegate_);
for (size_t i = 0; i < kLargeNumber; ++i) {
runner->PostTask(FROM_HERE, BindLambdaForTesting([&]() {
@@ -1672,8 +1799,8 @@ TEST_F(ThreadGroupImplImplStartInBodyTest,
kNumWorkers, // |max_tasks|
nullopt, // |max_best_effort_tasks|
&worker_observer); // |worker_observer|
- const scoped_refptr<TaskRunner> runner = test::CreateTaskRunnerWithTraits(
- {MayBlock()}, &mock_pooled_task_runner_delegate_);
+ const scoped_refptr<TaskRunner> runner = test::CreateTaskRunner(
+ {ThreadPool(), MayBlock()}, &mock_pooled_task_runner_delegate_);
WaitableEvent hold_will_block_task;
runner->PostTask(FROM_HERE, BindLambdaForTesting([&]() {
@@ -1731,9 +1858,9 @@ class ThreadGroupImplBlockingCallAndMaxBestEffortTasksTest
TEST_P(ThreadGroupImplBlockingCallAndMaxBestEffortTasksTest,
BlockingCallAndMaxBestEffortTasksTest) {
- const scoped_refptr<TaskRunner> background_runner =
- test::CreateTaskRunnerWithTraits({TaskPriority::BEST_EFFORT, MayBlock()},
- &mock_pooled_task_runner_delegate_);
+ const scoped_refptr<TaskRunner> background_runner = test::CreateTaskRunner(
+ {ThreadPool(), TaskPriority::BEST_EFFORT, MayBlock()},
+ &mock_pooled_task_runner_delegate_);
// Post |kMaxBestEffortTasks| TaskPriority::BEST_EFFORT tasks that block in a
// ScopedBlockingCall.
@@ -1790,13 +1917,7 @@ INSTANTIATE_TEST_SUITE_P(WillBlock,
// Verify that worker detachment doesn't race with worker cleanup, regression
// test for https://crbug.com/810464.
TEST_F(ThreadGroupImplImplStartInBodyTest, RacyCleanup) {
-#if defined(OS_FUCHSIA)
- // Fuchsia + QEMU doesn't deal well with *many* threads being
- // created/destroyed at once: https://crbug.com/816575.
- constexpr size_t kLocalMaxTasks = 16;
-#else // defined(OS_FUCHSIA)
constexpr size_t kLocalMaxTasks = 256;
-#endif // defined(OS_FUCHSIA)
constexpr TimeDelta kReclaimTimeForRacyCleanupTest =
TimeDelta::FromMilliseconds(10);
@@ -1805,8 +1926,9 @@ TEST_F(ThreadGroupImplImplStartInBodyTest, RacyCleanup) {
service_thread_.task_runner(), nullptr,
ThreadGroup::WorkerEnvironment::NONE);
- scoped_refptr<TaskRunner> task_runner = test::CreateTaskRunnerWithTraits(
- {WithBaseSyncPrimitives()}, &mock_pooled_task_runner_delegate_);
+ scoped_refptr<TaskRunner> task_runner =
+ test::CreateTaskRunner({ThreadPool(), WithBaseSyncPrimitives()},
+ &mock_pooled_task_runner_delegate_);
WaitableEvent threads_running;
WaitableEvent unblock_threads;
diff --git a/chromium/base/task/thread_pool/thread_group_native.cc b/chromium/base/task/thread_pool/thread_group_native.cc
index 48c86cc05d4..a7aec1debaf 100644
--- a/chromium/base/task/thread_pool/thread_group_native.cc
+++ b/chromium/base/task/thread_pool/thread_group_native.cc
@@ -79,53 +79,67 @@ void ThreadGroupNative::JoinForTesting() {
}
void ThreadGroupNative::RunNextTaskSourceImpl() {
- RegisteredTaskSource task_source = GetWork();
+ RunIntentWithRegisteredTaskSource run_intent_with_task_source = GetWork();
- if (task_source) {
+ if (run_intent_with_task_source) {
BindToCurrentThread();
- task_source = task_tracker_->RunAndPopNextTask(std::move(task_source));
+ RegisteredTaskSource task_source = task_tracker_->RunAndPopNextTask(
+ std::move(run_intent_with_task_source));
UnbindFromCurrentThread();
if (task_source) {
ScopedWorkersExecutor workers_executor(this);
ScopedReenqueueExecutor reenqueue_executor;
- auto task_source_and_transaction =
- RegisteredTaskSourceAndTransaction::FromTaskSource(
+ auto transaction_with_task_source =
+ TransactionWithRegisteredTaskSource::FromTaskSource(
std::move(task_source));
CheckedAutoLock auto_lock(lock_);
ReEnqueueTaskSourceLockRequired(&workers_executor, &reenqueue_executor,
- std::move(task_source_and_transaction));
+ std::move(transaction_with_task_source));
}
}
}
-RegisteredTaskSource ThreadGroupNative::GetWork() {
+void ThreadGroupNative::UpdateMinAllowedPriorityLockRequired() {
+ // Tasks should yield as soon as there is work of higher priority in
+ // |priority_queue_|.
+ min_allowed_priority_.store(priority_queue_.IsEmpty()
+ ? TaskPriority::BEST_EFFORT
+ : priority_queue_.PeekSortKey().priority(),
+ std::memory_order_relaxed);
+}
+
+RunIntentWithRegisteredTaskSource ThreadGroupNative::GetWork() {
+ ScopedWorkersExecutor workers_executor(this);
CheckedAutoLock auto_lock(lock_);
DCHECK_GT(num_pending_threadpool_work_, 0U);
--num_pending_threadpool_work_;
- // There can be more pending threadpool work than TaskSources in the
- // PriorityQueue after RemoveTaskSource().
- if (priority_queue_.IsEmpty())
- return nullptr;
-
- // Enforce the CanRunPolicy.
- const TaskPriority priority = priority_queue_.PeekSortKey().priority();
- if (!task_tracker_->CanRunPriority(priority))
- return nullptr;
- return priority_queue_.PopTaskSource();
+
+ RunIntentWithRegisteredTaskSource task_source;
+ TaskPriority priority;
+ while (!task_source && !priority_queue_.IsEmpty()) {
+ priority = priority_queue_.PeekSortKey().priority();
+ // Enforce the CanRunPolicy.
+ if (!task_tracker_->CanRunPriority(priority))
+ return nullptr;
+
+ task_source = TakeRunIntentWithRegisteredTaskSource(&workers_executor);
+ }
+ UpdateMinAllowedPriorityLockRequired();
+ return task_source;
}
void ThreadGroupNative::UpdateSortKey(
- TaskSourceAndTransaction task_source_and_transaction) {
+ TransactionWithOwnedTaskSource transaction_with_task_source) {
ScopedWorkersExecutor executor(this);
- UpdateSortKeyImpl(&executor, std::move(task_source_and_transaction));
+ UpdateSortKeyImpl(&executor, std::move(transaction_with_task_source));
}
void ThreadGroupNative::PushTaskSourceAndWakeUpWorkers(
- RegisteredTaskSourceAndTransaction task_source_and_transaction) {
+ TransactionWithRegisteredTaskSource transaction_with_task_source) {
ScopedWorkersExecutor executor(this);
PushTaskSourceAndWakeUpWorkersImpl(&executor,
- std::move(task_source_and_transaction));
+ std::move(transaction_with_task_source));
}
void ThreadGroupNative::EnsureEnoughWorkersLockRequired(
@@ -135,8 +149,8 @@ void ThreadGroupNative::EnsureEnoughWorkersLockRequired(
// Ensure that there is at least one pending threadpool work per TaskSource in
// the PriorityQueue.
const size_t desired_num_pending_threadpool_work =
- GetNumQueuedCanRunBestEffortTaskSources() +
- GetNumQueuedCanRunForegroundTaskSources();
+ GetNumAdditionalWorkersForBestEffortTaskSourcesLockRequired() +
+ GetNumAdditionalWorkersForForegroundTaskSourcesLockRequired();
if (desired_num_pending_threadpool_work > num_pending_threadpool_work_) {
static_cast<ScopedWorkersExecutor*>(executor)
@@ -144,6 +158,9 @@ void ThreadGroupNative::EnsureEnoughWorkersLockRequired(
desired_num_pending_threadpool_work - num_pending_threadpool_work_);
num_pending_threadpool_work_ = desired_num_pending_threadpool_work;
}
+ // This function is called every time a task source is queued or re-enqueued,
+ // hence the minimum priority needs to be updated.
+ UpdateMinAllowedPriorityLockRequired();
}
size_t ThreadGroupNative::GetMaxConcurrentNonBlockedTasksDeprecated() const {
diff --git a/chromium/base/task/thread_pool/thread_group_native.h b/chromium/base/task/thread_pool/thread_group_native.h
index 21c3d870bd0..bab7f46bd59 100644
--- a/chromium/base/task/thread_pool/thread_group_native.h
+++ b/chromium/base/task/thread_pool/thread_group_native.h
@@ -49,15 +49,20 @@ class BASE_EXPORT ThreadGroupNative : public ThreadGroup {
// ThreadGroup:
void UpdateSortKey(
- TaskSourceAndTransaction task_source_and_transaction) override;
+ TransactionWithOwnedTaskSource transaction_with_task_source) override;
void PushTaskSourceAndWakeUpWorkers(
- RegisteredTaskSourceAndTransaction task_source_and_transaction) override;
+ TransactionWithRegisteredTaskSource transaction_with_task_source)
+ override;
void EnsureEnoughWorkersLockRequired(BaseScopedWorkersExecutor* executor)
override EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ // Updates the minimum priority allowed to run below which tasks should yield,
+ // based on task sources in |priority_queue_|.
+ void UpdateMinAllowedPriorityLockRequired() EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
// Returns the top TaskSource off the |priority_queue_|. Returns nullptr
// if the |priority_queue_| is empty.
- RegisteredTaskSource GetWork();
+ RunIntentWithRegisteredTaskSource GetWork();
// Indicates whether the thread group has been started yet.
bool started_ GUARDED_BY(lock_) = false;
diff --git a/chromium/base/task/thread_pool/thread_group_unittest.cc b/chromium/base/task/thread_pool/thread_group_unittest.cc
index 3a84ca8f75a..1a62bb9919b 100644
--- a/chromium/base/task/thread_pool/thread_group_unittest.cc
+++ b/chromium/base/task/thread_pool/thread_group_unittest.cc
@@ -59,11 +59,6 @@ constexpr size_t kMaxBestEffortTasks = kMaxTasks / 2;
constexpr size_t kNumThreadsPostingTasks = 4;
constexpr size_t kNumTasksPostedPerThread = 150;
-struct PoolExecutionType {
- test::PoolType pool_type;
- TaskSourceExecutionMode execution_mode;
-};
-
using PostNestedTask = test::TestTaskFactory::PostNestedTask;
class ThreadPostingTasks : public SimpleThread {
@@ -100,10 +95,9 @@ class ThreadPostingTasks : public SimpleThread {
DISALLOW_COPY_AND_ASSIGN(ThreadPostingTasks);
};
-class ThreadGroupTest : public testing::TestWithParam<PoolExecutionType>,
- public ThreadGroup::Delegate {
+class ThreadGroupTestBase : public testing::Test, public ThreadGroup::Delegate {
protected:
- ThreadGroupTest()
+ ThreadGroupTestBase()
: service_thread_("ThreadPoolServiceThread"),
tracked_ref_factory_(this) {}
@@ -122,7 +116,7 @@ class ThreadGroupTest : public testing::TestWithParam<PoolExecutionType>,
void CreateThreadGroup() {
ASSERT_FALSE(thread_group_);
- switch (GetParam().pool_type) {
+ switch (GetPoolType()) {
case test::PoolType::GENERIC:
thread_group_ = std::make_unique<ThreadGroupImpl>(
"TestThreadGroup", "A", ThreadPriority::NORMAL,
@@ -145,7 +139,7 @@ class ThreadGroupTest : public testing::TestWithParam<PoolExecutionType>,
void StartThreadGroup(ThreadGroup::WorkerEnvironment worker_environment =
ThreadGroup::WorkerEnvironment::NONE) {
ASSERT_TRUE(thread_group_);
- switch (GetParam().pool_type) {
+ switch (GetPoolType()) {
case test::PoolType::GENERIC: {
ThreadGroupImpl* thread_group_impl =
static_cast<ThreadGroupImpl*>(thread_group_.get());
@@ -165,14 +159,10 @@ class ThreadGroupTest : public testing::TestWithParam<PoolExecutionType>,
}
}
- scoped_refptr<TaskRunner> CreateTaskRunner(
- const TaskTraits& traits = TaskTraits()) {
- return test::CreateTaskRunnerWithExecutionMode(
- GetParam().execution_mode, &mock_pooled_task_runner_delegate_, traits);
- }
+ virtual test::PoolType GetPoolType() const = 0;
Thread service_thread_;
- TaskTracker task_tracker_ = {"Test"};
+ TaskTracker task_tracker_{"Test"};
DelayedTaskManager delayed_task_manager_;
test::MockPooledTaskRunnerDelegate mock_pooled_task_runner_delegate_ = {
task_tracker_.GetTrackedRef(), &delayed_task_manager_};
@@ -187,22 +177,60 @@ class ThreadGroupTest : public testing::TestWithParam<PoolExecutionType>,
TrackedRefFactory<ThreadGroup::Delegate> tracked_ref_factory_;
+ DISALLOW_COPY_AND_ASSIGN(ThreadGroupTestBase);
+};
+
+class ThreadGroupTest : public ThreadGroupTestBase,
+ public testing::WithParamInterface<test::PoolType> {
+ public:
+ ThreadGroupTest() = default;
+
+ test::PoolType GetPoolType() const override { return GetParam(); }
+
+ private:
DISALLOW_COPY_AND_ASSIGN(ThreadGroupTest);
};
+// TODO(etiennep): Audit tests that don't need TaskSourceExecutionMode
+// parameter.
+class ThreadGroupTestAllExecutionModes
+ : public ThreadGroupTestBase,
+ public testing::WithParamInterface<
+ std::tuple<test::PoolType, TaskSourceExecutionMode>> {
+ public:
+ ThreadGroupTestAllExecutionModes() = default;
+
+ test::PoolType GetPoolType() const override {
+ return std::get<0>(GetParam());
+ }
+
+ TaskSourceExecutionMode execution_mode() const {
+ return std::get<1>(GetParam());
+ }
+
+ scoped_refptr<TaskRunner> CreateTaskRunner(
+ const TaskTraits& traits = TaskTraits(ThreadPool())) {
+ return test::CreateTaskRunnerWithExecutionMode(
+ execution_mode(), &mock_pooled_task_runner_delegate_, traits);
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ThreadGroupTestAllExecutionModes);
+};
+
void ShouldNotRun() {
ADD_FAILURE() << "Ran a task that shouldn't run.";
}
} // namespace
-TEST_P(ThreadGroupTest, PostTasks) {
+TEST_P(ThreadGroupTestAllExecutionModes, PostTasks) {
StartThreadGroup();
// Create threads to post tasks.
std::vector<std::unique_ptr<ThreadPostingTasks>> threads_posting_tasks;
for (size_t i = 0; i < kNumThreadsPostingTasks; ++i) {
threads_posting_tasks.push_back(std::make_unique<ThreadPostingTasks>(
- &mock_pooled_task_runner_delegate_, GetParam().execution_mode,
+ &mock_pooled_task_runner_delegate_, execution_mode(),
PostNestedTask::NO));
threads_posting_tasks.back()->Start();
}
@@ -218,14 +246,14 @@ TEST_P(ThreadGroupTest, PostTasks) {
task_tracker_.FlushForTesting();
}
-TEST_P(ThreadGroupTest, NestedPostTasks) {
+TEST_P(ThreadGroupTestAllExecutionModes, NestedPostTasks) {
StartThreadGroup();
// Create threads to post tasks. Each task posted by these threads will post
// another task when it runs.
std::vector<std::unique_ptr<ThreadPostingTasks>> threads_posting_tasks;
for (size_t i = 0; i < kNumThreadsPostingTasks; ++i) {
threads_posting_tasks.push_back(std::make_unique<ThreadPostingTasks>(
- &mock_pooled_task_runner_delegate_, GetParam().execution_mode,
+ &mock_pooled_task_runner_delegate_, execution_mode(),
PostNestedTask::YES));
threads_posting_tasks.back()->Start();
}
@@ -242,28 +270,19 @@ TEST_P(ThreadGroupTest, NestedPostTasks) {
}
// Verify that a Task can't be posted after shutdown.
-TEST_P(ThreadGroupTest, PostTaskAfterShutdown) {
+TEST_P(ThreadGroupTestAllExecutionModes, PostTaskAfterShutdown) {
StartThreadGroup();
auto task_runner = CreateTaskRunner();
test::ShutdownTaskTracker(&task_tracker_);
EXPECT_FALSE(task_runner->PostTask(FROM_HERE, BindOnce(&ShouldNotRun)));
}
-// Verify that posting tasks after the thread group was destroyed fails but
-// doesn't crash.
-TEST_P(ThreadGroupTest, PostAfterDestroy) {
- StartThreadGroup();
- auto task_runner = CreateTaskRunner();
- EXPECT_TRUE(task_runner->PostTask(FROM_HERE, DoNothing()));
- test::ShutdownTaskTracker(&task_tracker_);
- thread_group_->JoinForTesting();
- thread_group_.reset();
- EXPECT_FALSE(task_runner->PostTask(FROM_HERE, BindOnce(&ShouldNotRun)));
-}
-
// Verify that a Task runs shortly after its delay expires.
-TEST_P(ThreadGroupTest, PostDelayedTask) {
+TEST_P(ThreadGroupTestAllExecutionModes, PostDelayedTask) {
StartThreadGroup();
+ // kJob doesn't support delays.
+ if (execution_mode() == TaskSourceExecutionMode::kJob)
+ return;
WaitableEvent task_ran(WaitableEvent::ResetPolicy::AUTOMATIC,
WaitableEvent::InitialState::NOT_SIGNALED);
@@ -285,12 +304,12 @@ TEST_P(ThreadGroupTest, PostDelayedTask) {
// Wait until the task runs.
task_ran.Wait();
- // Expect the task to run after its delay expires, but no more than 250
- // ms after that.
+ // Expect the task to run after its delay expires, but no more than a
+ // reasonable amount of time after that (overloaded bots can be slow sometimes
+ // so give it 10X flexibility).
const TimeDelta actual_delay = TimeTicks::Now() - start_time;
EXPECT_GE(actual_delay, TestTimeouts::tiny_timeout());
- EXPECT_LT(actual_delay,
- TimeDelta::FromMilliseconds(250) + TestTimeouts::tiny_timeout());
+ EXPECT_LT(actual_delay, 10 * TestTimeouts::tiny_timeout());
}
// Verify that the RunsTasksInCurrentSequence() method of a SEQUENCED TaskRunner
@@ -298,11 +317,11 @@ TEST_P(ThreadGroupTest, PostDelayedTask) {
// Tests that use TestTaskFactory already verify that
// RunsTasksInCurrentSequence() returns true when appropriate so this method
// complements it to get full coverage of that method.
-TEST_P(ThreadGroupTest, SequencedRunsTasksInCurrentSequence) {
+TEST_P(ThreadGroupTestAllExecutionModes, SequencedRunsTasksInCurrentSequence) {
StartThreadGroup();
auto task_runner = CreateTaskRunner();
- auto sequenced_task_runner = test::CreateSequencedTaskRunnerWithTraits(
- TaskTraits(), &mock_pooled_task_runner_delegate_);
+ auto sequenced_task_runner = test::CreateSequencedTaskRunner(
+ TaskTraits(ThreadPool()), &mock_pooled_task_runner_delegate_);
WaitableEvent task_ran;
task_runner->PostTask(
@@ -318,7 +337,7 @@ TEST_P(ThreadGroupTest, SequencedRunsTasksInCurrentSequence) {
}
// Verify that tasks posted before Start run after Start.
-TEST_P(ThreadGroupTest, PostBeforeStart) {
+TEST_P(ThreadGroupTestAllExecutionModes, PostBeforeStart) {
WaitableEvent task_1_running;
WaitableEvent task_2_running;
@@ -345,11 +364,13 @@ TEST_P(ThreadGroupTest, PostBeforeStart) {
}
// Verify that tasks only run when allowed by the CanRunPolicy.
-TEST_P(ThreadGroupTest, CanRunPolicyBasic) {
+TEST_P(ThreadGroupTestAllExecutionModes, CanRunPolicyBasic) {
StartThreadGroup();
test::TestCanRunPolicyBasic(
thread_group_.get(),
- [this](TaskPriority priority) { return CreateTaskRunner({priority}); },
+ [this](TaskPriority priority) {
+ return CreateTaskRunner({ThreadPool(), priority});
+ },
&task_tracker_);
}
@@ -357,22 +378,46 @@ TEST_P(ThreadGroupTest, CanRunPolicyUpdatedBeforeRun) {
StartThreadGroup();
// This test only works with SequencedTaskRunner become it assumes
// ordered execution of 2 posted tasks.
- if (GetParam().execution_mode != TaskSourceExecutionMode::kSequenced)
- return;
test::TestCanRunPolicyChangedBeforeRun(
thread_group_.get(),
- [this](TaskPriority priority) { return CreateTaskRunner({priority}); },
+ [this](TaskPriority priority) {
+ return test::CreateSequencedTaskRunner(
+ {ThreadPool(), priority}, &mock_pooled_task_runner_delegate_);
+ },
&task_tracker_);
}
-TEST_P(ThreadGroupTest, CanRunPolicyLoad) {
+TEST_P(ThreadGroupTestAllExecutionModes, CanRunPolicyLoad) {
StartThreadGroup();
test::TestCanRunPolicyLoad(
thread_group_.get(),
- [this](TaskPriority priority) { return CreateTaskRunner({priority}); },
+ [this](TaskPriority priority) {
+ return CreateTaskRunner({ThreadPool(), priority});
+ },
&task_tracker_);
}
+// Verifies that ShouldYield() returns true for a priority that is not allowed
+// to run by the CanRunPolicy.
+TEST_P(ThreadGroupTest, CanRunPolicyShouldYield) {
+ StartThreadGroup();
+
+ task_tracker_.SetCanRunPolicy(CanRunPolicy::kNone);
+ thread_group_->DidUpdateCanRunPolicy();
+ EXPECT_TRUE(thread_group_->ShouldYield(TaskPriority::BEST_EFFORT));
+ EXPECT_TRUE(thread_group_->ShouldYield(TaskPriority::USER_VISIBLE));
+
+ task_tracker_.SetCanRunPolicy(CanRunPolicy::kForegroundOnly);
+ thread_group_->DidUpdateCanRunPolicy();
+ EXPECT_TRUE(thread_group_->ShouldYield(TaskPriority::BEST_EFFORT));
+ EXPECT_FALSE(thread_group_->ShouldYield(TaskPriority::USER_VISIBLE));
+
+ task_tracker_.SetCanRunPolicy(CanRunPolicy::kAll);
+ thread_group_->DidUpdateCanRunPolicy();
+ EXPECT_FALSE(thread_group_->ShouldYield(TaskPriority::BEST_EFFORT));
+ EXPECT_FALSE(thread_group_->ShouldYield(TaskPriority::USER_VISIBLE));
+}
+
// Verify that the maximum number of BEST_EFFORT tasks that can run concurrently
// in a thread group does not affect Sequences with a priority that was
// increased from BEST_EFFORT to USER_BLOCKING.
@@ -389,7 +434,7 @@ TEST_P(ThreadGroupTest, UpdatePriorityBestEffortToUserBlocking) {
for (size_t i = 0; i < kMaxTasks; ++i) {
task_runners.push_back(MakeRefCounted<PooledSequencedTaskRunner>(
- TaskTraits(TaskPriority::BEST_EFFORT),
+ TaskTraits(ThreadPool(), TaskPriority::BEST_EFFORT),
&mock_pooled_task_runner_delegate_));
task_runners.back()->PostTask(
FROM_HERE, BindLambdaForTesting([&]() {
@@ -435,11 +480,11 @@ TEST_P(ThreadGroupTest, UpdatePriorityBestEffortToUserBlocking) {
}
// Regression test for crbug.com/955953.
-TEST_P(ThreadGroupTest, ScopedBlockingCallTwice) {
+TEST_P(ThreadGroupTestAllExecutionModes, ScopedBlockingCallTwice) {
StartThreadGroup();
auto task_runner = test::CreateTaskRunnerWithExecutionMode(
- GetParam().execution_mode, &mock_pooled_task_runner_delegate_,
- {MayBlock()});
+ execution_mode(), &mock_pooled_task_runner_delegate_,
+ {ThreadPool(), MayBlock()});
WaitableEvent task_ran;
task_runner->PostTask(FROM_HERE,
@@ -460,10 +505,10 @@ TEST_P(ThreadGroupTest, ScopedBlockingCallTwice) {
}
#if defined(OS_WIN)
-TEST_P(ThreadGroupTest, COMMTAWorkerEnvironment) {
+TEST_P(ThreadGroupTestAllExecutionModes, COMMTAWorkerEnvironment) {
StartThreadGroup(ThreadGroup::WorkerEnvironment::COM_MTA);
auto task_runner = test::CreateTaskRunnerWithExecutionMode(
- GetParam().execution_mode, &mock_pooled_task_runner_delegate_);
+ execution_mode(), &mock_pooled_task_runner_delegate_);
WaitableEvent task_ran;
task_runner->PostTask(
@@ -476,10 +521,10 @@ TEST_P(ThreadGroupTest, COMMTAWorkerEnvironment) {
task_ran.Wait();
}
-TEST_P(ThreadGroupTest, COMSTAWorkerEnvironment) {
+TEST_P(ThreadGroupTestAllExecutionModes, COMSTAWorkerEnvironment) {
StartThreadGroup(ThreadGroup::WorkerEnvironment::COM_STA);
auto task_runner = test::CreateTaskRunnerWithExecutionMode(
- GetParam().execution_mode, &mock_pooled_task_runner_delegate_);
+ execution_mode(), &mock_pooled_task_runner_delegate_);
WaitableEvent task_ran;
task_runner->PostTask(
@@ -498,10 +543,10 @@ TEST_P(ThreadGroupTest, COMSTAWorkerEnvironment) {
task_ran.Wait();
}
-TEST_P(ThreadGroupTest, NoWorkerEnvironment) {
+TEST_P(ThreadGroupTestAllExecutionModes, NoWorkerEnvironment) {
StartThreadGroup(ThreadGroup::WorkerEnvironment::NONE);
auto task_runner = test::CreateTaskRunnerWithExecutionMode(
- GetParam().execution_mode, &mock_pooled_task_runner_delegate_);
+ execution_mode(), &mock_pooled_task_runner_delegate_);
WaitableEvent task_ran;
task_runner->PostTask(
@@ -515,28 +560,162 @@ TEST_P(ThreadGroupTest, NoWorkerEnvironment) {
}
#endif
-INSTANTIATE_TEST_SUITE_P(GenericParallel,
- ThreadGroupTest,
- ::testing::Values(PoolExecutionType{
- test::PoolType::GENERIC,
- TaskSourceExecutionMode::kParallel}));
-INSTANTIATE_TEST_SUITE_P(GenericSequenced,
+// Verifies that ShouldYield() returns false when there is no pending task.
+TEST_P(ThreadGroupTest, ShouldYieldSingleTask) {
+ StartThreadGroup();
+
+ test::CreateTaskRunner(TaskPriority::USER_BLOCKING,
+ &mock_pooled_task_runner_delegate_)
+ ->PostTask(
+ FROM_HERE, BindLambdaForTesting([&]() {
+ EXPECT_FALSE(thread_group_->ShouldYield(TaskPriority::BEST_EFFORT));
+ EXPECT_FALSE(
+ thread_group_->ShouldYield(TaskPriority::USER_VISIBLE));
+ EXPECT_FALSE(
+ thread_group_->ShouldYield(TaskPriority::USER_VISIBLE));
+ }));
+
+ task_tracker_.FlushForTesting();
+}
+
+// Verify that tasks from a JobTaskSource run at the intended concurrency.
+TEST_P(ThreadGroupTest, ScheduleJobTaskSource) {
+ StartThreadGroup();
+
+ WaitableEvent threads_running;
+ WaitableEvent threads_continue;
+
+ RepeatingClosure threads_running_barrier = BarrierClosure(
+ kMaxTasks,
+ BindOnce(&WaitableEvent::Signal, Unretained(&threads_running)));
+
+ auto task_source = MakeRefCounted<test::MockJobTaskSource>(
+ FROM_HERE,
+ BindLambdaForTesting([&threads_running_barrier, &threads_continue]() {
+ threads_running_barrier.Run();
+ test::WaitWithoutBlockingObserver(&threads_continue);
+ }),
+ TaskTraits(), /* num_tasks_to_run */ kMaxTasks,
+ /* max_concurrency */ kMaxTasks);
+
+ auto registered_task_source =
+ task_tracker_.WillQueueTaskSource(std::move(task_source));
+ EXPECT_TRUE(registered_task_source);
+ thread_group_->PushTaskSourceAndWakeUpWorkers(
+ TransactionWithRegisteredTaskSource::FromTaskSource(
+ std::move(registered_task_source)));
+
+ threads_running.Wait();
+ threads_continue.Signal();
+
+ // Flush the task tracker to be sure that no local variables are accessed by
+ // tasks after the end of the scope.
+ task_tracker_.FlushForTesting();
+}
+
+// Verify that the maximum number of BEST_EFFORT tasks that can run concurrently
+// in a thread group does not affect JobTaskSource with a priority that was
+// increased from BEST_EFFORT to USER_BLOCKING.
+TEST_P(ThreadGroupTest, JobTaskSourceUpdatePriority) {
+ StartThreadGroup();
+
+ CheckedLock num_tasks_running_lock;
+ std::unique_ptr<ConditionVariable> num_tasks_running_cv =
+ num_tasks_running_lock.CreateConditionVariable();
+ size_t num_tasks_running = 0;
+
+ auto task_source = base::MakeRefCounted<test::MockJobTaskSource>(
+ FROM_HERE, BindLambdaForTesting([&]() {
+ // Increment the number of tasks running.
+ {
+ CheckedAutoLock auto_lock(num_tasks_running_lock);
+ ++num_tasks_running;
+ }
+ num_tasks_running_cv->Broadcast();
+
+ // Wait until all posted tasks are running.
+ CheckedAutoLock auto_lock(num_tasks_running_lock);
+ while (num_tasks_running < kMaxTasks) {
+ ScopedClearBlockingObserverForTesting clear_blocking_observer;
+ ScopedAllowBaseSyncPrimitivesForTesting allow_base_sync_primitives;
+ num_tasks_running_cv->Wait();
+ }
+ }),
+ TaskTraits(TaskPriority::BEST_EFFORT), /* num_tasks_to_run */ kMaxTasks,
+ /* max_concurrency */ kMaxTasks);
+
+ auto registered_task_source = task_tracker_.WillQueueTaskSource(task_source);
+ EXPECT_TRUE(registered_task_source);
+ thread_group_->PushTaskSourceAndWakeUpWorkers(
+ TransactionWithRegisteredTaskSource::FromTaskSource(
+ std::move(registered_task_source)));
+
+ // Wait until |kMaxBestEffort| tasks start running.
+ {
+ CheckedAutoLock auto_lock(num_tasks_running_lock);
+ while (num_tasks_running < kMaxBestEffortTasks)
+ num_tasks_running_cv->Wait();
+ }
+
+ // Update the priority to USER_BLOCKING.
+ auto transaction = task_source->BeginTransaction();
+ transaction.UpdatePriority(TaskPriority::USER_BLOCKING);
+ thread_group_->UpdateSortKey(
+ {std::move(task_source), std::move(transaction)});
+
+ // Wait until all posted tasks start running. This should not block forever,
+ // even in a thread group that enforces a maximum number of concurrent
+ // BEST_EFFORT tasks lower than |kMaxTasks|.
+ static_assert(kMaxBestEffortTasks < kMaxTasks, "");
+ {
+ CheckedAutoLock auto_lock(num_tasks_running_lock);
+ while (num_tasks_running < kMaxTasks)
+ num_tasks_running_cv->Wait();
+ }
+
+ // Flush the task tracker to be sure that no local variables are accessed by
+ // tasks after the end of the scope.
+ task_tracker_.FlushForTesting();
+}
+
+INSTANTIATE_TEST_SUITE_P(Generic,
ThreadGroupTest,
- ::testing::Values(PoolExecutionType{
- test::PoolType::GENERIC,
- TaskSourceExecutionMode::kSequenced}));
+ ::testing::Values(test::PoolType::GENERIC));
+INSTANTIATE_TEST_SUITE_P(
+ GenericParallel,
+ ThreadGroupTestAllExecutionModes,
+ ::testing::Combine(::testing::Values(test::PoolType::GENERIC),
+ ::testing::Values(TaskSourceExecutionMode::kParallel)));
+INSTANTIATE_TEST_SUITE_P(
+ GenericSequenced,
+ ThreadGroupTestAllExecutionModes,
+ ::testing::Combine(::testing::Values(test::PoolType::GENERIC),
+ ::testing::Values(TaskSourceExecutionMode::kSequenced)));
+INSTANTIATE_TEST_SUITE_P(
+ GenericJob,
+ ThreadGroupTestAllExecutionModes,
+ ::testing::Combine(::testing::Values(test::PoolType::GENERIC),
+ ::testing::Values(TaskSourceExecutionMode::kJob)));
#if HAS_NATIVE_THREAD_POOL()
-INSTANTIATE_TEST_SUITE_P(NativeParallel,
- ThreadGroupTest,
- ::testing::Values(PoolExecutionType{
- test::PoolType::NATIVE,
- TaskSourceExecutionMode::kParallel}));
-INSTANTIATE_TEST_SUITE_P(NativeSequenced,
+INSTANTIATE_TEST_SUITE_P(Native,
ThreadGroupTest,
- ::testing::Values(PoolExecutionType{
- test::PoolType::NATIVE,
- TaskSourceExecutionMode::kSequenced}));
+ ::testing::Values(test::PoolType::NATIVE));
+INSTANTIATE_TEST_SUITE_P(
+ NativeParallel,
+ ThreadGroupTestAllExecutionModes,
+ ::testing::Combine(::testing::Values(test::PoolType::NATIVE),
+ ::testing::Values(TaskSourceExecutionMode::kParallel)));
+INSTANTIATE_TEST_SUITE_P(
+ NativeSequenced,
+ ThreadGroupTestAllExecutionModes,
+ ::testing::Combine(::testing::Values(test::PoolType::NATIVE),
+ ::testing::Values(TaskSourceExecutionMode::kSequenced)));
+INSTANTIATE_TEST_SUITE_P(
+ NativeJob,
+ ThreadGroupTestAllExecutionModes,
+ ::testing::Combine(::testing::Values(test::PoolType::NATIVE),
+ ::testing::Values(TaskSourceExecutionMode::kJob)));
#endif
} // namespace internal
diff --git a/chromium/base/task/thread_pool/thread_pool.h b/chromium/base/task/thread_pool/thread_pool.h
index 6da0e954ad6..637f8c75386 100644
--- a/chromium/base/task/thread_pool/thread_pool.h
+++ b/chromium/base/task/thread_pool/thread_pool.h
@@ -59,7 +59,7 @@ class BASE_EXPORT ThreadPoolInstance {
// Place the pool's *foreground* workers in a COM STA. This exists to
// mimic the behavior of SequencedWorkerPool and BrowserThreadImpl that
// ThreadPool has replaced. Tasks that need a COM STA should use
- // CreateCOMSTATaskRunnerWithTraits() instead of
+ // CreateCOMSTATaskRunner() instead of
// Create(Sequenced)TaskRunnerWithTraits() + this init param.
DEPRECATED_COM_STA_IN_FOREGROUND_GROUP,
#endif // defined(OS_WIN)
diff --git a/chromium/base/task/thread_pool/thread_pool_clock.cc b/chromium/base/task/thread_pool/thread_pool_clock.cc
new file mode 100644
index 00000000000..ab54944900c
--- /dev/null
+++ b/chromium/base/task/thread_pool/thread_pool_clock.cc
@@ -0,0 +1,35 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/thread_pool/thread_pool_clock.h"
+
+#include "base/logging.h"
+#include "base/time/tick_clock.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+const TickClock* g_tick_clock = nullptr;
+}
+
+ThreadPoolClock::ThreadPoolClock(const TickClock* tick_clock) {
+ DCHECK(!g_tick_clock);
+ g_tick_clock = tick_clock;
+}
+
+ThreadPoolClock::~ThreadPoolClock() {
+ DCHECK(g_tick_clock);
+ g_tick_clock = nullptr;
+}
+
+// static
+TimeTicks ThreadPoolClock::Now() {
+ // Allow |g_tick_clock| to be null so simple thread_pool/ unit tests don't
+ // need to install one.
+ return g_tick_clock ? g_tick_clock->NowTicks() : TimeTicks::Now();
+}
+
+} // namespace internal
+} // namespace base
diff --git a/chromium/base/task/thread_pool/thread_pool_clock.h b/chromium/base/task/thread_pool/thread_pool_clock.h
new file mode 100644
index 00000000000..452fca81a56
--- /dev/null
+++ b/chromium/base/task/thread_pool/thread_pool_clock.h
@@ -0,0 +1,40 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_THREAD_POOL_THREAD_POOL_CLOCK_H_
+#define BASE_TASK_THREAD_POOL_THREAD_POOL_CLOCK_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/time/time.h"
+
+namespace base {
+
+class TickClock;
+
+namespace internal {
+
+class BASE_EXPORT ThreadPoolClock {
+ public:
+ // |tick_clock| will service ThreadPoolClock::Now() in the scope of this
+ // ThreadPoolClock.
+ ThreadPoolClock(const TickClock* tick_clock);
+ ~ThreadPoolClock();
+
+ // Returns the current TimeTicks::Now(). All calls to TimeTicks::Now() in
+ // base/task/thread_pool should use this or
+ // subtle::TimeTicksNowIgnoringOverride() depending on whether they want to
+ // respect mock time (e.g. delayed tasks) or need real-time timeouts (e.g.
+ // recycling threads). TODO(gab): Make MOCK_TIME always mock TimeTicks::Now()
+ // and get rid of this indirection.
+ static TimeTicks Now();
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ThreadPoolClock);
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_TASK_THREAD_POOL_THREAD_POOL_CLOCK_H_
diff --git a/chromium/base/task/thread_pool/thread_pool_impl.cc b/chromium/base/task/thread_pool/thread_pool_impl.cc
index 79f0bf6073f..e1f51c0f33e 100644
--- a/chromium/base/task/thread_pool/thread_pool_impl.cc
+++ b/chromium/base/task/thread_pool/thread_pool_impl.cc
@@ -27,6 +27,7 @@
#include "base/task/thread_pool/task_source.h"
#include "base/task/thread_pool/thread_group_impl.h"
#include "base/threading/platform_thread.h"
+#include "base/time/default_tick_clock.h"
#include "base/time/time.h"
#if defined(OS_WIN)
@@ -52,8 +53,8 @@ constexpr int kMaxBestEffortTasks = 2;
// Indicates whether BEST_EFFORT tasks are disabled by a command line switch.
bool HasDisableBestEffortTasksSwitch() {
- // The CommandLine might not be initialized if TaskScheduler is initialized
- // in a dynamic library which doesn't have access to argc/argv.
+ // The CommandLine might not be initialized if ThreadPool is initialized in a
+ // dynamic library which doesn't have access to argc/argv.
return CommandLine::InitializedForCurrentProcess() &&
CommandLine::ForCurrentProcess()->HasSwitch(
switches::kDisableBestEffortTasks);
@@ -63,11 +64,14 @@ bool HasDisableBestEffortTasksSwitch() {
ThreadPoolImpl::ThreadPoolImpl(StringPiece histogram_label)
: ThreadPoolImpl(histogram_label,
- std::make_unique<TaskTrackerImpl>(histogram_label)) {}
+ std::make_unique<TaskTrackerImpl>(histogram_label),
+ DefaultTickClock::GetInstance()) {}
ThreadPoolImpl::ThreadPoolImpl(StringPiece histogram_label,
- std::unique_ptr<TaskTrackerImpl> task_tracker)
- : task_tracker_(std::move(task_tracker)),
+ std::unique_ptr<TaskTrackerImpl> task_tracker,
+ const TickClock* tick_clock)
+ : thread_pool_clock_(tick_clock),
+ task_tracker_(std::move(task_tracker)),
service_thread_(std::make_unique<ServiceThread>(
task_tracker_.get(),
BindRepeating(&ThreadPoolImpl::ReportHeartbeatMetrics,
@@ -176,6 +180,11 @@ void ThreadPoolImpl::Start(const ThreadPoolInstance::InitParams& init_params,
#endif
}
+ const base::TimeDelta suggested_reclaim_time =
+ FeatureList::IsEnabled(kUseFiveMinutesThreadReclaimTime)
+ ? base::TimeDelta::FromMinutes(5)
+ : init_params.suggested_reclaim_time;
+
#if HAS_NATIVE_THREAD_POOL()
if (FeatureList::IsEnabled(kUseNativeThreadPool)) {
static_cast<ThreadGroupNative*>(foreground_thread_group_.get())
@@ -190,15 +199,14 @@ void ThreadPoolImpl::Start(const ThreadPoolInstance::InitParams& init_params,
// of best-effort tasks.
static_cast<ThreadGroupImpl*>(foreground_thread_group_.get())
->Start(init_params.max_num_foreground_threads, max_best_effort_tasks,
- init_params.suggested_reclaim_time, service_thread_task_runner,
+ suggested_reclaim_time, service_thread_task_runner,
worker_thread_observer, worker_environment);
}
if (background_thread_group_) {
background_thread_group_->Start(
- max_best_effort_tasks, max_best_effort_tasks,
- init_params.suggested_reclaim_time, service_thread_task_runner,
- worker_thread_observer,
+ max_best_effort_tasks, max_best_effort_tasks, suggested_reclaim_time,
+ service_thread_task_runner, worker_thread_observer,
#if defined(OS_WIN)
// COM STA is a backward-compatibility feature for the foreground thread
// group only.
@@ -212,10 +220,10 @@ void ThreadPoolImpl::Start(const ThreadPoolInstance::InitParams& init_params,
started_ = true;
}
-bool ThreadPoolImpl::PostDelayedTaskWithTraits(const Location& from_here,
- const TaskTraits& traits,
- OnceClosure task,
- TimeDelta delay) {
+bool ThreadPoolImpl::PostDelayedTask(const Location& from_here,
+ const TaskTraits& traits,
+ OnceClosure task,
+ TimeDelta delay) {
// Post |task| as part of a one-off single-task Sequence.
const TaskTraits new_traits = SetUserBlockingPriorityIfNeeded(traits);
return PostTaskWithSequence(
@@ -224,44 +232,51 @@ bool ThreadPoolImpl::PostDelayedTaskWithTraits(const Location& from_here,
TaskSourceExecutionMode::kParallel));
}
-scoped_refptr<TaskRunner> ThreadPoolImpl::CreateTaskRunnerWithTraits(
+scoped_refptr<TaskRunner> ThreadPoolImpl::CreateTaskRunner(
const TaskTraits& traits) {
const TaskTraits new_traits = SetUserBlockingPriorityIfNeeded(traits);
return MakeRefCounted<PooledParallelTaskRunner>(new_traits, this);
}
-scoped_refptr<SequencedTaskRunner>
-ThreadPoolImpl::CreateSequencedTaskRunnerWithTraits(const TaskTraits& traits) {
+scoped_refptr<SequencedTaskRunner> ThreadPoolImpl::CreateSequencedTaskRunner(
+ const TaskTraits& traits) {
const TaskTraits new_traits = SetUserBlockingPriorityIfNeeded(traits);
return MakeRefCounted<PooledSequencedTaskRunner>(new_traits, this);
}
scoped_refptr<SingleThreadTaskRunner>
-ThreadPoolImpl::CreateSingleThreadTaskRunnerWithTraits(
+ThreadPoolImpl::CreateSingleThreadTaskRunner(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode) {
- return single_thread_task_runner_manager_
- .CreateSingleThreadTaskRunnerWithTraits(
- SetUserBlockingPriorityIfNeeded(traits), thread_mode);
+ return single_thread_task_runner_manager_.CreateSingleThreadTaskRunner(
+ SetUserBlockingPriorityIfNeeded(traits), thread_mode);
}
#if defined(OS_WIN)
-scoped_refptr<SingleThreadTaskRunner>
-ThreadPoolImpl::CreateCOMSTATaskRunnerWithTraits(
+scoped_refptr<SingleThreadTaskRunner> ThreadPoolImpl::CreateCOMSTATaskRunner(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode) {
- return single_thread_task_runner_manager_.CreateCOMSTATaskRunnerWithTraits(
+ return single_thread_task_runner_manager_.CreateCOMSTATaskRunner(
SetUserBlockingPriorityIfNeeded(traits), thread_mode);
}
#endif // defined(OS_WIN)
scoped_refptr<UpdateableSequencedTaskRunner>
-ThreadPoolImpl::CreateUpdateableSequencedTaskRunnerWithTraits(
- const TaskTraits& traits) {
+ThreadPoolImpl::CreateUpdateableSequencedTaskRunner(const TaskTraits& traits) {
const TaskTraits new_traits = SetUserBlockingPriorityIfNeeded(traits);
return MakeRefCounted<PooledSequencedTaskRunner>(new_traits, this);
}
+Optional<TimeTicks> ThreadPoolImpl::NextScheduledRunTimeForTesting() const {
+ if (task_tracker_->HasIncompleteTaskSourcesForTesting())
+ return ThreadPoolClock::Now();
+ return delayed_task_manager_.NextScheduledRunTime();
+}
+
+void ThreadPoolImpl::ProcessRipeDelayedTasksForTesting() {
+ delayed_task_manager_.ProcessRipeTasks();
+}
+
int ThreadPoolImpl::GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
const TaskTraits& traits) const {
// This method does not support getting the maximum number of BEST_EFFORT
@@ -274,6 +289,14 @@ int ThreadPoolImpl::GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
void ThreadPoolImpl::Shutdown() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ // Stop() the ServiceThread before triggering shutdown. This ensures that no
+ // more delayed tasks or file descriptor watches will trigger during shutdown
+ // (preventing http://crbug.com/698140). None of these asynchronous tasks
+ // being guaranteed to happen anyways, stopping right away is valid behavior
+ // and avoids the more complex alternative of shutting down the service thread
+ // atomically during TaskTracker shutdown.
+ service_thread_->Stop();
+
task_tracker_->StartShutdown();
// Allow all tasks to run. Done after initiating shutdown to ensure that non-
@@ -335,6 +358,7 @@ bool ThreadPoolImpl::PostTaskWithSequenceNow(Task task,
if (!task_source)
return false;
}
+ task_tracker_->WillPostTaskNow(task, transaction.traits().priority());
transaction.PushTask(std::move(task));
if (task_source) {
const TaskTraits traits = transaction.traits();
@@ -375,6 +399,19 @@ bool ThreadPoolImpl::PostTaskWithSequence(Task task,
return true;
}
+bool ThreadPoolImpl::EnqueueJobTaskSource(
+ scoped_refptr<JobTaskSource> task_source) {
+  auto registered_task_source =
+      task_tracker_->WillQueueTaskSource(task_source);
+  if (!registered_task_source)
+    return false;
+ auto transaction = task_source->BeginTransaction();
+ const TaskTraits traits = transaction.traits();
+ GetThreadGroupForTraits(traits)->PushTaskSourceAndWakeUpWorkers(
+ {std::move(registered_task_source), std::move(transaction)});
+ return true;
+}
+
bool ThreadPoolImpl::IsRunningPoolWithTraits(const TaskTraits& traits) const {
return GetThreadGroupForTraits(traits)->IsBoundToCurrentThread();
}
@@ -436,13 +473,14 @@ void ThreadPoolImpl::UpdateCanRunPolicy() {
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
CanRunPolicy can_run_policy;
- if ((!has_fence_ && !has_best_effort_fence_) ||
+ if ((!has_fence_ && !has_best_effort_fence_ &&
+ !has_disable_best_effort_switch_) ||
task_tracker_->HasShutdownStarted()) {
can_run_policy = CanRunPolicy::kAll;
} else if (has_fence_) {
can_run_policy = CanRunPolicy::kNone;
} else {
- DCHECK(has_best_effort_fence_);
+ DCHECK(has_best_effort_fence_ || has_disable_best_effort_switch_);
can_run_policy = CanRunPolicy::kForegroundOnly;
}
diff --git a/chromium/base/task/thread_pool/thread_pool_impl.h b/chromium/base/task/thread_pool/thread_pool_impl.h
index 5d5e7b1b46a..7c43c86c7c3 100644
--- a/chromium/base/task/thread_pool/thread_pool_impl.h
+++ b/chromium/base/task/thread_pool/thread_pool_impl.h
@@ -14,6 +14,7 @@
#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "base/memory/ref_counted.h"
+#include "base/optional.h"
#include "base/sequence_checker.h"
#include "base/strings/string_piece.h"
#include "base/synchronization/atomic_flag.h"
@@ -29,6 +30,7 @@
#include "base/task/thread_pool/thread_group.h"
#include "base/task/thread_pool/thread_group_impl.h"
#include "base/task/thread_pool/thread_pool.h"
+#include "base/task/thread_pool/thread_pool_clock.h"
#include "base/updateable_sequenced_task_runner.h"
#include "build/build_config.h"
@@ -63,9 +65,11 @@ class BASE_EXPORT ThreadPoolImpl : public ThreadPoolInstance,
//|histogram_label| is used to label histograms, it must not be empty.
explicit ThreadPoolImpl(StringPiece histogram_label);
- // For testing only. Creates a ThreadPoolImpl with a custom TaskTracker.
+ // For testing only. Creates a ThreadPoolImpl with a custom TaskTracker and
+ // TickClock.
ThreadPoolImpl(StringPiece histogram_label,
- std::unique_ptr<TaskTrackerImpl> task_tracker);
+ std::unique_ptr<TaskTrackerImpl> task_tracker,
+ const TickClock* tick_clock);
~ThreadPoolImpl() override;
@@ -82,24 +86,33 @@ class BASE_EXPORT ThreadPoolImpl : public ThreadPoolInstance,
void SetHasBestEffortFence(bool has_best_effort_fence) override;
// TaskExecutor:
- bool PostDelayedTaskWithTraits(const Location& from_here,
- const TaskTraits& traits,
- OnceClosure task,
- TimeDelta delay) override;
- scoped_refptr<TaskRunner> CreateTaskRunnerWithTraits(
+ bool PostDelayedTask(const Location& from_here,
+ const TaskTraits& traits,
+ OnceClosure task,
+ TimeDelta delay) override;
+ scoped_refptr<TaskRunner> CreateTaskRunner(const TaskTraits& traits) override;
+ scoped_refptr<SequencedTaskRunner> CreateSequencedTaskRunner(
const TaskTraits& traits) override;
- scoped_refptr<SequencedTaskRunner> CreateSequencedTaskRunnerWithTraits(
- const TaskTraits& traits) override;
- scoped_refptr<SingleThreadTaskRunner> CreateSingleThreadTaskRunnerWithTraits(
+ scoped_refptr<SingleThreadTaskRunner> CreateSingleThreadTaskRunner(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode) override;
#if defined(OS_WIN)
- scoped_refptr<SingleThreadTaskRunner> CreateCOMSTATaskRunnerWithTraits(
+ scoped_refptr<SingleThreadTaskRunner> CreateCOMSTATaskRunner(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode) override;
#endif // defined(OS_WIN)
scoped_refptr<UpdateableSequencedTaskRunner>
- CreateUpdateableSequencedTaskRunnerWithTraits(const TaskTraits& traits);
+ CreateUpdateableSequencedTaskRunner(const TaskTraits& traits);
+
+ // Returns the TimeTicks of the next task scheduled on ThreadPool (Now() if
+ // immediate, nullopt if none). This is thread-safe, i.e., it's safe if tasks
+ // are being posted in parallel with this call but such a situation obviously
+ // results in a race as to whether this call will see the new tasks in time.
+ Optional<TimeTicks> NextScheduledRunTimeForTesting() const;
+
+ // Forces ripe delayed tasks to be posted (e.g. when time is mocked and
+ // advances faster than the real-time delay on ServiceThread).
+ void ProcessRipeDelayedTasksForTesting();
private:
// Invoked after |has_fence_| or |has_best_effort_fence_| is updated. Sets the
@@ -125,10 +138,15 @@ class BASE_EXPORT ThreadPoolImpl : public ThreadPoolInstance,
// PooledTaskRunnerDelegate:
bool PostTaskWithSequence(Task task,
scoped_refptr<Sequence> sequence) override;
+ bool EnqueueJobTaskSource(scoped_refptr<JobTaskSource> task_source) override;
bool IsRunningPoolWithTraits(const TaskTraits& traits) const override;
void UpdatePriority(scoped_refptr<TaskSource> task_source,
TaskPriority priority) override;
+ // The clock instance used by all classes in base/task/thread_pool. Must
+ // outlive everything else to ensure no discrepancy in Now().
+ ThreadPoolClock thread_pool_clock_;
+
const std::unique_ptr<TaskTrackerImpl> task_tracker_;
std::unique_ptr<Thread> service_thread_;
DelayedTaskManager delayed_task_manager_;
diff --git a/chromium/base/task/thread_pool/thread_pool_impl_unittest.cc b/chromium/base/task/thread_pool/thread_pool_impl_unittest.cc
index d37b2c6623c..7df882c4bb4 100644
--- a/chromium/base/task/thread_pool/thread_pool_impl_unittest.cc
+++ b/chromium/base/task/thread_pool/thread_pool_impl_unittest.cc
@@ -161,7 +161,7 @@ void VerifyOrderAndTaskEnvironmentAndSignalEvent(
event->Signal();
}
-scoped_refptr<TaskRunner> CreateTaskRunnerWithTraitsAndExecutionMode(
+scoped_refptr<TaskRunner> CreateTaskRunnerAndExecutionMode(
ThreadPoolImpl* thread_pool,
const TaskTraits& traits,
TaskSourceExecutionMode execution_mode,
@@ -169,13 +169,15 @@ scoped_refptr<TaskRunner> CreateTaskRunnerWithTraitsAndExecutionMode(
SingleThreadTaskRunnerThreadMode::SHARED) {
switch (execution_mode) {
case TaskSourceExecutionMode::kParallel:
- return thread_pool->CreateTaskRunnerWithTraits(traits);
+ return thread_pool->CreateTaskRunner(traits);
case TaskSourceExecutionMode::kSequenced:
- return thread_pool->CreateSequencedTaskRunnerWithTraits(traits);
+ return thread_pool->CreateSequencedTaskRunner(traits);
case TaskSourceExecutionMode::kSingleThread: {
- return thread_pool->CreateSingleThreadTaskRunnerWithTraits(
+ return thread_pool->CreateSingleThreadTaskRunner(
traits, default_single_thread_task_runner_mode);
}
+ case TaskSourceExecutionMode::kJob:
+ break;
}
ADD_FAILURE() << "Unknown ExecutionMode";
return nullptr;
@@ -192,9 +194,9 @@ class ThreadPostingTasks : public SimpleThread {
: SimpleThread("ThreadPostingTasks"),
traits_(traits),
pool_type_(pool_type),
- factory_(CreateTaskRunnerWithTraitsAndExecutionMode(thread_pool,
- traits,
- execution_mode),
+ factory_(CreateTaskRunnerAndExecutionMode(thread_pool,
+ traits,
+ execution_mode),
execution_mode) {}
void WaitForAllTasksToRun() { factory_.WaitForAllTasksToRun(); }
@@ -234,10 +236,11 @@ std::vector<TraitsExecutionModePair> GetTraitsExecutionModePair() {
priority_index <= static_cast<size_t>(TaskPriority::HIGHEST);
++priority_index) {
const TaskPriority priority = static_cast<TaskPriority>(priority_index);
- params.push_back(
- TraitsExecutionModePair({priority, thread_policy}, execution_mode));
params.push_back(TraitsExecutionModePair(
- {priority, thread_policy, MayBlock()}, execution_mode));
+ {ThreadPool(), priority, thread_policy}, execution_mode));
+ params.push_back(TraitsExecutionModePair(
+ {ThreadPool(), priority, thread_policy, MayBlock()},
+ execution_mode));
}
}
}
@@ -247,7 +250,8 @@ std::vector<TraitsExecutionModePair> GetTraitsExecutionModePair() {
class ThreadPoolImplTestBase : public testing::Test {
public:
- ThreadPoolImplTestBase() : thread_pool_("Test") {}
+ ThreadPoolImplTestBase()
+ : thread_pool_(std::make_unique<ThreadPoolImpl>("Test")) {}
void EnableAllTasksUserBlocking() {
should_enable_all_tasks_user_blocking_ = true;
@@ -266,21 +270,23 @@ class ThreadPoolImplTestBase : public testing::Test {
ThreadPoolInstance::InitParams init_params(max_num_foreground_threads);
init_params.suggested_reclaim_time = reclaim_time;
- thread_pool_.Start(init_params, worker_thread_observer_);
+ thread_pool_->Start(init_params, worker_thread_observer_);
}
void TearDown() override {
if (did_tear_down_)
return;
- thread_pool_.FlushForTesting();
- thread_pool_.JoinForTesting();
+ if (thread_pool_) {
+ thread_pool_->FlushForTesting();
+ thread_pool_->JoinForTesting();
+ }
did_tear_down_ = true;
}
virtual test::PoolType GetPoolType() const = 0;
- ThreadPoolImpl thread_pool_;
+ std::unique_ptr<ThreadPoolImpl> thread_pool_;
private:
void SetupFeatures() {
@@ -338,14 +344,13 @@ class ThreadPoolImplTestAllTraitsExecutionModes
} // namespace
-// Verifies that a Task posted via PostDelayedTaskWithTraits with parameterized
-// TaskTraits and no delay runs on a thread with the expected priority and I/O
+// Verifies that a Task posted via PostDelayedTask with parameterized TaskTraits
+// and no delay runs on a thread with the expected priority and I/O
// restrictions. The ExecutionMode parameter is ignored by this test.
-TEST_P(ThreadPoolImplTestAllTraitsExecutionModes,
- PostDelayedTaskWithTraitsNoDelay) {
+TEST_P(ThreadPoolImplTestAllTraitsExecutionModes, PostDelayedTaskNoDelay) {
StartThreadPool();
WaitableEvent task_ran;
- thread_pool_.PostDelayedTaskWithTraits(
+ thread_pool_->PostDelayedTask(
FROM_HERE, GetTraits(),
BindOnce(&VerifyTaskEnvironmentAndSignalEvent, GetTraits(), GetPoolType(),
Unretained(&task_ran)),
@@ -353,15 +358,14 @@ TEST_P(ThreadPoolImplTestAllTraitsExecutionModes,
task_ran.Wait();
}
-// Verifies that a Task posted via PostDelayedTaskWithTraits with parameterized
+// Verifies that a Task posted via PostDelayedTask with parameterized
// TaskTraits and a non-zero delay runs on a thread with the expected priority
// and I/O restrictions after the delay expires. The ExecutionMode parameter is
// ignored by this test.
-TEST_P(ThreadPoolImplTestAllTraitsExecutionModes,
- PostDelayedTaskWithTraitsWithDelay) {
+TEST_P(ThreadPoolImplTestAllTraitsExecutionModes, PostDelayedTaskWithDelay) {
StartThreadPool();
WaitableEvent task_ran;
- thread_pool_.PostDelayedTaskWithTraits(
+ thread_pool_->PostDelayedTask(
FROM_HERE, GetTraits(),
BindOnce(&VerifyTimeAndTaskEnvironmentAndSignalEvent, GetTraits(),
GetPoolType(), TimeTicks::Now() + TestTimeouts::tiny_timeout(),
@@ -376,8 +380,8 @@ TEST_P(ThreadPoolImplTestAllTraitsExecutionModes,
TEST_P(ThreadPoolImplTestAllTraitsExecutionModes, PostTasksViaTaskRunner) {
StartThreadPool();
test::TestTaskFactory factory(
- CreateTaskRunnerWithTraitsAndExecutionMode(&thread_pool_, GetTraits(),
- GetExecutionMode()),
+ CreateTaskRunnerAndExecutionMode(thread_pool_.get(), GetTraits(),
+ GetExecutionMode()),
GetExecutionMode());
EXPECT_FALSE(factory.task_runner()->RunsTasksInCurrentSequence());
@@ -391,12 +395,12 @@ TEST_P(ThreadPoolImplTestAllTraitsExecutionModes, PostTasksViaTaskRunner) {
factory.WaitForAllTasksToRun();
}
-// Verifies that a task posted via PostDelayedTaskWithTraits without a delay
-// doesn't run before Start() is called.
+// Verifies that a task posted via PostDelayedTask without a delay doesn't run
+// before Start() is called.
TEST_P(ThreadPoolImplTestAllTraitsExecutionModes,
- PostDelayedTaskWithTraitsNoDelayBeforeStart) {
+ PostDelayedTaskNoDelayBeforeStart) {
WaitableEvent task_running;
- thread_pool_.PostDelayedTaskWithTraits(
+ thread_pool_->PostDelayedTask(
FROM_HERE, GetTraits(),
BindOnce(&VerifyTaskEnvironmentAndSignalEvent, GetTraits(), GetPoolType(),
Unretained(&task_running)),
@@ -413,12 +417,12 @@ TEST_P(ThreadPoolImplTestAllTraitsExecutionModes,
task_running.Wait();
}
-// Verifies that a task posted via PostDelayedTaskWithTraits with a delay
-// doesn't run before Start() is called.
+// Verifies that a task posted via PostDelayedTask with a delay doesn't run
+// before Start() is called.
TEST_P(ThreadPoolImplTestAllTraitsExecutionModes,
- PostDelayedTaskWithTraitsWithDelayBeforeStart) {
+ PostDelayedTaskWithDelayBeforeStart) {
WaitableEvent task_running;
- thread_pool_.PostDelayedTaskWithTraits(
+ thread_pool_->PostDelayedTask(
FROM_HERE, GetTraits(),
BindOnce(&VerifyTimeAndTaskEnvironmentAndSignalEvent, GetTraits(),
GetPoolType(), TimeTicks::Now() + TestTimeouts::tiny_timeout(),
@@ -441,8 +445,8 @@ TEST_P(ThreadPoolImplTestAllTraitsExecutionModes,
TEST_P(ThreadPoolImplTestAllTraitsExecutionModes,
PostTaskViaTaskRunnerBeforeStart) {
WaitableEvent task_running;
- CreateTaskRunnerWithTraitsAndExecutionMode(&thread_pool_, GetTraits(),
- GetExecutionMode())
+ CreateTaskRunnerAndExecutionMode(thread_pool_.get(), GetTraits(),
+ GetExecutionMode())
->PostTask(FROM_HERE,
BindOnce(&VerifyTaskEnvironmentAndSignalEvent, GetTraits(),
GetPoolType(), Unretained(&task_running)));
@@ -460,6 +464,21 @@ TEST_P(ThreadPoolImplTestAllTraitsExecutionModes,
task_running.Wait();
}
+// Verify that posting tasks after the thread pool was destroyed fails but
+// doesn't crash.
+TEST_P(ThreadPoolImplTestAllTraitsExecutionModes, PostTaskAfterDestroy) {
+ StartThreadPool();
+
+ auto task_runner = CreateTaskRunnerAndExecutionMode(
+ thread_pool_.get(), GetTraits(), GetExecutionMode());
+ EXPECT_TRUE(task_runner->PostTask(FROM_HERE, DoNothing()));
+ thread_pool_->JoinForTesting();
+ thread_pool_.reset();
+
+ EXPECT_FALSE(
+ task_runner->PostTask(FROM_HERE, MakeExpectedNotRunClosure(FROM_HERE)));
+}
+
// Verify that all tasks posted to a TaskRunner after Start() run in a
// USER_BLOCKING environment when the AllTasksUserBlocking variation param of
// the BrowserScheduler experiment is true.
@@ -472,17 +491,17 @@ TEST_P(ThreadPoolImplTestAllTraitsExecutionModes,
StartThreadPool();
WaitableEvent task_running;
- CreateTaskRunnerWithTraitsAndExecutionMode(&thread_pool_, GetTraits(),
- GetExecutionMode())
+ CreateTaskRunnerAndExecutionMode(thread_pool_.get(), GetTraits(),
+ GetExecutionMode())
->PostTask(FROM_HERE, BindOnce(&VerifyTaskEnvironmentAndSignalEvent,
user_blocking_traits, GetPoolType(),
Unretained(&task_running)));
task_running.Wait();
}
-// Verify that all tasks posted via PostDelayedTaskWithTraits() after Start()
-// run in a USER_BLOCKING environment when the AllTasksUserBlocking variation
-// param of the BrowserScheduler experiment is true.
+// Verify that all tasks posted via PostDelayedTask() after Start() run in a
+// USER_BLOCKING environment when the AllTasksUserBlocking variation param of
+// the BrowserScheduler experiment is true.
TEST_P(ThreadPoolImplTestAllTraitsExecutionModes, AllTasksAreUserBlocking) {
TaskTraits user_blocking_traits = GetTraits();
user_blocking_traits.UpdatePriority(TaskPriority::USER_BLOCKING);
@@ -492,7 +511,7 @@ TEST_P(ThreadPoolImplTestAllTraitsExecutionModes, AllTasksAreUserBlocking) {
WaitableEvent task_running;
// Ignore |params.execution_mode| in this test.
- thread_pool_.PostDelayedTaskWithTraits(
+ thread_pool_->PostDelayedTask(
FROM_HERE, GetTraits(),
BindOnce(&VerifyTaskEnvironmentAndSignalEvent, user_blocking_traits,
GetPoolType(), Unretained(&task_running)),
@@ -506,14 +525,14 @@ TEST_P(ThreadPoolImplTestAllTraitsExecutionModes, FlushAsyncForTestingSimple) {
StartThreadPool();
WaitableEvent unblock_task;
- CreateTaskRunnerWithTraitsAndExecutionMode(
- &thread_pool_, GetTraits(), GetExecutionMode(),
- SingleThreadTaskRunnerThreadMode::DEDICATED)
+ CreateTaskRunnerAndExecutionMode(thread_pool_.get(), GetTraits(),
+ GetExecutionMode(),
+ SingleThreadTaskRunnerThreadMode::DEDICATED)
->PostTask(FROM_HERE, BindOnce(&test::WaitWithoutBlockingObserver,
Unretained(&unblock_task)));
WaitableEvent flush_event;
- thread_pool_.FlushAsyncForTesting(
+ thread_pool_->FlushAsyncForTesting(
BindOnce(&WaitableEvent::Signal, Unretained(&flush_event)));
PlatformThread::Sleep(TestTimeouts::tiny_timeout());
EXPECT_FALSE(flush_event.IsSignaled());
@@ -529,10 +548,10 @@ TEST_P(ThreadPoolImplTestAllTraitsExecutionModes, SetHasFence) {
AtomicFlag can_run;
WaitableEvent did_run;
- thread_pool_.SetHasFence(true);
+ thread_pool_->SetHasFence(true);
- CreateTaskRunnerWithTraitsAndExecutionMode(&thread_pool_, GetTraits(),
- GetExecutionMode())
+ CreateTaskRunnerAndExecutionMode(thread_pool_.get(), GetTraits(),
+ GetExecutionMode())
->PostTask(FROM_HERE, BindLambdaForTesting([&]() {
EXPECT_TRUE(can_run.IsSet());
did_run.Signal();
@@ -541,20 +560,20 @@ TEST_P(ThreadPoolImplTestAllTraitsExecutionModes, SetHasFence) {
PlatformThread::Sleep(TestTimeouts::tiny_timeout());
can_run.Set();
- thread_pool_.SetHasFence(false);
+ thread_pool_->SetHasFence(false);
did_run.Wait();
}
// Verifies that a call to SetHasFence(true) before Start() is honored.
TEST_P(ThreadPoolImplTestAllTraitsExecutionModes, SetHasFenceBeforeStart) {
- thread_pool_.SetHasFence(true);
+ thread_pool_->SetHasFence(true);
StartThreadPool();
AtomicFlag can_run;
WaitableEvent did_run;
- CreateTaskRunnerWithTraitsAndExecutionMode(&thread_pool_, GetTraits(),
- GetExecutionMode())
+ CreateTaskRunnerAndExecutionMode(thread_pool_.get(), GetTraits(),
+ GetExecutionMode())
->PostTask(FROM_HERE, BindLambdaForTesting([&]() {
EXPECT_TRUE(can_run.IsSet());
did_run.Signal();
@@ -563,7 +582,7 @@ TEST_P(ThreadPoolImplTestAllTraitsExecutionModes, SetHasFenceBeforeStart) {
PlatformThread::Sleep(TestTimeouts::tiny_timeout());
can_run.Set();
- thread_pool_.SetHasFence(false);
+ thread_pool_->SetHasFence(false);
did_run.Wait();
}
@@ -574,10 +593,10 @@ TEST_P(ThreadPoolImplTestAllTraitsExecutionModes, SetHasBestEffortFence) {
AtomicFlag can_run;
WaitableEvent did_run;
- thread_pool_.SetHasBestEffortFence(true);
+ thread_pool_->SetHasBestEffortFence(true);
- CreateTaskRunnerWithTraitsAndExecutionMode(&thread_pool_, GetTraits(),
- GetExecutionMode())
+ CreateTaskRunnerAndExecutionMode(thread_pool_.get(), GetTraits(),
+ GetExecutionMode())
->PostTask(FROM_HERE, BindLambdaForTesting([&]() {
if (GetTraits().priority() == TaskPriority::BEST_EFFORT)
EXPECT_TRUE(can_run.IsSet());
@@ -587,7 +606,7 @@ TEST_P(ThreadPoolImplTestAllTraitsExecutionModes, SetHasBestEffortFence) {
PlatformThread::Sleep(TestTimeouts::tiny_timeout());
can_run.Set();
- thread_pool_.SetHasBestEffortFence(false);
+ thread_pool_->SetHasBestEffortFence(false);
did_run.Wait();
}
@@ -600,7 +619,7 @@ TEST_P(ThreadPoolImplTest, MultipleTraitsExecutionModePair) {
std::vector<std::unique_ptr<ThreadPostingTasks>> threads_posting_tasks;
for (const auto& test_params : GetTraitsExecutionModePair()) {
threads_posting_tasks.push_back(std::make_unique<ThreadPostingTasks>(
- &thread_pool_, test_params.traits, GetPoolType(),
+ thread_pool_.get(), test_params.traits, GetPoolType(),
test_params.execution_mode));
threads_posting_tasks.back()->Start();
}
@@ -624,33 +643,36 @@ TEST_P(ThreadPoolImplTest,
// TaskPriority::BEST_EFFORT.
testing::GTEST_FLAG(death_test_style) = "threadsafe";
EXPECT_DCHECK_DEATH({
- thread_pool_.GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
- {TaskPriority::BEST_EFFORT});
+ thread_pool_->GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
+ {ThreadPool(), TaskPriority::BEST_EFFORT});
});
EXPECT_DCHECK_DEATH({
- thread_pool_.GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
- {MayBlock(), TaskPriority::BEST_EFFORT});
+ thread_pool_->GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
+ {ThreadPool(), MayBlock(), TaskPriority::BEST_EFFORT});
});
- EXPECT_EQ(4, thread_pool_.GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
- {TaskPriority::USER_VISIBLE}));
- EXPECT_EQ(4, thread_pool_.GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
- {MayBlock(), TaskPriority::USER_VISIBLE}));
- EXPECT_EQ(4, thread_pool_.GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
- {TaskPriority::USER_BLOCKING}));
- EXPECT_EQ(4, thread_pool_.GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
- {MayBlock(), TaskPriority::USER_BLOCKING}));
+ EXPECT_EQ(4,
+ thread_pool_->GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
+ {ThreadPool(), TaskPriority::USER_VISIBLE}));
+ EXPECT_EQ(4,
+ thread_pool_->GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
+ {ThreadPool(), MayBlock(), TaskPriority::USER_VISIBLE}));
+ EXPECT_EQ(4,
+ thread_pool_->GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
+ {ThreadPool(), TaskPriority::USER_BLOCKING}));
+ EXPECT_EQ(4,
+ thread_pool_->GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
+ {ThreadPool(), MayBlock(), TaskPriority::USER_BLOCKING}));
}
// Verify that the RunsTasksInCurrentSequence() method of a SequencedTaskRunner
// returns false when called from a task that isn't part of the sequence.
TEST_P(ThreadPoolImplTest, SequencedRunsTasksInCurrentSequence) {
StartThreadPool();
- auto single_thread_task_runner =
- thread_pool_.CreateSingleThreadTaskRunnerWithTraits(
- TaskTraits(), SingleThreadTaskRunnerThreadMode::SHARED);
+ auto single_thread_task_runner = thread_pool_->CreateSingleThreadTaskRunner(
+ {ThreadPool()}, SingleThreadTaskRunnerThreadMode::SHARED);
auto sequenced_task_runner =
- thread_pool_.CreateSequencedTaskRunnerWithTraits(TaskTraits());
+ thread_pool_->CreateSequencedTaskRunner({ThreadPool()});
WaitableEvent task_ran;
single_thread_task_runner->PostTask(
@@ -671,10 +693,9 @@ TEST_P(ThreadPoolImplTest, SequencedRunsTasksInCurrentSequence) {
TEST_P(ThreadPoolImplTest, SingleThreadRunsTasksInCurrentSequence) {
StartThreadPool();
auto sequenced_task_runner =
- thread_pool_.CreateSequencedTaskRunnerWithTraits(TaskTraits());
- auto single_thread_task_runner =
- thread_pool_.CreateSingleThreadTaskRunnerWithTraits(
- TaskTraits(), SingleThreadTaskRunnerThreadMode::SHARED);
+ thread_pool_->CreateSequencedTaskRunner({ThreadPool()});
+ auto single_thread_task_runner = thread_pool_->CreateSingleThreadTaskRunner(
+ {ThreadPool()}, SingleThreadTaskRunnerThreadMode::SHARED);
WaitableEvent task_ran;
sequenced_task_runner->PostTask(
@@ -693,8 +714,8 @@ TEST_P(ThreadPoolImplTest, SingleThreadRunsTasksInCurrentSequence) {
#if defined(OS_WIN)
TEST_P(ThreadPoolImplTest, COMSTATaskRunnersRunWithCOMSTA) {
StartThreadPool();
- auto com_sta_task_runner = thread_pool_.CreateCOMSTATaskRunnerWithTraits(
- TaskTraits(), SingleThreadTaskRunnerThreadMode::SHARED);
+ auto com_sta_task_runner = thread_pool_->CreateCOMSTATaskRunner(
+ {ThreadPool()}, SingleThreadTaskRunnerThreadMode::SHARED);
WaitableEvent task_ran;
com_sta_task_runner->PostTask(
@@ -722,10 +743,10 @@ TEST_P(ThreadPoolImplTest, DelayedTasksNotRunAfterShutdown) {
// and signalling the WaitableEvent after Shutdown() on a different thread
// since Shutdown() will block. However, the cost of managing this extra
// thread was deemed to be too great for the unlikely race.
- thread_pool_.PostDelayedTaskWithTraits(FROM_HERE, TaskTraits(),
- BindOnce([]() { ADD_FAILURE(); }),
- TestTimeouts::tiny_timeout());
- thread_pool_.Shutdown();
+ thread_pool_->PostDelayedTask(FROM_HERE, {ThreadPool()},
+ BindOnce([]() { ADD_FAILURE(); }),
+ TestTimeouts::tiny_timeout());
+ thread_pool_->Shutdown();
PlatformThread::Sleep(TestTimeouts::tiny_timeout() * 2);
}
@@ -738,8 +759,8 @@ TEST_P(ThreadPoolImplTest, FileDescriptorWatcherNoOpsAfterShutdown) {
ASSERT_EQ(0, pipe(pipes));
scoped_refptr<TaskRunner> blocking_task_runner =
- thread_pool_.CreateSequencedTaskRunnerWithTraits(
- {TaskShutdownBehavior::BLOCK_SHUTDOWN});
+ thread_pool_->CreateSequencedTaskRunner(
+ {ThreadPool(), TaskShutdownBehavior::BLOCK_SHUTDOWN});
blocking_task_runner->PostTask(
FROM_HERE,
BindOnce(
@@ -763,7 +784,7 @@ TEST_P(ThreadPoolImplTest, FileDescriptorWatcherNoOpsAfterShutdown) {
},
pipes[0]));
- thread_pool_.Shutdown();
+ thread_pool_->Shutdown();
constexpr char kByte = '!';
ASSERT_TRUE(WriteFileDescriptor(pipes[1], &kByte, sizeof(kByte)));
@@ -783,9 +804,9 @@ TEST_P(ThreadPoolImplTest, SequenceLocalStorage) {
SequenceLocalStorageSlot<int> slot;
auto sequenced_task_runner1 =
- thread_pool_.CreateSequencedTaskRunnerWithTraits(TaskTraits());
+ thread_pool_->CreateSequencedTaskRunner({ThreadPool()});
auto sequenced_task_runner2 =
- thread_pool_.CreateSequencedTaskRunnerWithTraits(TaskTraits());
+ thread_pool_->CreateSequencedTaskRunner({ThreadPool()});
sequenced_task_runner1->PostTask(
FROM_HERE,
@@ -806,13 +827,13 @@ TEST_P(ThreadPoolImplTest, SequenceLocalStorage) {
},
&slot));
- thread_pool_.FlushForTesting();
+ thread_pool_->FlushForTesting();
}
TEST_P(ThreadPoolImplTest, FlushAsyncNoTasks) {
StartThreadPool();
bool called_back = false;
- thread_pool_.FlushAsyncForTesting(
+ thread_pool_->FlushAsyncForTesting(
BindOnce([](bool* called_back) { *called_back = true; },
Unretained(&called_back)));
EXPECT_TRUE(called_back);
@@ -863,39 +884,39 @@ TEST_P(ThreadPoolImplTest, MAYBE_IdentifiableStacks) {
{TaskShutdownBehavior::BLOCK_SHUTDOWN, "RunBlockShutdown"}};
for (const auto& shutdown_behavior : shutdown_behaviors) {
- const TaskTraits traits = {shutdown_behavior.first};
- const TaskTraits best_effort_traits = {shutdown_behavior.first,
- TaskPriority::BEST_EFFORT};
+ const TaskTraits traits = {ThreadPool(), shutdown_behavior.first};
+ const TaskTraits best_effort_traits = {
+ ThreadPool(), shutdown_behavior.first, TaskPriority::BEST_EFFORT};
- thread_pool_.CreateSequencedTaskRunnerWithTraits(traits)->PostTask(
+ thread_pool_->CreateSequencedTaskRunner(traits)->PostTask(
FROM_HERE, BindOnce(&VerifyHasStringsOnStack, "RunPooledWorker",
shutdown_behavior.second));
- thread_pool_.CreateSequencedTaskRunnerWithTraits(best_effort_traits)
+ thread_pool_->CreateSequencedTaskRunner(best_effort_traits)
->PostTask(FROM_HERE, BindOnce(&VerifyHasStringsOnStack,
"RunBackgroundPooledWorker",
shutdown_behavior.second));
thread_pool_
- .CreateSingleThreadTaskRunnerWithTraits(
- traits, SingleThreadTaskRunnerThreadMode::SHARED)
+ ->CreateSingleThreadTaskRunner(traits,
+ SingleThreadTaskRunnerThreadMode::SHARED)
->PostTask(FROM_HERE,
BindOnce(&VerifyHasStringsOnStack, "RunSharedWorker",
shutdown_behavior.second));
thread_pool_
- .CreateSingleThreadTaskRunnerWithTraits(
- best_effort_traits, SingleThreadTaskRunnerThreadMode::SHARED)
+ ->CreateSingleThreadTaskRunner(best_effort_traits,
+ SingleThreadTaskRunnerThreadMode::SHARED)
->PostTask(FROM_HERE, BindOnce(&VerifyHasStringsOnStack,
"RunBackgroundSharedWorker",
shutdown_behavior.second));
thread_pool_
- .CreateSingleThreadTaskRunnerWithTraits(
+ ->CreateSingleThreadTaskRunner(
traits, SingleThreadTaskRunnerThreadMode::DEDICATED)
->PostTask(FROM_HERE,
BindOnce(&VerifyHasStringsOnStack, "RunDedicatedWorker",
shutdown_behavior.second));
thread_pool_
- .CreateSingleThreadTaskRunnerWithTraits(
+ ->CreateSingleThreadTaskRunner(
best_effort_traits, SingleThreadTaskRunnerThreadMode::DEDICATED)
->PostTask(FROM_HERE, BindOnce(&VerifyHasStringsOnStack,
"RunBackgroundDedicatedWorker",
@@ -903,34 +924,34 @@ TEST_P(ThreadPoolImplTest, MAYBE_IdentifiableStacks) {
#if defined(OS_WIN)
thread_pool_
- .CreateCOMSTATaskRunnerWithTraits(
- traits, SingleThreadTaskRunnerThreadMode::SHARED)
+ ->CreateCOMSTATaskRunner(traits,
+ SingleThreadTaskRunnerThreadMode::SHARED)
->PostTask(FROM_HERE,
BindOnce(&VerifyHasStringsOnStack, "RunSharedCOMWorker",
shutdown_behavior.second));
thread_pool_
- .CreateCOMSTATaskRunnerWithTraits(
- best_effort_traits, SingleThreadTaskRunnerThreadMode::SHARED)
+ ->CreateCOMSTATaskRunner(best_effort_traits,
+ SingleThreadTaskRunnerThreadMode::SHARED)
->PostTask(FROM_HERE, BindOnce(&VerifyHasStringsOnStack,
"RunBackgroundSharedCOMWorker",
shutdown_behavior.second));
thread_pool_
- .CreateCOMSTATaskRunnerWithTraits(
- traits, SingleThreadTaskRunnerThreadMode::DEDICATED)
+ ->CreateCOMSTATaskRunner(traits,
+ SingleThreadTaskRunnerThreadMode::DEDICATED)
->PostTask(FROM_HERE,
BindOnce(&VerifyHasStringsOnStack, "RunDedicatedCOMWorker",
shutdown_behavior.second));
thread_pool_
- .CreateCOMSTATaskRunnerWithTraits(
- best_effort_traits, SingleThreadTaskRunnerThreadMode::DEDICATED)
+ ->CreateCOMSTATaskRunner(best_effort_traits,
+ SingleThreadTaskRunnerThreadMode::DEDICATED)
->PostTask(FROM_HERE, BindOnce(&VerifyHasStringsOnStack,
"RunBackgroundDedicatedCOMWorker",
shutdown_behavior.second));
#endif // defined(OS_WIN)
}
- thread_pool_.FlushForTesting();
+ thread_pool_->FlushForTesting();
}
TEST_P(ThreadPoolImplTest, WorkerThreadObserver) {
@@ -981,52 +1002,54 @@ TEST_P(ThreadPoolImplTest, WorkerThreadObserver) {
std::vector<scoped_refptr<SingleThreadTaskRunner>> task_runners;
- task_runners.push_back(thread_pool_.CreateSingleThreadTaskRunnerWithTraits(
- {TaskPriority::BEST_EFFORT}, SingleThreadTaskRunnerThreadMode::SHARED));
- task_runners.push_back(thread_pool_.CreateSingleThreadTaskRunnerWithTraits(
- {TaskPriority::BEST_EFFORT, MayBlock()},
+ task_runners.push_back(thread_pool_->CreateSingleThreadTaskRunner(
+ {ThreadPool(), TaskPriority::BEST_EFFORT},
SingleThreadTaskRunnerThreadMode::SHARED));
- task_runners.push_back(thread_pool_.CreateSingleThreadTaskRunnerWithTraits(
- {TaskPriority::USER_BLOCKING}, SingleThreadTaskRunnerThreadMode::SHARED));
- task_runners.push_back(thread_pool_.CreateSingleThreadTaskRunnerWithTraits(
- {TaskPriority::USER_BLOCKING, MayBlock()},
+ task_runners.push_back(thread_pool_->CreateSingleThreadTaskRunner(
+ {ThreadPool(), TaskPriority::BEST_EFFORT, MayBlock()},
+ SingleThreadTaskRunnerThreadMode::SHARED));
+ task_runners.push_back(thread_pool_->CreateSingleThreadTaskRunner(
+ {ThreadPool(), TaskPriority::USER_BLOCKING},
+ SingleThreadTaskRunnerThreadMode::SHARED));
+ task_runners.push_back(thread_pool_->CreateSingleThreadTaskRunner(
+ {ThreadPool(), TaskPriority::USER_BLOCKING, MayBlock()},
SingleThreadTaskRunnerThreadMode::SHARED));
- task_runners.push_back(thread_pool_.CreateSingleThreadTaskRunnerWithTraits(
- {TaskPriority::BEST_EFFORT},
+ task_runners.push_back(thread_pool_->CreateSingleThreadTaskRunner(
+ {ThreadPool(), TaskPriority::BEST_EFFORT},
SingleThreadTaskRunnerThreadMode::DEDICATED));
- task_runners.push_back(thread_pool_.CreateSingleThreadTaskRunnerWithTraits(
- {TaskPriority::BEST_EFFORT, MayBlock()},
+ task_runners.push_back(thread_pool_->CreateSingleThreadTaskRunner(
+ {ThreadPool(), TaskPriority::BEST_EFFORT, MayBlock()},
SingleThreadTaskRunnerThreadMode::DEDICATED));
- task_runners.push_back(thread_pool_.CreateSingleThreadTaskRunnerWithTraits(
- {TaskPriority::USER_BLOCKING},
+ task_runners.push_back(thread_pool_->CreateSingleThreadTaskRunner(
+ {ThreadPool(), TaskPriority::USER_BLOCKING},
SingleThreadTaskRunnerThreadMode::DEDICATED));
- task_runners.push_back(thread_pool_.CreateSingleThreadTaskRunnerWithTraits(
- {TaskPriority::USER_BLOCKING, MayBlock()},
+ task_runners.push_back(thread_pool_->CreateSingleThreadTaskRunner(
+ {ThreadPool(), TaskPriority::USER_BLOCKING, MayBlock()},
SingleThreadTaskRunnerThreadMode::DEDICATED));
#if defined(OS_WIN)
- task_runners.push_back(thread_pool_.CreateCOMSTATaskRunnerWithTraits(
+ task_runners.push_back(thread_pool_->CreateCOMSTATaskRunner(
{TaskPriority::BEST_EFFORT}, SingleThreadTaskRunnerThreadMode::SHARED));
- task_runners.push_back(thread_pool_.CreateCOMSTATaskRunnerWithTraits(
+ task_runners.push_back(thread_pool_->CreateCOMSTATaskRunner(
{TaskPriority::BEST_EFFORT, MayBlock()},
SingleThreadTaskRunnerThreadMode::SHARED));
- task_runners.push_back(thread_pool_.CreateCOMSTATaskRunnerWithTraits(
+ task_runners.push_back(thread_pool_->CreateCOMSTATaskRunner(
{TaskPriority::USER_BLOCKING}, SingleThreadTaskRunnerThreadMode::SHARED));
- task_runners.push_back(thread_pool_.CreateCOMSTATaskRunnerWithTraits(
+ task_runners.push_back(thread_pool_->CreateCOMSTATaskRunner(
{TaskPriority::USER_BLOCKING, MayBlock()},
SingleThreadTaskRunnerThreadMode::SHARED));
- task_runners.push_back(thread_pool_.CreateCOMSTATaskRunnerWithTraits(
+ task_runners.push_back(thread_pool_->CreateCOMSTATaskRunner(
{TaskPriority::BEST_EFFORT},
SingleThreadTaskRunnerThreadMode::DEDICATED));
- task_runners.push_back(thread_pool_.CreateCOMSTATaskRunnerWithTraits(
+ task_runners.push_back(thread_pool_->CreateCOMSTATaskRunner(
{TaskPriority::BEST_EFFORT, MayBlock()},
SingleThreadTaskRunnerThreadMode::DEDICATED));
- task_runners.push_back(thread_pool_.CreateCOMSTATaskRunnerWithTraits(
+ task_runners.push_back(thread_pool_->CreateCOMSTATaskRunner(
{TaskPriority::USER_BLOCKING},
SingleThreadTaskRunnerThreadMode::DEDICATED));
- task_runners.push_back(thread_pool_.CreateCOMSTATaskRunnerWithTraits(
+ task_runners.push_back(thread_pool_->CreateCOMSTATaskRunner(
{TaskPriority::USER_BLOCKING, MayBlock()},
SingleThreadTaskRunnerThreadMode::DEDICATED));
#endif
@@ -1074,8 +1097,8 @@ TEST_P(ThreadPoolImplTestAllTraitsExecutionModes, NoLeakWhenPostingNestedTask) {
bool was_destroyed = false;
auto must_be_destroyed = std::make_unique<MustBeDestroyed>(&was_destroyed);
- auto task_runner = CreateTaskRunnerWithTraitsAndExecutionMode(
- &thread_pool_, GetTraits(), GetExecutionMode());
+ auto task_runner = CreateTaskRunnerAndExecutionMode(
+ thread_pool_.get(), GetTraits(), GetExecutionMode());
task_runner->PostTask(FROM_HERE, BindLambdaForTesting([&] {
sls.emplace(std::move(must_be_destroyed));
@@ -1132,16 +1155,16 @@ std::vector<std::unique_ptr<TaskRunnerAndEvents>> CreateTaskRunnersAndEvents(
// Task runner that will start as USER_VISIBLE and update to USER_BLOCKING.
// Its task is expected to run first.
task_runners_and_events.push_back(std::make_unique<TaskRunnerAndEvents>(
- thread_pool->CreateUpdateableSequencedTaskRunnerWithTraits(
- TaskTraits({TaskPriority::USER_VISIBLE, thread_policy})),
+ thread_pool->CreateUpdateableSequencedTaskRunner(TaskTraits(
+ {ThreadPool(), TaskPriority::USER_VISIBLE, thread_policy})),
TaskPriority::USER_BLOCKING, nullptr));
// -----
// Task runner that will start as BEST_EFFORT and update to USER_VISIBLE.
// Its task is expected to run after the USER_BLOCKING task runner's task.
task_runners_and_events.push_back(std::make_unique<TaskRunnerAndEvents>(
- thread_pool->CreateUpdateableSequencedTaskRunnerWithTraits(
- TaskTraits({TaskPriority::BEST_EFFORT, thread_policy})),
+ thread_pool->CreateUpdateableSequencedTaskRunner(
+ TaskTraits({ThreadPool(), TaskPriority::BEST_EFFORT, thread_policy})),
TaskPriority::USER_VISIBLE, &task_runners_and_events.back()->task_ran));
// -----
@@ -1159,8 +1182,8 @@ std::vector<std::unique_ptr<TaskRunnerAndEvents>> CreateTaskRunnersAndEvents(
: &task_runners_and_events.back()->task_ran;
task_runners_and_events.push_back(std::make_unique<TaskRunnerAndEvents>(
- thread_pool->CreateUpdateableSequencedTaskRunnerWithTraits(
- TaskTraits({TaskPriority::USER_BLOCKING, thread_policy})),
+ thread_pool->CreateUpdateableSequencedTaskRunner(TaskTraits(
+ {ThreadPool(), TaskPriority::USER_BLOCKING, thread_policy})),
TaskPriority::BEST_EFFORT, expected_previous_event));
return task_runners_and_events;
@@ -1177,10 +1200,10 @@ void TestUpdatePrioritySequenceNotScheduled(ThreadPoolImplTest* test,
test->StartThreadPool(kLocalMaxNumForegroundThreads);
auto task_runners_and_events =
- CreateTaskRunnersAndEvents(&test->thread_pool_, thread_policy);
+ CreateTaskRunnersAndEvents(test->thread_pool_.get(), thread_policy);
// Prevent tasks from running.
- test->thread_pool_.SetHasFence(true);
+ test->thread_pool_->SetHasFence(true);
// Post tasks to multiple task runners while they are at initial priority.
// They won't run immediately because of the call to SetHasFence(true) above.
@@ -1189,7 +1212,8 @@ void TestUpdatePrioritySequenceNotScheduled(ThreadPoolImplTest* test,
FROM_HERE,
BindOnce(
&VerifyOrderAndTaskEnvironmentAndSignalEvent,
- TaskTraits(task_runner_and_events->updated_priority, thread_policy),
+ TaskTraits{ThreadPool(), task_runner_and_events->updated_priority,
+ thread_policy},
test->GetPoolType(),
// Native pools ignore the maximum number of threads per pool
// and therefore don't guarantee that tasks run in priority
@@ -1211,7 +1235,7 @@ void TestUpdatePrioritySequenceNotScheduled(ThreadPoolImplTest* test,
}
// Allow tasks to run.
- test->thread_pool_.SetHasFence(false);
+ test->thread_pool_->SetHasFence(false);
for (auto& task_runner_and_events : task_runners_and_events)
test::WaitWithoutBlockingObserver(&task_runner_and_events->task_ran);
@@ -1223,7 +1247,7 @@ void TestUpdatePrioritySequenceScheduled(ThreadPoolImplTest* test,
ThreadPolicy thread_policy) {
test->StartThreadPool();
auto task_runners_and_events =
- CreateTaskRunnersAndEvents(&test->thread_pool_, thread_policy);
+ CreateTaskRunnersAndEvents(test->thread_pool_.get(), thread_policy);
// Post blocking tasks to all task runners to prevent tasks from being
// scheduled later in the test.
@@ -1252,7 +1276,8 @@ void TestUpdatePrioritySequenceScheduled(ThreadPoolImplTest* test,
FROM_HERE,
BindOnce(
&VerifyOrderAndTaskEnvironmentAndSignalEvent,
- TaskTraits(task_runner_and_events->updated_priority, thread_policy),
+ TaskTraits{ThreadPool(), task_runner_and_events->updated_priority,
+ thread_policy},
test->GetPoolType(),
Unretained(task_runner_and_events->expected_previous_event),
Unretained(&task_runner_and_events->task_ran)));
@@ -1293,16 +1318,14 @@ TEST_P(ThreadPoolImplTest, UpdatePrioritySequenceScheduled_MustUseForeground) {
TEST_P(ThreadPoolImplTest, UpdatePriorityFromBestEffortNoThreadPolicy) {
StartThreadPool();
{
- auto task_runner =
- thread_pool_.CreateUpdateableSequencedTaskRunnerWithTraits(
- {TaskPriority::BEST_EFFORT});
+ auto task_runner = thread_pool_->CreateUpdateableSequencedTaskRunner(
+ {ThreadPool(), TaskPriority::BEST_EFFORT});
EXPECT_DCHECK_DEATH(
{ task_runner->UpdatePriority(TaskPriority::USER_VISIBLE); });
}
{
- auto task_runner =
- thread_pool_.CreateUpdateableSequencedTaskRunnerWithTraits(
- {TaskPriority::BEST_EFFORT});
+ auto task_runner = thread_pool_->CreateUpdateableSequencedTaskRunner(
+ {ThreadPool(), TaskPriority::BEST_EFFORT});
EXPECT_DCHECK_DEATH(
{ task_runner->UpdatePriority(TaskPriority::USER_BLOCKING); });
}
diff --git a/chromium/base/task/thread_pool/thread_pool_perftest.cc b/chromium/base/task/thread_pool/thread_pool_perftest.cc
index 47d01f88eea..25d6bcfde2f 100644
--- a/chromium/base/task/thread_pool/thread_pool_perftest.cc
+++ b/chromium/base/task/thread_pool/thread_pool_perftest.cc
@@ -69,7 +69,7 @@ class ThreadPoolPerfTest : public testing::Test {
// Posting actions:
void ContinuouslyBindAndPostNoOpTasks(size_t num_tasks) {
- scoped_refptr<TaskRunner> task_runner = CreateTaskRunnerWithTraits({});
+ scoped_refptr<TaskRunner> task_runner = CreateTaskRunner({ThreadPool()});
for (size_t i = 0; i < num_tasks; ++i) {
++num_tasks_pending_;
++num_posted_tasks_;
@@ -83,7 +83,7 @@ class ThreadPoolPerfTest : public testing::Test {
}
void ContinuouslyPostNoOpTasks(size_t num_tasks) {
- scoped_refptr<TaskRunner> task_runner = CreateTaskRunnerWithTraits({});
+ scoped_refptr<TaskRunner> task_runner = CreateTaskRunner({ThreadPool()});
base::RepeatingClosure closure = base::BindRepeating(
[](std::atomic_size_t* num_task_pending) { (*num_task_pending)--; },
&num_tasks_pending_);
@@ -96,7 +96,7 @@ class ThreadPoolPerfTest : public testing::Test {
void ContinuouslyPostBusyWaitTasks(size_t num_tasks,
base::TimeDelta duration) {
- scoped_refptr<TaskRunner> task_runner = CreateTaskRunnerWithTraits({});
+ scoped_refptr<TaskRunner> task_runner = CreateTaskRunner({ThreadPool()});
base::RepeatingClosure closure = base::BindRepeating(
[](std::atomic_size_t* num_task_pending, base::TimeDelta duration) {
base::TimeTicks end_time = base::TimeTicks::Now() + duration;
diff --git a/chromium/base/task/thread_pool/worker_thread.cc b/chromium/base/task/thread_pool/worker_thread.cc
index e23183beea7..07b5b15b207 100644
--- a/chromium/base/task/thread_pool/worker_thread.cc
+++ b/chromium/base/task/thread_pool/worker_thread.cc
@@ -14,6 +14,7 @@
#include "base/task/thread_pool/environment_config.h"
#include "base/task/thread_pool/task_tracker.h"
#include "base/task/thread_pool/worker_thread_observer.h"
+#include "base/time/time_override.h"
#include "base/trace_event/trace_event.h"
#if defined(OS_MACOSX)
@@ -133,7 +134,7 @@ void WorkerThread::Cleanup() {
void WorkerThread::BeginUnusedPeriod() {
CheckedAutoLock auto_lock(thread_lock_);
DCHECK(last_used_time_.is_null());
- last_used_time_ = TimeTicks::Now();
+ last_used_time_ = subtle::TimeTicksNowIgnoringOverride();
}
void WorkerThread::EndUnusedPeriod() {
@@ -305,8 +306,9 @@ void WorkerThread::RunWorker() {
UpdateThreadPriority(GetDesiredThreadPriority());
// Get the task source containing the next task to execute.
- RegisteredTaskSource task_source = delegate_->GetWork(this);
- if (!task_source) {
+ RunIntentWithRegisteredTaskSource run_intent_with_task_source =
+ delegate_->GetWork(this);
+ if (!run_intent_with_task_source) {
// Exit immediately if GetWork() resulted in detaching this worker.
if (ShouldExit())
break;
@@ -317,9 +319,10 @@ void WorkerThread::RunWorker() {
continue;
}
- task_source = task_tracker_->RunAndPopNextTask(std::move(task_source));
+ RegisteredTaskSource task_source = task_tracker_->RunAndPopNextTask(
+ std::move(run_intent_with_task_source));
- delegate_->DidRunTask(std::move(task_source));
+ delegate_->DidProcessTask(std::move(task_source));
// Calling WakeUp() guarantees that this WorkerThread will run Tasks from
// TaskSources returned by the GetWork() method of |delegate_| until it
diff --git a/chromium/base/task/thread_pool/worker_thread.h b/chromium/base/task/thread_pool/worker_thread.h
index 59cc9dd6462..3602054de51 100644
--- a/chromium/base/task/thread_pool/worker_thread.h
+++ b/chromium/base/task/thread_pool/worker_thread.h
@@ -68,12 +68,12 @@ class BASE_EXPORT WorkerThread : public RefCountedThreadSafe<WorkerThread>,
virtual void OnMainEntry(const WorkerThread* worker) = 0;
// Called by |worker|'s thread to get a TaskSource from which to run a Task.
- virtual RegisteredTaskSource GetWork(WorkerThread* worker) = 0;
+ virtual RunIntentWithRegisteredTaskSource GetWork(WorkerThread* worker) = 0;
// Called by the WorkerThread after it ran a Task. If the Task's
// TaskSource should be reenqueued, it is passed to |task_source|.
// Otherwise, |task_source| is nullptr.
- virtual void DidRunTask(RegisteredTaskSource task_source) = 0;
+ virtual void DidProcessTask(RegisteredTaskSource task_source) = 0;
// Called to determine how long to sleep before the next call to GetWork().
// GetWork() may be called before this timeout expires if the worker's
diff --git a/chromium/base/task/thread_pool/worker_thread_stack.cc b/chromium/base/task/thread_pool/worker_thread_stack.cc
index f6f261048a6..40eadde9695 100644
--- a/chromium/base/task/thread_pool/worker_thread_stack.cc
+++ b/chromium/base/task/thread_pool/worker_thread_stack.cc
@@ -41,7 +41,7 @@ WorkerThread* WorkerThreadStack::Peek() const {
}
bool WorkerThreadStack::Contains(const WorkerThread* worker) const {
- return ContainsValue(stack_, worker);
+ return base::Contains(stack_, worker);
}
void WorkerThreadStack::Remove(const WorkerThread* worker) {
diff --git a/chromium/base/task/thread_pool/worker_thread_stack_unittest.cc b/chromium/base/task/thread_pool/worker_thread_stack_unittest.cc
index 2640ec7d77b..513c478b38e 100644
--- a/chromium/base/task/thread_pool/worker_thread_stack_unittest.cc
+++ b/chromium/base/task/thread_pool/worker_thread_stack_unittest.cc
@@ -25,10 +25,10 @@ class MockWorkerThreadDelegate : public WorkerThread::Delegate {
return WorkerThread::ThreadLabel::DEDICATED;
}
void OnMainEntry(const WorkerThread* worker) override {}
- RegisteredTaskSource GetWork(WorkerThread* worker) override {
+ RunIntentWithRegisteredTaskSource GetWork(WorkerThread* worker) override {
return nullptr;
}
- void DidRunTask(RegisteredTaskSource task_source) override {
+ void DidProcessTask(RegisteredTaskSource task_source) override {
ADD_FAILURE() << "Unexpected call to DidRunTask()";
}
TimeDelta GetSleepTimeout() override { return TimeDelta::Max(); }
@@ -52,7 +52,7 @@ class ThreadPoolWorkerStackTest : public testing::Test {
}
private:
- TaskTracker task_tracker_ = {"Test"};
+ TaskTracker task_tracker_{"Test"};
protected:
scoped_refptr<WorkerThread> worker_a_;
diff --git a/chromium/base/task/thread_pool/worker_thread_unittest.cc b/chromium/base/task/thread_pool/worker_thread_unittest.cc
index e95ea7bb188..9058bec7729 100644
--- a/chromium/base/task/thread_pool/worker_thread_unittest.cc
+++ b/chromium/base/task/thread_pool/worker_thread_unittest.cc
@@ -51,10 +51,10 @@ class WorkerThreadDefaultDelegate : public WorkerThread::Delegate {
return WorkerThread::ThreadLabel::DEDICATED;
}
void OnMainEntry(const WorkerThread* worker) override {}
- RegisteredTaskSource GetWork(WorkerThread* worker) override {
+ RunIntentWithRegisteredTaskSource GetWork(WorkerThread* worker) override {
return nullptr;
}
- void DidRunTask(RegisteredTaskSource task_source) override {
+ void DidProcessTask(RegisteredTaskSource task_source) override {
ADD_FAILURE() << "Unexpected call to DidRunTask()";
}
TimeDelta GetSleepTimeout() override { return TimeDelta::Max(); }
@@ -116,7 +116,7 @@ class ThreadPoolWorkerTest : public testing::TestWithParam<int> {
return created_sequences_;
}
- std::vector<scoped_refptr<TaskSource>> DidRunTaskSequences() {
+ std::vector<scoped_refptr<TaskSource>> DidProcessTaskSequences() {
CheckedAutoLock auto_lock(lock_);
return did_run_task_sources_;
}
@@ -129,14 +129,14 @@ class ThreadPoolWorkerTest : public testing::TestWithParam<int> {
TestWorkerThreadDelegate(ThreadPoolWorkerTest* outer) : outer_(outer) {}
~TestWorkerThreadDelegate() override {
- EXPECT_FALSE(IsCallToDidRunTaskExpected());
+ EXPECT_FALSE(IsCallToDidProcessTaskExpected());
}
// WorkerThread::Delegate:
void OnMainEntry(const WorkerThread* worker) override {
outer_->worker_set_.Wait();
EXPECT_EQ(outer_->worker_.get(), worker);
- EXPECT_FALSE(IsCallToDidRunTaskExpected());
+ EXPECT_FALSE(IsCallToDidProcessTaskExpected());
// Without synchronization, OnMainEntry() could be called twice without
// generating an error.
@@ -145,8 +145,8 @@ class ThreadPoolWorkerTest : public testing::TestWithParam<int> {
outer_->main_entry_called_.Signal();
}
- RegisteredTaskSource GetWork(WorkerThread* worker) override {
- EXPECT_FALSE(IsCallToDidRunTaskExpected());
+ RunIntentWithRegisteredTaskSource GetWork(WorkerThread* worker) override {
+ EXPECT_FALSE(IsCallToDidProcessTaskExpected());
EXPECT_EQ(outer_->worker_.get(), worker);
{
@@ -166,8 +166,9 @@ class ThreadPoolWorkerTest : public testing::TestWithParam<int> {
}
// Create a Sequence with TasksPerSequence() Tasks.
- scoped_refptr<Sequence> sequence = MakeRefCounted<Sequence>(
- TaskTraits(), nullptr, TaskSourceExecutionMode::kParallel);
+ scoped_refptr<Sequence> sequence =
+ MakeRefCounted<Sequence>(TaskTraits{ThreadPool()}, nullptr,
+ TaskSourceExecutionMode::kParallel);
Sequence::Transaction sequence_transaction(sequence->BeginTransaction());
for (int i = 0; i < outer_->TasksPerSequence(); ++i) {
Task task(FROM_HERE,
@@ -182,21 +183,23 @@ class ThreadPoolWorkerTest : public testing::TestWithParam<int> {
outer_->task_tracker_.WillQueueTaskSource(sequence);
EXPECT_TRUE(registered_task_source);
- ExpectCallToDidRunTask();
+ ExpectCallToDidProcessTask();
{
// Add the Sequence to the vector of created Sequences.
CheckedAutoLock auto_lock(outer_->lock_);
outer_->created_sequences_.push_back(sequence);
}
- return registered_task_source;
+ auto run_intent = registered_task_source->WillRunTask();
+ EXPECT_TRUE(run_intent);
+ return {std::move(registered_task_source), std::move(run_intent)};
}
// This override verifies that |task_source| has the expected number of
// Tasks and adds it to |did_run_task_sources_|. Unlike a normal
- // DidRunTask() implementation, it doesn't add |task_source| to a queue for
- // further execution.
- void DidRunTask(RegisteredTaskSource registered_task_source) override {
+ // DidProcessTask() implementation, it doesn't add |task_source| to a queue
+ // for further execution.
+ void DidProcessTask(RegisteredTaskSource registered_task_source) override {
{
CheckedAutoLock auto_lock(expect_did_run_task_lock_);
EXPECT_TRUE(expect_did_run_task_);
@@ -214,9 +217,10 @@ class ThreadPoolWorkerTest : public testing::TestWithParam<int> {
// Verify the number of Tasks in |registered_task_source|.
auto transaction(registered_task_source->BeginTransaction());
for (int i = 0; i < outer_->TasksPerSequence() - 1; ++i) {
- EXPECT_TRUE(transaction.TakeTask());
+ auto run_intent = registered_task_source->WillRunTask();
+ EXPECT_TRUE(transaction.TakeTask(&run_intent));
EXPECT_EQ(i == outer_->TasksPerSequence() - 2,
- !transaction.DidRunTask());
+ !transaction.DidProcessTask(std::move(run_intent)));
}
scoped_refptr<TaskSource> task_source =
@@ -232,14 +236,14 @@ class ThreadPoolWorkerTest : public testing::TestWithParam<int> {
}
private:
- // Expect a call to DidRunTask() before the next call to any other method of
- // this delegate.
- void ExpectCallToDidRunTask() {
+ // Expect a call to DidProcessTask() before the next call to any other
+ // method of this delegate.
+ void ExpectCallToDidProcessTask() {
CheckedAutoLock auto_lock(expect_did_run_task_lock_);
expect_did_run_task_ = true;
}
- bool IsCallToDidRunTaskExpected() const {
+ bool IsCallToDidProcessTaskExpected() const {
CheckedAutoLock auto_lock(expect_did_run_task_lock_);
return expect_did_run_task_;
}
@@ -249,7 +253,8 @@ class ThreadPoolWorkerTest : public testing::TestWithParam<int> {
// Synchronizes access to |expect_did_run_task_|.
mutable CheckedLock expect_did_run_task_lock_;
- // Whether the next method called on this delegate should be DidRunTask().
+ // Whether the next method called on this delegate should be
+ // DidProcessTask().
bool expect_did_run_task_ = false;
DISALLOW_COPY_AND_ASSIGN(TestWorkerThreadDelegate);
@@ -261,7 +266,7 @@ class ThreadPoolWorkerTest : public testing::TestWithParam<int> {
EXPECT_LE(num_run_tasks_, created_sequences_.size());
}
- TaskTracker task_tracker_ = {"Test"};
+ TaskTracker task_tracker_{"Test"};
// Synchronizes access to all members below.
mutable CheckedLock lock_;
@@ -285,7 +290,7 @@ class ThreadPoolWorkerTest : public testing::TestWithParam<int> {
// Sequences created by GetWork().
std::vector<scoped_refptr<TaskSource>> created_sequences_;
- // Sequences passed to DidRunTask().
+ // Sequences passed to DidProcessTask().
std::vector<scoped_refptr<TaskSource>> did_run_task_sources_;
// Number of times that RunTaskCallback() has been called.
@@ -321,11 +326,11 @@ TEST_P(ThreadPoolWorkerTest, ContinuousWork) {
// If Sequences returned by GetWork() contain more than one Task, they aren't
// empty after the worker pops Tasks from them and thus should be returned to
- // DidRunTask().
+ // DidProcessTask().
if (TasksPerSequence() > 1)
- EXPECT_EQ(CreatedTaskSources(), DidRunTaskSequences());
+ EXPECT_EQ(CreatedTaskSources(), DidProcessTaskSequences());
else
- EXPECT_TRUE(DidRunTaskSequences().empty());
+ EXPECT_TRUE(DidProcessTaskSequences().empty());
}
// Verify that when GetWork() alternates between returning a Sequence and
@@ -352,11 +357,11 @@ TEST_P(ThreadPoolWorkerTest, IntermittentWork) {
// If Sequences returned by GetWork() contain more than one Task, they
// aren't empty after the worker pops Tasks from them and thus should be
- // returned to DidRunTask().
+ // returned to DidProcessTask().
if (TasksPerSequence() > 1)
- EXPECT_EQ(CreatedTaskSources(), DidRunTaskSequences());
+ EXPECT_EQ(CreatedTaskSources(), DidProcessTaskSequences());
else
- EXPECT_TRUE(DidRunTaskSequences().empty());
+ EXPECT_TRUE(DidProcessTaskSequences().empty());
}
}
@@ -425,7 +430,7 @@ class ControllableCleanupDelegate : public WorkerThreadDefaultDelegate {
~ControllableCleanupDelegate() override { controls_->destroyed_.Signal(); }
- RegisteredTaskSource GetWork(WorkerThread* worker) override {
+ RunIntentWithRegisteredTaskSource GetWork(WorkerThread* worker) override {
EXPECT_TRUE(controls_->expect_get_work_);
// Sends one item of work to signal |work_processed_|. On subsequent calls,
@@ -441,7 +446,7 @@ class ControllableCleanupDelegate : public WorkerThreadDefaultDelegate {
controls_->work_requested_ = true;
scoped_refptr<Sequence> sequence = MakeRefCounted<Sequence>(
- TaskTraits(WithBaseSyncPrimitives(),
+ TaskTraits(ThreadPool(), WithBaseSyncPrimitives(),
TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN),
nullptr, TaskSourceExecutionMode::kParallel);
Task task(
@@ -460,10 +465,11 @@ class ControllableCleanupDelegate : public WorkerThreadDefaultDelegate {
auto registered_task_source =
task_tracker_->WillQueueTaskSource(std::move(sequence));
EXPECT_TRUE(registered_task_source);
- return registered_task_source;
+ auto run_intent = registered_task_source->WillRunTask();
+ return {std::move(registered_task_source), std::move(run_intent)};
}
- void DidRunTask(RegisteredTaskSource) override {}
+ void DidProcessTask(RegisteredTaskSource) override {}
void OnMainExit(WorkerThread* worker) override {
controls_->exited_.Signal();
@@ -706,7 +712,7 @@ class ExpectThreadPriorityDelegate : public WorkerThreadDefaultDelegate {
void OnMainEntry(const WorkerThread* worker) override {
VerifyThreadPriority();
}
- RegisteredTaskSource GetWork(WorkerThread* worker) override {
+ RunIntentWithRegisteredTaskSource GetWork(WorkerThread* worker) override {
VerifyThreadPriority();
priority_verified_in_get_work_event_.Signal();
return nullptr;
@@ -741,9 +747,9 @@ TEST(ThreadPoolWorkerTest, BumpPriorityOfAliveThreadDuringShutdown) {
// Block shutdown to ensure that the worker doesn't exit when StartShutdown()
// is called.
- scoped_refptr<Sequence> sequence =
- MakeRefCounted<Sequence>(TaskShutdownBehavior::BLOCK_SHUTDOWN, nullptr,
- TaskSourceExecutionMode::kParallel);
+ scoped_refptr<Sequence> sequence = MakeRefCounted<Sequence>(
+ TaskTraits{ThreadPool(), TaskShutdownBehavior::BLOCK_SHUTDOWN}, nullptr,
+ TaskSourceExecutionMode::kParallel);
auto registered_task_source =
task_tracker.WillQueueTaskSource(std::move(sequence));
diff --git a/chromium/base/task_runner.cc b/chromium/base/task_runner.cc
index d8644a989dc..e2787f3d1ac 100644
--- a/chromium/base/task_runner.cc
+++ b/chromium/base/task_runner.cc
@@ -10,6 +10,7 @@
#include "base/compiler_specific.h"
#include "base/logging.h"
#include "base/task/promise/abstract_promise.h"
+#include "base/task/promise/helpers.h"
#include "base/threading/post_task_and_reply_impl.h"
namespace base {
@@ -40,40 +41,8 @@ bool PostTaskAndReplyTaskRunner::PostTask(const Location& from_here,
return destination_->PostTask(from_here, std::move(task));
}
-// TODO(alexclarke): Remove this when TaskRunner::PostPromiseInternal becomes
-// pure virtual.
-class PromiseHolder {
- public:
- explicit PromiseHolder(scoped_refptr<internal::AbstractPromise> promise)
- : promise_(std::move(promise)) {}
-
- ~PromiseHolder() {
- // Detect if the promise was not executed and if so cancel to ensure memory
- // is released.
- if (promise_)
- promise_->OnCanceled();
- }
-
- PromiseHolder(PromiseHolder&& other) : promise_(std::move(other.promise_)) {}
-
- scoped_refptr<internal::AbstractPromise> Unwrap() const {
- return std::move(promise_);
- }
-
- private:
- mutable scoped_refptr<internal::AbstractPromise> promise_;
-};
-
} // namespace
-template <>
-struct BindUnwrapTraits<PromiseHolder> {
- static scoped_refptr<internal::AbstractPromise> Unwrap(
- const PromiseHolder& o) {
- return o.Unwrap();
- }
-};
-
bool TaskRunner::PostTask(const Location& from_here, OnceClosure task) {
return PostDelayedTask(from_here, std::move(task), base::TimeDelta());
}
@@ -88,10 +57,10 @@ bool TaskRunner::PostTaskAndReply(const Location& from_here,
bool TaskRunner::PostPromiseInternal(
const scoped_refptr<internal::AbstractPromise>& promise,
base::TimeDelta delay) {
- return PostDelayedTask(
- promise->from_here(),
- BindOnce(&internal::AbstractPromise::Execute, PromiseHolder(promise)),
- delay);
+ return PostDelayedTask(promise->from_here(),
+ BindOnce(&internal::AbstractPromise::Execute,
+ internal::PromiseHolder(promise)),
+ delay);
}
TaskRunner::TaskRunner() = default;
diff --git a/chromium/base/template_util.h b/chromium/base/template_util.h
index 6eff1362157..51bd0852799 100644
--- a/chromium/base/template_util.h
+++ b/chromium/base/template_util.h
@@ -72,6 +72,12 @@ struct SupportsOstreamOperator<T,
<< std::declval<T>()))>
: std::true_type {};
+template <typename T, typename = void>
+struct SupportsToString : std::false_type {};
+template <typename T>
+struct SupportsToString<T, decltype(void(std::declval<T>().ToString()))>
+ : std::true_type {};
+
// Used to detech whether the given type is an iterator. This is normally used
// with std::enable_if to provide disambiguation for functions that take
// templatzed iterators as input.
diff --git a/chromium/base/template_util_unittest.cc b/chromium/base/template_util_unittest.cc
index 2c42445f785..48add1c2a5b 100644
--- a/chromium/base/template_util_unittest.cc
+++ b/chromium/base/template_util_unittest.cc
@@ -25,6 +25,9 @@ struct StructWithOperator {};
std::ostream& operator<<(std::ostream& os, const StructWithOperator& v) {
return os;
}
+struct StructWithToString {
+ std::string ToString() const { return ""; }
+};
// is_non_const_reference<Type>
static_assert(!is_non_const_reference<int>::value, "IsNonConstReference");
@@ -71,6 +74,16 @@ static_assert(
internal::SupportsOstreamOperator<const StructWithOperator&>::value,
"struct with operator<< should be printable by const ref");
+// .ToString() support on structs.
+static_assert(!internal::SupportsToString<SimpleStruct>::value,
+ "simple struct value doesn't support .ToString()");
+static_assert(!internal::SupportsToString<const SimpleStruct&>::value,
+ "simple struct const ref doesn't support .ToString()");
+static_assert(internal::SupportsToString<StructWithToString>::value,
+ "struct with .ToString() should be printable by value");
+static_assert(internal::SupportsToString<const StructWithToString&>::value,
+ "struct with .ToString() should be printable by const ref");
+
// base::is_trivially_copyable
class TrivialCopy {
public:
diff --git a/chromium/base/test/BUILD.gn b/chromium/base/test/BUILD.gn
index b987a4bc3d5..fe1864baa78 100644
--- a/chromium/base/test/BUILD.gn
+++ b/chromium/base/test/BUILD.gn
@@ -53,7 +53,7 @@ static_library("test_support") {
"copy_only_int.h",
"do_nothing_promise.cc",
"do_nothing_promise.h",
- "fuzzed_data_provider.h",
+ "gmock_callback_support.h",
"gtest_util.cc",
"gtest_util.h",
"gtest_xml_unittest_result_printer.cc",
@@ -471,7 +471,6 @@ if (is_android) {
"android/java/src/org/chromium/base/ContentUriTestUtils.java",
"android/java/src/org/chromium/base/JavaHandlerThreadHelpers.java",
]
- jni_package = "base"
}
generate_jni("test_support_jni_headers") {
@@ -482,7 +481,6 @@ if (is_android) {
"android/javatests/src/org/chromium/base/test/task/ThreadPoolTestHelpers.java",
"android/javatests/src/org/chromium/base/test/util/UrlUtils.java",
]
- jni_package = "base"
}
android_library("test_support_java") {
diff --git a/chromium/base/test/clang_coverage.h b/chromium/base/test/clang_coverage.h
index 44337f1f02e..ed2e3d71216 100644
--- a/chromium/base/test/clang_coverage.h
+++ b/chromium/base/test/clang_coverage.h
@@ -5,6 +5,12 @@
#ifndef BASE_TEST_CLANG_COVERAGE_H_
#define BASE_TEST_CLANG_COVERAGE_H_
+#include "base/clang_coverage_buildflags.h"
+
+#if !BUILDFLAG(CLANG_COVERAGE)
+#error "Clang coverage can only be used if CLANG_COVERAGE macro is defined"
+#endif
+
namespace base {
// Write out the accumulated code coverage profile to the configured file.
@@ -13,7 +19,6 @@ namespace base {
// (or triggering a debug crash), where the automatic at-exit writer will not
// be invoked.
// This call is thread-safe, and will write profiling data at-most-once.
-// Call-sites invoke this API only if the CLANG_COVERAGE macro is defined.
void WriteClangCoverageProfile();
} // namespace base
diff --git a/chromium/base/threading/platform_thread_android.cc b/chromium/base/threading/platform_thread_android.cc
index a8520277a72..c04519288e4 100644
--- a/chromium/base/threading/platform_thread_android.cc
+++ b/chromium/base/threading/platform_thread_android.cc
@@ -12,11 +12,11 @@
#include <unistd.h>
#include "base/android/jni_android.h"
+#include "base/base_jni_headers/ThreadUtils_jni.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/threading/platform_thread_internal_posix.h"
#include "base/threading/thread_id_name_manager.h"
-#include "jni/ThreadUtils_jni.h"
namespace base {
diff --git a/chromium/base/threading/platform_thread_win.cc b/chromium/base/threading/platform_thread_win.cc
index 63b9473d5a5..e7733d9945c 100644
--- a/chromium/base/threading/platform_thread_win.cc
+++ b/chromium/base/threading/platform_thread_win.cc
@@ -17,6 +17,7 @@
#include "base/threading/thread_restrictions.h"
#include "base/win/scoped_handle.h"
#include "base/win/windows_version.h"
+#include "build/build_config.h"
#include <windows.h>
@@ -115,6 +116,14 @@ bool CreateThreadInternal(size_t stack_size,
unsigned int flags = 0;
if (stack_size > 0) {
flags = STACK_SIZE_PARAM_IS_A_RESERVATION;
+#if defined(ARCH_CPU_32_BITS)
+ } else {
+    // The process stack size is increased to give space to |RendererMain| in
+ // |chrome/BUILD.gn|, but keep the default stack size of other threads to
+ // 1MB for the address space pressure.
+ flags = STACK_SIZE_PARAM_IS_A_RESERVATION;
+ stack_size = 1024 * 1024;
+#endif
}
ThreadParams* params = new ThreadParams;
diff --git a/chromium/base/threading/platform_thread_win_unittest.cc b/chromium/base/threading/platform_thread_win_unittest.cc
index 25f111d9715..15c9939da46 100644
--- a/chromium/base/threading/platform_thread_win_unittest.cc
+++ b/chromium/base/threading/platform_thread_win_unittest.cc
@@ -13,8 +13,6 @@
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
-using testing::Contains;
-
namespace base {
// It has been observed that calling
@@ -71,7 +69,7 @@ TEST(PlatformThreadWinTest,
// NORMAL_PRIORITY_CLASS process.
THREAD_PRIORITY_IDLE, internal::kWin7BackgroundThreadModePriority});
EXPECT_THAT(kExpectedWin7Priorities,
- Contains(priority_after_thread_mode_background_begin));
+ testing::Contains(priority_after_thread_mode_background_begin));
} else {
EXPECT_EQ(priority_after_thread_mode_background_begin,
THREAD_PRIORITY_NORMAL);
diff --git a/chromium/base/threading/scoped_thread_priority.cc b/chromium/base/threading/scoped_thread_priority.cc
new file mode 100644
index 00000000000..63b38875110
--- /dev/null
+++ b/chromium/base/threading/scoped_thread_priority.cc
@@ -0,0 +1,48 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/scoped_thread_priority.h"
+
+#include "base/location.h"
+#include "base/threading/platform_thread.h"
+#include "base/trace_event/trace_event.h"
+
+namespace base {
+
+#if defined(OS_WIN)
+// Enable the boost of thread priority when the code may load a library. The
+// thread priority boost is required to avoid priority inversion on the loader
+// lock.
+constexpr base::Feature kBoostThreadPriorityOnLibraryLoading{
+ "BoostThreadPriorityOnLibraryLoading", base::FEATURE_DISABLED_BY_DEFAULT};
+#endif // OS_WIN
+
+ScopedThreadMayLoadLibraryOnBackgroundThread::
+ ScopedThreadMayLoadLibraryOnBackgroundThread(const Location& from_here) {
+ TRACE_EVENT_BEGIN2("base", "ScopedThreadMayLoadLibraryOnBackgroundThread",
+ "file_name", from_here.file_name(), "function_name",
+ from_here.function_name());
+
+#if defined(OS_WIN)
+ if (!base::FeatureList::IsEnabled(kBoostThreadPriorityOnLibraryLoading))
+ return;
+
+ base::ThreadPriority priority = PlatformThread::GetCurrentThreadPriority();
+ if (priority == base::ThreadPriority::BACKGROUND) {
+ original_thread_priority_ = priority;
+ PlatformThread::SetCurrentThreadPriority(base::ThreadPriority::NORMAL);
+ }
+#endif // OS_WIN
+}
+
+ScopedThreadMayLoadLibraryOnBackgroundThread::
+ ~ScopedThreadMayLoadLibraryOnBackgroundThread() {
+ TRACE_EVENT_END0("base", "ScopedThreadMayLoadLibraryOnBackgroundThread");
+#if defined(OS_WIN)
+ if (original_thread_priority_)
+ PlatformThread::SetCurrentThreadPriority(original_thread_priority_.value());
+#endif // OS_WIN
+}
+
+} // namespace base
diff --git a/chromium/base/threading/scoped_thread_priority.h b/chromium/base/threading/scoped_thread_priority.h
new file mode 100644
index 00000000000..f16cbeca8ab
--- /dev/null
+++ b/chromium/base/threading/scoped_thread_priority.h
@@ -0,0 +1,43 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_THREADING_SCOPED_THREAD_PRIORITY_H_
+#define BASE_THREADING_SCOPED_THREAD_PRIORITY_H_
+
+#include "base/base_export.h"
+#include "base/feature_list.h"
+#include "base/optional.h"
+#include "build/build_config.h"
+
+namespace base {
+
+class Location;
+enum class ThreadPriority : int;
+
+// This class must be instantiated in every scope where a DLL can be loaded on
+// a background thread. On Windows, loading a DLL on a background thread can
+// lead to a priority inversion on the loader lock and cause huge janks.
+class BASE_EXPORT ScopedThreadMayLoadLibraryOnBackgroundThread {
+ public:
+ explicit ScopedThreadMayLoadLibraryOnBackgroundThread(
+ const Location& from_here);
+ ~ScopedThreadMayLoadLibraryOnBackgroundThread();
+
+ private:
+#if defined(OS_WIN)
+ // The original priority when entering the scope.
+ base::Optional<ThreadPriority> original_thread_priority_;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedThreadMayLoadLibraryOnBackgroundThread);
+};
+
+// Feature for mitigation of DLL loading on a background thread.
+#if defined(OS_WIN)
+BASE_EXPORT extern const base::Feature kBoostThreadPriorityOnLibraryLoading;
+#endif // OS_WIN
+
+} // namespace base
+
+#endif // BASE_THREADING_SCOPED_THREAD_PRIORITY_H_
diff --git a/chromium/base/threading/scoped_thread_priority_unittest.cc b/chromium/base/threading/scoped_thread_priority_unittest.cc
new file mode 100644
index 00000000000..1202bec9f1f
--- /dev/null
+++ b/chromium/base/threading/scoped_thread_priority_unittest.cc
@@ -0,0 +1,84 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/threading/scoped_thread_priority.h"
+
+#include "base/test/scoped_feature_list.h"
+#include "base/threading/platform_thread.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+class ScopedThreadPriorityTest : public testing::Test {
+ protected:
+ void SetUp() override {
+#if defined(OS_WIN)
+ scoped_features_.InitWithFeatures({kBoostThreadPriorityOnLibraryLoading},
+ {});
+#endif // OS_WIN
+
+ // Ensures the default thread priority is set.
+ ASSERT_EQ(ThreadPriority::NORMAL,
+ PlatformThread::GetCurrentThreadPriority());
+ }
+
+ private:
+ test::ScopedFeatureList scoped_features_;
+};
+
+TEST_F(ScopedThreadPriorityTest, WithoutPriorityBoost) {
+  // Validates that a thread at normal priority keeps the same priority.
+ {
+ ScopedThreadMayLoadLibraryOnBackgroundThread priority_boost(FROM_HERE);
+ EXPECT_EQ(ThreadPriority::NORMAL,
+ PlatformThread::GetCurrentThreadPriority());
+ }
+ EXPECT_EQ(ThreadPriority::NORMAL, PlatformThread::GetCurrentThreadPriority());
+}
+
+#if defined(OS_WIN)
+TEST_F(ScopedThreadPriorityTest, WithPriorityBoost) {
+ // Validates that a thread at background priority is boosted to normal
+ // priority.
+ PlatformThread::SetCurrentThreadPriority(ThreadPriority::BACKGROUND);
+ {
+ ScopedThreadMayLoadLibraryOnBackgroundThread priority_boost(FROM_HERE);
+ EXPECT_EQ(ThreadPriority::NORMAL,
+ PlatformThread::GetCurrentThreadPriority());
+ }
+ EXPECT_EQ(ThreadPriority::BACKGROUND,
+ PlatformThread::GetCurrentThreadPriority());
+
+ // Put back the default thread priority.
+ PlatformThread::SetCurrentThreadPriority(ThreadPriority::NORMAL);
+}
+#endif // OS_WIN
+
+#if defined(OS_WIN)
+TEST_F(ScopedThreadPriorityTest, NestedScope) {
+ PlatformThread::SetCurrentThreadPriority(ThreadPriority::BACKGROUND);
+
+ {
+ ScopedThreadMayLoadLibraryOnBackgroundThread priority_boost(FROM_HERE);
+ EXPECT_EQ(ThreadPriority::NORMAL,
+ PlatformThread::GetCurrentThreadPriority());
+ {
+ ScopedThreadMayLoadLibraryOnBackgroundThread priority_boost(FROM_HERE);
+ EXPECT_EQ(ThreadPriority::NORMAL,
+ PlatformThread::GetCurrentThreadPriority());
+ }
+ EXPECT_EQ(ThreadPriority::NORMAL,
+ PlatformThread::GetCurrentThreadPriority());
+ }
+
+ EXPECT_EQ(ThreadPriority::BACKGROUND,
+ PlatformThread::GetCurrentThreadPriority());
+
+ // Put back the default thread priority.
+ PlatformThread::SetCurrentThreadPriority(ThreadPriority::NORMAL);
+}
+#endif // OS_WIN
+
+} // namespace base
diff --git a/chromium/base/threading/sequence_bound.h b/chromium/base/threading/sequence_bound.h
index 6404a78f71b..37b5b524439 100644
--- a/chromium/base/threading/sequence_bound.h
+++ b/chromium/base/threading/sequence_bound.h
@@ -43,7 +43,7 @@ namespace base {
// // On any thread...
// scoped_refptr<SequencedTaskRunner> main_task_runner = ...;
// auto widget = SequenceBound<MyClass>(main_task_runner, "My Title");
-// widget.Post(&MyClass::DoSomething, 1234);
+// widget.Post(FROM_HERE, &MyClass::DoSomething, 1234);
//
// Note that |widget| is constructed asynchronously on |main_task_runner|,
// but calling Post() immediately is safe, since the actual call is posted
@@ -234,8 +234,8 @@ class SequenceBound {
// Run on impl thread to construct |t|'s storage.
template <typename... Args>
- static void ConstructOwnerRecord(T* t, Args&&... args) {
- new (t) T(std::forward<Args>(args)...);
+ static void ConstructOwnerRecord(T* t, std::decay_t<Args>&&... args) {
+ new (t) T(std::move(args)...);
}
// Destruct the object associated with |t|, and delete |storage|.
diff --git a/chromium/base/threading/sequence_bound_unittest.cc b/chromium/base/threading/sequence_bound_unittest.cc
index 451457357b3..b571c8d536d 100644
--- a/chromium/base/threading/sequence_bound_unittest.cc
+++ b/chromium/base/threading/sequence_bound_unittest.cc
@@ -323,4 +323,25 @@ TEST_F(SequenceBoundTest, IsVirtualBaseClassOf) {
"|VirtuallyDerived shouldn't be a virtual base of |Base|");
}
+TEST_F(SequenceBoundTest, LvalueConstructionParameter) {
+ // Note here that |value_ptr| is an lvalue, while |&value| would be an rvalue.
+ Value value = kInitialValue;
+ Value* value_ptr = &value;
+ SequenceBound<Derived> derived(task_runner_, value_ptr);
+ {
+ derived.Post(FROM_HERE, &Derived::SetValue, kDifferentValue);
+ base::RunLoop run_loop;
+ task_runner_->PostTask(FROM_HERE, run_loop.QuitClosure());
+ run_loop.Run();
+ EXPECT_EQ(value, kDifferentValue);
+ }
+ {
+ derived.Reset();
+ base::RunLoop run_loop;
+ task_runner_->PostTask(FROM_HERE, run_loop.QuitClosure());
+ run_loop.Run();
+ EXPECT_EQ(value, kDerivedDtorValue);
+ }
+}
+
} // namespace base
diff --git a/chromium/base/threading/sequenced_task_runner_handle_unittest.cc b/chromium/base/threading/sequenced_task_runner_handle_unittest.cc
index f178585db8a..f88e6e427a2 100644
--- a/chromium/base/threading/sequenced_task_runner_handle_unittest.cc
+++ b/chromium/base/threading/sequenced_task_runner_handle_unittest.cc
@@ -58,17 +58,16 @@ TEST_F(SequencedTaskRunnerHandleTest, FromTaskEnvironment) {
}
TEST_F(SequencedTaskRunnerHandleTest, FromThreadPoolSequencedTask) {
- base::CreateSequencedTaskRunnerWithTraits({})->PostTask(
- FROM_HERE,
- base::BindOnce(
- &SequencedTaskRunnerHandleTest::VerifyCurrentSequencedTaskRunner));
+ base::CreateSequencedTaskRunner({ThreadPool()})
+ ->PostTask(FROM_HERE,
+ base::BindOnce(&SequencedTaskRunnerHandleTest::
+ VerifyCurrentSequencedTaskRunner));
scoped_task_environment_.RunUntilIdle();
}
TEST_F(SequencedTaskRunnerHandleTest, NoHandleFromUnsequencedTask) {
- base::PostTask(FROM_HERE, base::BindOnce([]() {
- EXPECT_FALSE(SequencedTaskRunnerHandle::IsSet());
- }));
+ base::PostTask(base::BindOnce(
+ []() { EXPECT_FALSE(SequencedTaskRunnerHandle::IsSet()); }));
scoped_task_environment_.RunUntilIdle();
}
diff --git a/chromium/base/threading/thread.h b/chromium/base/threading/thread.h
index b0191543eea..3b943054033 100644
--- a/chromium/base/threading/thread.h
+++ b/chromium/base/threading/thread.h
@@ -30,7 +30,7 @@ class MessagePump;
class RunLoop;
// IMPORTANT: Instead of creating a base::Thread, consider using
-// base::Create(Sequenced|SingleThread)TaskRunnerWithTraits().
+// base::Create(Sequenced|SingleThread)TaskRunner().
//
// A simple thread abstraction that establishes a MessageLoop on a new thread.
// The consumer uses the MessageLoop of the thread to cause code to execute on
diff --git a/chromium/base/threading/thread_id_name_manager.cc b/chromium/base/threading/thread_id_name_manager.cc
index a0ced2c5001..ba2f9b41cb1 100644
--- a/chromium/base/threading/thread_id_name_manager.cc
+++ b/chromium/base/threading/thread_id_name_manager.cc
@@ -10,6 +10,7 @@
#include "base/logging.h"
#include "base/memory/singleton.h"
#include "base/no_destructor.h"
+#include "base/stl_util.h"
#include "base/strings/string_util.h"
#include "base/threading/thread_local.h"
#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
@@ -26,6 +27,8 @@ ThreadLocalStorage::Slot& GetThreadNameTLS() {
}
}
+ThreadIdNameManager::Observer::~Observer() = default;
+
ThreadIdNameManager::ThreadIdNameManager()
: main_process_name_(nullptr), main_process_id_(kInvalidThreadId) {
g_default_name = new std::string(kDefaultName);
@@ -53,9 +56,16 @@ void ThreadIdNameManager::RegisterThread(PlatformThreadHandle::Handle handle,
name_to_interned_name_[kDefaultName];
}
-void ThreadIdNameManager::InstallSetNameCallback(SetNameCallback callback) {
+void ThreadIdNameManager::AddObserver(Observer* obs) {
AutoLock locked(lock_);
- set_name_callback_ = std::move(callback);
+ DCHECK(!base::Contains(observers_, obs));
+ observers_.push_back(obs);
+}
+
+void ThreadIdNameManager::RemoveObserver(Observer* obs) {
+ AutoLock locked(lock_);
+ DCHECK(base::Contains(observers_, obs));
+ base::Erase(observers_, obs);
}
void ThreadIdNameManager::SetName(const std::string& name) {
@@ -74,9 +84,8 @@ void ThreadIdNameManager::SetName(const std::string& name) {
auto id_to_handle_iter = thread_id_to_handle_.find(id);
GetThreadNameTLS().Set(const_cast<char*>(leaked_str->c_str()));
- if (set_name_callback_) {
- set_name_callback_.Run(leaked_str->c_str());
- }
+ for (Observer* obs : observers_)
+ obs->OnThreadNameChanged(leaked_str->c_str());
// The main thread of a process will not be created as a Thread object which
// means there is no PlatformThreadHandler registered.
diff --git a/chromium/base/threading/thread_id_name_manager.h b/chromium/base/threading/thread_id_name_manager.h
index f17dc1a4e84..e413da5d037 100644
--- a/chromium/base/threading/thread_id_name_manager.h
+++ b/chromium/base/threading/thread_id_name_manager.h
@@ -7,10 +7,12 @@
#include <map>
#include <string>
+#include <vector>
#include "base/base_export.h"
#include "base/callback.h"
#include "base/macros.h"
+#include "base/observer_list.h"
#include "base/synchronization/lock.h"
#include "base/threading/platform_thread.h"
@@ -25,14 +27,24 @@ class BASE_EXPORT ThreadIdNameManager {
static const char* GetDefaultInternedString();
+ class BASE_EXPORT Observer {
+ public:
+ virtual ~Observer();
+
+ // Called on the thread whose name is changing, immediately after the name
+ // is set. |name| is a pointer to a C string that is guaranteed to remain
+ // valid for the duration of the process.
+ //
+ // NOTE: Will be called while ThreadIdNameManager's lock is held, so don't
+ // call back into it.
+ virtual void OnThreadNameChanged(const char* name) = 0;
+ };
+
// Register the mapping between a thread |id| and |handle|.
void RegisterThread(PlatformThreadHandle::Handle handle, PlatformThreadId id);
- // The callback is called on the thread, immediately after the name is set.
- // |name| is a pointer to a C string that is guaranteed to remain valid for
- // the duration of the process.
- using SetNameCallback = base::RepeatingCallback<void(const char* name)>;
- void InstallSetNameCallback(SetNameCallback callback);
+ void AddObserver(Observer*);
+ void RemoveObserver(Observer*);
// Set the name for the current thread.
void SetName(const std::string& name);
@@ -70,7 +82,9 @@ class BASE_EXPORT ThreadIdNameManager {
std::string* main_process_name_;
PlatformThreadId main_process_id_;
- SetNameCallback set_name_callback_;
+ // There's no point using a base::ObserverList behind a lock, so we just use
+ // an std::vector instead.
+ std::vector<Observer*> observers_;
DISALLOW_COPY_AND_ASSIGN(ThreadIdNameManager);
};
diff --git a/chromium/base/threading/thread_local_storage_perftest.cc b/chromium/base/threading/thread_local_storage_perftest.cc
new file mode 100644
index 00000000000..198775c9fa2
--- /dev/null
+++ b/chromium/base/threading/thread_local_storage_perftest.cc
@@ -0,0 +1,200 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+#include <memory>
+#include <vector>
+
+#include "base/barrier_closure.h"
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/callback.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/test/bind_test_util.h"
+#include "base/threading/simple_thread.h"
+#include "base/threading/thread_local_storage.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/perf/perf_test.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#include "base/win/windows_types.h"
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+#include <pthread.h>
+#endif
+
+namespace base {
+namespace internal {
+
+namespace {
+
+// A thread that waits for the caller to signal an event before proceeding to
+// call action.Run().
+class TLSThread : public SimpleThread {
+ public:
+  // Creates a TLSThread that waits on |start_event| before calling
+  // action.Run().
+ TLSThread(WaitableEvent* start_event,
+ base::OnceClosure action,
+ base::OnceClosure completion)
+ : SimpleThread("TLSThread"),
+ start_event_(start_event),
+ action_(std::move(action)),
+ completion_(std::move(completion)) {
+ Start();
+ }
+
+ void Run() override {
+ start_event_->Wait();
+ std::move(action_).Run();
+ std::move(completion_).Run();
+ }
+
+ private:
+ WaitableEvent* const start_event_;
+ base::OnceClosure action_;
+ base::OnceClosure completion_;
+
+ DISALLOW_COPY_AND_ASSIGN(TLSThread);
+};
+
+class ThreadLocalStoragePerfTest : public testing::Test {
+ public:
+ protected:
+ ThreadLocalStoragePerfTest() = default;
+ ~ThreadLocalStoragePerfTest() override = default;
+
+ template <class Read, class Write>
+ void Benchmark(const std::string& trace,
+ Read read,
+ Write write,
+ size_t num_operation,
+ size_t num_threads) {
+ write(2);
+
+ BenchmarkImpl("TLS read throughput", trace,
+ base::BindLambdaForTesting([&]() {
+ volatile intptr_t total = 0;
+ for (size_t i = 0; i < num_operation; ++i)
+ total += read();
+ }),
+ num_operation, num_threads);
+
+ BenchmarkImpl("TLS write throughput", trace,
+ base::BindLambdaForTesting([&]() {
+ for (size_t i = 0; i < num_operation; ++i)
+ write(i);
+ }),
+ num_operation, num_threads);
+
+ BenchmarkImpl("TLS read-write throughput", trace,
+ base::BindLambdaForTesting([&]() {
+ for (size_t i = 0; i < num_operation; ++i)
+ write(read() + 1);
+ }),
+ num_operation, num_threads);
+ }
+
+ void BenchmarkImpl(const std::string& measurment,
+ const std::string& trace,
+ base::RepeatingClosure action,
+ size_t num_operation,
+ size_t num_threads) {
+ WaitableEvent start_thread;
+ WaitableEvent complete_thread;
+
+ base::RepeatingClosure done = BarrierClosure(
+ num_threads,
+ base::BindLambdaForTesting([&]() { complete_thread.Signal(); }));
+
+ std::vector<std::unique_ptr<TLSThread>> threads;
+ for (size_t i = 0; i < num_threads; ++i) {
+ threads.emplace_back(
+ std::make_unique<TLSThread>(&start_thread, action, done));
+ }
+
+ TimeTicks operation_start = TimeTicks::Now();
+ start_thread.Signal();
+ complete_thread.Wait();
+ TimeDelta operation_duration = TimeTicks::Now() - operation_start;
+
+ for (auto& thread : threads)
+ thread->Join();
+
+ perf_test::PrintResult(
+ measurment, "", trace,
+ num_operation /
+ static_cast<double>(operation_duration.InMilliseconds()),
+ "operations/ms", true);
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ThreadLocalStoragePerfTest);
+};
+
+} // namespace
+
+TEST_F(ThreadLocalStoragePerfTest, ThreadLocalStorage) {
+ ThreadLocalStorage::Slot tls;
+ auto read = [&]() { return reinterpret_cast<intptr_t>(tls.Get()); };
+ auto write = [&](intptr_t value) { tls.Set(reinterpret_cast<void*>(value)); };
+
+ Benchmark("ThreadLocalStorage", read, write, 10000000, 1);
+ Benchmark("ThreadLocalStorage 4 threads", read, write, 10000000, 4);
+}
+
+#if defined(OS_WIN)
+
+void WINAPI destroy(void*) {}
+
+TEST_F(ThreadLocalStoragePerfTest, PlatformFls) {
+ DWORD key = FlsAlloc(destroy);
+ ASSERT_NE(PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES, key);
+
+ auto read = [&]() { return reinterpret_cast<intptr_t>(FlsGetValue(key)); };
+ auto write = [&](intptr_t value) {
+ FlsSetValue(key, reinterpret_cast<void*>(value));
+ };
+
+ Benchmark("PlatformFls", read, write, 10000000, 1);
+ Benchmark("PlatformFls 4 threads", read, write, 10000000, 4);
+}
+
+TEST_F(ThreadLocalStoragePerfTest, PlatformTls) {
+ DWORD key = TlsAlloc();
+ ASSERT_NE(PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES, key);
+
+ auto read = [&]() { return reinterpret_cast<intptr_t>(TlsGetValue(key)); };
+ auto write = [&](intptr_t value) {
+ TlsSetValue(key, reinterpret_cast<void*>(value));
+ };
+
+ Benchmark("PlatformTls", read, write, 10000000, 1);
+ Benchmark("PlatformTls 4 threads", read, write, 10000000, 4);
+}
+
+#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
+
+TEST_F(ThreadLocalStoragePerfTest, PlatformTls) {
+ pthread_key_t key;
+ ASSERT_FALSE(pthread_key_create(&key, [](void*) {}));
+ ASSERT_NE(PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES, key);
+
+ auto read = [&]() {
+ return reinterpret_cast<intptr_t>(pthread_getspecific(key));
+ };
+ auto write = [&](intptr_t value) {
+ pthread_setspecific(key, reinterpret_cast<void*>(value));
+ };
+
+ Benchmark("PlatformTls", read, write, 10000000, 1);
+ Benchmark("PlatformTls 4 threads", read, write, 10000000, 4);
+}
+
+#endif
+
+} // namespace internal
+} // namespace base \ No newline at end of file
diff --git a/chromium/base/threading/thread_restrictions.cc b/chromium/base/threading/thread_restrictions.cc
index 90304501d61..75c37eab4fe 100644
--- a/chromium/base/threading/thread_restrictions.cc
+++ b/chromium/base/threading/thread_restrictions.cc
@@ -150,6 +150,25 @@ ScopedAllowBaseSyncPrimitivesForTesting::
g_base_sync_primitives_disallowed.Get().Set(was_disallowed_);
}
+ScopedAllowUnresponsiveTasksForTesting::ScopedAllowUnresponsiveTasksForTesting()
+ : was_disallowed_base_sync_(g_base_sync_primitives_disallowed.Get().Get()),
+ was_disallowed_blocking_(g_blocking_disallowed.Get().Get()),
+ was_disallowed_cpu_(g_cpu_intensive_work_disallowed.Get().Get()) {
+ g_base_sync_primitives_disallowed.Get().Set(false);
+ g_blocking_disallowed.Get().Set(false);
+ g_cpu_intensive_work_disallowed.Get().Set(false);
+}
+
+ScopedAllowUnresponsiveTasksForTesting::
+ ~ScopedAllowUnresponsiveTasksForTesting() {
+ DCHECK(!g_base_sync_primitives_disallowed.Get().Get());
+ DCHECK(!g_blocking_disallowed.Get().Get());
+ DCHECK(!g_cpu_intensive_work_disallowed.Get().Get());
+ g_base_sync_primitives_disallowed.Get().Set(was_disallowed_base_sync_);
+ g_blocking_disallowed.Get().Set(was_disallowed_blocking_);
+ g_cpu_intensive_work_disallowed.Get().Set(was_disallowed_cpu_);
+}
+
namespace internal {
void AssertBaseSyncPrimitivesAllowed() {
diff --git a/chromium/base/threading/thread_restrictions.h b/chromium/base/threading/thread_restrictions.h
index 1ec75dd5b93..09e7d73b496 100644
--- a/chromium/base/threading/thread_restrictions.h
+++ b/chromium/base/threading/thread_restrictions.h
@@ -115,7 +115,7 @@ class VideoFrameResourceProvider;
}
namespace cc {
class CompletionEvent;
-class SingleThreadTaskGraphRunner;
+class TileTaskManagerImpl;
}
namespace chromeos {
class BlockingMethodCaller;
@@ -137,8 +137,8 @@ class BrowserShutdownProfileDumper;
class BrowserTestBase;
class CategorizedWorkerPool;
class DesktopCaptureDevice;
-class DWriteFontLookupTableBuilder;
class GpuProcessTransportFactory;
+class InProcessUtilityThread;
class NestedMessagePumpAndroid;
class RenderWidgetHostViewMac;
class RTCVideoDecoder;
@@ -395,7 +395,6 @@ class BASE_EXPORT ScopedAllowBaseSyncPrimitives {
friend class chrome_cleaner::SystemReportComponent;
friend class content::BrowserMainLoop;
friend class content::BrowserProcessSubThread;
- friend class content::DWriteFontLookupTableBuilder;
friend class content::ServiceWorkerContextClient;
friend class content::SessionStorageDatabase;
friend class functions::ExecScriptScopedAllowBaseSyncPrimitives;
@@ -454,7 +453,10 @@ class BASE_EXPORT ScopedAllowBaseSyncPrimitivesOutsideBlockingScope {
friend class base::MessageLoopImpl;
friend class base::ScopedAllowThreadRecallForStackSamplingProfiler;
friend class base::StackSamplingProfiler;
+ friend class cc::TileTaskManagerImpl;
+ friend class content::CategorizedWorkerPool;
friend class content::DesktopCaptureDevice;
+ friend class content::InProcessUtilityThread;
friend class content::RTCVideoDecoder;
friend class content::RTCVideoDecoderAdapter;
friend class content::RTCVideoEncoder;
@@ -474,10 +476,8 @@ class BASE_EXPORT ScopedAllowBaseSyncPrimitivesOutsideBlockingScope {
friend class ::chromeos::BlockingMethodCaller; // http://crbug.com/125360
friend class base::Thread; // http://crbug.com/918039
friend class cc::CompletionEvent; // http://crbug.com/902653
- friend class cc::SingleThreadTaskGraphRunner; // http://crbug.com/902823
friend class content::
BrowserGpuChannelHostFactory; // http://crbug.com/125248
- friend class content::CategorizedWorkerPool; // http://crbug.com/902823
friend class dbus::Bus; // http://crbug.com/125222
friend class disk_cache::BackendImpl; // http://crbug.com/74623
friend class disk_cache::InFlightIO; // http://crbug.com/74623
@@ -522,6 +522,23 @@ class BASE_EXPORT ScopedAllowBaseSyncPrimitivesForTesting {
DISALLOW_COPY_AND_ASSIGN(ScopedAllowBaseSyncPrimitivesForTesting);
};
+// Counterpart to base::DisallowUnresponsiveTasks() for tests to allow them to
+// block their thread after it was banned.
+class BASE_EXPORT ScopedAllowUnresponsiveTasksForTesting {
+ public:
+ ScopedAllowUnresponsiveTasksForTesting() EMPTY_BODY_IF_DCHECK_IS_OFF;
+ ~ScopedAllowUnresponsiveTasksForTesting() EMPTY_BODY_IF_DCHECK_IS_OFF;
+
+ private:
+#if DCHECK_IS_ON()
+ const bool was_disallowed_base_sync_;
+ const bool was_disallowed_blocking_;
+ const bool was_disallowed_cpu_;
+#endif
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedAllowUnresponsiveTasksForTesting);
+};
+
namespace internal {
// Asserts that waiting on a //base sync primitive is allowed in the current
diff --git a/chromium/base/time/default_clock.cc b/chromium/base/time/default_clock.cc
index aa08f52bfef..001357e36ca 100644
--- a/chromium/base/time/default_clock.cc
+++ b/chromium/base/time/default_clock.cc
@@ -4,7 +4,7 @@
#include "base/time/default_clock.h"
-#include "base/lazy_instance.h"
+#include "base/no_destructor.h"
namespace base {
@@ -16,8 +16,8 @@ Time DefaultClock::Now() const {
// static
DefaultClock* DefaultClock::GetInstance() {
- static LazyInstance<DefaultClock>::Leaky instance = LAZY_INSTANCE_INITIALIZER;
- return instance.Pointer();
+ static base::NoDestructor<DefaultClock> instance;
+ return instance.get();
}
} // namespace base
diff --git a/chromium/base/time/time.h b/chromium/base/time/time.h
index 661b5661426..e5d3cf7a457 100644
--- a/chromium/base/time/time.h
+++ b/chromium/base/time/time.h
@@ -130,6 +130,9 @@ class BASE_EXPORT TimeDelta {
#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
static TimeDelta FromTimeSpec(const timespec& ts);
#endif
+#if defined(OS_FUCHSIA)
+ static TimeDelta FromZxDuration(zx_duration_t nanos);
+#endif
// Converts an integer value representing TimeDelta to a class. This is used
// when deserializing a |TimeDelta| structure, using a value known to be
@@ -182,6 +185,9 @@ class BASE_EXPORT TimeDelta {
#if defined(OS_POSIX) || defined(OS_FUCHSIA)
struct timespec ToTimeSpec() const;
#endif
+#if defined(OS_FUCHSIA)
+ zx_duration_t ToZxDuration() const;
+#endif
// Returns the time delta in some unit. The InXYZF versions return a floating
// point value. The InXYZ versions return a truncated value (aka rounded
@@ -579,6 +585,11 @@ class BASE_EXPORT Time : public time_internal::TimeBase<Time> {
struct timeval ToTimeVal() const;
#endif
+#if defined(OS_FUCHSIA)
+ static Time FromZxTime(zx_time_t time);
+ zx_time_t ToZxTime() const;
+#endif
+
#if defined(OS_MACOSX)
static Time FromCFAbsoluteTime(CFAbsoluteTime t);
CFAbsoluteTime ToCFAbsoluteTime() const;
diff --git a/chromium/base/time/time_exploded_posix.cc b/chromium/base/time/time_exploded_posix.cc
index a80d4cb4e60..0655703a1f4 100644
--- a/chromium/base/time/time_exploded_posix.cc
+++ b/chromium/base/time/time_exploded_posix.cc
@@ -24,18 +24,28 @@
#include "base/os_compat_nacl.h"
#endif
+#if defined(OS_FUCHSIA)
+#include <fuchsia/deprecatedtimezone/cpp/fidl.h>
+#include "base/fuchsia/fuchsia_logging.h"
+#include "base/fuchsia/service_directory_client.h"
+#include "base/no_destructor.h"
+#include "base/numerics/clamped_math.h"
+#endif
+
#if defined(OS_MACOSX)
static_assert(sizeof(time_t) >= 8, "Y2038 problem!");
#endif
namespace {
+#if !defined(OS_FUCHSIA)
// This prevents a crash on traversing the environment global and looking up
// the 'TZ' variable in libc. See: crbug.com/390567.
base::Lock* GetSysTimeToTimeStructLock() {
static auto* lock = new base::Lock();
return lock;
}
+#endif // !defined(OS_FUCHSIA)
// Define a system-specific SysTime that wraps either to a time_t or
// a time64_t depending on the host system, and associated convertion.
@@ -59,6 +69,70 @@ void SysTimeToTimeStruct(SysTime t, struct tm* timestruct, bool is_local) {
gmtime64_r(&t, timestruct);
}
+#elif defined(OS_FUCHSIA)
+typedef time_t SysTime;
+
+SysTime GetTimezoneOffset(SysTime utc_time) {
+ static base::NoDestructor<fuchsia::deprecatedtimezone::TimezoneSyncPtr>
+ timezone(
+ base::fuchsia::ServiceDirectoryClient::ForCurrentProcess()
+ ->ConnectToServiceSync<fuchsia::deprecatedtimezone::Timezone>());
+
+ int64_t milliseconds_since_epoch =
+ base::ClampMul(utc_time, base::Time::kMillisecondsPerSecond);
+ int32_t local_offset_minutes = 0;
+ int32_t dst_offset_minutes = 0;
+ zx_status_t status = (*timezone.get())
+ ->GetTimezoneOffsetMinutes(milliseconds_since_epoch,
+ &local_offset_minutes,
+ &dst_offset_minutes);
+ if (status != ZX_OK) {
+ ZX_DLOG(ERROR, status) << "Failed to get current timezone offset.";
+ return 0;
+ }
+ return (local_offset_minutes + dst_offset_minutes) *
+ base::Time::kSecondsPerMinute;
+}
+
+SysTime SysTimeFromTimeStruct(struct tm* timestruct, bool is_local) {
+ SysTime result = timegm(timestruct);
+ if (is_local) {
+ // Local->UTC conversion may be ambiguous, particularly when local clock is
+ // changed back (e.g. in when DST ends). In such cases there are 2 correct
+ // results and this function will return one of them. Also some local time
+ // values may be invalid. Specifically when local time is rolled forward
+ // (when DST starts) the values in the transitional period are invalid and
+ // don't have corresponding values in the UTC timeline. In those cases using
+ // timezone offset either before or after transition is acceptable.
+ //
+ // fuchsia::deprecatedtimezone API returns offset based on UTC time. It may
+ // return incorrect result when called with a value that also includes
+ // timezone offset. Particularly this is a problem when the time is close to
+ // DST transitions. For example, when transitioning from PST (UTC-8,
+ // non-DST) to PDT (UTC-7, DST) GetTimezoneOffset(local_time) will return a
+ // value that's off by 1 hour for 8 hours after the transition. To avoid
+ // this problem the offset is estimated as GetTimezoneOffset(local_time)
+ // from which |approx_utc_time| is calculated. Then
+ // GetTimezoneOffset(approx_utc_time) is used to calculate the actual
+ // offset. This works correctly assuming timezone transition can happen at
+ // most once per day. When both before and after offsets are in the [-1H,
+ // 1H] range then the |approx_utc_time| is correct (see the note above for
+ // definition of what is considered correct). Otherwise |approx_utc_time|
+ // may be off by 1 hour. In those cases GetTimezoneOffset(approx_utc_time)
+ // will return correct offset because we can assume there are no timezone
+ // changes in the [UTC-1H, UTC+1H] period (the transition is scheduled
+ // either before UTC-1H or after UTC+1H).
+ int64_t approx_utc_time = result - GetTimezoneOffset(result);
+ result -= GetTimezoneOffset(approx_utc_time);
+ }
+ return result;
+}
+
+void SysTimeToTimeStruct(SysTime t, struct tm* timestruct, bool is_local) {
+ if (is_local)
+ t += GetTimezoneOffset(t);
+ gmtime_r(&t, timestruct);
+}
#elif defined(OS_AIX)
// The function timegm is not available on AIX.
diff --git a/chromium/base/time/time_fuchsia.cc b/chromium/base/time/time_fuchsia.cc
index 5b896b3f3f7..29c5f72f9a3 100644
--- a/chromium/base/time/time_fuchsia.cc
+++ b/chromium/base/time/time_fuchsia.cc
@@ -6,37 +6,20 @@
#include <zircon/syscalls.h>
-#include "base/compiler_specific.h"
#include "base/fuchsia/fuchsia_logging.h"
-#include "base/numerics/checked_math.h"
#include "base/time/time_override.h"
namespace base {
-namespace {
-
-// Helper function to map an unsigned integer with nanosecond timebase to a
-// signed integer with microsecond timebase.
-ALWAYS_INLINE int64_t ZxTimeToMicroseconds(zx_time_t nanos) {
- const zx_time_t micros =
- nanos / static_cast<zx_time_t>(base::Time::kNanosecondsPerMicrosecond);
- return static_cast<int64_t>(micros);
-}
-
-} // namespace
-
// Time -----------------------------------------------------------------------
namespace subtle {
Time TimeNowIgnoringOverride() {
zx_time_t nanos_since_unix_epoch;
- zx_status_t status = zx_clock_get_new(ZX_CLOCK_UTC, &nanos_since_unix_epoch);
+ zx_status_t status = zx_clock_get(ZX_CLOCK_UTC, &nanos_since_unix_epoch);
ZX_CHECK(status == ZX_OK, status);
- DCHECK(nanos_since_unix_epoch != 0);
// The following expression will overflow in the year 289938 A.D.:
- return Time() + TimeDelta::FromMicroseconds(
- ZxTimeToMicroseconds(nanos_since_unix_epoch) +
- Time::kTimeTToMicrosecondsOffset);
+ return Time::FromZxTime(nanos_since_unix_epoch);
}
Time TimeNowFromSystemTimeIgnoringOverride() {
@@ -51,12 +34,29 @@ namespace subtle {
TimeTicks TimeTicksNowIgnoringOverride() {
const zx_time_t nanos_since_boot = zx_clock_get_monotonic();
CHECK(nanos_since_boot != 0);
- return TimeTicks() +
- TimeDelta::FromMicroseconds(ZxTimeToMicroseconds(nanos_since_boot));
+ return TimeTicks::FromZxTime(nanos_since_boot);
}
} // namespace subtle
// static
+TimeDelta TimeDelta::FromZxDuration(zx_duration_t nanos) {
+ return TimeDelta::FromNanoseconds(nanos);
+}
+
+zx_duration_t TimeDelta::ToZxDuration() const {
+ return InNanoseconds();
+}
+
+// static
+Time Time::FromZxTime(zx_time_t nanos_since_unix_epoch) {
+ return Time::UnixEpoch() + TimeDelta::FromNanoseconds(nanos_since_unix_epoch);
+}
+
+zx_time_t Time::ToZxTime() const {
+ return (*this - Time::UnixEpoch()).InNanoseconds();
+}
+
+// static
TimeTicks::Clock TimeTicks::GetClock() {
return Clock::FUCHSIA_ZX_CLOCK_MONOTONIC;
}
@@ -73,13 +73,11 @@ bool TimeTicks::IsConsistentAcrossProcesses() {
// static
TimeTicks TimeTicks::FromZxTime(zx_time_t nanos_since_boot) {
- return TimeTicks(ZxTimeToMicroseconds(nanos_since_boot));
+ return TimeTicks() + TimeDelta::FromNanoseconds(nanos_since_boot);
}
zx_time_t TimeTicks::ToZxTime() const {
- CheckedNumeric<zx_time_t> result(base::Time::kNanosecondsPerMicrosecond);
- result *= us_;
- return result.ValueOrDie();
+ return (*this - TimeTicks()).InNanoseconds();
}
// ThreadTicks ----------------------------------------------------------------
@@ -88,11 +86,10 @@ namespace subtle {
ThreadTicks ThreadTicksNowIgnoringOverride() {
zx_time_t nanos_since_thread_started;
zx_status_t status =
- zx_clock_get_new(ZX_CLOCK_THREAD, &nanos_since_thread_started);
+ zx_clock_get(ZX_CLOCK_THREAD, &nanos_since_thread_started);
ZX_CHECK(status == ZX_OK, status);
DCHECK(nanos_since_thread_started != 0);
- return ThreadTicks() + TimeDelta::FromMicroseconds(
- ZxTimeToMicroseconds(nanos_since_thread_started));
+ return ThreadTicks() + TimeDelta::FromNanoseconds(nanos_since_thread_started);
}
} // namespace subtle
diff --git a/chromium/base/time/time_unittest.cc b/chromium/base/time/time_unittest.cc
index 30ff6fdaf90..5e8bad99065 100644
--- a/chromium/base/time/time_unittest.cc
+++ b/chromium/base/time/time_unittest.cc
@@ -813,6 +813,28 @@ TEST_F(TimeTest, NowOverride) {
EXPECT_GT(Time::Max(), subtle::TimeNowFromSystemTimeIgnoringOverride());
}
+#if defined(OS_FUCHSIA)
+TEST(ZxTimeTest, ToFromConversions) {
+ Time unix_epoch = Time::UnixEpoch();
+ EXPECT_EQ(unix_epoch.ToZxTime(), 0);
+ EXPECT_EQ(Time::FromZxTime(6000000000),
+ unix_epoch + TimeDelta::FromSeconds(6));
+
+ TimeTicks ticks_now = TimeTicks::Now();
+ EXPECT_GE(ticks_now.ToZxTime(), 0);
+ TimeTicks ticks_later = ticks_now + TimeDelta::FromSeconds(2);
+ EXPECT_EQ((ticks_later.ToZxTime() - ticks_now.ToZxTime()), 2000000000);
+ EXPECT_EQ(TimeTicks::FromZxTime(3000000000),
+ TimeTicks() + TimeDelta::FromSeconds(3));
+
+ EXPECT_EQ(TimeDelta().ToZxDuration(), 0);
+ EXPECT_EQ(TimeDelta::FromZxDuration(0), TimeDelta());
+
+ EXPECT_EQ(TimeDelta::FromSeconds(2).ToZxDuration(), 2000000000);
+ EXPECT_EQ(TimeDelta::FromZxDuration(4000000000), TimeDelta::FromSeconds(4));
+}
+#endif // defined(OS_FUCHSIA)
+
TEST(TimeTicks, Deltas) {
for (int index = 0; index < 50; index++) {
TimeTicks ticks_start = TimeTicks::Now();
diff --git a/chromium/base/time/time_win.cc b/chromium/base/time/time_win.cc
index ffb432910bf..9bfdba08f8f 100644
--- a/chromium/base/time/time_win.cc
+++ b/chromium/base/time/time_win.cc
@@ -31,6 +31,7 @@
// will only increase the system-wide timer if we're not running on battery
// power.
+#include "base/feature_list.h"
#include "base/time/time.h"
#include <windows.h>
@@ -84,12 +85,28 @@ void InitializeClock() {
g_initial_time = CurrentWallclockMicroseconds();
}
+const base::Feature kSlowDCTimerInterruptsFeature{
+ "SlowDCTimerInterrups", base::FEATURE_DISABLED_BY_DEFAULT};
+
// The two values that ActivateHighResolutionTimer uses to set the systemwide
// timer interrupt frequency on Windows. It controls how precise timers are
// but also has a big impact on battery life.
-const int kMinTimerIntervalHighResMs = 1;
-const int kMinTimerIntervalLowResMs = 4;
-// Track if kMinTimerIntervalHighResMs or kMinTimerIntervalLowResMs is active.
+// Used when running on AC power - plugged in - when a fast timer is wanted.
+UINT MinTimerIntervalHighResMs() {
+ return 1;
+}
+
+UINT MinTimerIntervalLowResMs() {
+ // Traditionally Chrome has used an interval of 4 ms when raising the timer
+ // interrupt frequency on battery power. However even 4 ms is too short an
+ // interval on modern CPUs - it wastes non-trivial power - so this experiment
+ // tests an interval of 8 ms, recommended by Intel.
+ static const UINT s_interval =
+ base::FeatureList::IsEnabled(kSlowDCTimerInterruptsFeature) ? 8 : 4;
+ return s_interval;
+}
+
+// Track if MinTimerIntervalHighResMs() or MinTimerIntervalLowResMs() is active.
bool g_high_res_timer_enabled = false;
// How many times the high resolution timer has been called.
uint32_t g_high_res_timer_count = 0;
@@ -204,11 +221,11 @@ void Time::EnableHighResolutionTimer(bool enable) {
// call timeEndPeriod with the same value used in timeBeginPeriod and
// therefore undo the period effect.
if (enable) {
- timeEndPeriod(kMinTimerIntervalLowResMs);
- timeBeginPeriod(kMinTimerIntervalHighResMs);
+ timeEndPeriod(MinTimerIntervalLowResMs());
+ timeBeginPeriod(MinTimerIntervalHighResMs());
} else {
- timeEndPeriod(kMinTimerIntervalHighResMs);
- timeBeginPeriod(kMinTimerIntervalLowResMs);
+ timeEndPeriod(MinTimerIntervalHighResMs());
+ timeBeginPeriod(MinTimerIntervalLowResMs());
}
}
@@ -220,8 +237,8 @@ bool Time::ActivateHighResolutionTimer(bool activating) {
const uint32_t max = std::numeric_limits<uint32_t>::max();
AutoLock lock(*GetHighResLock());
- UINT period = g_high_res_timer_enabled ? kMinTimerIntervalHighResMs
- : kMinTimerIntervalLowResMs;
+ UINT period = g_high_res_timer_enabled ? MinTimerIntervalHighResMs()
+ : MinTimerIntervalLowResMs();
if (activating) {
DCHECK_NE(g_high_res_timer_count, max);
++g_high_res_timer_count;
@@ -238,7 +255,7 @@ bool Time::ActivateHighResolutionTimer(bool activating) {
timeEndPeriod(period);
}
}
- return (period == kMinTimerIntervalHighResMs);
+ return period == MinTimerIntervalHighResMs();
}
// static
diff --git a/chromium/base/timer/hi_res_timer_manager_unittest.cc b/chromium/base/timer/hi_res_timer_manager_unittest.cc
index 43f607adb7e..c591993a852 100644
--- a/chromium/base/timer/hi_res_timer_manager_unittest.cc
+++ b/chromium/base/timer/hi_res_timer_manager_unittest.cc
@@ -23,39 +23,40 @@ TEST(HiResTimerManagerTest, ToggleOnOff) {
// goes in or out of AC power.
test::ScopedTaskEnvironment scoped_task_environment(
test::ScopedTaskEnvironment::MainThreadType::UI);
- std::unique_ptr<base::PowerMonitorSource> power_monitor_source(
- new base::PowerMonitorDeviceSource());
- std::unique_ptr<base::PowerMonitor> power_monitor(
- new base::PowerMonitor(std::move(power_monitor_source)));
-
- HighResolutionTimerManager manager;
- // Simulate a on-AC power event to get to a known initial state.
- manager.OnPowerStateChange(false);
-
- // Loop a few times to test power toggling.
- for (int times = 0; times != 3; ++times) {
- // The manager has the high resolution clock enabled now.
- EXPECT_TRUE(manager.hi_res_clock_available());
- // But the Time class has it off, because it hasn't been activated.
- EXPECT_FALSE(base::Time::IsHighResolutionTimerInUse());
-
- // Activate the high resolution timer.
- base::Time::ActivateHighResolutionTimer(true);
- EXPECT_TRUE(base::Time::IsHighResolutionTimerInUse());
-
- // Simulate a on-battery power event.
- manager.OnPowerStateChange(true);
- EXPECT_FALSE(manager.hi_res_clock_available());
- EXPECT_FALSE(base::Time::IsHighResolutionTimerInUse());
-
- // Back to on-AC power.
+ base::PowerMonitor::Initialize(
+ std::make_unique<base::PowerMonitorDeviceSource>());
+
+ {
+ HighResolutionTimerManager manager;
+ // Simulate a on-AC power event to get to a known initial state.
manager.OnPowerStateChange(false);
- EXPECT_TRUE(manager.hi_res_clock_available());
- EXPECT_TRUE(base::Time::IsHighResolutionTimerInUse());
- // De-activate the high resolution timer.
- base::Time::ActivateHighResolutionTimer(false);
+ // Loop a few times to test power toggling.
+ for (int times = 0; times != 3; ++times) {
+ // The manager has the high resolution clock enabled now.
+ EXPECT_TRUE(manager.hi_res_clock_available());
+ // But the Time class has it off, because it hasn't been activated.
+ EXPECT_FALSE(base::Time::IsHighResolutionTimerInUse());
+
+ // Activate the high resolution timer.
+ base::Time::ActivateHighResolutionTimer(true);
+ EXPECT_TRUE(base::Time::IsHighResolutionTimerInUse());
+
+ // Simulate a on-battery power event.
+ manager.OnPowerStateChange(true);
+ EXPECT_FALSE(manager.hi_res_clock_available());
+ EXPECT_FALSE(base::Time::IsHighResolutionTimerInUse());
+
+ // Back to on-AC power.
+ manager.OnPowerStateChange(false);
+ EXPECT_TRUE(manager.hi_res_clock_available());
+ EXPECT_TRUE(base::Time::IsHighResolutionTimerInUse());
+
+ // De-activate the high resolution timer.
+ base::Time::ActivateHighResolutionTimer(false);
+ }
}
+ base::PowerMonitor::ShutdownForTesting();
}
#endif // defined(OS_WIN)
diff --git a/chromium/base/timer/hi_res_timer_manager_win.cc b/chromium/base/timer/hi_res_timer_manager_win.cc
index 8d88179d4b6..911eb925283 100644
--- a/chromium/base/timer/hi_res_timer_manager_win.cc
+++ b/chromium/base/timer/hi_res_timer_manager_win.cc
@@ -29,10 +29,9 @@ void ReportHighResolutionTimerUsage() {
HighResolutionTimerManager::HighResolutionTimerManager()
: hi_res_clock_available_(false) {
- PowerMonitor* power_monitor = PowerMonitor::Get();
- DCHECK(power_monitor != NULL);
- power_monitor->AddObserver(this);
- UseHiResClock(!power_monitor->IsOnBatteryPower());
+ DCHECK(PowerMonitor::IsInitialized());
+ PowerMonitor::AddObserver(this);
+ UseHiResClock(!PowerMonitor::IsOnBatteryPower());
// Start polling the high resolution timer usage.
Time::ResetHighResolutionTimerUsage();
@@ -41,7 +40,7 @@ HighResolutionTimerManager::HighResolutionTimerManager()
}
HighResolutionTimerManager::~HighResolutionTimerManager() {
- PowerMonitor::Get()->RemoveObserver(this);
+ PowerMonitor::RemoveObserver(this);
UseHiResClock(false);
}
diff --git a/chromium/base/timer/lap_timer_unittest.cc b/chromium/base/timer/lap_timer_unittest.cc
index b45ab393dab..beed55cb593 100644
--- a/chromium/base/timer/lap_timer_unittest.cc
+++ b/chromium/base/timer/lap_timer_unittest.cc
@@ -27,11 +27,7 @@ constexpr int kTimeCheckInterval = 10;
TEST(LapTimer, UsageExample) {
ScopedTaskEnvironment scoped_task_environment(
- ScopedTaskEnvironment::MainThreadType::MOCK_TIME,
- ScopedTaskEnvironment::NowSource::MAIN_THREAD_MOCK_TIME);
-
- // Advance time a little bit so that TimeTicks::Now().is_null() becomes false.
- scoped_task_environment.FastForwardBy(kTimeAdvance);
+ ScopedTaskEnvironment::TimeSource::MOCK_TIME_AND_NOW);
LapTimer timer(kWarmupRuns, kTimeLimit, kTimeCheckInterval);
@@ -57,8 +53,7 @@ TEST(LapTimer, UsageExample) {
// iOS simulator does not support using ThreadTicks.
TEST(LapTimer, ThreadTicksUsageExample) {
ScopedTaskEnvironment scoped_task_environment(
- ScopedTaskEnvironment::MainThreadType::MOCK_TIME,
- ScopedTaskEnvironment::NowSource::MAIN_THREAD_MOCK_TIME);
+ ScopedTaskEnvironment::TimeSource::MOCK_TIME_AND_NOW);
LapTimer timer(kWarmupRuns, kTimeLimit, kTimeCheckInterval,
LapTimer::TimerMethod::kUseThreadTicks);
diff --git a/chromium/base/timer/timer.h b/chromium/base/timer/timer.h
index fbc2d91af7b..13c5cf3c6ea 100644
--- a/chromium/base/timer/timer.h
+++ b/chromium/base/timer/timer.h
@@ -114,13 +114,12 @@ class BASE_EXPORT TimerBase {
// this Timer is running. This method can only be called while this Timer
// isn't running. This is an alternative (old) approach to mock time in tests.
// The modern and preferred approach is to use
- // ScopedTaskEnvironment::MainThreadType::MOCK_TIME
- // (ScopedTaskEnvironment::NowSource::MAIN_THREAD_MOCK_TIME may also be useful
- // if the Timer is ever restarted). To avoid racy usage of Timer,
+ // ScopedTaskEnvironment::TimeSource::MOCK_TIME_AND_NOW (more reliable than
+ // TimeSource::MOCK_TIME if the Timer is ever restarted and needs to compare
+ // with the current TimeTicks::Now()). To avoid racy usage of Timer,
// |task_runner| must run tasks on the same sequence which this Timer is bound
- // to (started from).
- // TODO(gab): Migrate all callers to
- // ScopedTaskEnvironment::MainThreadType::MOCK_TIME.
+ // to (started from). TODO(gab): Migrate all callers to
+ // ScopedTaskEnvironment::TimeSource::MOCK_TIME_AND_NOW.
virtual void SetTaskRunner(scoped_refptr<SequencedTaskRunner> task_runner);
// Call this method to stop and cancel the timer. It is a no-op if the timer
diff --git a/chromium/base/timer/timer_unittest.cc b/chromium/base/timer/timer_unittest.cc
index 7129fea7b1a..ff6cbbe3116 100644
--- a/chromium/base/timer/timer_unittest.cc
+++ b/chromium/base/timer/timer_unittest.cc
@@ -438,7 +438,7 @@ TEST(TimerTest, OneShotTimer_CustomTaskRunner) {
TEST(TimerTest, OneShotTimerWithTickClock) {
test::ScopedTaskEnvironment scoped_task_environment(
- test::ScopedTaskEnvironment::MainThreadType::MOCK_TIME);
+ test::ScopedTaskEnvironment::TimeSource::MOCK_TIME);
Receiver receiver;
OneShotTimer timer(scoped_task_environment.GetMockTickClock());
timer.Start(FROM_HERE, TimeDelta::FromSeconds(1),
@@ -465,7 +465,7 @@ TEST_P(TimerTestWithThreadType, RepeatingTimerZeroDelay_Cancel) {
TEST(TimerTest, RepeatingTimerWithTickClock) {
test::ScopedTaskEnvironment scoped_task_environment(
- test::ScopedTaskEnvironment::MainThreadType::MOCK_TIME);
+ test::ScopedTaskEnvironment::TimeSource::MOCK_TIME);
Receiver receiver;
const int expected_times_called = 10;
RepeatingTimer timer(scoped_task_environment.GetMockTickClock());
@@ -496,7 +496,7 @@ TEST_P(TimerTestWithThreadType, DelayTimer_Deleted) {
TEST(TimerTest, DelayTimerWithTickClock) {
test::ScopedTaskEnvironment scoped_task_environment(
- test::ScopedTaskEnvironment::MainThreadType::MOCK_TIME);
+ test::ScopedTaskEnvironment::TimeSource::MOCK_TIME);
Receiver receiver;
DelayTimer timer(FROM_HERE, TimeDelta::FromSeconds(1), &receiver,
&Receiver::OnCalled,
diff --git a/chromium/base/trace_event/blame_context.cc b/chromium/base/trace_event/blame_context.cc
index b46412c1db9..3c5f32ab464 100644
--- a/chromium/base/trace_event/blame_context.cc
+++ b/chromium/base/trace_event/blame_context.cc
@@ -24,8 +24,7 @@ BlameContext::BlameContext(const char* category,
id_(id),
parent_scope_(parent_context ? parent_context->scope() : nullptr),
parent_id_(parent_context ? parent_context->id() : 0),
- category_group_enabled_(nullptr),
- weak_factory_(this) {
+ category_group_enabled_(nullptr) {
DCHECK(!parent_context || !std::strcmp(name_, parent_context->name()))
<< "Parent blame context must have the same name";
}
diff --git a/chromium/base/trace_event/blame_context.h b/chromium/base/trace_event/blame_context.h
index a973a28fd28..4da9e82b779 100644
--- a/chromium/base/trace_event/blame_context.h
+++ b/chromium/base/trace_event/blame_context.h
@@ -127,7 +127,7 @@ class BASE_EXPORT BlameContext
const unsigned char* category_group_enabled_;
ThreadChecker thread_checker_;
- WeakPtrFactory<BlameContext> weak_factory_;
+ WeakPtrFactory<BlameContext> weak_factory_{this};
DISALLOW_COPY_AND_ASSIGN(BlameContext);
};
diff --git a/chromium/base/trace_event/builtin_categories.h b/chromium/base/trace_event/builtin_categories.h
index 97055b4bdaf..7c0723ffd49 100644
--- a/chromium/base/trace_event/builtin_categories.h
+++ b/chromium/base/trace_event/builtin_categories.h
@@ -136,7 +136,6 @@
X("viz") \
X("vk") \
X("wayland") \
- X("Wayland") \
X("webaudio") \
X("WebCore") \
X("webrtc") \
@@ -155,7 +154,6 @@
X(TRACE_DISABLED_BY_DEFAULT("cc.debug")) \
X(TRACE_DISABLED_BY_DEFAULT("cc.debug.cdp-perf")) \
X(TRACE_DISABLED_BY_DEFAULT("cc.debug.display_items")) \
- X(TRACE_DISABLED_BY_DEFAULT("cc.debug.ipc")) \
X(TRACE_DISABLED_BY_DEFAULT("cc.debug.picture")) \
X(TRACE_DISABLED_BY_DEFAULT("cc.debug.scheduler")) \
X(TRACE_DISABLED_BY_DEFAULT("cc.debug.scheduler.frames")) \
@@ -178,6 +176,7 @@
X(TRACE_DISABLED_BY_DEFAULT("gpu.service")) \
X(TRACE_DISABLED_BY_DEFAULT("ipc.flow")) \
X(TRACE_DISABLED_BY_DEFAULT("layer-element")) \
+ X(TRACE_DISABLED_BY_DEFAULT("lifecycles")) \
X(TRACE_DISABLED_BY_DEFAULT("loading")) \
X(TRACE_DISABLED_BY_DEFAULT("memory-infra")) \
X(TRACE_DISABLED_BY_DEFAULT("memory-infra.v8.code_stats")) \
diff --git a/chromium/base/trace_event/common/trace_event_common.h b/chromium/base/trace_event/common/trace_event_common.h
index f1878a18da9..cbd80fcce81 100644
--- a/chromium/base/trace_event/common/trace_event_common.h
+++ b/chromium/base/trace_event/common/trace_event_common.h
@@ -256,6 +256,13 @@
INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
TRACE_EVENT_FLAG_COPY | scope, arg1_name, arg1_val, \
arg2_name, arg2_val)
+#define TRACE_EVENT_INSTANT_WITH_FLAGS0(category_group, name, scope_and_flags) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+ scope_and_flags)
+#define TRACE_EVENT_INSTANT_WITH_FLAGS1(category_group, name, scope_and_flags, \
+ arg1_name, arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_INSTANT, category_group, name, \
+ scope_and_flags, arg1_name, arg1_val)
#define TRACE_EVENT_INSTANT_WITH_TIMESTAMP0(category_group, name, scope, \
timestamp) \
@@ -285,12 +292,12 @@
INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, \
arg2_name, arg2_val)
-#define TRACE_EVENT_COPY_BEGIN0(category_group, name) \
- INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
- TRACE_EVENT_FLAG_COPY)
-#define TRACE_EVENT_COPY_BEGIN1(category_group, name, arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
- TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_BEGIN_WITH_FLAGS0(category_group, name, flags) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, flags)
+#define TRACE_EVENT_BEGIN_WITH_FLAGS1(category_group, name, flags, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
+ flags, arg1_name, arg1_val)
#define TRACE_EVENT_COPY_BEGIN2(category_group, name, arg1_name, arg1_val, \
arg2_name, arg2_val) \
INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_BEGIN, category_group, name, \
@@ -341,12 +348,12 @@
INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
TRACE_EVENT_FLAG_NONE, arg1_name, arg1_val, \
arg2_name, arg2_val)
-#define TRACE_EVENT_COPY_END0(category_group, name) \
- INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
- TRACE_EVENT_FLAG_COPY)
-#define TRACE_EVENT_COPY_END1(category_group, name, arg1_name, arg1_val) \
- INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
- TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val)
+#define TRACE_EVENT_END_WITH_FLAGS0(category_group, name, flags) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, flags)
+#define TRACE_EVENT_END_WITH_FLAGS1(category_group, name, flags, arg1_name, \
+ arg1_val) \
+ INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, flags, \
+ arg1_name, arg1_val)
#define TRACE_EVENT_COPY_END2(category_group, name, arg1_name, arg1_val, \
arg2_name, arg2_val) \
INTERNAL_TRACE_EVENT_ADD(TRACE_EVENT_PHASE_END, category_group, name, \
@@ -580,6 +587,9 @@
INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_ASYNC_BEGIN_WITH_FLAGS0(category_group, name, id, flags) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_BEGIN, \
+ category_group, name, id, flags)
// Similar to TRACE_EVENT_ASYNC_BEGINx but with a custom |at| timestamp
// provided.
@@ -606,6 +616,11 @@
INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP_AND_FLAGS0( \
+ category_group, name, id, timestamp, flags) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_BEGIN, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, flags)
// Records a single ASYNC_STEP_INTO event for |step| immediately. If the
// category is not enabled, then this does nothing. The |name| and |id| must
@@ -677,6 +692,9 @@
INTERNAL_TRACE_EVENT_ADD_WITH_ID( \
TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \
TRACE_EVENT_FLAG_COPY, arg1_name, arg1_val, arg2_name, arg2_val)
+#define TRACE_EVENT_ASYNC_END_WITH_FLAGS0(category_group, name, id, flags) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID(TRACE_EVENT_PHASE_ASYNC_END, \
+ category_group, name, id, flags)
// Similar to TRACE_EVENT_ASYNC_ENDx but with a custom |at| timestamp provided.
#define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP0(category_group, name, id, \
@@ -702,6 +720,11 @@
INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \
TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, TRACE_EVENT_FLAG_COPY)
+#define TRACE_EVENT_ASYNC_END_WITH_TIMESTAMP_AND_FLAGS0(category_group, name, \
+ id, timestamp, flags) \
+ INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ TRACE_EVENT_PHASE_ASYNC_END, category_group, name, id, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, flags)
// NESTABLE_ASYNC_* APIs are used to describe an async operation, which can
// be nested within a NESTABLE_ASYNC event and/or have inner NESTABLE_ASYNC
@@ -1075,6 +1098,8 @@
// TODO(eseckler): Remove once we have native support for typed proto events in
// TRACE_EVENT macros.
#define TRACE_EVENT_FLAG_TYPED_PROTO_ARGS (static_cast<unsigned int>(1 << 15))
+#define TRACE_EVENT_FLAG_JAVA_STRING_LITERALS \
+ (static_cast<unsigned int>(1 << 16))
#define TRACE_EVENT_FLAG_SCOPE_MASK \
(static_cast<unsigned int>(TRACE_EVENT_FLAG_SCOPE_OFFSET | \
diff --git a/chromium/base/trace_event/cpufreq_monitor_android.cc b/chromium/base/trace_event/cpufreq_monitor_android.cc
index bc5ebb4eb2a..ab3be5a1d7a 100644
--- a/chromium/base/trace_event/cpufreq_monitor_android.cc
+++ b/chromium/base/trace_event/cpufreq_monitor_android.cc
@@ -109,8 +109,9 @@ void CPUFreqMonitorDelegate::RecordFrequency(unsigned int cpu_id,
scoped_refptr<SingleThreadTaskRunner>
CPUFreqMonitorDelegate::CreateTaskRunner() {
- return base::CreateSingleThreadTaskRunnerWithTraits(
- {base::MayBlock(), base::TaskShutdownBehavior::SKIP_ON_SHUTDOWN,
+ return base::CreateSingleThreadTaskRunner(
+ {base::ThreadPool(), base::MayBlock(),
+ base::TaskShutdownBehavior::SKIP_ON_SHUTDOWN,
base::TaskPriority::BEST_EFFORT},
base::SingleThreadTaskRunnerThreadMode::SHARED);
}
diff --git a/chromium/base/trace_event/event_name_filter_unittest.cc b/chromium/base/trace_event/event_name_filter_unittest.cc
index d0b32d11dae..230e2742458 100644
--- a/chromium/base/trace_event/event_name_filter_unittest.cc
+++ b/chromium/base/trace_event/event_name_filter_unittest.cc
@@ -5,6 +5,7 @@
#include "base/trace_event/event_name_filter.h"
#include "base/memory/ptr_util.h"
+#include "base/trace_event/thread_instruction_count.h"
#include "base/trace_event/trace_event_impl.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -13,8 +14,8 @@ namespace trace_event {
const TraceEvent& MakeTraceEvent(const char* name) {
static TraceEvent event;
- event.Reset(0, TimeTicks(), ThreadTicks(), 'b', nullptr, name, "", 0, 0,
- nullptr, 0);
+ event.Reset(0, TimeTicks(), ThreadTicks(), ThreadInstructionCount(), 'b',
+ nullptr, name, "", 0, 0, nullptr, 0);
return event;
}
diff --git a/chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc b/chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc
index 7da727ace1e..ef48f68f343 100644
--- a/chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc
+++ b/chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc
@@ -97,6 +97,7 @@ AllocationContextTracker::AllocationContextTracker()
: thread_name_(nullptr), ignore_scope_depth_(0) {
tracked_stack_.reserve(kMaxStackDepth);
task_contexts_.reserve(kMaxTaskDepth);
+ task_contexts_.push_back("UntrackedTask");
}
AllocationContextTracker::~AllocationContextTracker() = default;
@@ -163,8 +164,8 @@ void AllocationContextTracker::PushCurrentTaskContext(const char* context) {
void AllocationContextTracker::PopCurrentTaskContext(const char* context) {
// Guard for stack underflow. If tracing was started with a TRACE_EVENT in
// scope, the context was never pushed, so it is possible that pop is called
- // on an empty stack.
- if (task_contexts_.empty())
+ // on an empty stack. Note that the context always contains "UntrackedTask".
+ if (task_contexts_.size() == 1)
return;
DCHECK_EQ(context, task_contexts_.back())
diff --git a/chromium/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc b/chromium/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
index c26149efaa7..b05d263aa88 100644
--- a/chromium/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
+++ b/chromium/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
@@ -8,6 +8,7 @@
#include "base/memory/ref_counted.h"
#include "base/pending_task.h"
+#include "base/strings/string_piece.h"
#include "base/trace_event/heap_profiler.h"
#include "base/trace_event/heap_profiler_allocation_context.h"
#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
@@ -334,7 +335,7 @@ TEST_F(AllocationContextTrackerTest, TrackCategoryName) {
AllocationContext ctx;
ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
->GetContextSnapshot(&ctx));
- ASSERT_FALSE(ctx.type_name);
+ ASSERT_EQ("UntrackedTask", base::StringPiece(ctx.type_name));
}
TEST_F(AllocationContextTrackerTest, IgnoreAllocationTest) {
diff --git a/chromium/base/trace_event/memory_allocator_dump_unittest.cc b/chromium/base/trace_event/memory_allocator_dump_unittest.cc
index 67ea455673c..1e8bf4c576b 100644
--- a/chromium/base/trace_event/memory_allocator_dump_unittest.cc
+++ b/chromium/base/trace_event/memory_allocator_dump_unittest.cc
@@ -21,7 +21,6 @@ using testing::ElementsAre;
using testing::Eq;
using testing::ByRef;
using testing::IsEmpty;
-using testing::Contains;
namespace base {
namespace trace_event {
@@ -62,7 +61,7 @@ void CheckString(const MemoryAllocatorDump* dump,
const char* expected_units,
const std::string& expected_value) {
MemoryAllocatorDump::Entry expected(name, expected_units, expected_value);
- EXPECT_THAT(dump->entries(), Contains(Eq(ByRef(expected))));
+ EXPECT_THAT(dump->entries(), testing::Contains(Eq(ByRef(expected))));
}
void CheckScalar(const MemoryAllocatorDump* dump,
@@ -70,7 +69,7 @@ void CheckScalar(const MemoryAllocatorDump* dump,
const char* expected_units,
uint64_t expected_value) {
MemoryAllocatorDump::Entry expected(name, expected_units, expected_value);
- EXPECT_THAT(dump->entries(), Contains(Eq(ByRef(expected))));
+ EXPECT_THAT(dump->entries(), testing::Contains(Eq(ByRef(expected))));
}
} // namespace
diff --git a/chromium/base/trace_event/memory_dump_manager_unittest.cc b/chromium/base/trace_event/memory_dump_manager_unittest.cc
index 2710f3809e1..2ca37b03285 100644
--- a/chromium/base/trace_event/memory_dump_manager_unittest.cc
+++ b/chromium/base/trace_event/memory_dump_manager_unittest.cc
@@ -155,7 +155,7 @@ class TestSequencedTaskRunner : public SequencedTaskRunner {
~TestSequencedTaskRunner() override = default;
const scoped_refptr<SequencedTaskRunner> task_runner_ =
- CreateSequencedTaskRunnerWithTraits({});
+ CreateSequencedTaskRunner({ThreadPool()});
bool enabled_ = true;
unsigned num_of_post_tasks_ = 0;
};
diff --git a/chromium/base/trace_event/memory_infra_background_whitelist.cc b/chromium/base/trace_event/memory_infra_background_whitelist.cc
index ed5ff3b73b3..c52fb80207b 100644
--- a/chromium/base/trace_event/memory_infra_background_whitelist.cc
+++ b/chromium/base/trace_event/memory_infra_background_whitelist.cc
@@ -355,6 +355,7 @@ const char* const kAllocatorDumpNameWhitelist[] = {
"sync/0x?/model_type/USER_CONSENT",
"sync/0x?/model_type/USER_EVENT",
"sync/0x?/model_type/WALLET_METADATA",
+ "sync/0x?/model_type/WEB_APP",
"sync/0x?/model_type/WIFI_CONFIGURATION",
"sync/0x?/model_type/WIFI_CREDENTIAL",
"tab_restore/service_helper_0x?/entries",
diff --git a/chromium/base/trace_event/trace_event.h b/chromium/base/trace_event/trace_event.h
index 50030ecad1f..ac241e47d85 100644
--- a/chromium/base/trace_event/trace_event.h
+++ b/chromium/base/trace_event/trace_event.h
@@ -22,6 +22,7 @@
#include "base/trace_event/builtin_categories.h"
#include "base/trace_event/common/trace_event_common.h"
#include "base/trace_event/heap_profiler.h"
+#include "base/trace_event/thread_instruction_count.h"
#include "base/trace_event/trace_arguments.h"
#include "base/trace_event/trace_category.h"
#include "base/trace_event/trace_log.h"
@@ -377,28 +378,28 @@
// Implementation detail: internal macro to create static category and add
// event if the category is enabled.
-#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMPS( \
- category_group, name, id, thread_id, begin_timestamp, end_timestamp, \
- thread_end_timestamp, flags, ...) \
- do { \
- INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
- if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) { \
- trace_event_internal::TraceID trace_event_trace_id((id)); \
- unsigned int trace_event_flags = \
- flags | trace_event_trace_id.id_flags(); \
- const unsigned char* uid_category_group_enabled = \
- INTERNAL_TRACE_EVENT_UID(category_group_enabled); \
- auto handle = \
- trace_event_internal::AddTraceEventWithThreadIdAndTimestamp( \
- TRACE_EVENT_PHASE_COMPLETE, uid_category_group_enabled, name, \
- trace_event_trace_id.scope(), trace_event_trace_id.raw_id(), \
- thread_id, begin_timestamp, \
- trace_event_flags | TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP, \
- trace_event_internal::kNoId, ##__VA_ARGS__); \
- TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION_EXPLICIT( \
- uid_category_group_enabled, name, handle, end_timestamp, \
- thread_end_timestamp); \
- } \
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMPS( \
+ category_group, name, id, thread_id, begin_timestamp, end_timestamp, \
+ thread_end_timestamp, flags, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) { \
+ trace_event_internal::TraceID trace_event_trace_id((id)); \
+ unsigned int trace_event_flags = \
+ flags | trace_event_trace_id.id_flags(); \
+ const unsigned char* uid_category_group_enabled = \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled); \
+ auto handle = \
+ trace_event_internal::AddTraceEventWithThreadIdAndTimestamp( \
+ TRACE_EVENT_PHASE_COMPLETE, uid_category_group_enabled, name, \
+ trace_event_trace_id.scope(), trace_event_trace_id.raw_id(), \
+ thread_id, begin_timestamp, \
+ trace_event_flags | TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP, \
+ trace_event_internal::kNoId, ##__VA_ARGS__); \
+ TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION_EXPLICIT( \
+ uid_category_group_enabled, name, handle, end_timestamp, \
+ thread_end_timestamp, base::trace_event::ThreadInstructionCount()); \
+ } \
} while (0)
// The linked ID will not be mangled.
@@ -705,12 +706,13 @@ UpdateTraceEventDuration(const unsigned char* category_group_enabled,
const char* name,
base::trace_event::TraceEventHandle handle);
-void BASE_EXPORT
-UpdateTraceEventDurationExplicit(const unsigned char* category_group_enabled,
- const char* name,
- base::trace_event::TraceEventHandle handle,
- const base::TimeTicks& now,
- const base::ThreadTicks& thread_now);
+void BASE_EXPORT UpdateTraceEventDurationExplicit(
+ const unsigned char* category_group_enabled,
+ const char* name,
+ base::trace_event::TraceEventHandle handle,
+ const base::TimeTicks& now,
+ const base::ThreadTicks& thread_now,
+ base::trace_event::ThreadInstructionCount thread_instruction_now);
// These AddTraceEvent and AddTraceEventWithThreadIdAndTimestamp template
// functions are defined here instead of in the macro, because the arg_values
diff --git a/chromium/base/trace_event/trace_event_impl.cc b/chromium/base/trace_event/trace_event_impl.cc
index ee3e21d4483..658aff3a23c 100644
--- a/chromium/base/trace_event/trace_event_impl.cc
+++ b/chromium/base/trace_event/trace_event_impl.cc
@@ -35,6 +35,7 @@ TraceEvent::TraceEvent() = default;
TraceEvent::TraceEvent(int thread_id,
TimeTicks timestamp,
ThreadTicks thread_timestamp,
+ ThreadInstructionCount thread_instruction_count,
char phase,
const unsigned char* category_group_enabled,
const char* name,
@@ -45,6 +46,7 @@ TraceEvent::TraceEvent(int thread_id,
unsigned int flags)
: timestamp_(timestamp),
thread_timestamp_(thread_timestamp),
+ thread_instruction_count_(thread_instruction_count),
scope_(scope),
id_(id),
category_group_enabled_(category_group_enabled),
@@ -65,6 +67,7 @@ void TraceEvent::Reset() {
// Only reset fields that won't be initialized in Reset(int, ...), or that may
// hold references to other objects.
duration_ = TimeDelta::FromInternalValue(-1);
+ thread_instruction_delta_ = ThreadInstructionDelta();
args_.Reset();
parameter_copy_storage_.Reset();
}
@@ -72,6 +75,7 @@ void TraceEvent::Reset() {
void TraceEvent::Reset(int thread_id,
TimeTicks timestamp,
ThreadTicks thread_timestamp,
+ ThreadInstructionCount thread_instruction_count,
char phase,
const unsigned char* category_group_enabled,
const char* name,
@@ -90,6 +94,7 @@ void TraceEvent::Reset(int thread_id,
thread_id_ = thread_id;
flags_ = flags;
bind_id_ = bind_id;
+ thread_instruction_count_ = thread_instruction_count;
phase_ = phase;
InitArgs(args);
@@ -103,7 +108,8 @@ void TraceEvent::InitArgs(TraceArguments* args) {
}
void TraceEvent::UpdateDuration(const TimeTicks& now,
- const ThreadTicks& thread_now) {
+ const ThreadTicks& thread_now,
+ ThreadInstructionCount thread_instruction_now) {
DCHECK_EQ(duration_.ToInternalValue(), -1);
duration_ = now - timestamp_;
@@ -111,6 +117,11 @@ void TraceEvent::UpdateDuration(const TimeTicks& now,
// initialized when it was recorded.
if (thread_timestamp_ != ThreadTicks())
thread_duration_ = thread_now - thread_timestamp_;
+
+ if (!thread_instruction_count_.is_null()) {
+ thread_instruction_delta_ =
+ thread_instruction_now - thread_instruction_count_;
+ }
}
void TraceEvent::EstimateTraceMemoryOverhead(
@@ -191,6 +202,10 @@ void TraceEvent::AppendAsJSON(
if (thread_duration != -1)
StringAppendF(out, ",\"tdur\":%" PRId64, thread_duration);
}
+ if (!thread_instruction_count_.is_null()) {
+ int64_t thread_instructions = thread_instruction_delta_.ToInternalValue();
+ StringAppendF(out, ",\"tidelta\":%" PRId64, thread_instructions);
+ }
}
// Output tts if thread_timestamp is valid.
diff --git a/chromium/base/trace_event/trace_event_impl.h b/chromium/base/trace_event/trace_event_impl.h
index 97cd581a584..161cd6d82f0 100644
--- a/chromium/base/trace_event/trace_event_impl.h
+++ b/chromium/base/trace_event/trace_event_impl.h
@@ -23,6 +23,7 @@
#include "base/synchronization/lock.h"
#include "base/threading/thread_local.h"
#include "base/trace_event/common/trace_event_common.h"
+#include "base/trace_event/thread_instruction_count.h"
#include "base/trace_event/trace_arguments.h"
#include "base/trace_event/trace_event_memory_overhead.h"
#include "build/build_config.h"
@@ -60,6 +61,7 @@ class BASE_EXPORT TraceEvent {
TraceEvent(int thread_id,
TimeTicks timestamp,
ThreadTicks thread_timestamp,
+ ThreadInstructionCount thread_instruction_count,
char phase,
const unsigned char* category_group_enabled,
const char* name,
@@ -88,6 +90,7 @@ class BASE_EXPORT TraceEvent {
void Reset(int thread_id,
TimeTicks timestamp,
ThreadTicks thread_timestamp,
+ ThreadInstructionCount thread_instruction_count,
char phase,
const unsigned char* category_group_enabled,
const char* name,
@@ -97,7 +100,9 @@ class BASE_EXPORT TraceEvent {
TraceArguments* args,
unsigned int flags);
- void UpdateDuration(const TimeTicks& now, const ThreadTicks& thread_now);
+ void UpdateDuration(const TimeTicks& now,
+ const ThreadTicks& thread_now,
+ ThreadInstructionCount thread_instruction_now);
void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead);
@@ -116,11 +121,17 @@ class BASE_EXPORT TraceEvent {
TimeTicks timestamp() const { return timestamp_; }
ThreadTicks thread_timestamp() const { return thread_timestamp_; }
+ ThreadInstructionCount thread_instruction_count() const {
+ return thread_instruction_count_;
+ }
char phase() const { return phase_; }
int thread_id() const { return thread_id_; }
int process_id() const { return process_id_; }
TimeDelta duration() const { return duration_; }
TimeDelta thread_duration() const { return thread_duration_; }
+ ThreadInstructionDelta thread_instruction_delta() const {
+ return thread_instruction_delta_;
+ }
const char* scope() const { return scope_; }
unsigned long long id() const { return id_; }
unsigned int flags() const { return flags_; }
@@ -162,6 +173,8 @@ class BASE_EXPORT TraceEvent {
ThreadTicks thread_timestamp_ = ThreadTicks();
TimeDelta duration_ = TimeDelta::FromInternalValue(-1);
TimeDelta thread_duration_ = TimeDelta();
+ ThreadInstructionCount thread_instruction_count_ = ThreadInstructionCount();
+ ThreadInstructionDelta thread_instruction_delta_ = ThreadInstructionDelta();
// scope_ and id_ can be used to store phase-specific data.
// The following should be default-initialized to the expression
// trace_event_internal::kGlobalScope, which is nullptr, but its definition
diff --git a/chromium/base/trace_event/trace_event_unittest.cc b/chromium/base/trace_event/trace_event_unittest.cc
index a831b9bed3e..14539d3d022 100644
--- a/chromium/base/trace_event/trace_event_unittest.cc
+++ b/chromium/base/trace_event/trace_event_unittest.cc
@@ -1823,10 +1823,11 @@ TEST_F(TraceEventTestFixture, DeepCopy) {
std::string val2("val2");
BeginTrace();
- TRACE_EVENT_COPY_INSTANT0("category", name1.c_str(),
- TRACE_EVENT_SCOPE_THREAD);
- TRACE_EVENT_COPY_BEGIN1("category", name2.c_str(),
- arg1.c_str(), 5);
+ TRACE_EVENT_INSTANT_WITH_FLAGS0(
+ "category", name1.c_str(),
+ TRACE_EVENT_FLAG_COPY | TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_BEGIN_WITH_FLAGS1("category", name2.c_str(),
+ TRACE_EVENT_FLAG_COPY, arg1.c_str(), 5);
TRACE_EVENT_COPY_END2("category", name3.c_str(),
arg1.c_str(), val1,
arg2.c_str(), val2);
diff --git a/chromium/base/trace_event/trace_log.cc b/chromium/base/trace_event/trace_log.cc
index 08ed27f38da..dd59a6bf6a0 100644
--- a/chromium/base/trace_event/trace_log.cc
+++ b/chromium/base/trace_event/trace_log.cc
@@ -40,6 +40,7 @@
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/process_memory_dump.h"
+#include "base/trace_event/thread_instruction_count.h"
#include "base/trace_event/trace_buffer.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
@@ -98,6 +99,11 @@ ThreadTicks ThreadNow() {
: ThreadTicks();
}
+ThreadInstructionCount ThreadInstructionNow() {
+ return ThreadInstructionCount::IsSupported() ? ThreadInstructionCount::Now()
+ : ThreadInstructionCount();
+}
+
template <typename T>
void InitializeMetadataEvent(TraceEvent* trace_event,
int thread_id,
@@ -108,13 +114,14 @@ void InitializeMetadataEvent(TraceEvent* trace_event,
return;
TraceArguments args(arg_name, value);
- trace_event->Reset(
- thread_id, TimeTicks(), ThreadTicks(), TRACE_EVENT_PHASE_METADATA,
- CategoryRegistry::kCategoryMetadata->state_ptr(), metadata_name,
- trace_event_internal::kGlobalScope, // scope
- trace_event_internal::kNoId, // id
- trace_event_internal::kNoId, // bind_id
- &args, TRACE_EVENT_FLAG_NONE);
+ trace_event->Reset(thread_id, TimeTicks(), ThreadTicks(),
+ ThreadInstructionCount(), TRACE_EVENT_PHASE_METADATA,
+ CategoryRegistry::kCategoryMetadata->state_ptr(),
+ metadata_name,
+ trace_event_internal::kGlobalScope, // scope
+ trace_event_internal::kNoId, // id
+ trace_event_internal::kNoId, // bind_id
+ &args, TRACE_EVENT_FLAG_NONE);
}
class AutoThreadLocalBoolean {
@@ -782,7 +789,7 @@ void TraceLog::AddOwnedEnabledStateObserver(
bool TraceLog::HasEnabledStateObserver(EnabledStateObserver* listener) const {
AutoLock lock(observers_lock_);
- return ContainsValue(enabled_state_observers_, listener);
+ return Contains(enabled_state_observers_, listener);
}
void TraceLog::AddAsyncEnabledStateObserver(
@@ -800,7 +807,7 @@ void TraceLog::RemoveAsyncEnabledStateObserver(
bool TraceLog::HasAsyncEnabledStateObserver(
AsyncEnabledStateObserver* listener) const {
AutoLock lock(observers_lock_);
- return ContainsKey(async_observers_, listener);
+ return Contains(async_observers_, listener);
}
TraceLogStatus TraceLog::GetStatus() const {
@@ -999,13 +1006,12 @@ void TraceLog::FinishFlush(int generation, bool discard_events) {
}
if (use_worker_thread_) {
- base::PostTaskWithTraits(
- FROM_HERE,
- {MayBlock(), TaskPriority::BEST_EFFORT,
- TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN},
- BindOnce(&TraceLog::ConvertTraceEventsToTraceFormat,
- std::move(previous_logged_events), flush_output_callback,
- argument_filter_predicate));
+ base::PostTask(FROM_HERE,
+ {ThreadPool(), MayBlock(), TaskPriority::BEST_EFFORT,
+ TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN},
+ BindOnce(&TraceLog::ConvertTraceEventsToTraceFormat,
+ std::move(previous_logged_events),
+ flush_output_callback, argument_filter_predicate));
return;
}
@@ -1178,6 +1184,7 @@ TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
TimeTicks offset_event_timestamp = OffsetTimestamp(timestamp);
ThreadTicks thread_now = ThreadNow();
+ ThreadInstructionCount thread_instruction_now = ThreadInstructionNow();
ThreadLocalEventBuffer* thread_local_event_buffer = nullptr;
if (*category_group_enabled & RECORDING_MODE) {
@@ -1191,7 +1198,7 @@ TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
// current thread to avoid locks in most cases.
if (thread_id == static_cast<int>(PlatformThread::CurrentId())) {
const char* new_name =
- ThreadIdNameManager::GetInstance()->GetName(thread_id);
+ ThreadIdNameManager::GetInstance()->GetNameForCurrentThread();
// Check if the thread name has been set or changed since the previous
// call (if any), but don't bother if the new name is empty. Note this will
// not detect a thread name change within the same char* buffer address: we
@@ -1212,7 +1219,7 @@ TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
std::vector<StringPiece> existing_names = base::SplitStringPiece(
existing_name->second, ",", base::KEEP_WHITESPACE,
base::SPLIT_WANT_NONEMPTY);
- if (!ContainsValue(existing_names, new_name)) {
+ if (!Contains(existing_names, new_name)) {
if (!existing_names.empty())
existing_name->second.push_back(',');
existing_name->second.append(new_name);
@@ -1233,9 +1240,9 @@ TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
auto trace_event_override =
add_trace_event_override_.load(std::memory_order_relaxed);
if (trace_event_override) {
- TraceEvent new_trace_event(thread_id, offset_event_timestamp, thread_now,
- phase, category_group_enabled, name, scope, id,
- bind_id, args, flags);
+ TraceEvent new_trace_event(
+ thread_id, offset_event_timestamp, thread_now, thread_instruction_now,
+ phase, category_group_enabled, name, scope, id, bind_id, args, flags);
trace_event_override(
&new_trace_event,
@@ -1249,8 +1256,8 @@ TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
bool disabled_by_filters = false;
if (*category_group_enabled & TraceCategory::ENABLED_FOR_FILTERING) {
auto new_trace_event = std::make_unique<TraceEvent>(
- thread_id, offset_event_timestamp, thread_now, phase,
- category_group_enabled, name, scope, id, bind_id, args, flags);
+ thread_id, offset_event_timestamp, thread_now, thread_instruction_now,
+ phase, category_group_enabled, name, scope, id, bind_id, args, flags);
disabled_by_filters = true;
ForEachCategoryFilter(
@@ -1281,7 +1288,8 @@ TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
if (filtered_trace_event) {
*trace_event = std::move(*filtered_trace_event);
} else {
- trace_event->Reset(thread_id, offset_event_timestamp, thread_now, phase,
+ trace_event->Reset(thread_id, offset_event_timestamp, thread_now,
+ thread_instruction_now, phase,
category_group_enabled, name, scope, id, bind_id,
args, flags);
}
@@ -1312,10 +1320,11 @@ void TraceLog::AddMetadataEvent(const unsigned char* category_group_enabled,
int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
ThreadTicks thread_now = ThreadNow();
TimeTicks now = OffsetNow();
+ ThreadInstructionCount thread_instruction_now = ThreadInstructionNow();
AutoLock lock(lock_);
auto trace_event = std::make_unique<TraceEvent>(
- thread_id, now, thread_now, TRACE_EVENT_PHASE_METADATA,
- category_group_enabled, name,
+ thread_id, now, thread_now, thread_instruction_now,
+ TRACE_EVENT_PHASE_METADATA, category_group_enabled, name,
trace_event_internal::kGlobalScope, // scope
trace_event_internal::kNoId, // id
trace_event_internal::kNoId, // bind_id
@@ -1394,7 +1403,8 @@ void TraceLog::UpdateTraceEventDuration(
return;
UpdateTraceEventDurationExplicit(category_group_enabled, name, handle,
- OffsetNow(), ThreadNow());
+ OffsetNow(), ThreadNow(),
+ ThreadInstructionNow());
}
void TraceLog::UpdateTraceEventDurationExplicit(
@@ -1402,7 +1412,8 @@ void TraceLog::UpdateTraceEventDurationExplicit(
const char* name,
TraceEventHandle handle,
const TimeTicks& now,
- const ThreadTicks& thread_now) {
+ const ThreadTicks& thread_now,
+ ThreadInstructionCount thread_instruction_now) {
char category_group_enabled_local = *category_group_enabled;
if (!category_group_enabled_local)
return;
@@ -1424,7 +1435,7 @@ void TraceLog::UpdateTraceEventDurationExplicit(
auto update_duration_callback =
update_duration_callback_.load(std::memory_order_relaxed);
if (update_duration_callback) {
- update_duration_callback(handle, now, thread_now);
+ update_duration_callback(handle, now, thread_now, thread_instruction_now);
return;
}
}
@@ -1437,7 +1448,7 @@ void TraceLog::UpdateTraceEventDurationExplicit(
if (trace_event) {
DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE);
- trace_event->UpdateDuration(now, thread_now);
+ trace_event->UpdateDuration(now, thread_now, thread_instruction_now);
#if defined(OS_ANDROID)
trace_event->SendToATrace();
#endif
@@ -1550,7 +1561,9 @@ void TraceLog::AddMetadataEventsWhileLocked() {
it.second);
}
- // Thread names.
+ // TODO(ssid): Stop emitting and tracking thread names when perfetto is
+ // enabled and after crbug/978093 is fixed. The JSON exporter will emit thread
+ // names from thread descriptors.
AutoLock thread_info_lock(thread_info_lock_);
for (const auto& it : thread_names_) {
if (it.second.empty())
@@ -1801,10 +1814,12 @@ void UpdateTraceEventDurationExplicit(
const char* name,
base::trace_event::TraceEventHandle handle,
const base::TimeTicks& now,
- const base::ThreadTicks& thread_now) {
+ const base::ThreadTicks& thread_now,
+ base::trace_event::ThreadInstructionCount thread_instruction_now) {
return base::trace_event::TraceLog::GetInstance()
->UpdateTraceEventDurationExplicit(category_group_enabled, name, handle,
- now, thread_now);
+ now, thread_now,
+ thread_instruction_now);
}
ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient(
diff --git a/chromium/base/trace_event/trace_log.h b/chromium/base/trace_event/trace_log.h
index 360e12348c2..55f6825e5c2 100644
--- a/chromium/base/trace_event/trace_log.h
+++ b/chromium/base/trace_event/trace_log.h
@@ -200,9 +200,11 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
bool thread_will_flush,
TraceEventHandle* handle);
using OnFlushCallback = void (*)();
- using UpdateDurationCallback = void (*)(TraceEventHandle handle,
- const TimeTicks& now,
- const ThreadTicks& thread_now);
+ using UpdateDurationCallback =
+ void (*)(TraceEventHandle handle,
+ const TimeTicks& now,
+ const ThreadTicks& thread_now,
+ ThreadInstructionCount thread_instruction_now);
// The callbacks will be called up until the point where the flush is
// finished, i.e. must be callable until OutputCallback is called with
// has_more_events==false.
@@ -291,7 +293,8 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
const char* name,
TraceEventHandle handle,
const TimeTicks& now,
- const ThreadTicks& thread_now);
+ const ThreadTicks& thread_now,
+ ThreadInstructionCount thread_instruction_now);
void EndFilteredEvent(const unsigned char* category_group_enabled,
const char* name,
diff --git a/chromium/base/util/type_safety/BUILD.gn b/chromium/base/util/type_safety/BUILD.gn
index 011e5778ce4..a335cc224a7 100644
--- a/chromium/base/util/type_safety/BUILD.gn
+++ b/chromium/base/util/type_safety/BUILD.gn
@@ -3,6 +3,7 @@
# found in the LICENSE file.
import("//build/config/jumbo.gni")
+import("//build/nocompile.gni")
# Change this target's type to jumbo_component if it starts to contain more than
# just headers. Header-only targets cannot be compiled to libraries, so it must
@@ -10,6 +11,7 @@ import("//build/config/jumbo.gni")
source_set("type_safety") {
sources = [
"id_type.h",
+ "pass_key.h",
"strong_alias.h",
]
}
@@ -18,6 +20,7 @@ source_set("tests") {
testonly = true
sources = [
"id_type_unittest.cc",
+ "pass_key_unittest.cc",
"strong_alias_unittest.cc",
]
@@ -26,3 +29,18 @@ source_set("tests") {
"//testing/gtest",
]
}
+
+if (enable_nocompile_tests) {
+ nocompile_test("type_safety_nocompile_tests") {
+ sources = [
+ "pass_key_unittest.nc",
+ ]
+
+ deps = [
+ ":type_safety",
+ "//base:base_unittests_tasktraits",
+ "//base/test:run_all_unittests",
+ "//testing/gtest",
+ ]
+ }
+}
diff --git a/chromium/base/util/type_safety/OWNERS b/chromium/base/util/type_safety/OWNERS
index 3889b827a00..192ba641834 100644
--- a/chromium/base/util/type_safety/OWNERS
+++ b/chromium/base/util/type_safety/OWNERS
@@ -1,2 +1,2 @@
-lukasza@chromium.com
+lukasza@chromium.org
mpawlowski@opera.com
diff --git a/chromium/base/util/type_safety/pass_key.h b/chromium/base/util/type_safety/pass_key.h
new file mode 100644
index 00000000000..987822ede8f
--- /dev/null
+++ b/chromium/base/util/type_safety/pass_key.h
@@ -0,0 +1,48 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_UTIL_TYPE_SAFETY_PASS_KEY_H_
+#define BASE_UTIL_TYPE_SAFETY_PASS_KEY_H_
+
+namespace util {
+
+// util::PassKey can be used to restrict access to functions to an authorized
+// caller. The primary use case is restricting the construction of an object in
+// situations where the constructor needs to be public, which may be the case
+// if the object must be constructed through a helper function, such as
+// blink::MakeGarbageCollected.
+//
+// For example, to limit the creation of 'Foo' to the 'Manager' class:
+//
+// class Foo {
+// public:
+// Foo(util::PassKey<Manager>);
+// };
+//
+// class Manager {
+// public:
+// using PassKey = util::PassKey<Manager>;
+// Manager() : foo_(blink::MakeGarbageCollected<Foo>(PassKey())) {}
+// void Trace(blink::Visitor* visitor) { visitor->Trace(foo_); }
+// Foo* GetFooSingleton() { return foo_; }
+//
+// private:
+// blink::Member<Foo> foo_;
+// };
+//
+// In the above example, the 'Foo' constructor requires an instance of
+// util::PassKey<Manager>. Only Manager is allowed to create such instances,
+// making the constructor unusable elsewhere.
+template <typename T>
+class PassKey {
+ private:
+ // Avoid =default to disallow creation by uniform initialization.
+ PassKey() {}
+
+ friend T;
+};
+
+} // namespace util
+
+#endif // BASE_UTIL_TYPE_SAFETY_PASS_KEY_H_
diff --git a/chromium/base/util/type_safety/pass_key_unittest.cc b/chromium/base/util/type_safety/pass_key_unittest.cc
new file mode 100644
index 00000000000..51431b48847
--- /dev/null
+++ b/chromium/base/util/type_safety/pass_key_unittest.cc
@@ -0,0 +1,46 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/util/type_safety/pass_key.h"
+
+#include <utility>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace util {
+namespace {
+
+class Manager;
+
+// May not be created without a PassKey.
+class Restricted {
+ public:
+ Restricted(util::PassKey<Manager>) {}
+};
+
+class Manager {
+ public:
+ enum class ExplicitConstruction { kTag };
+ enum class UniformInitialization { kTag };
+
+ Manager(ExplicitConstruction) : restricted_(util::PassKey<Manager>()) {}
+ Manager(UniformInitialization) : restricted_({}) {}
+
+ private:
+ Restricted restricted_;
+};
+
+// If this file compiles, then these test will run and pass. This is useful
+// for verifying that the file actually was compiled into the unit test binary.
+
+TEST(PassKeyTest, ExplicitConstruction) {
+ Manager manager(Manager::ExplicitConstruction::kTag);
+}
+
+TEST(PassKeyTest, UniformInitialization) {
+ Manager manager(Manager::UniformInitialization::kTag);
+}
+
+} // namespace
+} // namespace util
diff --git a/chromium/base/util/type_safety/pass_key_unittest.nc b/chromium/base/util/type_safety/pass_key_unittest.nc
new file mode 100644
index 00000000000..f8a298504b5
--- /dev/null
+++ b/chromium/base/util/type_safety/pass_key_unittest.nc
@@ -0,0 +1,73 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a no-compile test suite.
+// http://dev.chromium.org/developers/testing/no-compile-tests
+
+#include "base/util/type_safety/pass_key.h"
+
+namespace util {
+
+class Manager;
+
+// May not be created without a PassKey.
+class Restricted {
+ public:
+ Restricted(util::PassKey<Manager>) {}
+};
+
+int Secret(util::PassKey<Manager>) {
+ return 1;
+}
+
+#if defined(NCTEST_UNAUTHORIZED_PASS_KEY_IN_INITIALIZER) // [r"fatal error: calling a private constructor of class 'util::PassKey<util::Manager>'"]
+
+class NotAManager {
+ public:
+ NotAManager() : restricted_(util::PassKey<Manager>()) {}
+
+ private:
+ Restricted restricted_;
+};
+
+void WillNotCompile() {
+ NotAManager not_a_manager;
+}
+
+#elif defined(NCTEST_UNAUTHORIZED_UNIFORM_INITIALIZED_PASS_KEY_IN_INITIALIZER) // [r"fatal error: calling a private constructor of class 'util::PassKey<util::Manager>'"]
+
+class NotAManager {
+ public:
+ NotAManager() : restricted_({}) {}
+
+ private:
+ Restricted restricted_;
+};
+
+void WillNotCompile() {
+ NotAManager not_a_manager;
+}
+
+#elif defined(NCTEST_UNAUTHORIZED_PASS_KEY_IN_FUNCTION) // [r"fatal error: calling a private constructor of class 'util::PassKey<util::Manager>'"]
+
+int WillNotCompile() {
+ return Secret(util::PassKey<Manager>());
+}
+
+#elif defined(NCTEST_UNAUTHORIZED_UNIFORM_INITIALIZATION_WITH_DEDUCED_PASS_KEY_TYPE) // [r"fatal error: calling a private constructor of class 'util::PassKey<util::Manager>'"]
+
+int WillNotCompile() {
+ return Secret({});
+}
+
+#elif defined(NCTEST_UNAUTHORIZED_UNIFORM_INITIALIZATION) // [r"fatal error: calling a private constructor of class 'util::PassKey<util::Manager>'"]
+
+int WillNotCompile() {
+ util::PassKey<Manager> key {};
+ return Secret(key);
+}
+
+#endif
+
+} // namespace util
diff --git a/chromium/base/util/type_safety/strong_alias.h b/chromium/base/util/type_safety/strong_alias.h
index 7b0f38c8b19..05038bf3e2d 100644
--- a/chromium/base/util/type_safety/strong_alias.h
+++ b/chromium/base/util/type_safety/strong_alias.h
@@ -57,6 +57,12 @@ namespace util {
// in StrongAlias's interface. It's also potentially unwanted (ex. you don't
// want to be able to add two StrongAliases that represent socket handles).
// A getter is provided in case you need to access the UnderlyingType.
+//
+// See also
+// - //styleguide/c++/blink-c++.md which provides recommendations and examples
+// of using StrongAlias<Tag, bool> instead of a bare bool.
+// - util::IdType<...> which provides helpers for specializing
+// StrongAlias to be used as an id.
template <typename TagType, typename UnderlyingType>
class StrongAlias {
public:
@@ -71,6 +77,7 @@ class StrongAlias {
StrongAlias& operator=(StrongAlias&& other) = default;
const UnderlyingType& value() const { return value_; }
+ explicit operator UnderlyingType() const { return value_; }
bool operator==(const StrongAlias& other) const {
return value_ == other.value_;
diff --git a/chromium/base/util/type_safety/strong_alias_unittest.cc b/chromium/base/util/type_safety/strong_alias_unittest.cc
index 9b8af3a5d0c..d7b13cf8ca6 100644
--- a/chromium/base/util/type_safety/strong_alias_unittest.cc
+++ b/chromium/base/util/type_safety/strong_alias_unittest.cc
@@ -65,6 +65,13 @@ TYPED_TEST(StrongAliasTest, ValueAccessesUnderlyingValue) {
"Reference returned by const value getter should be const.");
}
+TYPED_TEST(StrongAliasTest, ExplicitConversionToUnderlyingValue) {
+ using FooAlias = StrongAlias<class FooTag, TypeParam>;
+
+ const FooAlias const_alias(GetExampleValue<TypeParam>(1));
+ EXPECT_EQ(GetExampleValue<TypeParam>(1), static_cast<TypeParam>(const_alias));
+}
+
TYPED_TEST(StrongAliasTest, CanBeCopyConstructed) {
using FooAlias = StrongAlias<class FooTag, TypeParam>;
FooAlias alias(GetExampleValue<TypeParam>(0));
@@ -138,9 +145,6 @@ TYPED_TEST(StrongAliasTest, CannotBeCreatedFromDifferentAlias) {
TYPED_TEST(StrongAliasTest, CannotBeImplicitlyConverterToUnderlyingValue) {
using FooAlias = StrongAlias<class FooTag, TypeParam>;
- static_assert(!std::is_constructible<TypeParam, FooAlias>::value,
- "Should be impossible to construct an underlying type from a "
- "StrongAlias.");
static_assert(!std::is_convertible<FooAlias, TypeParam>::value,
"Should be impossible to implicitly convert a StrongAlias into "
"an underlying type.");
diff --git a/chromium/base/values.cc b/chromium/base/values.cc
index 03acb86bbc0..7d042cdcd45 100644
--- a/chromium/base/values.cc
+++ b/chromium/base/values.cc
@@ -1706,7 +1706,7 @@ void ListValue::AppendStrings(const std::vector<string16>& in_values) {
bool ListValue::AppendIfNotPresent(std::unique_ptr<Value> in_value) {
DCHECK(in_value);
- if (ContainsValue(list_, *in_value))
+ if (Contains(list_, *in_value))
return false;
list_.push_back(std::move(*in_value));
diff --git a/chromium/base/values.h b/chromium/base/values.h
index 3d034cd6d1a..ef24b1998fb 100644
--- a/chromium/base/values.h
+++ b/chromium/base/values.h
@@ -167,7 +167,7 @@ class BASE_EXPORT Value {
bool is_dict() const { return type() == Type::DICTIONARY; }
bool is_list() const { return type() == Type::LIST; }
- // These will all CHECK if the type doesn't match.
+ // These will all CHECK that the type matches.
bool GetBool() const;
int GetInt() const;
double GetDouble() const; // Implicitly converts from int if necessary.
@@ -181,7 +181,7 @@ class BASE_EXPORT Value {
// a pointer to the element. Otherwise it returns nullptr.
// returned. Callers are expected to perform a check against null before using
// the pointer.
- // Note: This CHECKs if type() is not Type::DICTIONARY.
+ // Note: This CHECKs that type() is Type::DICTIONARY.
//
// Example:
// auto* found = FindKey("foo");
@@ -193,7 +193,7 @@ class BASE_EXPORT Value {
// different type nullptr is returned.
// Callers are expected to perform a check against null before using the
// pointer.
- // Note: This CHECKs if type() is not Type::DICTIONARY.
+ // Note: This CHECKs that type() is Type::DICTIONARY.
//
// Example:
// auto* found = FindKey("foo", Type::DOUBLE);
@@ -226,7 +226,7 @@ class BASE_EXPORT Value {
// |SetKey| looks up |key| in the underlying dictionary and sets the mapped
// value to |value|. If |key| could not be found, a new element is inserted.
// A pointer to the modified item is returned.
- // Note: This CHECKs if type() is not Type::DICTIONARY.
+ // Note: This CHECKs that type() is Type::DICTIONARY.
// Note: Prefer Set<Type>Key() for simple values.
//
// Example:
@@ -255,7 +255,7 @@ class BASE_EXPORT Value {
// failure, e.g. the key does not exist, false is returned and the underlying
// dictionary is not changed. In case of success, |key| is deleted from the
// dictionary and the method returns true.
- // Note: This CHECKs if type() is not Type::DICTIONARY.
+ // Note: This CHECKs that type() is Type::DICTIONARY.
//
// Example:
// bool success = dict.RemoveKey("foo");
@@ -265,7 +265,7 @@ class BASE_EXPORT Value {
// failure, e.g. the key does not exist, nullopt is returned and the
// underlying dictionary is not changed. In case of success, |key| is deleted
// from the dictionary and the method returns the extracted Value.
- // Note: This CHECKs if type() is not Type::DICTIONARY.
+ // Note: This CHECKs that type() is Type::DICTIONARY.
//
// Example:
// Optional<Value> maybe_value = dict.ExtractKey("foo");
@@ -398,12 +398,12 @@ class BASE_EXPORT Value {
// dictionary. These are intended for iteration over all items in the
// dictionary and are compatible with for-each loops and standard library
// algorithms.
- // Note: These CHECK if type() is not Type::DICTIONARY.
+ // Note: These CHECK that type() is Type::DICTIONARY.
dict_iterator_proxy DictItems();
const_dict_iterator_proxy DictItems() const;
// Returns the size of the dictionary, and if the dictionary is empty.
- // Note: These CHECK if type() is not Type::DICTIONARY.
+ // Note: These CHECK that type() is Type::DICTIONARY.
size_t DictSize() const;
bool DictEmpty() const;
@@ -412,7 +412,7 @@ class BASE_EXPORT Value {
// passed in dictionary takes precedence and data already present will be
// replaced. Values within |dictionary| are deep-copied, so |dictionary| may
// be freed any time after this call.
- // Note: This CHECKs if type() or dictionary->type() is not Type::DICTIONARY.
+ // Note: This CHECKs that type() and dictionary->type() are Type::DICTIONARY.
void MergeDictionary(const Value* dictionary);
// These methods allow the convenient retrieval of the contents of the Value.
diff --git a/chromium/base/win/async_operation.h b/chromium/base/win/async_operation.h
index 97f7aab61d4..fbe82d23d3e 100644
--- a/chromium/base/win/async_operation.h
+++ b/chromium/base/win/async_operation.h
@@ -19,6 +19,7 @@
#include "base/memory/weak_ptr.h"
#include "base/optional.h"
#include "base/threading/thread_checker.h"
+#include "base/win/winrt_foundation_helpers.h"
namespace base {
namespace win {
@@ -70,70 +71,20 @@ namespace win {
namespace internal {
// Template tricks needed to dispatch to the correct implementation below.
-//
-// For all types which are neither InterfaceGroups nor RuntimeClasses, the
-// following three typedefs are synonyms for a single C++ type. But for
-// InterfaceGroups and RuntimeClasses, they are different types:
-// LogicalT: The C++ Type for the InterfaceGroup or RuntimeClass, when
-// used as a template parameter. Eg "RCFoo*"
-// AbiT: The C++ type for the default interface used to represent the
-// InterfaceGroup or RuntimeClass when passed as a method parameter.
-// Eg "IFoo*"
-// ComplexT: An instantiation of the Internal "AggregateType" template that
-// combines LogicalT with AbiT. Eg "AggregateType<RCFoo*,IFoo*>"
-//
-// windows.foundation.collections.h defines the following template and
-// semantics in Windows::Foundation::Internal:
-//
-// template <class LogicalType, class AbiType>
-// struct AggregateType;
-//
-// LogicalType - the Windows Runtime type (eg, runtime class, inteface group,
-// etc) being provided as an argument to an _impl template, when
-// that type cannot be represented at the ABI.
-// AbiType - the type used for marshalling, ie "at the ABI", for the
-// logical type.
-template <typename T>
-using ComplexT =
- typename ABI::Windows::Foundation::IAsyncOperation<T>::TResult_complex;
+// See base/win/winrt_foundation_helpers.h for explanation.
template <typename T>
-using AbiT =
- typename ABI::Windows::Foundation::Internal::GetAbiType<ComplexT<T>>::type;
-
-template <typename T>
-using LogicalT = typename ABI::Windows::Foundation::Internal::GetLogicalType<
- ComplexT<T>>::type;
-
-template <typename T>
-using InterfaceT = std::remove_pointer_t<AbiT<T>>;
-
-// Compile time switch to decide what container to use for the async results for
-// |T|. Depends on whether the underlying Abi type is a pointer to IUnknown or
-// not. It queries the internals of Windows::Foundation to obtain this
-// information.
-template <typename T>
-using ResultT =
- std::conditional_t<std::is_convertible<AbiT<T>, IUnknown*>::value,
- Microsoft::WRL::ComPtr<std::remove_pointer_t<AbiT<T>>>,
- AbiT<T>>;
+using Complex =
+ typename ABI::Windows::Foundation::IAsyncOperation<T>::TResult_complex;
template <typename T>
-using StorageT =
- std::conditional_t<std::is_convertible<AbiT<T>, IUnknown*>::value,
- Microsoft::WRL::ComPtr<std::remove_pointer_t<AbiT<T>>>,
- base::Optional<AbiT<T>>>;
+using Abi = AbiType<Complex<T>>;
template <typename T>
-HRESULT CopyStorage(const Microsoft::WRL::ComPtr<T>& storage, T** results) {
- return storage.CopyTo(results);
-}
+using OptionalStorageT = OptionalStorageType<Complex<T>>;
template <typename T>
-HRESULT CopyStorage(const base::Optional<T>& storage, T* results) {
- *results = *storage;
- return S_OK;
-}
+using StorageT = StorageType<Complex<T>>;
} // namespace internal
@@ -144,10 +95,11 @@ class AsyncOperation
Microsoft::WRL::WinRt | Microsoft::WRL::InhibitRoOriginateError>,
ABI::Windows::Foundation::IAsyncOperation<T>> {
public:
+ using AbiT = internal::Abi<T>;
+ using OptionalStorageT = internal::OptionalStorageT<T>;
using StorageT = internal::StorageT<T>;
- using ResultT = internal::ResultT<T>;
using Handler = ABI::Windows::Foundation::IAsyncOperationCompletedHandler<T>;
- using ResultCallback = base::OnceCallback<void(ResultT)>;
+ using ResultCallback = base::OnceCallback<void(StorageT)>;
AsyncOperation() : weak_factory_(this) {
// Note: This can't be done in the constructor initializer list. This is
@@ -169,9 +121,9 @@ class AsyncOperation
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
return handler_.CopyTo(handler);
}
- IFACEMETHODIMP GetResults(internal::AbiT<T>* results) override {
+ IFACEMETHODIMP GetResults(AbiT* results) override {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
- return storage_ ? internal::CopyStorage(storage_, results) : E_PENDING;
+ return results_ ? internal::CopyTo(results_, results) : E_PENDING;
}
ResultCallback callback() {
@@ -185,16 +137,16 @@ class AsyncOperation
handler_->Invoke(this, ABI::Windows::Foundation::AsyncStatus::Completed);
}
- void OnResult(ResultT result) {
+ void OnResult(StorageT result) {
DCHECK_CALLED_ON_VALID_THREAD(thread_checker_);
- DCHECK(!storage_);
- storage_ = std::move(result);
+ DCHECK(!results_);
+ results_ = std::move(result);
InvokeCompletedHandler();
}
ResultCallback callback_;
Microsoft::WRL::ComPtr<Handler> handler_;
- StorageT storage_;
+ OptionalStorageT results_;
THREAD_CHECKER(thread_checker_);
base::WeakPtrFactory<AsyncOperation> weak_factory_;
diff --git a/chromium/base/win/com_init_check_hook.cc b/chromium/base/win/com_init_check_hook.cc
index 79bf8d63147..9ec32fbe72b 100644
--- a/chromium/base/win/com_init_check_hook.cc
+++ b/chromium/base/win/com_init_check_hook.cc
@@ -278,12 +278,12 @@ class HookManager {
//
// If you hit this assert as part of migrating to the Task Scheduler,
// evaluate your threading guarantees and dispatch your work with
- // base::CreateCOMSTATaskRunnerWithTraits().
+ // base::CreateCOMSTATaskRunner().
//
// If you need MTA support, ping //base/task/thread_pool/OWNERS.
AssertComInitialized(
"CoCreateInstance calls in Chromium require explicit COM "
- "initialization via base::CreateCOMSTATaskRunnerWithTraits() or "
+ "initialization via base::CreateCOMSTATaskRunner() or "
"ScopedCOMInitializer. See the comment in DCheckedCoCreateInstance for "
"more details.");
return original_co_create_instance_body_function_(rclsid, pUnkOuter,
diff --git a/chromium/base/win/enum_variant.cc b/chromium/base/win/enum_variant.cc
index 2975560a9ec..38861bfe70f 100644
--- a/chromium/base/win/enum_variant.cc
+++ b/chromium/base/win/enum_variant.cc
@@ -11,18 +11,19 @@
namespace base {
namespace win {
-EnumVariant::EnumVariant(unsigned long count)
- : items_(new VARIANT[count]),
- count_(count),
- current_index_(0) {
+EnumVariant::EnumVariant(ULONG count) : items_(), current_index_(0) {
+ for (ULONG i = 0; i < count; ++i)
+ items_.emplace_back(ScopedVariant::kEmptyVariant);
}
-EnumVariant::~EnumVariant() {
-}
+EnumVariant::~EnumVariant() = default;
-VARIANT* EnumVariant::ItemAt(unsigned long index) {
- DCHECK(index < count_);
- return &items_[index];
+VARIANT* EnumVariant::ItemAt(ULONG index) {
+ DCHECK_LT(index, items_.size());
+ // This is a hack to return a mutable pointer to the ScopedVariant, even
+ // though the original intent of the AsInput method was to allow only readonly
+ // access to the wrapped variant.
+ return items_[index].AsInput();
}
ULONG STDMETHODCALLTYPE EnumVariant::AddRef() {
@@ -46,19 +47,28 @@ STDMETHODIMP EnumVariant::QueryInterface(REFIID riid, void** ppv) {
STDMETHODIMP EnumVariant::Next(ULONG requested_count,
VARIANT* out_elements,
ULONG* out_elements_received) {
- unsigned long count = std::min(requested_count, count_ - current_index_);
- for (unsigned long i = 0; i < count; ++i)
- out_elements[i] = items_[current_index_ + i];
+ if (!out_elements)
+ return E_INVALIDARG;
+
+ DCHECK_LE(current_index_, items_.size());
+ ULONG available_count = ULONG{items_.size()} - current_index_;
+ ULONG count = std::min(requested_count, available_count);
+ for (ULONG i = 0; i < count; ++i)
+ out_elements[i] = items_[current_index_ + i].Copy();
current_index_ += count;
- *out_elements_received = count;
+
+ // The caller can choose not to get the number of received elements by setting
+ // |out_elements_received| to nullptr.
+ if (out_elements_received)
+ *out_elements_received = count;
return (count == requested_count ? S_OK : S_FALSE);
}
STDMETHODIMP EnumVariant::Skip(ULONG skip_count) {
- unsigned long count = skip_count;
- if (current_index_ + count > count_)
- count = count_ - current_index_;
+ ULONG count = skip_count;
+ if (current_index_ + count > ULONG{items_.size()})
+ count = ULONG{items_.size()} - current_index_;
current_index_ += count;
return (count == skip_count ? S_OK : S_FALSE);
@@ -70,9 +80,14 @@ STDMETHODIMP EnumVariant::Reset() {
}
STDMETHODIMP EnumVariant::Clone(IEnumVARIANT** out_cloned_object) {
- EnumVariant* other = new EnumVariant(count_);
- if (count_ > 0)
- memcpy(other->ItemAt(0), &items_[0], count_ * sizeof(VARIANT));
+ if (!out_cloned_object)
+ return E_INVALIDARG;
+
+ size_t count = items_.size();
+ EnumVariant* other = new EnumVariant(ULONG{count});
+ for (size_t i = 0; i < count; ++i)
+ other->items_[i] = static_cast<const VARIANT&>(items_[i]);
+
other->Skip(current_index_);
other->AddRef();
*out_cloned_object = other;
diff --git a/chromium/base/win/enum_variant.h b/chromium/base/win/enum_variant.h
index e27afcd93d4..47ffd070de1 100644
--- a/chromium/base/win/enum_variant.h
+++ b/chromium/base/win/enum_variant.h
@@ -8,8 +8,10 @@
#include <unknwn.h>
#include <memory>
+#include <vector>
#include "base/win/iunknown_impl.h"
+#include "base/win/scoped_variant.h"
namespace base {
namespace win {
@@ -19,12 +21,12 @@ class BASE_EXPORT EnumVariant
: public IEnumVARIANT,
public IUnknownImpl {
public:
- // The constructor allocates an array of size |count|. Then use
- // ItemAt to set the value of each item in the array to initialize it.
- explicit EnumVariant(unsigned long count);
+ // The constructor allocates a vector of empty ScopedVariants of size |count|.
+ // Use ItemAt to set the value of each item in the array.
+ explicit EnumVariant(ULONG count);
// Returns a mutable pointer to the item at position |index|.
- VARIANT* ItemAt(unsigned long index);
+ VARIANT* ItemAt(ULONG index);
// IUnknown.
ULONG STDMETHODCALLTYPE AddRef() override;
@@ -42,9 +44,8 @@ class BASE_EXPORT EnumVariant
private:
~EnumVariant() override;
- std::unique_ptr<VARIANT[]> items_;
- unsigned long count_;
- unsigned long current_index_;
+ std::vector<ScopedVariant> items_;
+ ULONG current_index_;
};
} // namespace win
diff --git a/chromium/base/win/enum_variant_unittest.cc b/chromium/base/win/enum_variant_unittest.cc
index 288c97ea559..77b2fdef73c 100644
--- a/chromium/base/win/enum_variant_unittest.cc
+++ b/chromium/base/win/enum_variant_unittest.cc
@@ -29,9 +29,11 @@ TEST(EnumVariantTest, EmptyEnumVariant) {
ienumvariant->Release();
VARIANT out_element;
+ ::VariantInit(&out_element);
ULONG out_received = 0;
EXPECT_EQ(S_FALSE, ev->Next(1, &out_element, &out_received));
EXPECT_EQ(0u, out_received);
+ ::VariantClear(&out_element);
EXPECT_EQ(S_FALSE, ev->Skip(1));
@@ -66,20 +68,26 @@ TEST(EnumVariantTest, SimpleEnumVariant) {
// Get elements one at a time.
VARIANT out_element;
+ ::VariantInit(&out_element);
ULONG out_received = 0;
EXPECT_EQ(S_OK, ev->Next(1, &out_element, &out_received));
EXPECT_EQ(1u, out_received);
EXPECT_EQ(VT_I4, out_element.vt);
EXPECT_EQ(10, out_element.lVal);
+ ::VariantClear(&out_element);
EXPECT_EQ(S_OK, ev->Skip(1));
EXPECT_EQ(S_OK, ev->Next(1, &out_element, &out_received));
EXPECT_EQ(1u, out_received);
EXPECT_EQ(VT_I4, out_element.vt);
EXPECT_EQ(30, out_element.lVal);
+ ::VariantClear(&out_element);
EXPECT_EQ(S_FALSE, ev->Next(1, &out_element, &out_received));
+ ::VariantClear(&out_element);
// Reset and get all elements at once.
VARIANT out_elements[3];
+ for (int i = 0; i < 3; ++i)
+ ::VariantInit(&out_elements[i]);
EXPECT_EQ(S_OK, ev->Reset());
EXPECT_EQ(S_OK, ev->Next(3, out_elements, &out_received));
EXPECT_EQ(3u, out_received);
@@ -89,23 +97,29 @@ TEST(EnumVariantTest, SimpleEnumVariant) {
EXPECT_EQ(20, out_elements[1].lVal);
EXPECT_EQ(VT_I4, out_elements[2].vt);
EXPECT_EQ(30, out_elements[2].lVal);
+ for (int i = 0; i < 3; ++i)
+ ::VariantClear(&out_elements[i]);
EXPECT_EQ(S_FALSE, ev->Next(1, &out_element, &out_received));
+ ::VariantClear(&out_element);
// Clone it.
IEnumVARIANT* ev2 = NULL;
EXPECT_EQ(S_OK, ev->Clone(&ev2));
EXPECT_TRUE(ev2 != NULL);
EXPECT_EQ(S_FALSE, ev->Next(1, &out_element, &out_received));
+ ::VariantClear(&out_element);
EXPECT_EQ(S_OK, ev2->Reset());
- EXPECT_EQ(S_OK, ev2->Next(3, out_elements, &out_received));
- EXPECT_EQ(3u, out_received);
+ EXPECT_EQ(S_OK, ev2->Next(3, out_elements, nullptr));
EXPECT_EQ(VT_I4, out_elements[0].vt);
EXPECT_EQ(10, out_elements[0].lVal);
EXPECT_EQ(VT_I4, out_elements[1].vt);
EXPECT_EQ(20, out_elements[1].lVal);
EXPECT_EQ(VT_I4, out_elements[2].vt);
EXPECT_EQ(30, out_elements[2].lVal);
- EXPECT_EQ(S_FALSE, ev2->Next(1, &out_element, &out_received));
+ for (int i = 0; i < 3; ++i)
+ ::VariantClear(&out_elements[i]);
+ EXPECT_EQ(S_FALSE, ev2->Next(1, &out_element, nullptr));
+ ::VariantClear(&out_element);
ULONG ev2_finalrefcount = ev2->Release();
EXPECT_EQ(0u, ev2_finalrefcount);
diff --git a/chromium/base/win/hstring_compare.cc b/chromium/base/win/hstring_compare.cc
new file mode 100644
index 00000000000..34d38eefce3
--- /dev/null
+++ b/chromium/base/win/hstring_compare.cc
@@ -0,0 +1,40 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/hstring_compare.h"
+
+#include <winstring.h>
+
+#include "base/native_library.h"
+#include "base/win/windows_version.h"
+
+namespace base {
+namespace win {
+
+HRESULT HStringCompare(HSTRING string1, HSTRING string2, INT32* result) {
+ using CompareStringFunc = decltype(&::WindowsCompareStringOrdinal);
+
+ static const auto compare_string_func = []() -> CompareStringFunc {
+ if (GetVersion() < Version::WIN8)
+ return nullptr;
+
+ NativeLibraryLoadError load_error;
+ NativeLibrary combase_module =
+ PinSystemLibrary(FILE_PATH_LITERAL("combase.dll"), &load_error);
+ if (load_error.code)
+ return nullptr;
+
+ return reinterpret_cast<CompareStringFunc>(
+ GetFunctionPointerFromNativeLibrary(combase_module,
+ "WindowsCompareStringOrdinal"));
+ }();
+
+ if (!compare_string_func)
+ return E_FAIL;
+
+ return compare_string_func(string1, string2, result);
+}
+
+} // namespace win
+} // namespace base
diff --git a/chromium/base/win/hstring_compare.h b/chromium/base/win/hstring_compare.h
new file mode 100644
index 00000000000..448fdb2a919
--- /dev/null
+++ b/chromium/base/win/hstring_compare.h
@@ -0,0 +1,28 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_HSTRING_COMPARE_H_
+#define BASE_WIN_HSTRING_COMPARE_H_
+
+#include <hstring.h>
+
+#include "base/base_export.h"
+
+namespace base {
+namespace win {
+
+// HStringCompare provides a delayloaded version of WindowsCompareStringOrdinal
+// function, which compares HSTRING values.
+//
+// Note that it requires certain functions that are only available on Windows 8
+// and later, and that these functions need to be delayloaded to avoid breaking
+// Chrome on Windows 7.
+BASE_EXPORT HRESULT HStringCompare(HSTRING string1,
+ HSTRING string2,
+ INT32* result);
+
+} // namespace win
+} // namespace base
+
+#endif // BASE_WIN_HSTRING_COMPARE_H_
diff --git a/chromium/base/win/hstring_compare_unittest.cc b/chromium/base/win/hstring_compare_unittest.cc
new file mode 100644
index 00000000000..48ffddf4ca5
--- /dev/null
+++ b/chromium/base/win/hstring_compare_unittest.cc
@@ -0,0 +1,74 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/hstring_compare.h"
+
+#include "base/win/hstring_reference.h"
+#include "base/win/windows_version.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace win {
+namespace {
+
+constexpr wchar_t kTestString12[] = L"12";
+constexpr wchar_t kTestString123[] = L"123";
+constexpr wchar_t kTestString1234[] = L"1234";
+
+} // namespace
+
+TEST(HStringCompareTest, WorksOnWindows8AndAbove) {
+ INT32 result;
+ HRESULT hr = HStringCompare(nullptr, nullptr, &result);
+ // HStringCompare requires WinRT core functions, which are not available in
+ // older versions.
+ if (GetVersion() < Version::WIN8)
+ EXPECT_HRESULT_FAILED(hr);
+ else
+ EXPECT_HRESULT_SUCCEEDED(hr);
+}
+
+TEST(HStringCompareTest, FirstStringBeforeSecondString) {
+ if (GetVersion() < Version::WIN8)
+ return;
+
+ ASSERT_TRUE(HStringReference::ResolveCoreWinRTStringDelayload());
+
+ const HStringReference string12(kTestString12);
+ const HStringReference string123(kTestString123);
+ INT32 result;
+ HRESULT hr = HStringCompare(string12.Get(), string123.Get(), &result);
+ EXPECT_HRESULT_SUCCEEDED(hr);
+ EXPECT_EQ(-1, result);
+}
+
+TEST(HStringCompareTest, StringsEqual) {
+ if (GetVersion() < Version::WIN8)
+ return;
+
+ ASSERT_TRUE(HStringReference::ResolveCoreWinRTStringDelayload());
+
+ const HStringReference string123(kTestString123);
+ INT32 result;
+ HRESULT hr = HStringCompare(string123.Get(), string123.Get(), &result);
+ EXPECT_HRESULT_SUCCEEDED(hr);
+ EXPECT_EQ(0, result);
+}
+
+TEST(HStringCompareTest, FirstStringAfterSecondString) {
+ if (GetVersion() < Version::WIN8)
+ return;
+
+ ASSERT_TRUE(HStringReference::ResolveCoreWinRTStringDelayload());
+
+ const HStringReference string123(kTestString123);
+ const HStringReference string1234(kTestString1234);
+ INT32 result;
+ HRESULT hr = HStringCompare(string1234.Get(), string123.Get(), &result);
+ EXPECT_HRESULT_SUCCEEDED(hr);
+ EXPECT_EQ(1, result);
+}
+
+} // namespace win
+} // namespace base
diff --git a/chromium/base/win/i18n.cc b/chromium/base/win/i18n.cc
index eb44c496675..04f533a713f 100644
--- a/chromium/base/win/i18n.cc
+++ b/chromium/base/win/i18n.cc
@@ -7,146 +7,40 @@
#include <windows.h>
#include "base/logging.h"
-#include "base/stl_util.h"
+#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
-#include "base/strings/utf_string_conversions.h"
namespace {
-// Keep this enum in sync with kLanguageFunctionNames.
-enum LanguageFunction {
- SYSTEM_LANGUAGES,
- USER_LANGUAGES,
- PROCESS_LANGUAGES,
- THREAD_LANGUAGES,
- NUM_FUNCTIONS
-};
+typedef decltype(::GetSystemPreferredUILanguages)* GetPreferredUILanguages_Fn;
-const char kSystemLanguagesFunctionName[] = "GetSystemPreferredUILanguages";
-const char kUserLanguagesFunctionName[] = "GetUserPreferredUILanguages";
-const char kProcessLanguagesFunctionName[] = "GetProcessPreferredUILanguages";
-const char kThreadLanguagesFunctionName[] = "GetThreadPreferredUILanguages";
-
-// Keep this array in sync with enum LanguageFunction.
-const char *const kLanguageFunctionNames[] = {
- &kSystemLanguagesFunctionName[0],
- &kUserLanguagesFunctionName[0],
- &kProcessLanguagesFunctionName[0],
- &kThreadLanguagesFunctionName[0]
-};
-
-static_assert(NUM_FUNCTIONS == base::size(kLanguageFunctionNames),
- "LanguageFunction enum and kLanguageFunctionNames array must be "
- "kept in sync");
-
-// Calls one of the MUI Get*PreferredUILanguages functions, placing the result
-// in |languages|. |function| identifies the function to call and |flags| is
-// the function-specific flags (callers must not specify MUI_LANGUAGE_ID or
-// MUI_LANGUAGE_NAME). Returns true if at least one language is placed in
-// |languages|.
-bool GetMUIPreferredUILanguageList(LanguageFunction function, ULONG flags,
- std::vector<wchar_t>* languages) {
- DCHECK(0 <= function && NUM_FUNCTIONS > function);
- DCHECK_EQ(0U, (flags & (MUI_LANGUAGE_ID | MUI_LANGUAGE_NAME)));
- DCHECK(languages);
-
- HMODULE kernel32 = GetModuleHandle(L"kernel32.dll");
- if (NULL != kernel32) {
- typedef BOOL (WINAPI* GetPreferredUILanguages_Fn)(
- DWORD, PULONG, PZZWSTR, PULONG);
- GetPreferredUILanguages_Fn get_preferred_ui_languages =
- reinterpret_cast<GetPreferredUILanguages_Fn>(
- GetProcAddress(kernel32, kLanguageFunctionNames[function]));
- if (NULL != get_preferred_ui_languages) {
- const ULONG call_flags = flags | MUI_LANGUAGE_NAME;
- ULONG language_count = 0;
- ULONG buffer_length = 0;
- if (get_preferred_ui_languages(call_flags, &language_count, NULL,
- &buffer_length) &&
- 0 != buffer_length) {
- languages->resize(buffer_length);
- if (get_preferred_ui_languages(call_flags, &language_count,
- &(*languages)[0], &buffer_length) &&
- 0 != language_count) {
- DCHECK(languages->size() == buffer_length);
- return true;
- } else {
- DPCHECK(0 == language_count)
- << "Failed getting preferred UI languages.";
- }
- } else {
- DPCHECK(0 == buffer_length)
- << "Failed getting size of preferred UI languages.";
- }
- } else {
- DVLOG(2) << "MUI not available.";
- }
- } else {
- NOTREACHED() << "kernel32.dll not found.";
- }
-
- return false;
-}
-
-bool GetUserDefaultUILanguage(base::string16* language,
- base::string16* region) {
- DCHECK(language);
-
- LANGID lang_id = ::GetUserDefaultUILanguage();
- if (LOCALE_CUSTOM_UI_DEFAULT != lang_id) {
- const LCID locale_id = MAKELCID(lang_id, SORT_DEFAULT);
- // max size for LOCALE_SISO639LANGNAME and LOCALE_SISO3166CTRYNAME is 9
- base::char16 result_buffer[9];
- int result_length = GetLocaleInfo(locale_id, LOCALE_SISO639LANGNAME,
- base::as_writable_wcstr(result_buffer),
- base::size(result_buffer));
- DPCHECK(0 != result_length) << "Failed getting language id";
- if (1 < result_length) {
- language->assign(&result_buffer[0], result_length - 1);
- region->clear();
- if (SUBLANG_NEUTRAL != SUBLANGID(lang_id)) {
- result_length = GetLocaleInfo(locale_id, LOCALE_SISO3166CTRYNAME,
- base::as_writable_wcstr(result_buffer),
- base::size(result_buffer));
- DPCHECK(0 != result_length) << "Failed getting region id";
- if (1 < result_length)
- region->assign(&result_buffer[0], result_length - 1);
- }
- return true;
- }
- } else {
- // This is entirely unexpected on pre-Vista, which is the only time we
- // should try GetUserDefaultUILanguage anyway.
- NOTREACHED() << "Cannot determine language for a supplemental locale.";
- }
- return false;
-}
-
-bool GetPreferredUILanguageList(LanguageFunction function,
+bool GetPreferredUILanguageList(GetPreferredUILanguages_Fn function,
ULONG flags,
std::vector<base::string16>* languages) {
- std::vector<wchar_t> buffer;
- base::string16 language;
- base::string16 region;
+ DCHECK_EQ(0U, (flags & (MUI_LANGUAGE_ID | MUI_LANGUAGE_NAME)));
+ const ULONG call_flags = flags | MUI_LANGUAGE_NAME;
+ ULONG language_count = 0;
+ ULONG buffer_length = 0;
+ if (!function(call_flags, &language_count, nullptr, &buffer_length) ||
+ 0 == buffer_length) {
+ DPCHECK(0 == buffer_length)
+ << "Failed getting size of preferred UI languages.";
+ return false;
+ }
- if (GetMUIPreferredUILanguageList(function, flags, &buffer)) {
- std::vector<wchar_t>::const_iterator scan = buffer.begin();
- language = base::WideToUTF16(&*scan);
- while (!language.empty()) {
- languages->push_back(language);
- scan += language.size() + 1;
- language = base::WideToUTF16(&*scan);
- }
- } else if (GetUserDefaultUILanguage(&language, &region)) {
- // Mimic the MUI behavior of putting the neutral version of the lang after
- // the regional one (e.g., "fr-CA, fr").
- if (!region.empty())
- languages->push_back(language + base::string16(1, '-') + region);
- languages->push_back(language);
- } else {
+ base::string16 buffer(buffer_length, '\0');
+ if (!function(call_flags, &language_count, base::as_writable_wcstr(buffer),
+ &buffer_length) ||
+ 0 == language_count) {
+ DPCHECK(0 == language_count) << "Failed getting preferred UI languages.";
return false;
}
+ // Split string on NUL characters.
+ *languages =
+ base::SplitString(buffer, base::string16(1, '\0'), base::KEEP_WHITESPACE,
+ base::SPLIT_WANT_NONEMPTY);
+ DCHECK_EQ(languages->size(), language_count);
return true;
}
@@ -158,14 +52,15 @@ namespace i18n {
bool GetUserPreferredUILanguageList(std::vector<base::string16>* languages) {
DCHECK(languages);
- return GetPreferredUILanguageList(USER_LANGUAGES, 0, languages);
+ return GetPreferredUILanguageList(::GetUserPreferredUILanguages, 0,
+ languages);
}
bool GetThreadPreferredUILanguageList(std::vector<base::string16>* languages) {
DCHECK(languages);
return GetPreferredUILanguageList(
- THREAD_LANGUAGES, MUI_MERGE_SYSTEM_FALLBACK | MUI_MERGE_USER_FALLBACK,
- languages);
+ ::GetThreadPreferredUILanguages,
+ MUI_MERGE_SYSTEM_FALLBACK | MUI_MERGE_USER_FALLBACK, languages);
}
} // namespace i18n
diff --git a/chromium/base/win/i18n_unittest.cc b/chromium/base/win/i18n_unittest.cc
index 01888381d2a..b7973c3e659 100644
--- a/chromium/base/win/i18n_unittest.cc
+++ b/chromium/base/win/i18n_unittest.cc
@@ -4,12 +4,13 @@
// This file contains unit tests for Windows internationalization funcs.
-#include "testing/gtest/include/gtest/gtest.h"
-
#include <stddef.h>
+#include <string.h>
+#include "base/strings/string_util.h"
#include "base/win/i18n.h"
#include "base/win/windows_version.h"
+#include "testing/gtest/include/gtest/gtest.h"
namespace base {
namespace win {
@@ -22,6 +23,8 @@ TEST(I18NTest, GetUserPreferredUILanguageList) {
EXPECT_FALSE(languages.empty());
for (const auto& language : languages) {
EXPECT_FALSE(language.empty());
+ // Ensure there's no extra trailing 0 characters.
+ EXPECT_EQ(language.size(), wcslen(base::as_wcstr(language)));
}
}
@@ -32,6 +35,7 @@ TEST(I18NTest, GetThreadPreferredUILanguageList) {
EXPECT_FALSE(languages.empty());
for (const auto& language : languages) {
EXPECT_FALSE(language.empty());
+ EXPECT_EQ(language.size(), wcslen(base::as_wcstr(language)));
}
}
diff --git a/chromium/base/win/registry.cc b/chromium/base/win/registry.cc
index d7a1890b818..7c324938e4b 100644
--- a/chromium/base/win/registry.cc
+++ b/chromium/base/win/registry.cc
@@ -281,7 +281,7 @@ LONG RegKey::DeleteEmptyKey(const char16* name) {
return result;
if (count == 0)
- return RegDeleteKeyExWrapper(key_, name, wow64access_, 0);
+ return RegDeleteKeyEx(key_, name, wow64access_, 0);
return ERROR_DIR_NOT_EMPTY;
}
@@ -431,28 +431,9 @@ bool RegKey::StartWatching(ChangeCallback callback) {
}
// static
-LONG RegKey::RegDeleteKeyExWrapper(HKEY hKey,
- const char16* lpSubKey,
- REGSAM samDesired,
- DWORD Reserved) {
- typedef LSTATUS(WINAPI* RegDeleteKeyExPtr)(HKEY, LPCWSTR, REGSAM, DWORD);
-
- RegDeleteKeyExPtr reg_delete_key_ex_func =
- reinterpret_cast<RegDeleteKeyExPtr>(
- GetProcAddress(GetModuleHandleA("advapi32.dll"), "RegDeleteKeyExW"));
-
- if (reg_delete_key_ex_func)
- return reg_delete_key_ex_func(hKey, as_wcstr(lpSubKey), samDesired,
- Reserved);
-
- // Windows XP does not support RegDeleteKeyEx, so fallback to RegDeleteKey.
- return RegDeleteKey(hKey, as_wcstr(lpSubKey));
-}
-
-// static
LONG RegKey::RegDelRecurse(HKEY root_key, const char16* name, REGSAM access) {
// First, see if the key can be deleted without having to recurse.
- LONG result = RegDeleteKeyExWrapper(root_key, name, access, 0);
+ LONG result = RegDeleteKeyEx(root_key, name, access, 0);
if (result == ERROR_SUCCESS)
return result;
@@ -497,7 +478,7 @@ LONG RegKey::RegDelRecurse(HKEY root_key, const char16* name, REGSAM access) {
RegCloseKey(target_key);
// Try again to delete the key.
- result = RegDeleteKeyExWrapper(root_key, name, access, 0);
+ result = RegDeleteKeyEx(root_key, name, access, 0);
return result;
}
diff --git a/chromium/base/win/registry.h b/chromium/base/win/registry.h
index 4b859440eb9..1c57fc9f537 100644
--- a/chromium/base/win/registry.h
+++ b/chromium/base/win/registry.h
@@ -142,13 +142,6 @@ class BASE_EXPORT RegKey {
private:
class Watcher;
- // Calls RegDeleteKeyEx on supported platforms, alternatively falls back to
- // RegDeleteKey.
- static LONG RegDeleteKeyExWrapper(HKEY hKey,
- const char16* lpSubKey,
- REGSAM samDesired,
- DWORD Reserved);
-
// Recursively deletes a key and all of its subkeys.
static LONG RegDelRecurse(HKEY root_key, const char16* name, REGSAM access);
diff --git a/chromium/base/win/scoped_variant.cc b/chromium/base/win/scoped_variant.cc
index 61f645ab71d..2d87dacbcac 100644
--- a/chromium/base/win/scoped_variant.cc
+++ b/chromium/base/win/scoped_variant.cc
@@ -12,6 +12,11 @@ namespace win {
// Global, const instance of an empty variant.
const VARIANT ScopedVariant::kEmptyVariant = {{{VT_EMPTY}}};
+ScopedVariant::ScopedVariant(ScopedVariant&& var) {
+ var_.vt = VT_EMPTY;
+ Reset(var.Release());
+}
+
ScopedVariant::~ScopedVariant() {
static_assert(sizeof(ScopedVariant) == sizeof(VARIANT), "ScopedVariantSize");
::VariantClear(&var_);
@@ -222,6 +227,12 @@ void ScopedVariant::Set(const VARIANT& var) {
}
}
+ScopedVariant& ScopedVariant::operator=(ScopedVariant&& var) {
+ if (var.ptr() != &var_)
+ Reset(var.Release());
+ return *this;
+}
+
ScopedVariant& ScopedVariant::operator=(const VARIANT& var) {
if (&var != &var_) {
VariantClear(&var_);
diff --git a/chromium/base/win/scoped_variant.h b/chromium/base/win/scoped_variant.h
index caf6f504350..afeecce0d67 100644
--- a/chromium/base/win/scoped_variant.h
+++ b/chromium/base/win/scoped_variant.h
@@ -61,6 +61,9 @@ class BASE_EXPORT ScopedVariant {
// Copies the variant.
explicit ScopedVariant(const VARIANT& var);
+ // Moves the wrapped variant into another ScopedVariant.
+ ScopedVariant(ScopedVariant&& var);
+
~ScopedVariant();
inline VARTYPE type() const {
@@ -126,6 +129,9 @@ class BASE_EXPORT ScopedVariant {
// over that.
const VARIANT* ptr() const { return &var_; }
+ // Moves the ScopedVariant to another instance.
+ ScopedVariant& operator=(ScopedVariant&& var);
+
// Like other scoped classes (e.g. scoped_refptr, ScopedBstr,
// Microsoft::WRL::ComPtr) we support the assignment operator for the type we
// wrap.
diff --git a/chromium/base/win/scoped_variant_unittest.cc b/chromium/base/win/scoped_variant_unittest.cc
index 7d61e2859b8..ef762f9781d 100644
--- a/chromium/base/win/scoped_variant_unittest.cc
+++ b/chromium/base/win/scoped_variant_unittest.cc
@@ -4,6 +4,8 @@
#include <stdint.h>
+#include <utility>
+
#include "base/win/scoped_variant.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -214,6 +216,17 @@ TEST(ScopedVariantTest, ScopedVariant) {
{
ScopedVariant ref1(&faker);
EXPECT_EQ(1, faker.ref_count());
+ ScopedVariant ref2(std::move(ref1));
+ EXPECT_EQ(1, faker.ref_count());
+ ScopedVariant ref3;
+ ref3 = std::move(ref2);
+ EXPECT_EQ(1, faker.ref_count());
+ }
+ EXPECT_EQ(0, faker.ref_count());
+
+ {
+ ScopedVariant ref1(&faker);
+ EXPECT_EQ(1, faker.ref_count());
ScopedVariant ref2(static_cast<const VARIANT&>(ref1));
EXPECT_EQ(2, faker.ref_count());
ScopedVariant ref3;
diff --git a/chromium/base/win/startup_information.cc b/chromium/base/win/startup_information.cc
index 9986674a059..372b85e04ff 100644
--- a/chromium/base/win/startup_information.cc
+++ b/chromium/base/win/startup_information.cc
@@ -6,83 +6,42 @@
#include "base/logging.h"
-namespace {
-
-typedef BOOL (WINAPI *InitializeProcThreadAttributeListFunction)(
- LPPROC_THREAD_ATTRIBUTE_LIST attribute_list,
- DWORD attribute_count,
- DWORD flags,
- PSIZE_T size);
-static InitializeProcThreadAttributeListFunction
- initialize_proc_thread_attribute_list;
-
-typedef BOOL (WINAPI *UpdateProcThreadAttributeFunction)(
- LPPROC_THREAD_ATTRIBUTE_LIST attribute_list,
- DWORD flags,
- DWORD_PTR attribute,
- PVOID value,
- SIZE_T size,
- PVOID previous_value,
- PSIZE_T return_size);
-static UpdateProcThreadAttributeFunction update_proc_thread_attribute_list;
-
-typedef VOID (WINAPI *DeleteProcThreadAttributeListFunction)(
- LPPROC_THREAD_ATTRIBUTE_LIST lpAttributeList);
-static DeleteProcThreadAttributeListFunction delete_proc_thread_attribute_list;
-
-} // namespace
-
namespace base {
namespace win {
-StartupInformation::StartupInformation() {
- memset(&startup_info_, 0, sizeof(startup_info_));
+StartupInformation::StartupInformation() : startup_info_() {
startup_info_.StartupInfo.cb = sizeof(startup_info_);
-
- // Load the attribute API functions.
- if (!initialize_proc_thread_attribute_list ||
- !update_proc_thread_attribute_list ||
- !delete_proc_thread_attribute_list) {
- HMODULE module = ::GetModuleHandleW(L"kernel32.dll");
- initialize_proc_thread_attribute_list =
- reinterpret_cast<InitializeProcThreadAttributeListFunction>(
- ::GetProcAddress(module, "InitializeProcThreadAttributeList"));
- update_proc_thread_attribute_list =
- reinterpret_cast<UpdateProcThreadAttributeFunction>(
- ::GetProcAddress(module, "UpdateProcThreadAttribute"));
- delete_proc_thread_attribute_list =
- reinterpret_cast<DeleteProcThreadAttributeListFunction>(
- ::GetProcAddress(module, "DeleteProcThreadAttributeList"));
- }
}
StartupInformation::~StartupInformation() {
if (startup_info_.lpAttributeList) {
- delete_proc_thread_attribute_list(startup_info_.lpAttributeList);
- delete [] reinterpret_cast<BYTE*>(startup_info_.lpAttributeList);
+ ::DeleteProcThreadAttributeList(startup_info_.lpAttributeList);
}
}
bool StartupInformation::InitializeProcThreadAttributeList(
DWORD attribute_count) {
if (startup_info_.StartupInfo.cb != sizeof(startup_info_) ||
- startup_info_.lpAttributeList)
+ startup_info_.lpAttributeList) {
return false;
+ }
SIZE_T size = 0;
- initialize_proc_thread_attribute_list(NULL, attribute_count, 0, &size);
+ ::InitializeProcThreadAttributeList(nullptr, attribute_count, 0, &size);
if (size == 0)
return false;
- startup_info_.lpAttributeList =
- reinterpret_cast<LPPROC_THREAD_ATTRIBUTE_LIST>(new BYTE[size]);
- if (!initialize_proc_thread_attribute_list(startup_info_.lpAttributeList,
- attribute_count, 0, &size)) {
- delete [] reinterpret_cast<BYTE*>(startup_info_.lpAttributeList);
- startup_info_.lpAttributeList = NULL;
+ auto attribute_list = std::make_unique<char[]>(size);
+ auto* attribute_list_ptr =
+ reinterpret_cast<LPPROC_THREAD_ATTRIBUTE_LIST>(attribute_list.get());
+ if (!::InitializeProcThreadAttributeList(attribute_list_ptr, attribute_count,
+ 0, &size)) {
return false;
}
+ attribute_list_ = std::move(attribute_list);
+ startup_info_.lpAttributeList = attribute_list_ptr;
+
return true;
}
@@ -92,8 +51,9 @@ bool StartupInformation::UpdateProcThreadAttribute(
size_t size) {
if (!startup_info_.lpAttributeList)
return false;
- return !!update_proc_thread_attribute_list(startup_info_.lpAttributeList, 0,
- attribute, value, size, NULL, NULL);
+ return !!::UpdateProcThreadAttribute(startup_info_.lpAttributeList, 0,
+ attribute, value, size, nullptr,
+ nullptr);
}
} // namespace win
diff --git a/chromium/base/win/startup_information.h b/chromium/base/win/startup_information.h
index 5b777baefe1..2d4edaf5211 100644
--- a/chromium/base/win/startup_information.h
+++ b/chromium/base/win/startup_information.h
@@ -5,6 +5,8 @@
#ifndef BASE_WIN_STARTUP_INFORMATION_H_
#define BASE_WIN_STARTUP_INFORMATION_H_
+#include <memory>
+
#include <windows.h>
#include <stddef.h>
@@ -41,6 +43,7 @@ class BASE_EXPORT StartupInformation {
}
private:
+ std::unique_ptr<char[]> attribute_list_;
STARTUPINFOEXW startup_info_;
DISALLOW_COPY_AND_ASSIGN(StartupInformation);
};
diff --git a/chromium/base/win/vector.h b/chromium/base/win/vector.h
index 41c0c8119d9..ba94ecf661e 100644
--- a/chromium/base/win/vector.h
+++ b/chromium/base/win/vector.h
@@ -17,6 +17,7 @@
#include "base/base_export.h"
#include "base/containers/flat_map.h"
#include "base/logging.h"
+#include "base/win/winrt_foundation_helpers.h"
namespace base {
namespace win {
@@ -26,17 +27,21 @@ class Vector;
namespace internal {
+// Template tricks needed to dispatch to the correct implementation.
+// See base/win/winrt_foundation_helpers.h for explanation.
+
template <typename T>
using Complex =
typename ABI::Windows::Foundation::Collections::IVector<T>::T_complex;
template <typename T>
-using Logical = typename ABI::Windows::Foundation::Internal::GetLogicalType<
- Complex<T>>::type;
+using Logical = LogicalType<Complex<T>>;
+
+template <typename T>
+using Abi = AbiType<Complex<T>>;
template <typename T>
-using Abi =
- typename ABI::Windows::Foundation::Internal::GetAbiType<Complex<T>>::type;
+using Storage = StorageType<Complex<T>>;
template <typename T>
class VectorIterator
@@ -177,45 +182,6 @@ class VectorView
EventRegistrationToken vector_changed_token_;
};
-template <typename T>
-HRESULT CopyTo(const T& value, T* ptr) {
- *ptr = value;
- return S_OK;
-}
-
-template <typename T>
-HRESULT CopyTo(const Microsoft::WRL::ComPtr<T>& com_ptr, T** ptr) {
- return com_ptr.CopyTo(ptr);
-}
-
-template <typename T>
-HRESULT CopyN(typename std::vector<T>::const_iterator first,
- unsigned count,
- T* result) {
- std::copy_n(first, count, result);
- return S_OK;
-}
-
-template <typename T>
-HRESULT CopyN(
- typename std::vector<Microsoft::WRL::ComPtr<T>>::const_iterator first,
- unsigned count,
- T** result) {
- for (unsigned i = 0; i < count; ++i)
- CopyTo(*first++, result++);
- return S_OK;
-}
-
-template <typename T>
-bool IsEqual(const T& lhs, const T& rhs) {
- return lhs == rhs;
-}
-
-template <typename T>
-bool IsEqual(const Microsoft::WRL::ComPtr<T>& com_ptr, const T* ptr) {
- return com_ptr.Get() == ptr;
-}
-
} // namespace internal
// This file provides an implementation of Windows::Foundation::IVector. It
@@ -238,24 +204,9 @@ class Vector
ABI::Windows::Foundation::Collections::IIterable<
internal::Logical<T>>> {
public:
- // windows.foundation.collections.h defines the following template and
- // semantics in Windows::Foundation::Internal:
- //
- // template <class LogicalType, class AbiType>
- // struct AggregateType;
- //
- // LogicalType - the Windows Runtime type (eg, runtime class, interface
- // group, etc) being provided as an argument to an _impl
- // template, when that type cannot be represented at the ABI.
- // AbiType - the type used for marshalling, ie "at the ABI", for the
- // logical type.
using LogicalT = internal::Logical<T>;
using AbiT = internal::Abi<T>;
-
- using StorageT =
- std::conditional_t<std::is_convertible<AbiT, IUnknown*>::value,
- Microsoft::WRL::ComPtr<std::remove_pointer_t<AbiT>>,
- AbiT>;
+ using StorageT = internal::Storage<T>;
Vector() = default;
explicit Vector(const std::vector<StorageT>& vector) : vector_(vector) {}
diff --git a/chromium/base/win/win_util.cc b/chromium/base/win/win_util.cc
index 38134b53be7..607c71431ce 100644
--- a/chromium/base/win/win_util.cc
+++ b/chromium/base/win/win_util.cc
@@ -42,6 +42,7 @@
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
+#include "base/threading/scoped_thread_priority.h"
#include "base/threading/thread_restrictions.h"
#include "base/win/core_winrt_util.h"
#include "base/win/propvarutil.h"
@@ -155,6 +156,11 @@ bool* GetDomainEnrollmentStateStorage() {
bool* GetRegisteredWithManagementStateStorage() {
static bool state = []() {
+ // Mitigate the issues caused by loading DLLs on a background thread
+ // (http://crbug/973868).
+ base::ScopedThreadMayLoadLibraryOnBackgroundThread priority_boost(
+ FROM_HERE);
+
ScopedNativeLibrary library(
FilePath(FILE_PATH_LITERAL("MDMRegistration.dll")));
if (!library.is_valid())
@@ -793,6 +799,33 @@ bool IsRunningUnderDesktopName(StringPiece16 desktop_name) {
return EqualsCaseInsensitiveASCII(current_desktop_name, desktop_name);
}
+// This method is used to detect whether current session is a remote session.
+// See:
+// https://docs.microsoft.com/en-us/windows/desktop/TermServ/detecting-the-terminal-services-environment
+bool IsCurrentSessionRemote() {
+ if (::GetSystemMetrics(SM_REMOTESESSION))
+ return true;
+
+ DWORD current_session_id = 0;
+
+ if (!::ProcessIdToSessionId(::GetCurrentProcessId(), &current_session_id))
+ return false;
+
+ static constexpr wchar_t kRdpSettingsKeyName[] =
+ L"SYSTEM\\CurrentControlSet\\Control\\Terminal Server";
+ base::win::RegKey key(HKEY_LOCAL_MACHINE, kRdpSettingsKeyName, KEY_READ);
+ if (!key.Valid())
+ return false;
+
+ static constexpr wchar_t kGlassSessionIdValueName[] = L"GlassSessionId";
+ DWORD glass_session_id = 0;
+ if (key.ReadValueDW(kGlassSessionIdValueName, &glass_session_id) !=
+ ERROR_SUCCESS)
+ return false;
+
+ return current_session_id != glass_session_id;
+}
+
ScopedDomainStateForTesting::ScopedDomainStateForTesting(bool state)
: initial_state_(IsEnrolledToDomain()) {
*GetDomainEnrollmentStateStorage() = state;
diff --git a/chromium/base/win/win_util.h b/chromium/base/win/win_util.h
index 0c16d9fbd7f..921b5c102ea 100644
--- a/chromium/base/win/win_util.h
+++ b/chromium/base/win/win_util.h
@@ -223,6 +223,9 @@ BASE_EXPORT string16 GetWindowObjectName(HANDLE handle);
// characters will be compared with exact matches).
BASE_EXPORT bool IsRunningUnderDesktopName(StringPiece16 desktop_name);
+// Returns true if current session is a remote session.
+BASE_EXPORT bool IsCurrentSessionRemote();
+
// Allows changing the domain enrolled state for the life time of the object.
// The original state is restored upon destruction.
class BASE_EXPORT ScopedDomainStateForTesting {
diff --git a/chromium/base/win/win_util_unittest.cc b/chromium/base/win/win_util_unittest.cc
index 5999fb9070c..1178199e211 100644
--- a/chromium/base/win/win_util_unittest.cc
+++ b/chromium/base/win/win_util_unittest.cc
@@ -73,7 +73,7 @@ TEST(BaseWinUtilTest, TestGetLoadedModulesSnapshot) {
ASSERT_NE(static_cast<HMODULE>(NULL), new_dll.get());
ASSERT_TRUE(GetLoadedModulesSnapshot(::GetCurrentProcess(), &snapshot));
ASSERT_GT(snapshot.size(), original_snapshot_size);
- ASSERT_TRUE(ContainsValue(snapshot, new_dll.get()));
+ ASSERT_TRUE(Contains(snapshot, new_dll.get()));
}
TEST(BaseWinUtilTest, TestUint32ToInvalidHandle) {
diff --git a/chromium/base/win/windows_version.cc b/chromium/base/win/windows_version.cc
index eefb92323a3..1d3721944f4 100644
--- a/chromium/base/win/windows_version.cc
+++ b/chromium/base/win/windows_version.cc
@@ -23,15 +23,10 @@
#error VS 2017 Update 3.2 or higher is required
#endif
-#if !defined(NTDDI_WIN10_RS5)
-// Windows 10 October 2018 SDK is required to build Chrome.
-#error October 2018 SDK (10.0.17763.0) or higher required.
+#if !defined(NTDDI_WIN10_19H1)
+#error Windows 10.0.18362.0 SDK or higher required.
#endif
-namespace {
-typedef BOOL(WINAPI* GetProductInfoPtr)(DWORD, DWORD, DWORD, DWORD, PDWORD);
-} // namespace
-
namespace base {
namespace win {
@@ -80,14 +75,8 @@ OSInfo** OSInfo::GetInstanceStorage() {
::GetVersionEx(reinterpret_cast<_OSVERSIONINFOW*>(&version_info));
DWORD os_type = 0;
- if (version_info.dwMajorVersion == 6 || version_info.dwMajorVersion == 10) {
- // Only present on Vista+.
- GetProductInfoPtr get_product_info =
- reinterpret_cast<GetProductInfoPtr>(::GetProcAddress(
- ::GetModuleHandle(L"kernel32.dll"), "GetProductInfo"));
- get_product_info(version_info.dwMajorVersion, version_info.dwMinorVersion,
- 0, 0, &os_type);
- }
+ ::GetProductInfo(version_info.dwMajorVersion, version_info.dwMinorVersion,
+ 0, 0, &os_type);
return new OSInfo(version_info, GetSystemInfoStorage(), os_type);
}();
@@ -253,13 +242,8 @@ std::string OSInfo::processor_model_name() {
// static
OSInfo::WOW64Status OSInfo::GetWOW64StatusForProcess(HANDLE process_handle) {
- typedef BOOL(WINAPI * IsWow64ProcessFunc)(HANDLE, PBOOL);
- IsWow64ProcessFunc is_wow64_process = reinterpret_cast<IsWow64ProcessFunc>(
- GetProcAddress(GetModuleHandle(L"kernel32.dll"), "IsWow64Process"));
- if (!is_wow64_process)
- return WOW64_DISABLED;
BOOL is_wow64 = FALSE;
- if (!(*is_wow64_process)(process_handle, &is_wow64))
+ if (!::IsWow64Process(process_handle, &is_wow64))
return WOW64_UNKNOWN;
return is_wow64 ? WOW64_ENABLED : WOW64_DISABLED;
}
diff --git a/chromium/base/win/windowsx_shim.h b/chromium/base/win/windowsx_shim.h
index 5b65078e0a4..e47927a8b69 100644
--- a/chromium/base/win/windowsx_shim.h
+++ b/chromium/base/win/windowsx_shim.h
@@ -18,6 +18,7 @@
#include <windowsx.h>
#undef GetNextSibling // Same as GetWindow(hwnd, GW_HWNDNEXT)
+#undef GetFirstChild // Same as GetTopWindow(hwnd)
#undef IsMaximized // Defined to IsZoomed, use IsZoomed directly instead
#undef IsMinimized // Defined to IsIconic, use IsIconic directly instead
#undef IsRestored // Macro to check that neither WS_MINIMIZE, nor
diff --git a/chromium/base/win/winrt_foundation_helpers.h b/chromium/base/win/winrt_foundation_helpers.h
new file mode 100644
index 00000000000..819ae1b67f4
--- /dev/null
+++ b/chromium/base/win/winrt_foundation_helpers.h
@@ -0,0 +1,151 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_WINRT_FOUNDATION_HELPERS_H_
+#define BASE_WIN_WINRT_FOUNDATION_HELPERS_H_
+
+#include <windows.foundation.h>
+#include <wrl/client.h>
+
+#include <vector>
+
+#include "base/optional.h"
+#include "base/win/hstring_compare.h"
+
+// This file provides helpers for WinRT types.
+
+namespace base {
+namespace win {
+namespace internal {
+
+// Template tricks needed to dispatch to the correct implementation.
+//
+// For all types which are neither InterfaceGroups nor RuntimeClasses, the
+// following three typedefs are synonyms for a single C++ type. But for
+// InterfaceGroups and RuntimeClasses, they are different types:
+// LogicalT: The C++ Type for the InterfaceGroup or RuntimeClass, when
+// used as a template parameter. Eg "RCFoo*"
+// AbiT: The C++ type for the default interface used to represent the
+// InterfaceGroup or RuntimeClass when passed as a method parameter.
+// Eg "IFoo*"
+// ComplexT: An instantiation of the Internal "AggregateType" template that
+// combines LogicalT with AbiT. Eg "AggregateType<RCFoo*,IFoo*>".
+// ComplexT is tightly coupled to the interface being implemented,
+// hence defined in headers which include this file.
+// For instance base/win/async_operation.h or
+// base/win/collection_helpers.h
+//
+// windows.foundation.collections.h defines the following template and
+// semantics in Windows::Foundation::Internal:
+//
+// template <class LogicalType, class AbiType>
+// struct AggregateType;
+//
+// LogicalType - the Windows Runtime type (eg, runtime class, interface group,
+// etc) being provided as an argument to an _impl template, when
+// that type cannot be represented at the ABI.
+// AbiType - the type used for marshalling, ie "at the ABI", for the
+// logical type.
+template <typename TComplex>
+using AbiType =
+ typename ABI::Windows::Foundation::Internal::GetAbiType<TComplex>::type;
+
+template <typename TComplex>
+using LogicalType =
+ typename ABI::Windows::Foundation::Internal::GetLogicalType<TComplex>::type;
+
+// Compile time switch to decide what container to use for |TComplex|.
+// Depends on whether the underlying Abi type is a pointer to IUnknown or not.
+// It queries the internals of Windows::Foundation to obtain this information.
+template <typename TComplex>
+using StorageType = std::conditional_t<
+ std::is_convertible<AbiType<TComplex>, IUnknown*>::value,
+ Microsoft::WRL::ComPtr<std::remove_pointer_t<AbiType<TComplex>>>,
+ AbiType<TComplex>>;
+
+// Similar to StorageType, but returns a base::Optional in case underlying Abi
+// type is not a pointer to IUnknown.
+template <typename TComplex>
+using OptionalStorageType = std::conditional_t<
+ std::is_convertible<AbiType<TComplex>, IUnknown*>::value,
+ Microsoft::WRL::ComPtr<std::remove_pointer_t<AbiType<TComplex>>>,
+ base::Optional<AbiType<TComplex>>>;
+
+template <typename T>
+HRESULT CopyTo(const T& value, T* ptr) {
+ *ptr = value;
+ return S_OK;
+}
+
+template <typename T>
+HRESULT CopyTo(const Microsoft::WRL::ComPtr<T>& value, T** ptr) {
+ return value.CopyTo(ptr);
+}
+
+template <typename T>
+HRESULT CopyTo(const base::Optional<T>& value, T* ptr) {
+ *ptr = *value;
+ return S_OK;
+}
+
+template <typename T>
+HRESULT CopyN(typename std::vector<T>::const_iterator first,
+ unsigned count,
+ T* result) {
+ std::copy_n(first, count, result);
+ return S_OK;
+}
+
+template <typename T>
+HRESULT CopyN(
+ typename std::vector<Microsoft::WRL::ComPtr<T>>::const_iterator first,
+ unsigned count,
+ T** result) {
+ for (unsigned i = 0; i < count; ++i)
+ CopyTo(*first++, result++);
+ return S_OK;
+}
+
+inline bool IsEqual(const HSTRING& lhs, const HSTRING& rhs) {
+ INT32 result;
+ HRESULT hr = HStringCompare(lhs, rhs, &result);
+ DCHECK(SUCCEEDED(hr));
+ return result == 0;
+}
+
+template <typename T>
+bool IsEqual(const T& lhs, const T& rhs) {
+ return lhs == rhs;
+}
+
+template <typename T>
+bool IsEqual(const Microsoft::WRL::ComPtr<T>& com_ptr, const T* ptr) {
+ return com_ptr.Get() == ptr;
+}
+
+struct Less {
+ bool operator()(const HSTRING& lhs, const HSTRING& rhs) const {
+ INT32 result;
+ HRESULT hr = HStringCompare(lhs, rhs, &result);
+ DCHECK(SUCCEEDED(hr));
+ return result < 0;
+ }
+
+ template <typename T>
+ bool operator()(const Microsoft::WRL::ComPtr<T>& com_ptr,
+ const T* ptr) const {
+ return com_ptr.Get() < ptr;
+ }
+
+ template <typename T>
+ constexpr bool operator()(const T& lhs, const T& rhs) const {
+ return lhs < rhs;
+ }
+};
+
+} // namespace internal
+} // namespace win
+} // namespace base
+
+#endif // BASE_WIN_WINRT_FOUNDATION_HELPERS_H_
diff --git a/chromium/base/win/wmi.cc b/chromium/base/win/wmi.cc
index ef9eb097f4a..4d5e094c3c4 100644
--- a/chromium/base/win/wmi.cc
+++ b/chromium/base/win/wmi.cc
@@ -10,8 +10,10 @@
#include <stdint.h>
#include <utility>
+#include "base/location.h"
#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
+#include "base/threading/scoped_thread_priority.h"
#include "base/win/scoped_bstr.h"
#include "base/win/scoped_variant.h"
@@ -22,6 +24,10 @@ namespace win {
bool CreateLocalWmiConnection(bool set_blanket,
ComPtr<IWbemServices>* wmi_services) {
+ // Mitigate the issues caused by loading DLLs on a background thread
+ // (http://crbug/973868).
+ base::ScopedThreadMayLoadLibraryOnBackgroundThread priority_boost(FROM_HERE);
+
ComPtr<IWbemLocator> wmi_locator;
HRESULT hr =
::CoCreateInstance(CLSID_WbemLocator, nullptr, CLSCTX_INPROC_SERVER,