author    Allan Sandfeld Jensen <allan.jensen@qt.io>  2017-03-08 10:28:10 +0100
committer Allan Sandfeld Jensen <allan.jensen@qt.io>  2017-03-20 13:40:30 +0000
commit    e733310db58160074f574c429d48f8308c0afe17 (patch)
tree      f8aef4b7e62a69928dbcf880620eece20f98c6df /chromium/base
parent    2f583e4aec1ae3a86fa047829c96b310dc12ecdf (diff)
download  qtwebengine-chromium-e733310db58160074f574c429d48f8308c0afe17.tar.gz
BASELINE: Update Chromium to 56.0.2924.122
Change-Id: I4e04de8f47e47e501c46ed934c76a431c6337ced
Reviewed-by: Michael Brüning <michael.bruning@qt.io>
Diffstat (limited to 'chromium/base')
-rw-r--r--  chromium/base/BUILD.gn | 196
-rw-r--r--  chromium/base/PRESUBMIT.py | 2
-rw-r--r--  chromium/base/allocator/BUILD.gn | 7
-rw-r--r--  chromium/base/allocator/README.md | 4
-rw-r--r--  chromium/base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc | 34
-rw-r--r--  chromium/base/android/linker/config.gni | 3
-rw-r--r--  chromium/base/bind.h | 7
-rw-r--r--  chromium/base/bind_internal.h | 10
-rw-r--r--  chromium/base/bind_unittest.cc | 671
-rw-r--r--  chromium/base/bind_unittest.nc | 4
-rw-r--r--  chromium/base/callback_forward.h | 3
-rw-r--r--  chromium/base/chromeos/logging.h | 28
-rw-r--r--  chromium/base/compiler_specific.h | 16
-rw-r--r--  chromium/base/containers/scoped_ptr_hash_map.h | 6
-rw-r--r--  chromium/base/cpu.cc | 3
-rw-r--r--  chromium/base/cpu.h | 2
-rw-r--r--  chromium/base/cpu_unittest.cc | 10
-rw-r--r--  chromium/base/debug/activity_analyzer.cc | 6
-rw-r--r--  chromium/base/debug/activity_analyzer.h | 12
-rw-r--r--  chromium/base/debug/activity_analyzer_unittest.cc | 17
-rw-r--r--  chromium/base/debug/activity_tracker.cc | 446
-rw-r--r--  chromium/base/debug/activity_tracker.h | 371
-rw-r--r--  chromium/base/debug/activity_tracker_unittest.cc | 71
-rw-r--r--  chromium/base/debug/crash_logging.cc | 5
-rw-r--r--  chromium/base/debug/debugger_posix.cc | 2
-rw-r--r--  chromium/base/debug/gdi_debug_util_win.cc | 48
-rw-r--r--  chromium/base/debug/gdi_debug_util_win.h | 10
-rw-r--r--  chromium/base/debug/scoped_thread_heap_usage.h | 103
-rw-r--r--  chromium/base/debug/stack_trace.cc | 23
-rw-r--r--  chromium/base/debug/stack_trace.h | 52
-rw-r--r--  chromium/base/debug/task_annotator.cc | 24
-rw-r--r--  chromium/base/debug/task_annotator.h | 2
-rw-r--r--  chromium/base/debug/task_annotator_unittest.cc | 2
-rw-r--r--  chromium/base/debug/thread_heap_usage_tracker.cc (renamed from chromium/base/debug/scoped_thread_heap_usage.cc) | 141
-rw-r--r--  chromium/base/debug/thread_heap_usage_tracker.h | 117
-rw-r--r--  chromium/base/debug/thread_heap_usage_tracker_unittest.cc (renamed from chromium/base/debug/scoped_thread_heap_usage_unittest.cc) | 267
-rw-r--r--  chromium/base/feature_list.h | 2
-rw-r--r--  chromium/base/files/file_path.cc | 6
-rw-r--r--  chromium/base/files/file_path_watcher.cc | 3
-rw-r--r--  chromium/base/files/file_path_watcher.h | 7
-rw-r--r--  chromium/base/files/file_path_watcher_linux.cc | 1
-rw-r--r--  chromium/base/files/file_path_watcher_mac.cc | 1
-rw-r--r--  chromium/base/files/file_path_watcher_stub.cc | 1
-rw-r--r--  chromium/base/files/file_path_watcher_unittest.cc | 93
-rw-r--r--  chromium/base/files/file_path_watcher_win.cc | 1
-rw-r--r--  chromium/base/files/file_posix.cc | 2
-rw-r--r--  chromium/base/files/file_proxy.h | 4
-rw-r--r--  chromium/base/files/file_util.h | 11
-rw-r--r--  chromium/base/files/file_util_posix.cc | 38
-rw-r--r--  chromium/base/files/file_win.cc | 2
-rw-r--r--  chromium/base/files/important_file_writer.h | 1
-rw-r--r--  chromium/base/i18n/icu_util.h | 8
-rw-r--r--  chromium/base/i18n/number_formatting_unittest.cc | 18
-rw-r--r--  chromium/base/ios/OWNERS | 4
-rw-r--r--  chromium/base/ios/ios_util.h | 3
-rw-r--r--  chromium/base/ios/ios_util.mm | 5
-rw-r--r--  chromium/base/json/correctness_fuzzer.cc | 57
-rw-r--r--  chromium/base/json/json_file_value_serializer.cc | 11
-rw-r--r--  chromium/base/json/json_file_value_serializer.h | 11
-rw-r--r--  chromium/base/json/json_parser.cc | 14
-rw-r--r--  chromium/base/json/json_parser.h | 4
-rw-r--r--  chromium/base/json/json_parser_unittest.cc | 18
-rw-r--r--  chromium/base/json/json_reader.h | 5
-rw-r--r--  chromium/base/json/json_string_value_serializer.cc | 13
-rw-r--r--  chromium/base/json/json_string_value_serializer.h | 13
-rw-r--r--  chromium/base/json/json_value_serializer_unittest.cc | 18
-rw-r--r--  chromium/base/lazy_instance.h | 2
-rw-r--r--  chromium/base/logging.cc | 27
-rw-r--r--  chromium/base/mac/bundle_locations.h | 1
-rw-r--r--  chromium/base/mac/mac_util.h | 6
-rw-r--r--  chromium/base/mac/mac_util.mm | 25
-rw-r--r--  chromium/base/mac/mac_util_unittest.mm | 8
-rw-r--r--  chromium/base/mac/mach_port_broker.mm | 7
-rw-r--r--  chromium/base/mac/scoped_ionotificationportref.h | 33
-rw-r--r--  chromium/base/mac/scoped_nsobject.h | 7
-rw-r--r--  chromium/base/mac/scoped_nsobject_unittest.mm | 6
-rw-r--r--  chromium/base/mac/sdk_forward_declarations.h | 179
-rw-r--r--  chromium/base/mac/sdk_forward_declarations.mm | 33
-rw-r--r--  chromium/base/memory/memory_coordinator_client.cc | 27
-rw-r--r--  chromium/base/memory/memory_coordinator_client.h | 9
-rw-r--r--  chromium/base/memory/memory_coordinator_proxy.cc | 41
-rw-r--r--  chromium/base/memory/memory_coordinator_proxy.h | 51
-rw-r--r--  chromium/base/memory/memory_pressure_listener.cc | 4
-rw-r--r--  chromium/base/memory/memory_pressure_monitor.cc | 40
-rw-r--r--  chromium/base/memory/memory_pressure_monitor.h | 3
-rw-r--r--  chromium/base/memory/memory_pressure_monitor_chromeos.cc | 8
-rw-r--r--  chromium/base/memory/memory_pressure_monitor_chromeos.h | 7
-rw-r--r--  chromium/base/memory/memory_pressure_monitor_mac.cc | 55
-rw-r--r--  chromium/base/memory/memory_pressure_monitor_mac.h | 15
-rw-r--r--  chromium/base/memory/memory_pressure_monitor_unittest.cc | 33
-rw-r--r--  chromium/base/memory/memory_pressure_monitor_win.cc | 32
-rw-r--r--  chromium/base/memory/ref_counted.cc | 10
-rw-r--r--  chromium/base/memory/ref_counted.h | 57
-rw-r--r--  chromium/base/memory/ref_counted_unittest.cc | 35
-rw-r--r--  chromium/base/memory/scoped_vector.h | 16
-rw-r--r--  chromium/base/memory/shared_memory.h | 19
-rw-r--r--  chromium/base/memory/shared_memory_handle.h | 2
-rw-r--r--  chromium/base/memory/shared_memory_mac.cc | 11
-rw-r--r--  chromium/base/memory/shared_memory_nacl.cc | 15
-rw-r--r--  chromium/base/memory/shared_memory_posix.cc | 15
-rw-r--r--  chromium/base/memory/shared_memory_win.cc | 42
-rw-r--r--  chromium/base/message_loop/incoming_task_queue.h | 1
-rw-r--r--  chromium/base/message_loop/message_loop.cc | 40
-rw-r--r--  chromium/base/message_loop/message_loop.h | 23
-rw-r--r--  chromium/base/message_loop/message_pump.cc | 7
-rw-r--r--  chromium/base/message_loop/message_pump.h | 9
-rw-r--r--  chromium/base/message_loop/message_pump_io_ios.h | 3
-rw-r--r--  chromium/base/message_loop/message_pump_libevent.cc | 35
-rw-r--r--  chromium/base/message_loop/message_pump_libevent.h | 10
-rw-r--r--  chromium/base/message_loop/message_pump_mac.mm | 32
-rw-r--r--  chromium/base/message_loop/message_pump_perftest.cc | 2
-rw-r--r--  chromium/base/message_loop/message_pump_win.cc | 184
-rw-r--r--  chromium/base/message_loop/message_pump_win.h | 21
-rw-r--r--  chromium/base/metrics/field_trial.cc | 375
-rw-r--r--  chromium/base/metrics/field_trial.h | 88
-rw-r--r--  chromium/base/metrics/field_trial_param_associator.cc | 55
-rw-r--r--  chromium/base/metrics/field_trial_param_associator.h | 57
-rw-r--r--  chromium/base/metrics/field_trial_unittest.cc | 62
-rw-r--r--  chromium/base/metrics/histogram.h | 1
-rw-r--r--  chromium/base/metrics/histogram_base.h | 1
-rw-r--r--  chromium/base/metrics/histogram_functions.cc | 99
-rw-r--r--  chromium/base/metrics/histogram_functions.h | 100
-rw-r--r--  chromium/base/metrics/histogram_functions_unittest.cc | 109
-rw-r--r--  chromium/base/metrics/histogram_macros.h | 25
-rw-r--r--  chromium/base/metrics/histogram_macros_internal.h | 26
-rw-r--r--  chromium/base/metrics/histogram_unittest.nc | 22
-rw-r--r--  chromium/base/metrics/persistent_histogram_allocator.cc | 11
-rw-r--r--  chromium/base/metrics/persistent_histogram_allocator.h | 1
-rw-r--r--  chromium/base/metrics/persistent_memory_allocator.cc | 22
-rw-r--r--  chromium/base/metrics/persistent_memory_allocator.h | 29
-rw-r--r--  chromium/base/metrics/persistent_memory_allocator_unittest.cc | 21
-rw-r--r--  chromium/base/metrics/persistent_sample_map.h | 1
-rw-r--r--  chromium/base/metrics/sparse_histogram.h | 34
-rw-r--r--  chromium/base/metrics/sparse_histogram_unittest.cc | 1
-rw-r--r--  chromium/base/metrics/statistics_recorder.h | 2
-rw-r--r--  chromium/base/numerics/safe_conversions.h | 33
-rw-r--r--  chromium/base/numerics/safe_math.h | 168
-rw-r--r--  chromium/base/numerics/safe_math_impl.h | 605
-rw-r--r--  chromium/base/numerics/safe_numerics_unittest.cc | 79
-rw-r--r--  chromium/base/observer_list.h | 170
-rw-r--r--  chromium/base/observer_list_threadsafe.h | 64
-rw-r--r--  chromium/base/observer_list_unittest.cc | 450
-rw-r--r--  chromium/base/optional.h | 62
-rw-r--r--  chromium/base/optional_unittest.cc | 58
-rw-r--r--  chromium/base/pending_task.cc | 10
-rw-r--r--  chromium/base/pending_task.h | 7
-rw-r--r--  chromium/base/process/launch.h | 6
-rw-r--r--  chromium/base/process/launch_mac.cc | 153
-rw-r--r--  chromium/base/process/launch_posix.cc | 16
-rw-r--r--  chromium/base/process/memory_mac.mm | 1
-rw-r--r--  chromium/base/process/port_provider_mac.cc | 3
-rw-r--r--  chromium/base/process/process.h | 33
-rw-r--r--  chromium/base/process/process_mac.cc | 90
-rw-r--r--  chromium/base/process/process_posix.cc | 14
-rw-r--r--  chromium/base/process/process_unittest.cc | 39
-rw-r--r--  chromium/base/profiler/stack_sampling_profiler.cc | 29
-rw-r--r--  chromium/base/profiler/stack_sampling_profiler.h | 18
-rw-r--r--  chromium/base/profiler/stack_sampling_profiler_unittest.cc | 9
-rw-r--r--  chromium/base/stl_util.h | 29
-rw-r--r--  chromium/base/strings/string_number_conversions_unittest.cc | 5
-rw-r--r--  chromium/base/synchronization/lock_impl_win.cc | 3
-rw-r--r--  chromium/base/synchronization/waitable_event_watcher.h | 1
-rw-r--r--  chromium/base/syslog_logging.cc | 10
-rw-r--r--  chromium/base/task_scheduler/delayed_task_manager.cc | 135
-rw-r--r--  chromium/base/task_scheduler/delayed_task_manager.h | 61
-rw-r--r--  chromium/base/task_scheduler/delayed_task_manager_unittest.cc | 209
-rw-r--r--  chromium/base/task_scheduler/initialization_util.cc | 22
-rw-r--r--  chromium/base/task_scheduler/initialization_util.h | 21
-rw-r--r--  chromium/base/task_scheduler/post_task.cc | 25
-rw-r--r--  chromium/base/task_scheduler/post_task.h | 42
-rw-r--r--  chromium/base/task_scheduler/scheduler_service_thread.cc | 101
-rw-r--r--  chromium/base/task_scheduler/scheduler_service_thread.h | 50
-rw-r--r--  chromium/base/task_scheduler/scheduler_service_thread_unittest.cc | 188
-rw-r--r--  chromium/base/task_scheduler/scheduler_worker.cc | 51
-rw-r--r--  chromium/base/task_scheduler/scheduler_worker.h | 33
-rw-r--r--  chromium/base/task_scheduler/scheduler_worker_pool.h | 22
-rw-r--r--  chromium/base/task_scheduler/scheduler_worker_pool_impl.cc | 225
-rw-r--r--  chromium/base/task_scheduler/scheduler_worker_pool_impl.h | 60
-rw-r--r--  chromium/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc | 330
-rw-r--r--  chromium/base/task_scheduler/scheduler_worker_pool_params.cc | 2
-rw-r--r--  chromium/base/task_scheduler/scheduler_worker_pool_params.h | 13
-rw-r--r--  chromium/base/task_scheduler/scheduler_worker_stack_unittest.cc | 9
-rw-r--r--  chromium/base/task_scheduler/scheduler_worker_unittest.cc | 113
-rw-r--r--  chromium/base/task_scheduler/sequence.cc | 45
-rw-r--r--  chromium/base/task_scheduler/sequence.h | 34
-rw-r--r--  chromium/base/task_scheduler/sequence_unittest.cc | 120
-rw-r--r--  chromium/base/task_scheduler/task.cc | 3
-rw-r--r--  chromium/base/task_scheduler/task.h | 3
-rw-r--r--  chromium/base/task_scheduler/task_scheduler.h | 24
-rw-r--r--  chromium/base/task_scheduler/task_scheduler_impl.cc | 103
-rw-r--r--  chromium/base/task_scheduler/task_scheduler_impl.h | 26
-rw-r--r--  chromium/base/task_scheduler/task_scheduler_impl_unittest.cc | 53
-rw-r--r--  chromium/base/task_scheduler/task_tracker.cc | 33
-rw-r--r--  chromium/base/task_scheduler/task_tracker.h | 7
-rw-r--r--  chromium/base/task_scheduler/task_tracker_posix.cc | 30
-rw-r--r--  chromium/base/task_scheduler/task_tracker_posix.h | 43
-rw-r--r--  chromium/base/task_scheduler/task_tracker_posix_unittest.cc | 65
-rw-r--r--  chromium/base/task_scheduler/task_tracker_unittest.cc | 166
-rw-r--r--  chromium/base/task_scheduler/task_traits.cc | 19
-rw-r--r--  chromium/base/task_scheduler/task_traits.h | 16
-rw-r--r--  chromium/base/task_scheduler/test_task_factory.h | 4
-rw-r--r--  chromium/base/task_scheduler/test_utils.h | 20
-rw-r--r--  chromium/base/test/BUILD.gn | 2
-rw-r--r--  chromium/base/third_party/dmg_fp/dtoa.cc | 6
-rw-r--r--  chromium/base/third_party/dmg_fp/exp_length.patch | 11
-rw-r--r--  chromium/base/third_party/libevent/http-internal.h | 1
-rw-r--r--  chromium/base/threading/platform_thread_linux.cc | 53
-rw-r--r--  chromium/base/threading/sequenced_worker_pool.cc | 29
-rw-r--r--  chromium/base/threading/sequenced_worker_pool.h | 14
-rw-r--r--  chromium/base/threading/sequenced_worker_pool_unittest.cc | 5
-rw-r--r--  chromium/base/threading/thread.h | 8
-rw-r--r--  chromium/base/threading/thread_local.h | 93
-rw-r--r--  chromium/base/threading/thread_local_android.cc | 31
-rw-r--r--  chromium/base/threading/thread_local_posix.cc | 43
-rw-r--r--  chromium/base/threading/thread_local_storage.cc | 247
-rw-r--r--  chromium/base/threading/thread_local_storage.h | 12
-rw-r--r--  chromium/base/threading/thread_local_storage_unittest.cc | 10
-rw-r--r--  chromium/base/threading/thread_local_win.cc | 40
-rw-r--r--  chromium/base/threading/thread_unittest.cc | 9
-rw-r--r--  chromium/base/threading/worker_pool.h | 2
-rw-r--r--  chromium/base/threading/worker_pool_posix.cc | 2
-rw-r--r--  chromium/base/threading/worker_pool_posix.h | 2
-rw-r--r--  chromium/base/threading/worker_pool_win.cc | 2
-rw-r--r--  chromium/base/time/time.cc | 28
-rw-r--r--  chromium/base/time/time.h | 21
-rw-r--r--  chromium/base/time/time_mac.cc | 15
-rw-r--r--  chromium/base/time/time_posix.cc | 24
-rw-r--r--  chromium/base/time/time_unittest.cc | 5
-rw-r--r--  chromium/base/time/time_win.cc | 112
-rw-r--r--  chromium/base/timer/timer.cc | 40
-rw-r--r--  chromium/base/timer/timer.h | 45
-rw-r--r--  chromium/base/timer/timer_unittest.cc | 68
-rw-r--r--  chromium/base/trace_event/auto_open_close_event.cc | 52
-rw-r--r--  chromium/base/trace_event/auto_open_close_event.h | 57
-rw-r--r--  chromium/base/trace_event/category_registry.cc | 162
-rw-r--r--  chromium/base/trace_event/category_registry.h | 83
-rw-r--r--  chromium/base/trace_event/common/trace_event_common.h | 61
-rw-r--r--  chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc | 4
-rw-r--r--  chromium/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc | 18
-rw-r--r--  chromium/base/trace_event/malloc_dump_provider.cc | 31
-rw-r--r--  chromium/base/trace_event/memory_allocator_dump.h | 1
-rw-r--r--  chromium/base/trace_event/memory_dump_manager.cc | 25
-rw-r--r--  chromium/base/trace_event/memory_dump_manager_unittest.cc | 15
-rw-r--r--  chromium/base/trace_event/memory_infra_background_whitelist.cc | 8
-rw-r--r--  chromium/base/trace_event/memory_usage_estimator.cc | 14
-rw-r--r--  chromium/base/trace_event/memory_usage_estimator.h | 418
-rw-r--r--  chromium/base/trace_event/memory_usage_estimator_unittest.cc | 229
-rw-r--r--  chromium/base/trace_event/process_memory_dump.h | 1
-rw-r--r--  chromium/base/trace_event/trace_category.h | 109
-rw-r--r--  chromium/base/trace_event/trace_category_unittest.cc | 137
-rw-r--r--  chromium/base/trace_event/trace_config.cc | 57
-rw-r--r--  chromium/base/trace_event/trace_config.h | 21
-rw-r--r--  chromium/base/trace_event/trace_config_memory_test_util.h | 4
-rw-r--r--  chromium/base/trace_event/trace_config_unittest.cc | 60
-rw-r--r--  chromium/base/trace_event/trace_event.h | 116
-rw-r--r--  chromium/base/trace_event/trace_event_impl.cc | 42
-rw-r--r--  chromium/base/trace_event/trace_event_impl.h | 4
-rw-r--r--  chromium/base/trace_event/trace_event_synthetic_delay.h | 3
-rw-r--r--  chromium/base/trace_event/trace_event_unittest.cc | 575
-rw-r--r--  chromium/base/trace_event/trace_log.cc | 464
-rw-r--r--  chromium/base/trace_event/trace_log.h | 125
-rw-r--r--  chromium/base/trace_event/trace_log_constants.cc | 3
-rw-r--r--  chromium/base/trace_event/trace_sampling_thread.cc | 107
-rw-r--r--  chromium/base/trace_event/trace_sampling_thread.h | 54
-rw-r--r--  chromium/base/unguessable_token.h | 2
-rw-r--r--  chromium/base/values.cc | 23
-rw-r--r--  chromium/base/values.h | 4
-rw-r--r--  chromium/base/win/BUILD.gn | 27
-rw-r--r--  chromium/base/win/eventlog_messages.mc | 32
-rw-r--r--  chromium/base/win/eventlog_provider.cc | 9
-rw-r--r--  chromium/base/win/scoped_hdc.h | 4
-rw-r--r--  chromium/base/win/windows_version.cc | 26
-rw-r--r--  chromium/base/win/windows_version.h | 1
273 files changed, 9247 insertions(+), 5102 deletions(-)
diff --git a/chromium/base/BUILD.gn b/chromium/base/BUILD.gn
index 8f5be418e34..3754be5f383 100644
--- a/chromium/base/BUILD.gn
+++ b/chromium/base/BUILD.gn
@@ -27,6 +27,7 @@ import("//build/config/nacl/config.gni")
import("//build/config/sysroot.gni")
import("//build/config/ui.gni")
import("//build/nocompile.gni")
+import("//testing/libfuzzer/fuzzer_test.gni")
import("//testing/test.gni")
declare_args() {
@@ -40,10 +41,6 @@ if (is_android) {
import("//build/config/android/rules.gni")
}
-if (is_win) {
- import("//build/config/win/visual_studio_version.gni")
-}
-
config("base_flags") {
if (is_clang) {
cflags = [
@@ -217,6 +214,8 @@ component("base") {
"android/sys_utils.h",
"android/trace_event_binding.cc",
"android/trace_event_binding.h",
+ "android/unguessable_token_android.cc",
+ "android/unguessable_token_android.h",
"at_exit.cc",
"at_exit.h",
"atomic_ref_count.h",
@@ -263,6 +262,13 @@ component("base") {
"cpu.h",
"critical_closure.h",
"critical_closure_internal_ios.mm",
+
+ # This file depends on files from the "debug/allocator" target,
+ # but this target does not depend on "debug/allocator".
+ "debug/activity_analyzer.cc",
+ "debug/activity_analyzer.h",
+ "debug/activity_tracker.cc",
+ "debug/activity_tracker.h",
"debug/alias.cc",
"debug/alias.h",
"debug/asan_invalid_access.cc",
@@ -279,22 +285,12 @@ component("base") {
"debug/dump_without_crashing.h",
"debug/gdi_debug_util_win.cc",
"debug/gdi_debug_util_win.h",
-
- # This file depends on files from the "debug/allocator" target,
- # but this target does not depend on "debug/allocator" (see
- # allocator.gyp for details).
- "debug/activity_analyzer.cc",
- "debug/activity_analyzer.h",
- "debug/activity_tracker.cc",
- "debug/activity_tracker.h",
"debug/leak_annotations.h",
"debug/leak_tracker.h",
"debug/proc_maps_linux.cc",
"debug/proc_maps_linux.h",
"debug/profiler.cc",
"debug/profiler.h",
- "debug/scoped_thread_heap_usage.cc",
- "debug/scoped_thread_heap_usage.h",
"debug/stack_trace.cc",
"debug/stack_trace.h",
"debug/stack_trace_android.cc",
@@ -302,6 +298,8 @@ component("base") {
"debug/stack_trace_win.cc",
"debug/task_annotator.cc",
"debug/task_annotator.h",
+ "debug/thread_heap_usage_tracker.cc",
+ "debug/thread_heap_usage_tracker.h",
"deferred_sequenced_task_runner.cc",
"deferred_sequenced_task_runner.h",
"environment.cc",
@@ -368,6 +366,7 @@ component("base") {
"hash.cc",
"hash.h",
"id_map.h",
+ "ios/block_types.h",
"ios/crb_protocol_observers.h",
"ios/crb_protocol_observers.mm",
"ios/device_util.h",
@@ -441,6 +440,7 @@ component("base") {
"mac/scoped_block.h",
"mac/scoped_cftyperef.h",
"mac/scoped_dispatch_object.h",
+ "mac/scoped_ionotificationportref.h",
"mac/scoped_ioobject.h",
"mac/scoped_ioplugininterface.h",
"mac/scoped_launch_data.h",
@@ -472,9 +472,12 @@ component("base") {
"memory/free_deleter.h",
"memory/linked_ptr.h",
"memory/manual_constructor.h",
+ "memory/memory_coordinator_client.cc",
"memory/memory_coordinator_client.h",
"memory/memory_coordinator_client_registry.cc",
"memory/memory_coordinator_client_registry.h",
+ "memory/memory_coordinator_proxy.cc",
+ "memory/memory_coordinator_proxy.h",
"memory/memory_pressure_listener.cc",
"memory/memory_pressure_listener.h",
"memory/memory_pressure_monitor.cc",
@@ -533,6 +536,8 @@ component("base") {
"metrics/bucket_ranges.h",
"metrics/field_trial.cc",
"metrics/field_trial.h",
+ "metrics/field_trial_param_associator.cc",
+ "metrics/field_trial_param_associator.h",
"metrics/histogram.cc",
"metrics/histogram.h",
"metrics/histogram_base.cc",
@@ -540,6 +545,8 @@ component("base") {
"metrics/histogram_delta_serialization.cc",
"metrics/histogram_delta_serialization.h",
"metrics/histogram_flattener.h",
+ "metrics/histogram_functions.cc",
+ "metrics/histogram_functions.h",
"metrics/histogram_macros.h",
"metrics/histogram_samples.cc",
"metrics/histogram_samples.h",
@@ -650,6 +657,7 @@ component("base") {
#"process/process_iterator_openbsd.cc", # Unused in Chromium build.
"process/process_iterator_win.cc",
"process/process_linux.cc",
+ "process/process_mac.cc",
"process/process_metrics.cc",
"process/process_metrics.h",
@@ -785,6 +793,8 @@ component("base") {
"task_runner_util.h",
"task_scheduler/delayed_task_manager.cc",
"task_scheduler/delayed_task_manager.h",
+ "task_scheduler/initialization_util.cc",
+ "task_scheduler/initialization_util.h",
"task_scheduler/post_task.cc",
"task_scheduler/post_task.h",
"task_scheduler/priority_queue.cc",
@@ -792,8 +802,6 @@ component("base") {
"task_scheduler/scheduler_lock.h",
"task_scheduler/scheduler_lock_impl.cc",
"task_scheduler/scheduler_lock_impl.h",
- "task_scheduler/scheduler_service_thread.cc",
- "task_scheduler/scheduler_service_thread.h",
"task_scheduler/scheduler_worker.cc",
"task_scheduler/scheduler_worker.h",
"task_scheduler/scheduler_worker_pool.h",
@@ -815,6 +823,8 @@ component("base") {
"task_scheduler/task_scheduler_impl.h",
"task_scheduler/task_tracker.cc",
"task_scheduler/task_tracker.h",
+ "task_scheduler/task_tracker_posix.cc",
+ "task_scheduler/task_tracker_posix.h",
"task_scheduler/task_traits.cc",
"task_scheduler/task_traits.h",
"template_util.h",
@@ -855,13 +865,10 @@ component("base") {
"threading/thread_id_name_manager.cc",
"threading/thread_id_name_manager.h",
"threading/thread_local.h",
- "threading/thread_local_android.cc",
- "threading/thread_local_posix.cc",
"threading/thread_local_storage.cc",
"threading/thread_local_storage.h",
"threading/thread_local_storage_posix.cc",
"threading/thread_local_storage_win.cc",
- "threading/thread_local_win.cc",
"threading/thread_restrictions.cc",
"threading/thread_restrictions.h",
"threading/thread_task_runner_handle.cc",
@@ -895,8 +902,12 @@ component("base") {
"timer/mock_timer.h",
"timer/timer.cc",
"timer/timer.h",
+ "trace_event/auto_open_close_event.cc",
+ "trace_event/auto_open_close_event.h",
"trace_event/blame_context.cc",
"trace_event/blame_context.h",
+ "trace_event/category_registry.cc",
+ "trace_event/category_registry.h",
"trace_event/common/trace_event_common.h",
"trace_event/heap_profiler.h",
"trace_event/heap_profiler_allocation_context.cc",
@@ -930,6 +941,8 @@ component("base") {
"trace_event/memory_dump_session_state.h",
"trace_event/memory_infra_background_whitelist.cc",
"trace_event/memory_infra_background_whitelist.h",
+ "trace_event/memory_usage_estimator.cc",
+ "trace_event/memory_usage_estimator.h",
"trace_event/process_memory_dump.cc",
"trace_event/process_memory_dump.h",
"trace_event/process_memory_maps.cc",
@@ -938,6 +951,7 @@ component("base") {
"trace_event/process_memory_totals.h",
"trace_event/trace_buffer.cc",
"trace_event/trace_buffer.h",
+ "trace_event/trace_category.h",
"trace_event/trace_config.cc",
"trace_event/trace_config.h",
"trace_event/trace_event.h",
@@ -957,8 +971,6 @@ component("base") {
"trace_event/trace_log.cc",
"trace_event/trace_log.h",
"trace_event/trace_log_constants.cc",
- "trace_event/trace_sampling_thread.cc",
- "trace_event/trace_sampling_thread.h",
"trace_event/tracing_agent.cc",
"trace_event/tracing_agent.h",
"tracked_objects.cc",
@@ -1156,6 +1168,8 @@ component("base") {
"synchronization/read_write_lock_posix.cc",
"sys_info.cc",
"sys_info_posix.cc",
+ "task_scheduler/initialization_util.cc",
+ "task_scheduler/initialization_util.h",
"trace_event/trace_event_system_stats_monitor.cc",
]
@@ -1181,6 +1195,8 @@ component("base") {
"process/launch.h",
"process/launch_posix.cc",
"rand_util_posix.cc",
+ "task_scheduler/task_tracker_posix.cc",
+ "task_scheduler/task_tracker_posix.h",
]
}
} else {
@@ -1210,6 +1226,7 @@ component("base") {
deps += [
"//base/trace_event/etw_manifest:chrome_events_win",
"//base/win:base_win_features",
+ "//base/win:eventlog_messages",
]
if (is_component_build) {
@@ -1224,61 +1241,54 @@ component("base") {
# These runtime files are copied to the output directory by the
# vs_toolchain script that runs as part of toolchain configuration.
- if (visual_studio_version == "2015") {
- data += [
- "$root_out_dir/msvcp140${vcrt_suffix}.dll",
- "$root_out_dir/vccorlib140${vcrt_suffix}.dll",
- "$root_out_dir/vcruntime140${vcrt_suffix}.dll",
-
- # Universal Windows 10 CRT files
- "$root_out_dir/api-ms-win-core-console-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-datetime-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-debug-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-errorhandling-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-file-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-file-l1-2-0.dll",
- "$root_out_dir/api-ms-win-core-file-l2-1-0.dll",
- "$root_out_dir/api-ms-win-core-handle-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-heap-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-interlocked-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-libraryloader-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-localization-l1-2-0.dll",
- "$root_out_dir/api-ms-win-core-memory-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-namedpipe-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-processenvironment-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-processthreads-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-processthreads-l1-1-1.dll",
- "$root_out_dir/api-ms-win-core-profile-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-rtlsupport-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-string-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-synch-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-synch-l1-2-0.dll",
- "$root_out_dir/api-ms-win-core-sysinfo-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-timezone-l1-1-0.dll",
- "$root_out_dir/api-ms-win-core-util-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-conio-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-convert-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-environment-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-filesystem-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-heap-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-locale-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-math-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-multibyte-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-private-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-process-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-runtime-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-stdio-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-string-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-time-l1-1-0.dll",
- "$root_out_dir/api-ms-win-crt-utility-l1-1-0.dll",
- "$root_out_dir/ucrtbase${vcrt_suffix}.dll",
- ]
- } else {
- data += [
- "$root_out_dir/msvcp120${vcrt_suffix}.dll",
- "$root_out_dir/msvcr120${vcrt_suffix}.dll",
- ]
- }
+ data += [
+ "$root_out_dir/msvcp140${vcrt_suffix}.dll",
+ "$root_out_dir/vccorlib140${vcrt_suffix}.dll",
+ "$root_out_dir/vcruntime140${vcrt_suffix}.dll",
+
+ # Universal Windows 10 CRT files
+ "$root_out_dir/api-ms-win-core-console-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-datetime-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-debug-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-errorhandling-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-file-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-file-l1-2-0.dll",
+ "$root_out_dir/api-ms-win-core-file-l2-1-0.dll",
+ "$root_out_dir/api-ms-win-core-handle-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-heap-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-interlocked-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-libraryloader-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-localization-l1-2-0.dll",
+ "$root_out_dir/api-ms-win-core-memory-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-namedpipe-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-processenvironment-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-processthreads-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-processthreads-l1-1-1.dll",
+ "$root_out_dir/api-ms-win-core-profile-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-rtlsupport-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-string-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-synch-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-synch-l1-2-0.dll",
+ "$root_out_dir/api-ms-win-core-sysinfo-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-timezone-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-core-util-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-conio-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-convert-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-environment-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-filesystem-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-heap-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-locale-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-math-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-multibyte-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-private-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-process-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-runtime-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-stdio-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-string-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-time-l1-1-0.dll",
+ "$root_out_dir/api-ms-win-crt-utility-l1-1-0.dll",
+ "$root_out_dir/ucrtbase${vcrt_suffix}.dll",
+ ]
if (is_asan) {
if (current_cpu == "x64") {
data += [ "$clang_base_path/lib/clang/$clang_version/lib/windows/clang_rt.asan_dynamic-x86_64.dll" ]
@@ -1756,6 +1766,7 @@ test("base_unittests") {
"android/path_utils_unittest.cc",
"android/scoped_java_ref_unittest.cc",
"android/sys_utils_unittest.cc",
+ "android/unguessable_token_android_unittest.cc",
"at_exit_unittest.cc",
"atomicops_unittest.cc",
"barrier_closure_unittest.cc",
@@ -1786,9 +1797,9 @@ test("base_unittests") {
"debug/debugger_unittest.cc",
"debug/leak_tracker_unittest.cc",
"debug/proc_maps_linux_unittest.cc",
- "debug/scoped_thread_heap_usage_unittest.cc",
"debug/stack_trace_unittest.cc",
"debug/task_annotator_unittest.cc",
+ "debug/thread_heap_usage_tracker_unittest.cc",
"deferred_sequenced_task_runner_unittest.cc",
"environment_unittest.cc",
"feature_list_unittest.cc",
@@ -1848,6 +1859,7 @@ test("base_unittests") {
"memory/memory_pressure_listener_unittest.cc",
"memory/memory_pressure_monitor_chromeos_unittest.cc",
"memory/memory_pressure_monitor_mac_unittest.cc",
+ "memory/memory_pressure_monitor_unittest.cc",
"memory/memory_pressure_monitor_win_unittest.cc",
"memory/ptr_util_unittest.cc",
"memory/ref_counted_memory_unittest.cc",
@@ -1866,6 +1878,7 @@ test("base_unittests") {
"metrics/field_trial_unittest.cc",
"metrics/histogram_base_unittest.cc",
"metrics/histogram_delta_serialization_unittest.cc",
+ "metrics/histogram_functions_unittest.cc",
"metrics/histogram_macros_unittest.cc",
"metrics/histogram_snapshot_manager_unittest.cc",
"metrics/histogram_unittest.cc",
@@ -1938,19 +1951,21 @@ test("base_unittests") {
"task_scheduler/delayed_task_manager_unittest.cc",
"task_scheduler/priority_queue_unittest.cc",
"task_scheduler/scheduler_lock_unittest.cc",
- "task_scheduler/scheduler_service_thread_unittest.cc",
"task_scheduler/scheduler_worker_pool_impl_unittest.cc",
"task_scheduler/scheduler_worker_stack_unittest.cc",
"task_scheduler/scheduler_worker_unittest.cc",
"task_scheduler/sequence_sort_key_unittest.cc",
"task_scheduler/sequence_unittest.cc",
"task_scheduler/task_scheduler_impl_unittest.cc",
+ "task_scheduler/task_tracker_posix_unittest.cc",
"task_scheduler/task_tracker_unittest.cc",
"task_scheduler/task_unittest.cc",
"task_scheduler/test_task_factory.cc",
"task_scheduler/test_task_factory.h",
+ "task_scheduler/test_utils.h",
"template_util_unittest.cc",
"test/histogram_tester_unittest.cc",
+ "test/scoped_mock_time_message_loop_task_runner_unittest.cc",
"test/test_pending_task_unittest.cc",
"test/test_reg_util_win_unittest.cc",
"test/trace_event_analyzer_unittest.cc",
@@ -1986,7 +2001,9 @@ test("base_unittests") {
"trace_event/java_heap_dump_provider_android_unittest.cc",
"trace_event/memory_allocator_dump_unittest.cc",
"trace_event/memory_dump_manager_unittest.cc",
+ "trace_event/memory_usage_estimator_unittest.cc",
"trace_event/process_memory_dump_unittest.cc",
+ "trace_event/trace_category_unittest.cc",
"trace_event/trace_config_unittest.cc",
"trace_event/trace_event_argument_unittest.cc",
"trace_event/trace_event_synthetic_delay_unittest.cc",
@@ -2200,6 +2217,7 @@ if (enable_nocompile_tests) {
"callback_list_unittest.nc",
"callback_unittest.nc",
"memory/weak_ptr_unittest.nc",
+ "metrics/histogram_unittest.nc",
]
deps = [
@@ -2237,6 +2255,7 @@ if (is_android) {
"android/java/src/org/chromium/base/SystemMessageHandler.java",
"android/java/src/org/chromium/base/ThreadUtils.java",
"android/java/src/org/chromium/base/TraceEvent.java",
+ "android/java/src/org/chromium/base/UnguessableToken.java",
"android/java/src/org/chromium/base/library_loader/LibraryLoader.java",
"android/java/src/org/chromium/base/metrics/RecordHistogram.java",
"android/java/src/org/chromium/base/metrics/RecordUserAction.java",
@@ -2294,6 +2313,7 @@ if (is_android) {
"android/java/src/org/chromium/base/LocaleUtils.java",
"android/java/src/org/chromium/base/Log.java",
"android/java/src/org/chromium/base/MemoryPressureListener.java",
+ "android/java/src/org/chromium/base/NonThreadSafe.java",
"android/java/src/org/chromium/base/ObserverList.java",
"android/java/src/org/chromium/base/PackageUtils.java",
"android/java/src/org/chromium/base/PathService.java",
@@ -2309,6 +2329,7 @@ if (is_android) {
"android/java/src/org/chromium/base/SystemMessageHandler.java",
"android/java/src/org/chromium/base/ThreadUtils.java",
"android/java/src/org/chromium/base/TraceEvent.java",
+ "android/java/src/org/chromium/base/UnguessableToken.java",
"android/java/src/org/chromium/base/VisibleForTesting.java",
"android/java/src/org/chromium/base/annotations/AccessedByNative.java",
"android/java/src/org/chromium/base/annotations/CalledByNative.java",
@@ -2328,6 +2349,7 @@ if (is_android) {
"android/java/src/org/chromium/base/library_loader/ModernLinker.java",
"android/java/src/org/chromium/base/library_loader/NativeLibraryPreloader.java",
"android/java/src/org/chromium/base/library_loader/ProcessInitException.java",
+ "android/java/src/org/chromium/base/metrics/CachedMetrics.java",
"android/java/src/org/chromium/base/metrics/RecordHistogram.java",
"android/java/src/org/chromium/base/metrics/RecordUserAction.java",
"android/java/src/org/chromium/base/metrics/StatisticsRecorderAndroid.java",
@@ -2354,6 +2376,9 @@ if (is_android) {
"android/javatests/src/org/chromium/base/ApiCompatibilityUtilsTest.java",
"android/javatests/src/org/chromium/base/CommandLineInitUtilTest.java",
"android/javatests/src/org/chromium/base/CommandLineTest.java",
+
+ # TODO(nona): move to Junit once that is built for Android N.
+ "android/javatests/src/org/chromium/base/LocaleUtilsTest.java",
"android/javatests/src/org/chromium/base/ObserverListTest.java",
"android/javatests/src/org/chromium/base/metrics/RecordHistogramTest.java",
]
@@ -2364,14 +2389,19 @@ if (is_android) {
deps = [
":base_java",
"//testing/android/reporter:reporter_java",
+ "//third_party/android_support_test_runner:exposed_instrumentation_api_publish_java",
+ "//third_party/android_support_test_runner:runner_java",
"//third_party/hamcrest:hamcrest_core_java",
+ "//third_party/junit",
]
java_files = [
"test/android/javatests/src/org/chromium/base/test/BaseActivityInstrumentationTestCase.java",
"test/android/javatests/src/org/chromium/base/test/BaseChromiumInstrumentationTestRunner.java",
"test/android/javatests/src/org/chromium/base/test/BaseInstrumentationTestRunner.java",
+ "test/android/javatests/src/org/chromium/base/test/BaseJUnit4ClassRunner.java",
"test/android/javatests/src/org/chromium/base/test/BaseTestResult.java",
"test/android/javatests/src/org/chromium/base/test/util/AdvancedMockContext.java",
+ "test/android/javatests/src/org/chromium/base/test/util/CallbackHelper.java",
"test/android/javatests/src/org/chromium/base/test/util/CommandLineFlags.java",
"test/android/javatests/src/org/chromium/base/test/util/DisableIf.java",
"test/android/javatests/src/org/chromium/base/test/util/DisableIfSkipCheck.java",
@@ -2409,6 +2439,7 @@ if (is_android) {
java_files = [
"android/junit/src/org/chromium/base/BaseChromiumApplicationTest.java",
"android/junit/src/org/chromium/base/LogTest.java",
+ "android/junit/src/org/chromium/base/NonThreadSafeTest.java",
"android/junit/src/org/chromium/base/PromiseTest.java",
"test/android/junit/src/org/chromium/base/test/util/DisableIfTest.java",
"test/android/junit/src/org/chromium/base/test/util/MinAndroidSdkLevelSkipCheckTest.java",
@@ -2461,3 +2492,12 @@ if (is_android) {
]
}
}
+
+fuzzer_test("base_json_correctness_fuzzer") {
+ sources = [
+ "json/correctness_fuzzer.cc",
+ ]
+ deps = [
+ ":base",
+ ]
+}
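
The fuzzer_test() target above builds json/correctness_fuzzer.cc (added by this commit) as a libFuzzer binary. The fuzzer body itself is not part of this excerpt; as a rough sketch under that caveat, a libFuzzer harness for the JSON reader follows the standard entry-point shape, where the exact base::JSONReader call is an assumption about what the real file exercises:

// Illustrative sketch only; the actual json/correctness_fuzzer.cc body is
// not shown in this diff.
#include <stddef.h>
#include <stdint.h>

#include <string>

#include "base/json/json_reader.h"

// libFuzzer calls this entry point repeatedly with generated inputs; the
// harness must not crash for any byte sequence.
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
  std::string input(reinterpret_cast<const char*>(data), size);
  base::JSONReader::Read(input);  // Parse result is deliberately discarded.
  return 0;
}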
diff --git a/chromium/base/PRESUBMIT.py b/chromium/base/PRESUBMIT.py
index 7fc8107658c..77686044068 100644
--- a/chromium/base/PRESUBMIT.py
+++ b/chromium/base/PRESUBMIT.py
@@ -35,6 +35,8 @@ def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
results = []
results.extend(_CheckNoInterfacesInBase(input_api, output_api))
+ results.extend(
+ input_api.canned_checks.CheckPatchFormatted(input_api, output_api))
return results
def CheckChangeOnUpload(input_api, output_api):
diff --git a/chromium/base/allocator/BUILD.gn b/chromium/base/allocator/BUILD.gn
index dab03b5854f..1f69f9538c7 100644
--- a/chromium/base/allocator/BUILD.gn
+++ b/chromium/base/allocator/BUILD.gn
@@ -216,7 +216,12 @@ if (use_allocator == "tcmalloc") {
"$tcmalloc_dir/src",
]
- configs -= [ "//build/config/compiler:chromium_code" ]
+ configs -= [
+ "//build/config/compiler:chromium_code",
+
+ # The asm code in tcmalloc is not currently thumb compatible
+ "//build/config/compiler:compiler_arm_thumb",
+ ]
configs += [
"//build/config/compiler:no_chromium_code",
":tcmalloc_flags",
diff --git a/chromium/base/allocator/README.md b/chromium/base/allocator/README.md
index 164df51ae6f..a211732c3f3 100644
--- a/chromium/base/allocator/README.md
+++ b/chromium/base/allocator/README.md
@@ -189,8 +189,8 @@ Related links
- [Unified allocator shim doc - Feb 2016][url-allocator-shim]
- [Allocator cleanup doc - Jan 2016][url-allocator-cleanup]
- [Proposal to use PartitionAlloc as default allocator](https://crbug.com/339604)
-- [Memory-Infra: Tools to profile memory usage in Chrome](components/tracing/docs/memory_infra.md)
+- [Memory-Infra: Tools to profile memory usage in Chrome](/docs/memory-infra/README.md)
[url-allocator-cleanup]: https://docs.google.com/document/d/1V77Kgp_4tfaaWPEZVxNevoD02wXiatnAv7Ssgr0hmjg/edit?usp=sharing
-[url-memory-infra-heap-profiler]: components/tracing/docs/heap_profiler.md
+[url-memory-infra-heap-profiler]: /docs/memory-infra/heap_profiler.md
[url-allocator-shim]: https://docs.google.com/document/d/1yKlO1AO4XjpDad9rjcBOI15EKdAGsuGO_IeZy0g0kxo/edit?usp=sharing
diff --git a/chromium/base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc b/chromium/base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc
index 9e04f04f7e6..69953aec0e9 100644
--- a/chromium/base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc
+++ b/chromium/base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc
@@ -2,7 +2,14 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <malloc.h>
+
#include "base/allocator/allocator_shim.h"
+#include "build/build_config.h"
+
+#if defined(OS_ANDROID) && __ANDROID_API__ < 17
+#include <dlfcn.h>
+#endif
// This translation unit defines a default dispatch for the allocator shim which
// routes allocations to the original libc functions when using the link-time
@@ -45,7 +52,32 @@ void RealFree(const AllocatorDispatch*, void* address) {
__real_free(address);
}
-size_t RealSizeEstimate(const AllocatorDispatch*, void*) {
+#if defined(OS_ANDROID) && __ANDROID_API__ < 17
+size_t DummyMallocUsableSize(const void*) { return 0; }
+#endif
+
+size_t RealSizeEstimate(const AllocatorDispatch*, void* address) {
+#if defined(OS_ANDROID)
+#if __ANDROID_API__ < 17
+ // malloc_usable_size() is available only starting from API 17.
+ // TODO(dskiba): remove once we start building against 17+.
+ using MallocUsableSizeFunction = decltype(malloc_usable_size)*;
+ static MallocUsableSizeFunction usable_size_function = nullptr;
+ if (!usable_size_function) {
+ void* function_ptr = dlsym(RTLD_DEFAULT, "malloc_usable_size");
+ if (function_ptr) {
+ usable_size_function = reinterpret_cast<MallocUsableSizeFunction>(
+ function_ptr);
+ } else {
+ usable_size_function = &DummyMallocUsableSize;
+ }
+ }
+ return usable_size_function(address);
+#else
+ return malloc_usable_size(address);
+#endif
+#endif // OS_ANDROID
+
// TODO(primiano): This should be redirected to malloc_usable_size or
// the like.
return 0;
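
The #if branch above is a lazy runtime lookup: on Android builds targeting API < 17, malloc_usable_size() is absent from the NDK headers, so the shim resolves the symbol once via dlsym() and falls back to a stub that reports 0 when libc does not export it. The same pattern, condensed into a standalone sketch (not the shim code itself):

#include <dlfcn.h>
#include <stddef.h>

// Returns the usable size of |ptr|, or 0 if the running libc does not
// export malloc_usable_size. The dlsym() lookup happens once, on first call.
static size_t SafeMallocUsableSize(void* ptr) {
  using Fn = size_t (*)(const void*);
  static Fn fn =
      reinterpret_cast<Fn>(dlsym(RTLD_DEFAULT, "malloc_usable_size"));
  return fn ? fn(ptr) : 0;
}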
diff --git a/chromium/base/android/linker/config.gni b/chromium/base/android/linker/config.gni
index 174c1abe7ee..27793ffe6ef 100644
--- a/chromium/base/android/linker/config.gni
+++ b/chromium/base/android/linker/config.gni
@@ -6,7 +6,8 @@ import("//build/config/android/config.gni")
import("//build/config/compiler/compiler.gni")
import("//build/config/sanitizers/sanitizers.gni")
-# Chromium linker crashes on component builds on Android 4.4. See b/11379966
+# Chromium linker doesn't reliably support loading multiple libraries;
+# disable for component builds, see crbug.com/657093.
# Chromium linker causes instrumentation to return incorrect results.
chromium_linker_supported =
!is_component_build && !enable_profiling && !use_order_profiling && !is_asan
diff --git a/chromium/base/bind.h b/chromium/base/bind.h
index ec707a0618f..ce717972e26 100644
--- a/chromium/base/bind.h
+++ b/chromium/base/bind.h
@@ -23,7 +23,6 @@
// terms and concepts.
namespace base {
-namespace internal {
// Bind as OnceCallback.
template <typename Functor, typename... Args>
@@ -69,16 +68,14 @@ BindRepeating(Functor&& functor, Args&&... args) {
std::forward<Args>(args)...));
}
-} // namespace internal
-
// Unannotated Bind.
// TODO(tzik): Deprecate this and migrate to OnceCallback and
// RepeatingCallback, once they get ready.
template <typename Functor, typename... Args>
inline Callback<MakeUnboundRunType<Functor, Args...>>
Bind(Functor&& functor, Args&&... args) {
- return internal::BindRepeating(std::forward<Functor>(functor),
- std::forward<Args>(args)...);
+ return BindRepeating(std::forward<Functor>(functor),
+ std::forward<Args>(args)...);
}
} // namespace base
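
After this change, BindOnce() and BindRepeating() are public in namespace base, and the legacy Bind() is only a thin forwarder to BindRepeating(). A short usage sketch (assuming a Chromium checkout; the behavior matches the bind_unittest.cc changes further down):

#include <utility>

#include "base/bind.h"
#include "base/callback.h"

int Sum(int a, int b) { return a + b; }

void Example() {
  // BindOnce() yields a OnceCallback: run through an rvalue, at most once.
  base::OnceCallback<int()> once = base::BindOnce(&Sum, 1, 2);
  int x = std::move(once).Run();  // x == 3; |once| is now null.

  // BindRepeating() yields a RepeatingCallback, runnable any number of times.
  base::RepeatingCallback<int()> repeating = base::BindRepeating(&Sum, 1, 2);
  int y = repeating.Run() + repeating.Run();  // y == 6

  // Bind() still works and now simply produces the repeating flavor.
  base::Callback<int()> legacy = base::Bind(&Sum, 1, 2);
  int z = legacy.Run();  // z == 3
}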
diff --git a/chromium/base/bind_internal.h b/chromium/base/bind_internal.h
index d16109e4243..88e764547f8 100644
--- a/chromium/base/bind_internal.h
+++ b/chromium/base/bind_internal.h
@@ -411,11 +411,15 @@ struct CancellationChecker<
}
};
-template <typename Signature, typename... BoundArgs>
-struct CancellationChecker<BindState<Callback<Signature>, BoundArgs...>> {
+template <typename Signature,
+ typename... BoundArgs,
+ CopyMode copy_mode,
+ RepeatMode repeat_mode>
+struct CancellationChecker<
+ BindState<Callback<Signature, copy_mode, repeat_mode>, BoundArgs...>> {
static constexpr bool is_cancellable = true;
static bool Run(const BindStateBase* base) {
- using Functor = Callback<Signature>;
+ using Functor = Callback<Signature, copy_mode, repeat_mode>;
using BindStateType = BindState<Functor, BoundArgs...>;
const BindStateType* bind_state = static_cast<const BindStateType*>(base);
return bind_state->functor_.IsCancelled();
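
The widened template parameters (copy_mode, repeat_mode) make this cancellation check apply to every Callback flavor used as a bound functor, not just the default Callback<Signature>. Concretely, wrapping a weak-bound callback in another Bind() keeps it cancellable, because the specialization forwards the cancellation query to the stored inner callback. A sketch of that behavior (assuming a Chromium checkout):

#include "base/bind.h"
#include "base/callback.h"
#include "base/memory/weak_ptr.h"

class Worker {
 public:
  Worker() : weak_factory_(this) {}
  void DoWork() {}
  base::WeakPtr<Worker> GetWeakPtr() { return weak_factory_.GetWeakPtr(); }
  void InvalidateWeakPtrs() { weak_factory_.InvalidateWeakPtrs(); }

 private:
  base::WeakPtrFactory<Worker> weak_factory_;
};

void Example() {
  Worker worker;
  // A weak method binding is cancellable on its own...
  base::Closure inner = base::Bind(&Worker::DoWork, worker.GetWeakPtr());
  // ...and a callback that merely rebinds it now is too: the cancellation
  // check above reaches |inner| through the stored functor.
  base::Closure outer = base::Bind(inner);

  worker.InvalidateWeakPtrs();
  outer.Run();  // No-op: the underlying weak binding was invalidated.
}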
diff --git a/chromium/base/bind_unittest.cc b/chromium/base/bind_unittest.cc
index 285a20d5269..c6b53d55f03 100644
--- a/chromium/base/bind_unittest.cc
+++ b/chromium/base/bind_unittest.cc
@@ -20,6 +20,7 @@
using ::testing::_;
using ::testing::Mock;
+using ::testing::ByMove;
using ::testing::Return;
using ::testing::StrictMock;
@@ -39,6 +40,7 @@ class NoRef {
MOCK_CONST_METHOD0(IntConstMethod0, int());
MOCK_METHOD1(VoidMethodWithIntArg, void(int));
+ MOCK_METHOD0(UniquePtrMethod0, std::unique_ptr<int>());
private:
// Particularly important in this test to ensure no copies are made.
@@ -343,6 +345,15 @@ class BindTest : public ::testing::Test {
};
StrictMock<NoRef>* BindTest::static_func_mock_ptr;
+StrictMock<NoRef>* g_func_mock_ptr;
+
+void VoidFunc0() {
+ g_func_mock_ptr->VoidMethod0();
+}
+
+int IntFunc0() {
+ return g_func_mock_ptr->IntMethod0();
+}
TEST_F(BindTest, BasicTest) {
Callback<int(int, int, int)> cb = Bind(&Sum, 32, 16, 8);
@@ -363,7 +374,8 @@ TEST_F(BindTest, BasicTest) {
// - multiple runs of resulting Callback remain valid.
TEST_F(BindTest, CurryingRvalueResultOfBind) {
int n = 0;
- Closure cb = base::Bind(&TakesACallback, base::Bind(&PtrArgSet, &n));
+ RepeatingClosure cb = BindRepeating(&TakesACallback,
+ BindRepeating(&PtrArgSet, &n));
// If we implement Bind() such that the return value has auto_ptr-like
// semantics, the second call here will fail because ownership of
@@ -377,76 +389,45 @@ TEST_F(BindTest, CurryingRvalueResultOfBind) {
EXPECT_EQ(2, n);
}
-// Function type support.
-// - Normal function.
-// - Normal function bound with non-refcounted first argument.
-// - Method bound to non-const object.
-// - Method bound to scoped_refptr.
-// - Const method bound to non-const object.
-// - Const method bound to const object.
-// - Derived classes can be used with pointers to non-virtual base functions.
-// - Derived classes can be used with pointers to virtual base functions (and
-// preserve virtual dispatch).
-TEST_F(BindTest, FunctionTypeSupport) {
- EXPECT_CALL(static_func_mock_, VoidMethod0());
- EXPECT_CALL(has_ref_, AddRef()).Times(4);
- EXPECT_CALL(has_ref_, Release()).Times(4);
- EXPECT_CALL(has_ref_, VoidMethod0()).Times(2);
- EXPECT_CALL(has_ref_, VoidConstMethod0()).Times(2);
-
- Closure normal_cb = Bind(&VoidFunc0);
- Callback<NoRef*()> normal_non_refcounted_cb =
- Bind(&PolymorphicIdentity<NoRef*>, &no_ref_);
- normal_cb.Run();
- EXPECT_EQ(&no_ref_, normal_non_refcounted_cb.Run());
-
- Closure method_cb = Bind(&HasRef::VoidMethod0, &has_ref_);
- Closure method_refptr_cb = Bind(&HasRef::VoidMethod0,
- make_scoped_refptr(&has_ref_));
- Closure const_method_nonconst_obj_cb = Bind(&HasRef::VoidConstMethod0,
- &has_ref_);
- Closure const_method_const_obj_cb = Bind(&HasRef::VoidConstMethod0,
- const_has_ref_ptr_);
- method_cb.Run();
- method_refptr_cb.Run();
- const_method_nonconst_obj_cb.Run();
- const_method_const_obj_cb.Run();
+TEST_F(BindTest, RepeatingCallbackBasicTest) {
+ RepeatingCallback<int(int)> c0 = BindRepeating(&Sum, 1, 2, 4, 8, 16);
- Child child;
- child.value = 0;
- Closure virtual_set_cb = Bind(&Parent::VirtualSet, &child);
- virtual_set_cb.Run();
- EXPECT_EQ(kChildValue, child.value);
+ // RepeatingCallback can run via an lvalue reference.
+ EXPECT_EQ(63, c0.Run(32));
- child.value = 0;
- Closure non_virtual_set_cb = Bind(&Parent::NonVirtualSet, &child);
- non_virtual_set_cb.Run();
- EXPECT_EQ(kParentValue, child.value);
+ // It is valid to call a RepeatingCallback more than once.
+ EXPECT_EQ(54, c0.Run(23));
+
+ // BindRepeating can handle a RepeatingCallback as the target functor.
+ RepeatingCallback<int()> c1 = BindRepeating(c0, 11);
+
+ // RepeatingCallback can run via an rvalue reference.
+ EXPECT_EQ(42, std::move(c1).Run());
+
+ // BindRepeating can handle an rvalue reference to a RepeatingCallback.
+ EXPECT_EQ(32, BindRepeating(std::move(c0), 1).Run());
}
-// Return value support.
-// - Function with return value.
-// - Method with return value.
-// - Const method with return value.
-TEST_F(BindTest, ReturnValues) {
- EXPECT_CALL(static_func_mock_, IntMethod0()).WillOnce(Return(1337));
- EXPECT_CALL(has_ref_, AddRef()).Times(3);
- EXPECT_CALL(has_ref_, Release()).Times(3);
- EXPECT_CALL(has_ref_, IntMethod0()).WillOnce(Return(31337));
- EXPECT_CALL(has_ref_, IntConstMethod0())
- .WillOnce(Return(41337))
- .WillOnce(Return(51337));
+TEST_F(BindTest, OnceCallbackBasicTest) {
+ OnceCallback<int(int)> c0 = BindOnce(&Sum, 1, 2, 4, 8, 16);
+
+ // OnceCallback can run via an rvalue reference.
+ EXPECT_EQ(63, std::move(c0).Run(32));
- Callback<int()> normal_cb = Bind(&IntFunc0);
- Callback<int()> method_cb = Bind(&HasRef::IntMethod0, &has_ref_);
- Callback<int()> const_method_nonconst_obj_cb =
- Bind(&HasRef::IntConstMethod0, &has_ref_);
- Callback<int()> const_method_const_obj_cb =
- Bind(&HasRef::IntConstMethod0, const_has_ref_ptr_);
- EXPECT_EQ(1337, normal_cb.Run());
- EXPECT_EQ(31337, method_cb.Run());
- EXPECT_EQ(41337, const_method_nonconst_obj_cb.Run());
- EXPECT_EQ(51337, const_method_const_obj_cb.Run());
+ // After running via the rvalue-reference, the value of the OnceCallback
+ // is undefined. The implementation simply clears the instance after the
+ // invocation.
+ EXPECT_TRUE(c0.is_null());
+
+ c0 = BindOnce(&Sum, 2, 3, 5, 7, 11);
+
+ // BindOnce can handle an rvalue reference to an OnceCallback as the target
+ // functor.
+ OnceCallback<int()> c1 = BindOnce(std::move(c0), 13);
+ EXPECT_EQ(41, std::move(c1).Run());
+
+ RepeatingCallback<int(int)> c2 = BindRepeating(&Sum, 2, 3, 5, 7, 11);
+ EXPECT_EQ(41, BindOnce(c2, 13).Run());
}
// IgnoreResult adapter test.
@@ -455,7 +436,7 @@ TEST_F(BindTest, ReturnValues) {
// - Const Method with return.
// - Method with return value bound to WeakPtr<>.
// - Const Method with return bound to WeakPtr<>.
-TEST_F(BindTest, IgnoreResult) {
+TEST_F(BindTest, IgnoreResultForRepeating) {
EXPECT_CALL(static_func_mock_, IntMethod0()).WillOnce(Return(1337));
EXPECT_CALL(has_ref_, AddRef()).Times(2);
EXPECT_CALL(has_ref_, Release()).Times(2);
@@ -464,26 +445,28 @@ TEST_F(BindTest, IgnoreResult) {
EXPECT_CALL(no_ref_, IntMethod0()).WillOnce(Return(12));
EXPECT_CALL(no_ref_, IntConstMethod0()).WillOnce(Return(13));
- Closure normal_func_cb = Bind(IgnoreResult(&IntFunc0));
+ RepeatingClosure normal_func_cb = BindRepeating(IgnoreResult(&IntFunc0));
normal_func_cb.Run();
- Closure non_void_method_cb =
- Bind(IgnoreResult(&HasRef::IntMethod0), &has_ref_);
+ RepeatingClosure non_void_method_cb =
+ BindRepeating(IgnoreResult(&HasRef::IntMethod0), &has_ref_);
non_void_method_cb.Run();
- Closure non_void_const_method_cb =
- Bind(IgnoreResult(&HasRef::IntConstMethod0), &has_ref_);
+ RepeatingClosure non_void_const_method_cb =
+ BindRepeating(IgnoreResult(&HasRef::IntConstMethod0), &has_ref_);
non_void_const_method_cb.Run();
WeakPtrFactory<NoRef> weak_factory(&no_ref_);
WeakPtrFactory<const NoRef> const_weak_factory(const_no_ref_ptr_);
- Closure non_void_weak_method_cb =
- Bind(IgnoreResult(&NoRef::IntMethod0), weak_factory.GetWeakPtr());
+ RepeatingClosure non_void_weak_method_cb =
+ BindRepeating(IgnoreResult(&NoRef::IntMethod0),
+ weak_factory.GetWeakPtr());
non_void_weak_method_cb.Run();
- Closure non_void_weak_const_method_cb =
- Bind(IgnoreResult(&NoRef::IntConstMethod0), weak_factory.GetWeakPtr());
+ RepeatingClosure non_void_weak_const_method_cb =
+ BindRepeating(IgnoreResult(&NoRef::IntConstMethod0),
+ weak_factory.GetWeakPtr());
non_void_weak_const_method_cb.Run();
weak_factory.InvalidateWeakPtrs();
@@ -491,128 +474,86 @@ TEST_F(BindTest, IgnoreResult) {
non_void_weak_method_cb.Run();
}
-// Argument binding tests.
-// - Argument binding to primitive.
-// - Argument binding to primitive pointer.
-// - Argument binding to a literal integer.
-// - Argument binding to a literal string.
-// - Argument binding with template function.
-// - Argument binding to an object.
-// - Argument binding to pointer to incomplete type.
-// - Argument gets type converted.
-// - Pointer argument gets converted.
-// - Const Reference forces conversion.
-TEST_F(BindTest, ArgumentBinding) {
- int n = 2;
-
- Callback<int()> bind_primitive_cb = Bind(&Identity, n);
- EXPECT_EQ(n, bind_primitive_cb.Run());
-
- Callback<int*()> bind_primitive_pointer_cb =
- Bind(&PolymorphicIdentity<int*>, &n);
- EXPECT_EQ(&n, bind_primitive_pointer_cb.Run());
-
- Callback<int()> bind_int_literal_cb = Bind(&Identity, 3);
- EXPECT_EQ(3, bind_int_literal_cb.Run());
-
- Callback<const char*()> bind_string_literal_cb =
- Bind(&CStringIdentity, "hi");
- EXPECT_STREQ("hi", bind_string_literal_cb.Run());
-
- Callback<int()> bind_template_function_cb =
- Bind(&PolymorphicIdentity<int>, 4);
- EXPECT_EQ(4, bind_template_function_cb.Run());
-
- NoRefParent p;
- p.value = 5;
- Callback<int()> bind_object_cb = Bind(&UnwrapNoRefParent, p);
- EXPECT_EQ(5, bind_object_cb.Run());
+TEST_F(BindTest, IgnoreResultForOnce) {
+ EXPECT_CALL(static_func_mock_, IntMethod0()).WillOnce(Return(1337));
+ EXPECT_CALL(has_ref_, AddRef()).Times(2);
+ EXPECT_CALL(has_ref_, Release()).Times(2);
+ EXPECT_CALL(has_ref_, IntMethod0()).WillOnce(Return(10));
+ EXPECT_CALL(has_ref_, IntConstMethod0()).WillOnce(Return(11));
- IncompleteType* incomplete_ptr = reinterpret_cast<IncompleteType*>(123);
- Callback<IncompleteType*()> bind_incomplete_ptr_cb =
- Bind(&PolymorphicIdentity<IncompleteType*>, incomplete_ptr);
- EXPECT_EQ(incomplete_ptr, bind_incomplete_ptr_cb.Run());
+ OnceClosure normal_func_cb = BindOnce(IgnoreResult(&IntFunc0));
+ std::move(normal_func_cb).Run();
- NoRefChild c;
- c.value = 6;
- Callback<int()> bind_promotes_cb = Bind(&UnwrapNoRefParent, c);
- EXPECT_EQ(6, bind_promotes_cb.Run());
+ OnceClosure non_void_method_cb =
+ BindOnce(IgnoreResult(&HasRef::IntMethod0), &has_ref_);
+ std::move(non_void_method_cb).Run();
- c.value = 7;
- Callback<int()> bind_pointer_promotes_cb =
- Bind(&UnwrapNoRefParentPtr, &c);
- EXPECT_EQ(7, bind_pointer_promotes_cb.Run());
+ OnceClosure non_void_const_method_cb =
+ BindOnce(IgnoreResult(&HasRef::IntConstMethod0), &has_ref_);
+ std::move(non_void_const_method_cb).Run();
- c.value = 8;
- Callback<int()> bind_const_reference_promotes_cb =
- Bind(&UnwrapNoRefParentConstRef, c);
- EXPECT_EQ(8, bind_const_reference_promotes_cb.Run());
-}
+ WeakPtrFactory<NoRef> weak_factory(&no_ref_);
+ WeakPtrFactory<const NoRef> const_weak_factory(const_no_ref_ptr_);
-// Unbound argument type support tests.
-// - Unbound value.
-// - Unbound pointer.
-// - Unbound reference.
-// - Unbound const reference.
-// - Unbound unsized array.
-// - Unbound sized array.
-// - Unbound array-of-arrays.
-TEST_F(BindTest, UnboundArgumentTypeSupport) {
- Callback<void(int)> unbound_value_cb = Bind(&VoidPolymorphic<int>::Run);
- Callback<void(int*)> unbound_pointer_cb = Bind(&VoidPolymorphic<int*>::Run);
- Callback<void(int&)> unbound_ref_cb = Bind(&VoidPolymorphic<int&>::Run);
- Callback<void(const int&)> unbound_const_ref_cb =
- Bind(&VoidPolymorphic<const int&>::Run);
- Callback<void(int[])> unbound_unsized_array_cb =
- Bind(&VoidPolymorphic<int[]>::Run);
- Callback<void(int[2])> unbound_sized_array_cb =
- Bind(&VoidPolymorphic<int[2]>::Run);
- Callback<void(int[][2])> unbound_array_of_arrays_cb =
- Bind(&VoidPolymorphic<int[][2]>::Run);
-
- Callback<void(int&)> unbound_ref_with_bound_arg =
- Bind(&VoidPolymorphic<int, int&>::Run, 1);
-}
+ OnceClosure non_void_weak_method_cb =
+ BindOnce(IgnoreResult(&NoRef::IntMethod0),
+ weak_factory.GetWeakPtr());
+ OnceClosure non_void_weak_const_method_cb =
+ BindOnce(IgnoreResult(&NoRef::IntConstMethod0),
+ weak_factory.GetWeakPtr());
-// Function with unbound reference parameter.
-// - Original parameter is modified by callback.
-TEST_F(BindTest, UnboundReferenceSupport) {
- int n = 0;
- Callback<void(int&)> unbound_ref_cb = Bind(&RefArgSet);
- unbound_ref_cb.Run(n);
- EXPECT_EQ(2, n);
+ weak_factory.InvalidateWeakPtrs();
+ std::move(non_void_weak_const_method_cb).Run();
+ std::move(non_void_weak_method_cb).Run();
}
// Functions that take reference parameters.
// - Forced reference parameter type still stores a copy.
// - Forced const reference parameter type still stores a copy.
-TEST_F(BindTest, ReferenceArgumentBinding) {
+TEST_F(BindTest, ReferenceArgumentBindingForRepeating) {
int n = 1;
int& ref_n = n;
const int& const_ref_n = n;
- Callback<int()> ref_copies_cb = Bind(&Identity, ref_n);
+ RepeatingCallback<int()> ref_copies_cb = BindRepeating(&Identity, ref_n);
EXPECT_EQ(n, ref_copies_cb.Run());
n++;
EXPECT_EQ(n - 1, ref_copies_cb.Run());
- Callback<int()> const_ref_copies_cb = Bind(&Identity, const_ref_n);
+ RepeatingCallback<int()> const_ref_copies_cb =
+ BindRepeating(&Identity, const_ref_n);
EXPECT_EQ(n, const_ref_copies_cb.Run());
n++;
EXPECT_EQ(n - 1, const_ref_copies_cb.Run());
}
+TEST_F(BindTest, ReferenceArgumentBindingForOnce) {
+ int n = 1;
+ int& ref_n = n;
+ const int& const_ref_n = n;
+
+ OnceCallback<int()> ref_copies_cb = BindOnce(&Identity, ref_n);
+ n++;
+ EXPECT_EQ(n - 1, std::move(ref_copies_cb).Run());
+
+ OnceCallback<int()> const_ref_copies_cb =
+ BindOnce(&Identity, const_ref_n);
+ n++;
+ EXPECT_EQ(n - 1, std::move(const_ref_copies_cb).Run());
+}
+
// Check that we can pass in arrays and have them be stored as a pointer.
// - Array of values stores a pointer.
// - Array of const values stores a pointer.
-TEST_F(BindTest, ArrayArgumentBinding) {
+TEST_F(BindTest, ArrayArgumentBindingForRepeating) {
int array[4] = {1, 1, 1, 1};
const int (*const_array_ptr)[4] = &array;
- Callback<int()> array_cb = Bind(&ArrayGet, array, 1);
+ RepeatingCallback<int()> array_cb = BindRepeating(&ArrayGet, array, 1);
EXPECT_EQ(1, array_cb.Run());
- Callback<int()> const_array_cb = Bind(&ArrayGet, *const_array_ptr, 1);
+ RepeatingCallback<int()> const_array_cb =
+ BindRepeating(&ArrayGet, *const_array_ptr, 1);
EXPECT_EQ(1, const_array_cb.Run());
array[1] = 3;
@@ -620,25 +561,17 @@ TEST_F(BindTest, ArrayArgumentBinding) {
EXPECT_EQ(3, const_array_cb.Run());
}
-// Unretained() wrapper support.
-// - Method bound to Unretained() non-const object.
-// - Const method bound to Unretained() non-const object.
-// - Const method bound to Unretained() const object.
-TEST_F(BindTest, Unretained) {
- EXPECT_CALL(no_ref_, VoidMethod0());
- EXPECT_CALL(no_ref_, VoidConstMethod0()).Times(2);
+TEST_F(BindTest, ArrayArgumentBindingForOnce) {
+ int array[4] = {1, 1, 1, 1};
+ const int (*const_array_ptr)[4] = &array;
- Callback<void()> method_cb =
- Bind(&NoRef::VoidMethod0, Unretained(&no_ref_));
- method_cb.Run();
+ OnceCallback<int()> array_cb = BindOnce(&ArrayGet, array, 1);
+ OnceCallback<int()> const_array_cb =
+ BindOnce(&ArrayGet, *const_array_ptr, 1);
- Callback<void()> const_method_cb =
- Bind(&NoRef::VoidConstMethod0, Unretained(&no_ref_));
- const_method_cb.Run();
-
- Callback<void()> const_method_const_ptr_cb =
- Bind(&NoRef::VoidConstMethod0, Unretained(const_no_ref_ptr_));
- const_method_const_ptr_cb.Run();
+ array[1] = 3;
+ EXPECT_EQ(3, std::move(array_cb).Run());
+ EXPECT_EQ(3, std::move(const_array_cb).Run());
}
// WeakPtr() support.
@@ -647,27 +580,27 @@ TEST_F(BindTest, Unretained) {
// - Const method bound to WeakPtr<> to const object.
// - Normal Function with WeakPtr<> as P1 can have return type and is
// not canceled.
-TEST_F(BindTest, WeakPtr) {
+TEST_F(BindTest, WeakPtrForRepeating) {
EXPECT_CALL(no_ref_, VoidMethod0());
EXPECT_CALL(no_ref_, VoidConstMethod0()).Times(2);
WeakPtrFactory<NoRef> weak_factory(&no_ref_);
WeakPtrFactory<const NoRef> const_weak_factory(const_no_ref_ptr_);
- Closure method_cb =
- Bind(&NoRef::VoidMethod0, weak_factory.GetWeakPtr());
+ RepeatingClosure method_cb =
+ BindRepeating(&NoRef::VoidMethod0, weak_factory.GetWeakPtr());
method_cb.Run();
- Closure const_method_cb =
- Bind(&NoRef::VoidConstMethod0, const_weak_factory.GetWeakPtr());
+ RepeatingClosure const_method_cb =
+ BindRepeating(&NoRef::VoidConstMethod0, const_weak_factory.GetWeakPtr());
const_method_cb.Run();
- Closure const_method_const_ptr_cb =
- Bind(&NoRef::VoidConstMethod0, const_weak_factory.GetWeakPtr());
+ RepeatingClosure const_method_const_ptr_cb =
+ BindRepeating(&NoRef::VoidConstMethod0, const_weak_factory.GetWeakPtr());
const_method_const_ptr_cb.Run();
- Callback<int(int)> normal_func_cb =
- Bind(&FunctionWithWeakFirstParam, weak_factory.GetWeakPtr());
+ RepeatingCallback<int(int)> normal_func_cb =
+ BindRepeating(&FunctionWithWeakFirstParam, weak_factory.GetWeakPtr());
EXPECT_EQ(1, normal_func_cb.Run(1));
weak_factory.InvalidateWeakPtrs();
@@ -681,15 +614,39 @@ TEST_F(BindTest, WeakPtr) {
EXPECT_EQ(2, normal_func_cb.Run(2));
}
+TEST_F(BindTest, WeakPtrForOnce) {
+ WeakPtrFactory<NoRef> weak_factory(&no_ref_);
+ WeakPtrFactory<const NoRef> const_weak_factory(const_no_ref_ptr_);
+
+ OnceClosure method_cb =
+ BindOnce(&NoRef::VoidMethod0, weak_factory.GetWeakPtr());
+ OnceClosure const_method_cb =
+ BindOnce(&NoRef::VoidConstMethod0, const_weak_factory.GetWeakPtr());
+ OnceClosure const_method_const_ptr_cb =
+ BindOnce(&NoRef::VoidConstMethod0, const_weak_factory.GetWeakPtr());
+ Callback<int(int)> normal_func_cb =
+ Bind(&FunctionWithWeakFirstParam, weak_factory.GetWeakPtr());
+
+ weak_factory.InvalidateWeakPtrs();
+ const_weak_factory.InvalidateWeakPtrs();
+
+ std::move(method_cb).Run();
+ std::move(const_method_cb).Run();
+ std::move(const_method_const_ptr_cb).Run();
+
+ // Still runs even after the pointers are invalidated.
+ EXPECT_EQ(2, std::move(normal_func_cb).Run(2));
+}
+
// ConstRef() wrapper support.
// - Binding w/o ConstRef takes a copy.
// - Binding a ConstRef takes a reference.
// - Binding ConstRef to a function ConstRef does not copy on invoke.
-TEST_F(BindTest, ConstRef) {
+TEST_F(BindTest, ConstRefForRepeating) {
int n = 1;
- Callback<int()> copy_cb = Bind(&Identity, n);
- Callback<int()> const_ref_cb = Bind(&Identity, ConstRef(n));
+ RepeatingCallback<int()> copy_cb = BindRepeating(&Identity, n);
+ RepeatingCallback<int()> const_ref_cb = BindRepeating(&Identity, ConstRef(n));
EXPECT_EQ(n, copy_cb.Run());
EXPECT_EQ(n, const_ref_cb.Run());
n++;
@@ -701,8 +658,8 @@ TEST_F(BindTest, ConstRef) {
int move_constructs = 0;
int move_assigns = 0;
CopyMoveCounter counter(&copies, &assigns, &move_constructs, &move_assigns);
- Callback<int()> all_const_ref_cb =
- Bind(&GetCopies, ConstRef(counter));
+ RepeatingCallback<int()> all_const_ref_cb =
+ BindRepeating(&GetCopies, ConstRef(counter));
EXPECT_EQ(0, all_const_ref_cb.Run());
EXPECT_EQ(0, copies);
EXPECT_EQ(0, assigns);
@@ -710,25 +667,38 @@ TEST_F(BindTest, ConstRef) {
EXPECT_EQ(0, move_assigns);
}
-TEST_F(BindTest, ScopedRefptr) {
- EXPECT_CALL(has_ref_, AddRef()).Times(1);
- EXPECT_CALL(has_ref_, Release()).Times(1);
+TEST_F(BindTest, ConstRefForOnce) {
+ int n = 1;
- const scoped_refptr<HasRef> refptr(&has_ref_);
- Callback<int()> scoped_refptr_const_ref_cb =
- Bind(&FunctionWithScopedRefptrFirstParam, base::ConstRef(refptr), 1);
- EXPECT_EQ(1, scoped_refptr_const_ref_cb.Run());
+ OnceCallback<int()> copy_cb = BindOnce(&Identity, n);
+ OnceCallback<int()> const_ref_cb = BindOnce(&Identity, ConstRef(n));
+ n++;
+ EXPECT_EQ(n - 1, std::move(copy_cb).Run());
+ EXPECT_EQ(n, std::move(const_ref_cb).Run());
+
+ int copies = 0;
+ int assigns = 0;
+ int move_constructs = 0;
+ int move_assigns = 0;
+ CopyMoveCounter counter(&copies, &assigns, &move_constructs, &move_assigns);
+ OnceCallback<int()> all_const_ref_cb =
+ BindOnce(&GetCopies, ConstRef(counter));
+ EXPECT_EQ(0, std::move(all_const_ref_cb).Run());
+ EXPECT_EQ(0, copies);
+ EXPECT_EQ(0, assigns);
+ EXPECT_EQ(0, move_constructs);
+ EXPECT_EQ(0, move_assigns);
}
// Test Owned() support.
-TEST_F(BindTest, Owned) {
+TEST_F(BindTest, OwnedForRepeating) {
int deletes = 0;
DeleteCounter* counter = new DeleteCounter(&deletes);
// If we don't capture, delete happens on Callback destruction/reset.
// Repeated Run() calls return the same value.
- Callback<DeleteCounter*()> no_capture_cb =
- Bind(&PolymorphicIdentity<DeleteCounter*>, Owned(counter));
+ RepeatingCallback<DeleteCounter*()> no_capture_cb =
+ BindRepeating(&PolymorphicIdentity<DeleteCounter*>, Owned(counter));
ASSERT_EQ(counter, no_capture_cb.Run());
ASSERT_EQ(counter, no_capture_cb.Run());
EXPECT_EQ(0, deletes);
@@ -737,18 +707,272 @@ TEST_F(BindTest, Owned) {
deletes = 0;
counter = new DeleteCounter(&deletes);
- base::Closure own_object_cb =
- Bind(&DeleteCounter::VoidMethod0, Owned(counter));
+ RepeatingClosure own_object_cb =
+ BindRepeating(&DeleteCounter::VoidMethod0, Owned(counter));
own_object_cb.Run();
EXPECT_EQ(0, deletes);
own_object_cb.Reset();
EXPECT_EQ(1, deletes);
}
-TEST_F(BindTest, UniquePtrReceiver) {
+TEST_F(BindTest, OwnedForOnce) {
+ int deletes = 0;
+ DeleteCounter* counter = new DeleteCounter(&deletes);
+
+ // If we don't capture, delete happens on Callback destruction/reset.
+ OnceCallback<DeleteCounter*()> no_capture_cb =
+ BindOnce(&PolymorphicIdentity<DeleteCounter*>, Owned(counter));
+ EXPECT_EQ(0, deletes);
+ no_capture_cb.Reset(); // This should trigger a delete.
+ EXPECT_EQ(1, deletes);
+
+ deletes = 0;
+ counter = new DeleteCounter(&deletes);
+ OnceClosure own_object_cb =
+ BindOnce(&DeleteCounter::VoidMethod0, Owned(counter));
+ EXPECT_EQ(0, deletes);
+ own_object_cb.Reset();
+ EXPECT_EQ(1, deletes);
+}
+
+template <typename T>
+class BindVariantsTest : public ::testing::Test {
+};
+
+struct RepeatingTestConfig {
+ template <typename Signature>
+ using CallbackType = RepeatingCallback<Signature>;
+ using ClosureType = RepeatingClosure;
+
+ template <typename F, typename... Args>
+ static CallbackType<MakeUnboundRunType<F, Args...>>
+ Bind(F&& f, Args&&... args) {
+ return BindRepeating(std::forward<F>(f), std::forward<Args>(args)...);
+ }
+};
+
+struct OnceTestConfig {
+ template <typename Signature>
+ using CallbackType = OnceCallback<Signature>;
+ using ClosureType = OnceClosure;
+
+ template <typename F, typename... Args>
+ static CallbackType<MakeUnboundRunType<F, Args...>>
+ Bind(F&& f, Args&&... args) {
+ return BindOnce(std::forward<F>(f), std::forward<Args>(args)...);
+ }
+};
+
+using BindVariantsTestConfig = ::testing::Types<
+ RepeatingTestConfig, OnceTestConfig>;
+TYPED_TEST_CASE(BindVariantsTest, BindVariantsTestConfig);
+
+template <typename TypeParam, typename Signature>
+using CallbackType = typename TypeParam::template CallbackType<Signature>;
+
+// Function type support.
+// - Normal function.
+// - Normal function bound with non-refcounted first argument.
+// - Method bound to non-const object.
+// - Method bound to scoped_refptr.
+// - Const method bound to non-const object.
+// - Const method bound to const object.
+// - Derived classes can be used with pointers to non-virtual base functions.
+// - Derived classes can be used with pointers to virtual base functions (and
+// preserve virtual dispatch).
+TYPED_TEST(BindVariantsTest, FunctionTypeSupport) {
+ using ClosureType = typename TypeParam::ClosureType;
+
+ StrictMock<HasRef> has_ref;
+ StrictMock<NoRef> no_ref;
+ StrictMock<NoRef> static_func_mock;
+ const HasRef* const_has_ref_ptr = &has_ref;
+ g_func_mock_ptr = &static_func_mock;
+
+ EXPECT_CALL(static_func_mock, VoidMethod0());
+ EXPECT_CALL(has_ref, AddRef()).Times(4);
+ EXPECT_CALL(has_ref, Release()).Times(4);
+ EXPECT_CALL(has_ref, VoidMethod0()).Times(2);
+ EXPECT_CALL(has_ref, VoidConstMethod0()).Times(2);
+
+ ClosureType normal_cb = TypeParam::Bind(&VoidFunc0);
+ CallbackType<TypeParam, NoRef*()> normal_non_refcounted_cb =
+ TypeParam::Bind(&PolymorphicIdentity<NoRef*>, &no_ref);
+ std::move(normal_cb).Run();
+ EXPECT_EQ(&no_ref, std::move(normal_non_refcounted_cb).Run());
+
+ ClosureType method_cb = TypeParam::Bind(&HasRef::VoidMethod0, &has_ref);
+ ClosureType method_refptr_cb = TypeParam::Bind(&HasRef::VoidMethod0,
+ make_scoped_refptr(&has_ref));
+ ClosureType const_method_nonconst_obj_cb =
+ TypeParam::Bind(&HasRef::VoidConstMethod0, &has_ref);
+ ClosureType const_method_const_obj_cb =
+ TypeParam::Bind(&HasRef::VoidConstMethod0, const_has_ref_ptr);
+ std::move(method_cb).Run();
+ std::move(method_refptr_cb).Run();
+ std::move(const_method_nonconst_obj_cb).Run();
+ std::move(const_method_const_obj_cb).Run();
+
+ Child child;
+ child.value = 0;
+ ClosureType virtual_set_cb = TypeParam::Bind(&Parent::VirtualSet, &child);
+ std::move(virtual_set_cb).Run();
+ EXPECT_EQ(kChildValue, child.value);
+
+ child.value = 0;
+ ClosureType non_virtual_set_cb =
+ TypeParam::Bind(&Parent::NonVirtualSet, &child);
+ std::move(non_virtual_set_cb).Run();
+ EXPECT_EQ(kParentValue, child.value);
+}
+
+// Return value support.
+// - Function with return value.
+// - Method with return value.
+// - Const method with return value.
+// - Move-only return value.
+TYPED_TEST(BindVariantsTest, ReturnValues) {
+ StrictMock<NoRef> static_func_mock;
+ StrictMock<HasRef> has_ref;
+ g_func_mock_ptr = &static_func_mock;
+ const HasRef* const_has_ref_ptr = &has_ref;
+
+ EXPECT_CALL(static_func_mock, IntMethod0()).WillOnce(Return(1337));
+ EXPECT_CALL(has_ref, AddRef()).Times(4);
+ EXPECT_CALL(has_ref, Release()).Times(4);
+ EXPECT_CALL(has_ref, IntMethod0()).WillOnce(Return(31337));
+ EXPECT_CALL(has_ref, IntConstMethod0())
+ .WillOnce(Return(41337))
+ .WillOnce(Return(51337));
+ EXPECT_CALL(has_ref, UniquePtrMethod0())
+ .WillOnce(Return(ByMove(MakeUnique<int>(42))));
+
+ CallbackType<TypeParam, int()> normal_cb = TypeParam::Bind(&IntFunc0);
+ CallbackType<TypeParam, int()> method_cb =
+ TypeParam::Bind(&HasRef::IntMethod0, &has_ref);
+ CallbackType<TypeParam, int()> const_method_nonconst_obj_cb =
+ TypeParam::Bind(&HasRef::IntConstMethod0, &has_ref);
+ CallbackType<TypeParam, int()> const_method_const_obj_cb =
+ TypeParam::Bind(&HasRef::IntConstMethod0, const_has_ref_ptr);
+ CallbackType<TypeParam, std::unique_ptr<int>()> move_only_rv_cb =
+ TypeParam::Bind(&HasRef::UniquePtrMethod0, &has_ref);
+ EXPECT_EQ(1337, std::move(normal_cb).Run());
+ EXPECT_EQ(31337, std::move(method_cb).Run());
+ EXPECT_EQ(41337, std::move(const_method_nonconst_obj_cb).Run());
+ EXPECT_EQ(51337, std::move(const_method_const_obj_cb).Run());
+ EXPECT_EQ(42, *std::move(move_only_rv_cb).Run());
+}
+
+// Argument binding tests.
+// - Argument binding to primitive.
+// - Argument binding to primitive pointer.
+// - Argument binding to a literal integer.
+// - Argument binding to a literal string.
+// - Argument binding with template function.
+// - Argument binding to an object.
+// - Argument binding to pointer to incomplete type.
+// - Argument gets type converted.
+// - Pointer argument gets converted.
+// - Const Reference forces conversion.
+TYPED_TEST(BindVariantsTest, ArgumentBinding) {
+ int n = 2;
+
+ EXPECT_EQ(n, TypeParam::Bind(&Identity, n).Run());
+ EXPECT_EQ(&n, TypeParam::Bind(&PolymorphicIdentity<int*>, &n).Run());
+ EXPECT_EQ(3, TypeParam::Bind(&Identity, 3).Run());
+ EXPECT_STREQ("hi", TypeParam::Bind(&CStringIdentity, "hi").Run());
+ EXPECT_EQ(4, TypeParam::Bind(&PolymorphicIdentity<int>, 4).Run());
+
+ NoRefParent p;
+ p.value = 5;
+ EXPECT_EQ(5, TypeParam::Bind(&UnwrapNoRefParent, p).Run());
+
+ IncompleteType* incomplete_ptr = reinterpret_cast<IncompleteType*>(123);
+ EXPECT_EQ(incomplete_ptr,
+ TypeParam::Bind(&PolymorphicIdentity<IncompleteType*>,
+ incomplete_ptr).Run());
+
+ NoRefChild c;
+ c.value = 6;
+ EXPECT_EQ(6, TypeParam::Bind(&UnwrapNoRefParent, c).Run());
+
+ c.value = 7;
+ EXPECT_EQ(7, TypeParam::Bind(&UnwrapNoRefParentPtr, &c).Run());
+
+ c.value = 8;
+ EXPECT_EQ(8, TypeParam::Bind(&UnwrapNoRefParentConstRef, c).Run());
+}
+
+// Unbound argument type support tests.
+// - Unbound value.
+// - Unbound pointer.
+// - Unbound reference.
+// - Unbound const reference.
+// - Unbound unsized array.
+// - Unbound sized array.
+// - Unbound array-of-arrays.
+TYPED_TEST(BindVariantsTest, UnboundArgumentTypeSupport) {
+ CallbackType<TypeParam, void(int)> unbound_value_cb =
+ TypeParam::Bind(&VoidPolymorphic<int>::Run);
+ CallbackType<TypeParam, void(int*)> unbound_pointer_cb =
+ TypeParam::Bind(&VoidPolymorphic<int*>::Run);
+ CallbackType<TypeParam, void(int&)> unbound_ref_cb =
+ TypeParam::Bind(&VoidPolymorphic<int&>::Run);
+ CallbackType<TypeParam, void(const int&)> unbound_const_ref_cb =
+ TypeParam::Bind(&VoidPolymorphic<const int&>::Run);
+ CallbackType<TypeParam, void(int[])> unbound_unsized_array_cb =
+ TypeParam::Bind(&VoidPolymorphic<int[]>::Run);
+ CallbackType<TypeParam, void(int[2])> unbound_sized_array_cb =
+ TypeParam::Bind(&VoidPolymorphic<int[2]>::Run);
+ CallbackType<TypeParam, void(int[][2])> unbound_array_of_arrays_cb =
+ TypeParam::Bind(&VoidPolymorphic<int[][2]>::Run);
+ CallbackType<TypeParam, void(int&)> unbound_ref_with_bound_arg =
+ TypeParam::Bind(&VoidPolymorphic<int, int&>::Run, 1);
+}
+
+// Function with unbound reference parameter.
+// - Original parameter is modified by callback.
+TYPED_TEST(BindVariantsTest, UnboundReferenceSupport) {
+ int n = 0;
+ CallbackType<TypeParam, void(int&)> unbound_ref_cb =
+ TypeParam::Bind(&RefArgSet);
+ std::move(unbound_ref_cb).Run(n);
+ EXPECT_EQ(2, n);
+}
+
+// Unretained() wrapper support.
+// - Method bound to Unretained() non-const object.
+// - Const method bound to Unretained() non-const object.
+// - Const method bound to Unretained() const object.
+TYPED_TEST(BindVariantsTest, Unretained) {
+ StrictMock<NoRef> no_ref;
+ const NoRef* const_no_ref_ptr = &no_ref;
+
+ EXPECT_CALL(no_ref, VoidMethod0());
+ EXPECT_CALL(no_ref, VoidConstMethod0()).Times(2);
+
+ TypeParam::Bind(&NoRef::VoidMethod0, Unretained(&no_ref)).Run();
+ TypeParam::Bind(&NoRef::VoidConstMethod0, Unretained(&no_ref)).Run();
+ TypeParam::Bind(&NoRef::VoidConstMethod0, Unretained(const_no_ref_ptr)).Run();
+}
+
+TYPED_TEST(BindVariantsTest, ScopedRefptr) {
+ StrictMock<HasRef> has_ref;
+ EXPECT_CALL(has_ref, AddRef()).Times(1);
+ EXPECT_CALL(has_ref, Release()).Times(1);
+
+ const scoped_refptr<HasRef> refptr(&has_ref);
+ CallbackType<TypeParam, int()> scoped_refptr_const_ref_cb =
+ TypeParam::Bind(&FunctionWithScopedRefptrFirstParam,
+ base::ConstRef(refptr), 1);
+ EXPECT_EQ(1, std::move(scoped_refptr_const_ref_cb).Run());
+}
+
+TYPED_TEST(BindVariantsTest, UniquePtrReceiver) {
std::unique_ptr<StrictMock<NoRef>> no_ref(new StrictMock<NoRef>);
EXPECT_CALL(*no_ref, VoidMethod0()).Times(1);
- Bind(&NoRef::VoidMethod0, std::move(no_ref)).Run();
+ TypeParam::Bind(&NoRef::VoidMethod0, std::move(no_ref)).Run();
}
// Tests for Passed() wrapper support:
@@ -1041,12 +1265,21 @@ TEST_F(BindTest, Cancellation) {
EXPECT_CALL(no_ref_, VoidMethodWithIntArg(_)).Times(2);
WeakPtrFactory<NoRef> weak_factory(&no_ref_);
- base::Callback<void(int)> cb =
- Bind(&NoRef::VoidMethodWithIntArg, weak_factory.GetWeakPtr());
- Closure cb2 = Bind(cb, 8);
+ RepeatingCallback<void(int)> cb =
+ BindRepeating(&NoRef::VoidMethodWithIntArg, weak_factory.GetWeakPtr());
+ RepeatingClosure cb2 = BindRepeating(cb, 8);
+ OnceClosure cb3 = BindOnce(cb, 8);
+
+ OnceCallback<void(int)> cb4 =
+ BindOnce(&NoRef::VoidMethodWithIntArg, weak_factory.GetWeakPtr());
+ EXPECT_FALSE(cb4.IsCancelled());
+
+ OnceClosure cb5 = BindOnce(std::move(cb4), 8);
EXPECT_FALSE(cb.IsCancelled());
EXPECT_FALSE(cb2.IsCancelled());
+ EXPECT_FALSE(cb3.IsCancelled());
+ EXPECT_FALSE(cb5.IsCancelled());
cb.Run(6);
cb2.Run();
@@ -1055,18 +1288,16 @@ TEST_F(BindTest, Cancellation) {
EXPECT_TRUE(cb.IsCancelled());
EXPECT_TRUE(cb2.IsCancelled());
+ EXPECT_TRUE(cb3.IsCancelled());
+ EXPECT_TRUE(cb5.IsCancelled());
cb.Run(6);
cb2.Run();
+ std::move(cb3).Run();
+ std::move(cb5).Run();
}
TEST_F(BindTest, OnceCallback) {
- using internal::OnceClosure;
- using internal::RepeatingClosure;
- using internal::BindOnce;
- using internal::BindRepeating;
- using internal::OnceCallback;
-
// Check if Callback variants have declarations of conversions as expected.
// Copy constructor and assignment of RepeatingCallback.
static_assert(std::is_constructible<
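A minimal sketch of the Once/Repeating contract this file now exercises,
using the base:: aliases that callback_forward.h below makes public (the
lambdas here are illustrative, not part of the test suite):

  base::RepeatingCallback<int(int)> add =
      base::BindRepeating([](int a, int b) { return a + b; }, 1);
  int x = add.Run(2);  // 3; a RepeatingCallback may be run many times.
  int y = add.Run(3);  // 4

  base::OnceCallback<int(int)> once =
      base::BindOnce([](int a, int b) { return a + b; }, 1);
  int z = std::move(once).Run(2);  // 3; running consumes the callback.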
diff --git a/chromium/base/bind_unittest.nc b/chromium/base/bind_unittest.nc
index fdbbbbcd0c8..bba2e025f2c 100644
--- a/chromium/base/bind_unittest.nc
+++ b/chromium/base/bind_unittest.nc
@@ -213,8 +213,8 @@ void WontCompile() {
#elif defined(NCTEST_DISALLOW_BINDING_ONCE_CALLBACK_WITH_NO_ARGS) // [r"static_assert failed \"Attempting to bind a base::Callback with no additional arguments: save a heap allocation and use the original base::Callback object\""]
void WontCompile() {
- internal::OnceClosure cb = internal::BindOnce([] {});
- internal::OnceClosure cb2 = internal::BindOnce(std::move(cb));
+ OnceClosure cb = BindOnce([] {});
+ OnceClosure cb2 = BindOnce(std::move(cb));
}
#elif defined(NCTEST_DISALLOW_BINDING_REPEATING_CALLBACK_WITH_NO_ARGS) // [r"static_assert failed \"Attempting to bind a base::Callback with no additional arguments: save a heap allocation and use the original base::Callback object\""]
diff --git a/chromium/base/callback_forward.h b/chromium/base/callback_forward.h
index ce4c3e7192d..13eed0eb0d2 100644
--- a/chromium/base/callback_forward.h
+++ b/chromium/base/callback_forward.h
@@ -32,8 +32,6 @@ class Callback;
// will be used in a lot of APIs with delayed execution.
using Closure = Callback<void()>;
-namespace internal {
-
template <typename Signature>
using OnceCallback = Callback<Signature,
internal::CopyMode::MoveOnly,
@@ -45,7 +43,6 @@ using RepeatingCallback = Callback<Signature,
using OnceClosure = OnceCallback<void()>;
using RepeatingClosure = RepeatingCallback<void()>;
-} // namespace internal
} // namespace base
#endif // BASE_CALLBACK_FORWARD_H_
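With the aliases promoted out of base::internal, call sites can spell the
types without the internal:: qualifier, e.g. (NotifyDone is a hypothetical
function):

  base::OnceClosure done = base::BindOnce(&NotifyDone);
  std::move(done).Run();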
diff --git a/chromium/base/chromeos/logging.h b/chromium/base/chromeos/logging.h
deleted file mode 100644
index 558d8d302d9..00000000000
--- a/chromium/base/chromeos/logging.h
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_CHROMEOS_LOGGING_H_
-#define BASE_CHROMEOS_LOGGING_H_
-
-#include "base/logging.h"
-
-namespace logging {
-
-#if defined(OS_CHROMEOS)
-
-// These macros are used to log events on ChromeOS which we want to be included
-// in the system log of the device.
-#define CHROMEOS_SYSLOG(severity) LOG(severity)
-#define CHROMEOS_SYSLOG_IF(severity, condition) LOG_IF(severity, condition)
-
-#else // Not defined(OS_CHROMEOS)
-
-#define CHROMEOS_SYSLOG(severity) LOG_IF(severity, false)
-#define CHROMEOS_SYSLOG_IF(severity, condition) LOG_IF(severity, false)
-
-#endif // defined(OS_CHROMEOS)
-
-} // namespace logging
-
-#endif // BASE_CHROMEOS_LOGGING_H_
diff --git a/chromium/base/compiler_specific.h b/chromium/base/compiler_specific.h
index 0dbc3ae5766..bb4c52be45e 100644
--- a/chromium/base/compiler_specific.h
+++ b/chromium/base/compiler_specific.h
@@ -100,6 +100,14 @@
#define NOINLINE
#endif
+#if COMPILER_GCC && defined(NDEBUG)
+#define ALWAYS_INLINE inline __attribute__((__always_inline__))
+#elif COMPILER_MSVC && defined(NDEBUG)
+#define ALWAYS_INLINE __forceinline
+#else
+#define ALWAYS_INLINE inline
+#endif
+
// Specify memory alignment for structs, classes, etc.
// Use like:
// class ALIGNAS(16) MyClass { ... }
@@ -196,6 +204,14 @@
#endif // defined(COMPILER_GCC)
#endif // !defined(UNLIKELY)
+#if !defined(LIKELY)
+#if defined(COMPILER_GCC)
+#define LIKELY(x) __builtin_expect((x), 1)
+#else
+#define LIKELY(x) (x)
+#endif // defined(COMPILER_GCC)
+#endif // !defined(LIKELY)
+
// Compiler feature-detection.
// clang.llvm.org/docs/LanguageExtensions.html#has-feature-and-has-extension
#if defined(__has_feature)
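A short sketch of the two new macros at a call site (illustrative). Note that
ALWAYS_INLINE forces inlining only in NDEBUG builds, so debug stacks stay
intact:

  #include "base/compiler_specific.h"

  ALWAYS_INLINE int Clamp(int v, int lo, int hi) {
    if (LIKELY(lo <= v && v <= hi))  // Hint that in-range is the common case.
      return v;
    return v < lo ? lo : hi;
  }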
diff --git a/chromium/base/containers/scoped_ptr_hash_map.h b/chromium/base/containers/scoped_ptr_hash_map.h
index 03f25c5b637..72c6ff4152e 100644
--- a/chromium/base/containers/scoped_ptr_hash_map.h
+++ b/chromium/base/containers/scoped_ptr_hash_map.h
@@ -128,9 +128,9 @@ class ScopedPtrHashMap {
inline void clear() {
auto it = data_.begin();
while (it != data_.end()) {
- // NOTE: Like STLDeleteContainerPointers, deleting behind the iterator.
- // Deleting the value does not always invalidate the iterator, but it may
- // do so if the key is a pointer into the value object.
+ // NOTE: Deleting behind the iterator. Deleting the value does not always
+ // invalidate the iterator, but it may do so if the key is a pointer into
+ // the value object.
auto temp = it;
++it;
// Let ScopedPtr decide how to delete.
diff --git a/chromium/base/cpu.cc b/chromium/base/cpu.cc
index de4a001f7f7..af9c23da1a1 100644
--- a/chromium/base/cpu.cc
+++ b/chromium/base/cpu.cc
@@ -43,6 +43,7 @@ CPU::CPU()
has_ssse3_(false),
has_sse41_(false),
has_sse42_(false),
+ has_popcnt_(false),
has_avx_(false),
has_avx2_(false),
has_aesni_(false),
@@ -177,6 +178,8 @@ void CPU::Initialize() {
has_ssse3_ = (cpu_info[2] & 0x00000200) != 0;
has_sse41_ = (cpu_info[2] & 0x00080000) != 0;
has_sse42_ = (cpu_info[2] & 0x00100000) != 0;
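+ // Bit 23 (1 << 23 == 0x00800000) of ECX from CPUID leaf 1 is the
+ // documented POPCNT feature flag.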
+ has_popcnt_ = (cpu_info[2] & 0x00800000) != 0;
+
// AVX instructions will generate an illegal instruction exception unless
// a) they are supported by the CPU,
// b) XSAVE is supported by the CPU and
diff --git a/chromium/base/cpu.h b/chromium/base/cpu.h
index 0e4303bfa0b..0e24df61ddf 100644
--- a/chromium/base/cpu.h
+++ b/chromium/base/cpu.h
@@ -46,6 +46,7 @@ class BASE_EXPORT CPU {
bool has_ssse3() const { return has_ssse3_; }
bool has_sse41() const { return has_sse41_; }
bool has_sse42() const { return has_sse42_; }
+ bool has_popcnt() const { return has_popcnt_; }
bool has_avx() const { return has_avx_; }
bool has_avx2() const { return has_avx2_; }
bool has_aesni() const { return has_aesni_; }
@@ -74,6 +75,7 @@ class BASE_EXPORT CPU {
bool has_ssse3_;
bool has_sse41_;
bool has_sse42_;
+ bool has_popcnt_;
bool has_avx_;
bool has_avx2_;
bool has_aesni_;
diff --git a/chromium/base/cpu_unittest.cc b/chromium/base/cpu_unittest.cc
index ec14620f98c..9cabfd6998d 100644
--- a/chromium/base/cpu_unittest.cc
+++ b/chromium/base/cpu_unittest.cc
@@ -57,6 +57,11 @@ TEST(CPU, RunExtendedInstructions) {
__asm__ __volatile__("crc32 %%eax, %%eax\n" : : : "eax");
}
+ if (cpu.has_popcnt()) {
+ // Execute a POPCNT instruction.
+ __asm__ __volatile__("popcnt %%eax, %%eax\n" : : : "eax");
+ }
+
if (cpu.has_avx()) {
// Execute an AVX instruction.
__asm__ __volatile__("vzeroupper\n" : : : "xmm0");
@@ -100,6 +105,11 @@ TEST(CPU, RunExtendedInstructions) {
__asm crc32 eax, eax;
}
+ if (cpu.has_popcnt()) {
+ // Execute a POPCNT instruction.
+ __asm popcnt eax, eax;
+ }
+
// Visual C 2012 required for AVX.
#if _MSC_VER >= 1700
if (cpu.has_avx()) {
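The has_popcnt() guard above is the usual runtime-dispatch pattern. A sketch
with a hypothetical helper (GCC/Clang inline-asm syntax, as in the test):

  #include "base/cpu.h"

  int PopCount(unsigned v) {
    base::CPU cpu;
    if (cpu.has_popcnt()) {
      unsigned r;
      __asm__("popcnt %1, %0" : "=r"(r) : "r"(v));  // Hardware popcount.
      return static_cast<int>(r);
    }
    int n = 0;
    for (; v; v &= v - 1)  // Kernighan's loop: clears the lowest set bit.
      ++n;
    return n;
  }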
diff --git a/chromium/base/debug/activity_analyzer.cc b/chromium/base/debug/activity_analyzer.cc
index 79d513b5af6..cbac01e1e13 100644
--- a/chromium/base/debug/activity_analyzer.cc
+++ b/chromium/base/debug/activity_analyzer.cc
@@ -82,6 +82,12 @@ ThreadActivityAnalyzer* GlobalActivityAnalyzer::GetAnalyzerForThread(
return found->second.get();
}
+GlobalActivityAnalyzer::ProgramLocation
+GlobalActivityAnalyzer::GetProgramLocationFromAddress(uint64_t address) {
+ // TODO(bcwhite): Implement this.
+ return { 0, 0 };
+}
+
void GlobalActivityAnalyzer::PrepareAllAnalyzers() {
// Fetch all the records. This will retrieve only ones created since the
// last run since the PMA iterator will continue from where it left off.
diff --git a/chromium/base/debug/activity_analyzer.h b/chromium/base/debug/activity_analyzer.h
index 14244b7b1fe..0a527332a62 100644
--- a/chromium/base/debug/activity_analyzer.h
+++ b/chromium/base/debug/activity_analyzer.h
@@ -8,6 +8,7 @@
#include <map>
#include <memory>
#include <set>
+#include <string>
#include "base/base_export.h"
#include "base/debug/activity_tracker.h"
@@ -73,6 +74,8 @@ class BASE_EXPORT ThreadActivityAnalyzer {
activity_snapshot_.thread_id);
}
+ const ActivitySnapshot& activity_snapshot() { return activity_snapshot_; }
+
private:
friend class GlobalActivityAnalyzer;
@@ -99,6 +102,11 @@ class BASE_EXPORT ThreadActivityAnalyzer {
// show small inconsistencies between threads if attempted on a live system.
class BASE_EXPORT GlobalActivityAnalyzer {
public:
+ struct ProgramLocation {
+ int module;
+ uintptr_t offset;
+ };
+
using ThreadKey = ThreadActivityAnalyzer::ThreadKey;
// Creates a global analyzer from a persistent memory allocator.
@@ -124,6 +132,10 @@ class BASE_EXPORT GlobalActivityAnalyzer {
// Ownership stays with the global analyzer object.
ThreadActivityAnalyzer* GetAnalyzerForThread(const ThreadKey& key);
+ // Gets the corresponding "program location" for a given "program counter".
+ // This will return {0,0} if no mapping could be found.
+ ProgramLocation GetProgramLocationFromAddress(uint64_t address);
+
private:
using AnalyzerMap =
std::map<ThreadKey, std::unique_ptr<ThreadActivityAnalyzer>>;
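A sketch of how the new pieces compose for offline analysis, given a ThreadKey
|key|. Construction of |analyzer| over the tracked persistent memory is
elided, and |activity_stack| is assumed to be the snapshot field holding the
copied Activity entries:

  ThreadActivityAnalyzer* thread = analyzer.GetAnalyzerForThread(key);
  if (thread) {
    const ActivitySnapshot& snapshot = thread->activity_snapshot();
    for (uint32_t i = 0; i < snapshot.activity_stack_depth; ++i) {
      GlobalActivityAnalyzer::ProgramLocation loc =
          analyzer.GetProgramLocationFromAddress(
              snapshot.activity_stack[i].calling_address);
      // |loc| stays {0, 0} until the TODO in activity_analyzer.cc lands.
    }
  }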
diff --git a/chromium/base/debug/activity_analyzer_unittest.cc b/chromium/base/debug/activity_analyzer_unittest.cc
index 953a52f02d5..5cdb6c5fe48 100644
--- a/chromium/base/debug/activity_analyzer_unittest.cc
+++ b/chromium/base/debug/activity_analyzer_unittest.cc
@@ -46,7 +46,7 @@ class TestActivityTracker : public ThreadActivityTracker {
class ActivityAnalyzerTest : public testing::Test {
public:
- const int kMemorySize = 1 << 10; // 1MiB
+ const int kMemorySize = 1 << 20; // 1MiB
const int kStackSize = 1 << 10; // 1KiB
ActivityAnalyzerTest() {}
@@ -98,20 +98,19 @@ class SimpleActivityThread : public SimpleThread {
~SimpleActivityThread() override {}
void Run() override {
- GlobalActivityTracker::Get()
- ->GetOrCreateTrackerForCurrentThread()
- ->PushActivity(source_, activity_, data_);
+ ThreadActivityTracker::ActivityId id =
+ GlobalActivityTracker::Get()
+ ->GetOrCreateTrackerForCurrentThread()
+ ->PushActivity(source_, activity_, data_);
{
AutoLock auto_lock(lock_);
- ready_.store(true, std::memory_order_relaxed);
+ ready_.store(true, std::memory_order_release);
while (!exit_.load(std::memory_order_relaxed))
exit_condition_.Wait();
}
- GlobalActivityTracker::Get()
- ->GetOrCreateTrackerForCurrentThread()
- ->PopActivity();
+ GlobalActivityTracker::Get()->GetTrackerForCurrentThread()->PopActivity(id);
}
void Exit() {
@@ -121,7 +120,7 @@ class SimpleActivityThread : public SimpleThread {
}
void WaitReady() {
- SPIN_FOR_1_SECOND_OR_UNTIL_TRUE(ready_.load(std::memory_order_relaxed));
+ SPIN_FOR_1_SECOND_OR_UNTIL_TRUE(ready_.load(std::memory_order_acquire));
}
private:
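The relaxed-to-release/acquire change in SimpleActivityThread::Run() is the
standard publication handshake: the release store of |ready_| cannot be
reordered before the PushActivity() work, and the paired acquire load in
WaitReady() makes that work visible to the waiting thread. In miniature:

  std::atomic<bool> ready{false};
  int payload = 0;

  // Publisher thread:
  payload = 42;                                  // Work done before publishing.
  ready.store(true, std::memory_order_release);  // Releases |payload|.

  // Consumer thread:
  while (!ready.load(std::memory_order_acquire)) {}  // Pairs with the release.
  // |payload| now reliably reads 42.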
diff --git a/chromium/base/debug/activity_tracker.cc b/chromium/base/debug/activity_tracker.cc
index dc1f529ad89..678e740f584 100644
--- a/chromium/base/debug/activity_tracker.cc
+++ b/chromium/base/debug/activity_tracker.cc
@@ -4,6 +4,8 @@
#include "base/debug/activity_tracker.h"
+#include <algorithm>
+
#include "base/debug/stack_trace.h"
#include "base/files/file.h"
#include "base/files/file_path.h"
@@ -33,6 +35,13 @@ const uint32_t kHeaderCookie = 0xC0029B24UL + 2; // v2
// The minimum depth a stack should support.
const int kMinStackDepth = 2;
+// The amount of memory set aside for holding arbitrary user data (key/value
+// pairs) globally or associated with ActivityData entries.
+const size_t kUserDataSize = 1024; // bytes
+const size_t kGlobalDataSize = 1024; // bytes
+const size_t kMaxUserDataNameLength =
+ static_cast<size_t>(std::numeric_limits<uint8_t>::max());
+
union ThreadRef {
int64_t as_id;
#if defined(OS_WIN)
@@ -48,6 +57,11 @@ union ThreadRef {
#endif
};
+// Determines the next aligned index.
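+// Assumes |alignment| is a power of two: (0 - alignment) is the unsigned
+// two's-complement mask ~(alignment - 1), so e.g. RoundUpToAlignment(13, 8)
+// computes (13 + 7) & ~7 == 16.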
+size_t RoundUpToAlignment(size_t index, size_t alignment) {
+ return (index + (alignment - 1)) & (0 - alignment);
+}
+
} // namespace
@@ -66,12 +80,99 @@ ActivityData ActivityData::ForThread(const PlatformThreadHandle& handle) {
return ForThread(thread_ref.as_id);
}
+ActivityTrackerMemoryAllocator::ActivityTrackerMemoryAllocator(
+ PersistentMemoryAllocator* allocator,
+ uint32_t object_type,
+ uint32_t object_free_type,
+ size_t object_size,
+ size_t cache_size,
+ bool make_iterable)
+ : allocator_(allocator),
+ object_type_(object_type),
+ object_free_type_(object_free_type),
+ object_size_(object_size),
+ cache_size_(cache_size),
+ make_iterable_(make_iterable),
+ iterator_(allocator),
+ cache_values_(new Reference[cache_size]),
+ cache_used_(0) {
+ DCHECK(allocator);
+}
+
+ActivityTrackerMemoryAllocator::~ActivityTrackerMemoryAllocator() {}
+
+ActivityTrackerMemoryAllocator::Reference
+ActivityTrackerMemoryAllocator::GetObjectReference() {
+ // First see if there is a cached value that can be returned. This is much
+ // faster than searching the memory system for free blocks.
+ while (cache_used_ > 0) {
+ Reference cached = cache_values_[--cache_used_];
+ // Change the type of the cached object to the proper type and return it.
+ // If the type-change fails that means another thread has taken this from
+ // under us (via the search below) so ignore it and keep trying.
+ if (allocator_->ChangeType(cached, object_type_, object_free_type_))
+ return cached;
+ }
+
+ // Fetch the next "free" object from persistent memory. Rather than restart
+ // the iterator at the head each time and likely waste time going again
+ // through objects that aren't relevant, the iterator continues from where
+ // it last left off and is only reset when the end is reached. If the
+ // returned reference matches |last|, then it has wrapped without finding
+ // anything.
+ const Reference last = iterator_.GetLast();
+ while (true) {
+ uint32_t type;
+ Reference found = iterator_.GetNext(&type);
+ if (found && type == object_free_type_) {
+ // Found a free object. Change it to the proper type and return it. If
+ // the type-change fails that means another thread has taken this from
+ // under us so ignore it and keep trying.
+ if (allocator_->ChangeType(found, object_type_, object_free_type_))
+ return found;
+ }
+ if (found == last) {
+ // Wrapped. No desired object was found.
+ break;
+ }
+ if (!found) {
+ // Reached end; start over at the beginning.
+ iterator_.Reset();
+ }
+ }
+
+ // No free block was found so instead allocate a new one.
+ Reference allocated = allocator_->Allocate(object_size_, object_type_);
+ if (allocated && make_iterable_)
+ allocator_->MakeIterable(allocated);
+ return allocated;
+}
+
+void ActivityTrackerMemoryAllocator::ReleaseObjectReference(Reference ref) {
+ // Zero the memory so that it is ready for immediate use if needed later.
+ char* mem_base = allocator_->GetAsObject<char>(ref, object_type_);
+ DCHECK(mem_base);
+ memset(mem_base, 0, object_size_);
+
+ // Mark object as free.
+ bool success = allocator_->ChangeType(ref, object_free_type_, object_type_);
+ DCHECK(success);
+
+ // Add this reference to our "free" cache if there is space. If not, the type
+ // has still been changed to indicate that it is free so this (or another)
+ // thread can find it, albeit more slowly, using the iteration method above.
+ if (cache_used_ < cache_size_)
+ cache_values_[cache_used_++] = ref;
+}
+
// static
void Activity::FillFrom(Activity* activity,
+ const void* program_counter,
const void* origin,
Type type,
const ActivityData& data) {
activity->time_internal = base::TimeTicks::Now().ToInternalValue();
+ activity->calling_address = reinterpret_cast<uintptr_t>(program_counter);
activity->origin_address = reinterpret_cast<uintptr_t>(origin);
activity->activity_type = type;
activity->data = data;
@@ -95,6 +196,111 @@ void Activity::FillFrom(Activity* activity,
ActivitySnapshot::ActivitySnapshot() {}
ActivitySnapshot::~ActivitySnapshot() {}
+ActivityUserData::ValueInfo::ValueInfo() {}
+ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default;
+ActivityUserData::ValueInfo::~ValueInfo() {}
+
+ActivityUserData::ActivityUserData(void* memory, size_t size)
+ : memory_(static_cast<char*>(memory)), available_(size) {}
+
+ActivityUserData::~ActivityUserData() {}
+
+void ActivityUserData::Set(StringPiece name,
+ ValueType type,
+ const void* memory,
+ size_t size) {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK_GE(std::numeric_limits<uint8_t>::max(), name.length());
+ size = std::min(std::numeric_limits<uint16_t>::max() - (kMemoryAlignment - 1),
+ size);
+
+ // It's possible that no user data is being stored.
+ if (!memory_)
+ return;
+
+ // The storage of a name is limited so use that limit during lookup.
+ if (name.length() > kMaxUserDataNameLength)
+ name.set(name.data(), kMaxUserDataNameLength);
+
+ ValueInfo* info;
+ auto existing = values_.find(name);
+ if (existing != values_.end()) {
+ info = &existing->second;
+ } else {
+ // The name size is limited to what can be held in a single byte but
+ // because there are not alignment constraints on strings, it's set tight
+ // against the header. Its extent (the reserved space, even if it's not
+ // all used) is calculated so that, when pressed against the header, the
+ // following field will be aligned properly.
+ size_t name_size = name.length();
+ size_t name_extent =
+ RoundUpToAlignment(sizeof(Header) + name_size, kMemoryAlignment) -
+ sizeof(Header);
+ size_t value_extent = RoundUpToAlignment(size, kMemoryAlignment);
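+ // As a worked example: with kMemoryAlignment == 8 and sizeof(Header) == 6
+ // (the typical packing of its four fields), a 3-byte name gives
+ // name_extent == RoundUpToAlignment(6 + 3, 8) - 6 == 10, so the value
+ // storage following the name begins 8-byte aligned.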
+
+ // The "basic size" is the minimum size of the record. It's possible that
+ // lengthy values will get truncated but there must be at least some bytes
+ // available.
+ size_t basic_size = sizeof(Header) + name_extent + kMemoryAlignment;
+ if (basic_size > available_)
+ return; // No space to store even the smallest value.
+
+ // The "full size" is the size for storing the entire value, truncated
+ // to the amount of available memory.
+ size_t full_size =
+ std::min(sizeof(Header) + name_extent + value_extent, available_);
+ size = std::min(full_size - sizeof(Header) - name_extent, size);
+
+ // Allocate a chunk of memory.
+ Header* header = reinterpret_cast<Header*>(memory_);
+ memory_ += full_size;
+ available_ -= full_size;
+
+ // Datafill the header and name records. Memory must be zeroed. The |type|
+ // is written last, atomically, to release all the other values.
+ DCHECK_EQ(END_OF_VALUES, header->type.load(std::memory_order_relaxed));
+ DCHECK_EQ(0, header->value_size.load(std::memory_order_relaxed));
+ header->name_size = static_cast<uint8_t>(name_size);
+ header->record_size = full_size;
+ char* name_memory = reinterpret_cast<char*>(header) + sizeof(Header);
+ void* value_memory =
+ reinterpret_cast<char*>(header) + sizeof(Header) + name_extent;
+ memcpy(name_memory, name.data(), name_size);
+ header->type.store(type, std::memory_order_release);
+
+ // Create an entry in |values_| so that this field can be found and changed
+ // later on without having to allocate new entries.
+ StringPiece persistent_name(name_memory, name_size);
+ auto inserted =
+ values_.insert(std::make_pair(persistent_name, ValueInfo()));
+ DCHECK(inserted.second); // True if inserted, false if existed.
+ info = &inserted.first->second;
+ info->name = persistent_name;
+ info->memory = value_memory;
+ info->size_ptr = &header->value_size;
+ info->extent = full_size - sizeof(Header) - name_extent;
+ info->type = type;
+ }
+
+ // Copy the value data to storage. The |size| is written last, atomically, to
+ // release the copied data. Until then, a parallel reader will just ignore
+ // records with a zero size.
+ DCHECK_EQ(type, info->type);
+ size = std::min(size, info->extent);
+ info->size_ptr->store(0, std::memory_order_seq_cst);
+ memcpy(info->memory, memory, size);
+ info->size_ptr->store(size, std::memory_order_release);
+}
+
+void ActivityUserData::SetReference(StringPiece name,
+ ValueType type,
+ const void* memory,
+ size_t size) {
+ ReferenceRecord rec;
+ rec.address = reinterpret_cast<uintptr_t>(memory);
+ rec.size = size;
+ Set(name, type, &rec, sizeof(rec));
+}
// This information is kept for every thread that is tracked. It is filled
// the very first time the thread is seen. All fields must be of exact sizes
@@ -149,6 +355,39 @@ struct ThreadActivityTracker::Header {
char thread_name[32];
};
+ThreadActivityTracker::ScopedActivity::ScopedActivity(
+ ThreadActivityTracker* tracker,
+ const void* program_counter,
+ const void* origin,
+ Activity::Type type,
+ const ActivityData& data)
+ : tracker_(tracker) {
+ if (tracker_)
+ activity_id_ = tracker_->PushActivity(program_counter, origin, type, data);
+}
+
+ThreadActivityTracker::ScopedActivity::~ScopedActivity() {
+ if (tracker_)
+ tracker_->PopActivity(activity_id_);
+}
+
+void ThreadActivityTracker::ScopedActivity::ChangeTypeAndData(
+ Activity::Type type,
+ const ActivityData& data) {
+ if (tracker_)
+ tracker_->ChangeActivity(activity_id_, type, data);
+}
+
+ActivityUserData& ThreadActivityTracker::ScopedActivity::user_data() {
+ if (!user_data_) {
+ if (tracker_)
+ user_data_ = tracker_->GetUserData(activity_id_);
+ else
+ user_data_ = MakeUnique<ActivityUserData>(nullptr, 0);
+ }
+ return *user_data_;
+}
+
ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size)
: header_(static_cast<Header*>(base)),
stack_(reinterpret_cast<Activity*>(reinterpret_cast<char*>(base) +
@@ -226,9 +465,11 @@ ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size)
ThreadActivityTracker::~ThreadActivityTracker() {}
-void ThreadActivityTracker::PushActivity(const void* origin,
- Activity::Type type,
- const ActivityData& data) {
+ThreadActivityTracker::ActivityId ThreadActivityTracker::PushActivity(
+ const void* program_counter,
+ const void* origin,
+ Activity::Type type,
+ const ActivityData& data) {
// A thread-checker creates a lock to check the thread-id which means
// re-entry into this code if lock acquisitions are being tracked.
DCHECK(type == Activity::ACT_LOCK_ACQUIRE ||
@@ -244,32 +485,34 @@ void ThreadActivityTracker::PushActivity(const void* origin,
// Since no other threads modify the data, no compare/exchange is needed.
// Since no other memory is being modified, a "relaxed" store is acceptable.
header_->current_depth.store(depth + 1, std::memory_order_relaxed);
- return;
+ return depth;
}
// Get a pointer to the next activity and load it. No atomicity is required
// here because the memory is known only to this thread. It will be made
// known to other threads once the depth is incremented.
- Activity::FillFrom(&stack_[depth], origin, type, data);
+ Activity::FillFrom(&stack_[depth], program_counter, origin, type, data);
// Save the incremented depth. Because this guards |activity| memory filled
// above that may be read by another thread once the recorded depth changes,
// a "release" store is required.
header_->current_depth.store(depth + 1, std::memory_order_release);
+
+ // The current depth is used as the activity ID because it simply identifies
+ // an entry. Once an entry is pop'd, it's okay to reuse the ID.
+ return depth;
}
-void ThreadActivityTracker::ChangeActivity(Activity::Type type,
+void ThreadActivityTracker::ChangeActivity(ActivityId id,
+ Activity::Type type,
const ActivityData& data) {
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(type != Activity::ACT_NULL || &data != &kNullActivityData);
-
- // Get the current depth of the stack and acquire the data held there.
- uint32_t depth = header_->current_depth.load(std::memory_order_acquire);
- DCHECK_LT(0U, depth);
+ DCHECK_LT(id, header_->current_depth.load(std::memory_order_acquire));
// Update the information if it is being recorded (i.e. within slot limit).
- if (depth <= stack_slots_) {
- Activity* activity = &stack_[depth - 1];
+ if (id < stack_slots_) {
+ Activity* activity = &stack_[id];
if (type != Activity::ACT_NULL) {
DCHECK_EQ(activity->activity_type & Activity::ACT_CATEGORY_MASK,
@@ -282,21 +525,29 @@ void ThreadActivityTracker::ChangeActivity(Activity::Type type,
}
}
-void ThreadActivityTracker::PopActivity() {
+void ThreadActivityTracker::PopActivity(ActivityId id) {
// Do an atomic decrement of the depth. No changes to stack entries guarded
// by this variable are done here so a "relaxed" operation is acceptable.
- // |depth| will receive the value BEFORE it was modified.
+ // |depth| will receive the value BEFORE it was modified which means the
+ // return value must also be decremented. The slot will be "free" after
+ // this call but since only a single thread can access this object, the
+ // data will remain valid until this method returns or calls outside.
uint32_t depth =
- header_->current_depth.fetch_sub(1, std::memory_order_relaxed);
+ header_->current_depth.fetch_sub(1, std::memory_order_relaxed) - 1;
// Validate that everything is running correctly.
- DCHECK_LT(0U, depth);
+ DCHECK_EQ(id, depth);
// A thread-checker creates a lock to check the thread-id which means
// re-entry into this code if lock acquisitions are being tracked.
- DCHECK(stack_[depth - 1].activity_type == Activity::ACT_LOCK_ACQUIRE ||
+ DCHECK(stack_[depth].activity_type == Activity::ACT_LOCK_ACQUIRE ||
thread_checker_.CalledOnValidThread());
+ // Check if there was any user-data memory. It isn't free'd until later
+ // because the call to release it can push something on the stack.
+ PersistentMemoryAllocator::Reference user_data = stack_[depth].user_data;
+ stack_[depth].user_data = 0;
+
// The stack has shrunk meaning that some other thread trying to copy the
// contents for reporting purposes could get bad data. That thread would
// have written a non-zero value into |stack_unchanged|; clearing it here
@@ -304,6 +555,25 @@ void ThreadActivityTracker::PopActivity() {
// happen after the atomic |depth| operation above so a "release" store
// is required.
header_->stack_unchanged.store(0, std::memory_order_release);
+
+ // Release resources located above. All stack processing is done so it's
+ // safe if some outside code does another push.
+ if (user_data)
+ GlobalActivityTracker::Get()->ReleaseUserDataMemory(&user_data);
+}
+
+std::unique_ptr<ActivityUserData> ThreadActivityTracker::GetUserData(
+ ActivityId id) {
+ // User-data is only stored for activities actually held in the stack.
+ if (id < stack_slots_) {
+ void* memory =
+ GlobalActivityTracker::Get()->GetUserDataMemory(&stack_[id].user_data);
+ if (memory)
+ return MakeUnique<ActivityUserData>(memory, kUserDataSize);
+ }
+
+ // Return a dummy object that will still accept (but ignore) Set() calls.
+ return MakeUnique<ActivityUserData>(nullptr, 0);
}
bool ThreadActivityTracker::IsValid() const {
@@ -499,60 +769,25 @@ void GlobalActivityTracker::CreateWithLocalMemory(size_t size,
ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() {
DCHECK(!this_thread_tracker_.Get());
- PersistentMemoryAllocator::Reference mem_reference =
- PersistentMemoryAllocator::kReferenceNull;
- DCHECK(!mem_reference); // invalid_value should be checkable with !
+ PersistentMemoryAllocator::Reference mem_reference;
- while (true) {
- // Get the first available memory from the top of the FIFO.
- if (!available_memories_.pop(&mem_reference))
- break;
-
- // Turn the reference back into one of the activity-tracker type. This can
- // fail if something else has already taken the block and changed its type.
- if (allocator_->ChangeType(mem_reference, kTypeIdActivityTracker,
- kTypeIdActivityTrackerFree)) {
- break;
- }
+ {
+ base::AutoLock autolock(thread_tracker_allocator_lock_);
+ mem_reference = thread_tracker_allocator_.GetObjectReference();
}
- // Handle the case where no known available memories were found.
if (!mem_reference) {
- // Allocate a block of memory from the persistent segment.
- mem_reference =
- allocator_->Allocate(stack_memory_size_, kTypeIdActivityTracker);
- if (mem_reference) {
- // Success. Make the allocation iterable so it can be found later.
- allocator_->MakeIterable(mem_reference);
- } else {
- // Failure. Look for any free blocks that weren't held in the cache
- // of available memories and try to claim it. This can happen if the
- // |available_memories_| stack isn't sufficiently large to hold all
- // released memories or if multiple independent processes are sharing
- // the memory segment.
- PersistentMemoryAllocator::Iterator iter(allocator_.get());
- while ((mem_reference = iter.GetNextOfType(kTypeIdActivityTrackerFree)) !=
- 0) {
- if (allocator_->ChangeType(mem_reference, kTypeIdActivityTracker,
- kTypeIdActivityTrackerFree)) {
- break;
- }
- mem_reference = 0;
- }
- if (!mem_reference) {
- // Dobule Failure. This shouldn't happen. But be graceful if it does,
- // probably because the underlying allocator wasn't given enough memory
- // to satisfy all possible requests.
- NOTREACHED();
- // Report the thread-count at which the allocator was full so that the
- // failure can be seen and underlying memory resized appropriately.
- UMA_HISTOGRAM_COUNTS_1000(
- "ActivityTracker.ThreadTrackers.MemLimitTrackerCount",
- thread_tracker_count_.load(std::memory_order_relaxed));
- // Return null, just as if tracking wasn't enabled.
- return nullptr;
- }
- }
+ // Failure. This shouldn't happen, but be graceful if it does, probably
+ // because the underlying allocator wasn't given enough memory to satisfy
+ // all possible requests.
+ NOTREACHED();
+ // Report the thread-count at which the allocator was full so that the
+ // failure can be seen and underlying memory resized appropriately.
+ UMA_HISTOGRAM_COUNTS_1000(
+ "ActivityTracker.ThreadTrackers.MemLimitTrackerCount",
+ thread_tracker_count_.load(std::memory_order_relaxed));
+ // Return null, just as if tracking wasn't enabled.
+ return nullptr;
}
// Convert the memory block found above into an actual memory address.
@@ -582,6 +817,29 @@ void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() {
delete tracker;
}
+void* GlobalActivityTracker::GetUserDataMemory(
+ PersistentMemoryAllocator::Reference* reference) {
+ if (!*reference) {
+ base::AutoLock autolock(user_data_allocator_lock_);
+ *reference = user_data_allocator_.GetObjectReference();
+ if (!*reference)
+ return nullptr;
+ }
+
+ void* memory =
+ allocator_->GetAsObject<char>(*reference, kTypeIdUserDataRecord);
+ DCHECK(memory);
+ return memory;
+}
+
+void GlobalActivityTracker::ReleaseUserDataMemory(
+ PersistentMemoryAllocator::Reference* reference) {
+ DCHECK(*reference);
+ base::AutoLock autolock(user_data_allocator_lock_);
+ user_data_allocator_.ReleaseObjectReference(*reference);
+ *reference = PersistentMemoryAllocator::kReferenceNull;
+}
+
GlobalActivityTracker::GlobalActivityTracker(
std::unique_ptr<PersistentMemoryAllocator> allocator,
int stack_depth)
@@ -589,7 +847,23 @@ GlobalActivityTracker::GlobalActivityTracker(
stack_memory_size_(ThreadActivityTracker::SizeForStackDepth(stack_depth)),
this_thread_tracker_(&OnTLSDestroy),
thread_tracker_count_(0),
- available_memories_(kMaxThreadCount) {
+ thread_tracker_allocator_(allocator_.get(),
+ kTypeIdActivityTracker,
+ kTypeIdActivityTrackerFree,
+ stack_memory_size_,
+ kCachedThreadMemories,
+ /*make_iterable=*/true),
+ user_data_allocator_(allocator_.get(),
+ kTypeIdUserDataRecord,
+ kTypeIdUserDataRecordFree,
+ kUserDataSize,
+ kCachedUserDataMemories,
+ /*make_iterable=*/false),
+ user_data_(
+ allocator_->GetAsObject<char>(
+ allocator_->Allocate(kGlobalDataSize, kTypeIdGlobalDataRecord),
+ kTypeIdGlobalDataRecord),
+ kGlobalDataSize) {
// Ensure the passed memory is valid and empty (iterator finds nothing).
uint32_t type;
DCHECK(!PersistentMemoryAllocator::Iterator(allocator_.get()).GetNext(&type));
@@ -612,25 +886,13 @@ void GlobalActivityTracker::ReturnTrackerMemory(
DCHECK(mem_reference);
DCHECK(mem_base);
- // Zero the memory so that it is ready for use if needed again later. It's
- // better to clear the memory now, when a thread is exiting, than to do it
- // when it is first needed by a thread doing actual work.
- memset(mem_base, 0, stack_memory_size_);
-
// Remove the destructed tracker from the set of known ones.
DCHECK_LE(1, thread_tracker_count_.load(std::memory_order_relaxed));
thread_tracker_count_.fetch_sub(1, std::memory_order_relaxed);
- // The memory was within the persistent memory allocator. Change its type
- // so it is effectively marked as "free".
- allocator_->ChangeType(mem_reference, kTypeIdActivityTrackerFree,
- kTypeIdActivityTracker);
-
- // Push this on the internal cache of available memory blocks so it can
- // be found and reused quickly. If the push somehow exceeds the maximum
- // size of the cache, it will fail but a fallback check in CreateTracker
- // will find it by (slow) iteration.
- available_memories_.push(mem_reference);
+ // Release this memory for re-use at a later time.
+ base::AutoLock autolock(thread_tracker_allocator_lock_);
+ thread_tracker_allocator_.ReleaseObjectReference(mem_reference);
}
// static
@@ -638,12 +900,13 @@ void GlobalActivityTracker::OnTLSDestroy(void* value) {
delete reinterpret_cast<ManagedActivityTracker*>(value);
}
-ScopedActivity::ScopedActivity(const tracked_objects::Location& location,
+ScopedActivity::ScopedActivity(const void* program_counter,
uint8_t action,
uint32_t id,
int32_t info)
: GlobalActivityTracker::ScopedThreadActivity(
- location.program_counter(),
+ program_counter,
+ nullptr,
static_cast<Activity::Type>(Activity::ACT_GENERIC | action),
ActivityData::ForGeneric(id, info),
/*lock_allowed=*/true),
@@ -668,32 +931,41 @@ void ScopedActivity::ChangeActionAndInfo(uint8_t action, int32_t info) {
ActivityData::ForGeneric(id_, info));
}
-ScopedTaskRunActivity::ScopedTaskRunActivity(const base::PendingTask& task)
+ScopedTaskRunActivity::ScopedTaskRunActivity(
+ const void* program_counter,
+ const base::PendingTask& task)
: GlobalActivityTracker::ScopedThreadActivity(
+ program_counter,
task.posted_from.program_counter(),
Activity::ACT_TASK_RUN,
ActivityData::ForTask(task.sequence_num),
/*lock_allowed=*/true) {}
ScopedLockAcquireActivity::ScopedLockAcquireActivity(
+ const void* program_counter,
const base::internal::LockImpl* lock)
: GlobalActivityTracker::ScopedThreadActivity(
+ program_counter,
nullptr,
Activity::ACT_LOCK_ACQUIRE,
ActivityData::ForLock(lock),
/*lock_allowed=*/false) {}
ScopedEventWaitActivity::ScopedEventWaitActivity(
+ const void* program_counter,
const base::WaitableEvent* event)
: GlobalActivityTracker::ScopedThreadActivity(
+ program_counter,
nullptr,
Activity::ACT_EVENT_WAIT,
ActivityData::ForEvent(event),
/*lock_allowed=*/true) {}
ScopedThreadJoinActivity::ScopedThreadJoinActivity(
+ const void* program_counter,
const base::PlatformThreadHandle* thread)
: GlobalActivityTracker::ScopedThreadActivity(
+ program_counter,
nullptr,
Activity::ACT_THREAD_JOIN,
ActivityData::ForThread(*thread),
@@ -701,8 +973,10 @@ ScopedThreadJoinActivity::ScopedThreadJoinActivity(
#if !defined(OS_NACL) && !defined(OS_IOS)
ScopedProcessWaitActivity::ScopedProcessWaitActivity(
+ const void* program_counter,
const base::Process* process)
: GlobalActivityTracker::ScopedThreadActivity(
+ program_counter,
nullptr,
Activity::ACT_PROCESS_WAIT,
ActivityData::ForProcess(process->Pid()),
diff --git a/chromium/base/debug/activity_tracker.h b/chromium/base/debug/activity_tracker.h
index 2cf48509301..24de4317448 100644
--- a/chromium/base/debug/activity_tracker.h
+++ b/chromium/base/debug/activity_tracker.h
@@ -16,11 +16,14 @@
// PersistentMemoryAllocator which also uses std::atomic and is written
// by the same author.
#include <atomic>
+#include <map>
#include <memory>
#include <string>
#include <vector>
#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/gtest_prod_util.h"
#include "base/location.h"
#include "base/metrics/persistent_memory_allocator.h"
#include "base/threading/platform_thread.h"
@@ -33,7 +36,6 @@ struct PendingTask;
class FilePath;
class Lock;
-class MemoryMappedFile;
class PlatformThreadHandle;
class Process;
class WaitableEvent;
@@ -125,6 +127,56 @@ union ActivityData {
// A "null" activity-data that can be passed to indicate "do not change".
extern const ActivityData kNullActivityData;
+
+// A helper class for managing memory allocations within a persistent memory
+// allocator. Instances of this class are NOT thread-safe. Use from a single
+// thread or protect access with a lock.
+class ActivityTrackerMemoryAllocator {
+ public:
+ using Reference = PersistentMemoryAllocator::Reference;
+
+ // Creates an instance for allocating objects of a fixed |object_type|, a
+ // corresponding |object_free_type|, and the given |object_size|. An internal
+ // cache of the last |cache_size| released references will be kept for
+ // quick future fetches. If |make_iterable| then allocated objects will
+ // be marked "iterable" in the allocator.
+ ActivityTrackerMemoryAllocator(PersistentMemoryAllocator* allocator,
+ uint32_t object_type,
+ uint32_t object_free_type,
+ size_t object_size,
+ size_t cache_size,
+ bool make_iterable);
+ ~ActivityTrackerMemoryAllocator();
+
+ // Gets a reference to an object of the configured type. This can return
+ // a null reference if it was not possible to allocate the memory.
+ Reference GetObjectReference();
+
+ // Returns an object to the "free" pool.
+ void ReleaseObjectReference(Reference ref);
+
+ // The current "used size" of the internal cache, visible for testing.
+ size_t cache_used() const { return cache_used_; }
+
+ private:
+ PersistentMemoryAllocator* const allocator_;
+ const uint32_t object_type_;
+ const uint32_t object_free_type_;
+ const size_t object_size_;
+ const size_t cache_size_;
+ const bool make_iterable_;
+
+ // An iterator for going through persistent memory looking for free'd objects.
+ PersistentMemoryAllocator::Iterator iterator_;
+
+ // The cache of released object memories.
+ std::unique_ptr<Reference[]> cache_values_;
+ size_t cache_used_;
+
+ DISALLOW_COPY_AND_ASSIGN(ActivityTrackerMemoryAllocator);
+};
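+
+// Usage sketch (illustrative): instances are not thread-safe, so callers
+// such as GlobalActivityTracker guard these calls with a lock.
+//
+//   ActivityTrackerMemoryAllocator::Reference ref =
+//       allocator.GetObjectReference();     // Reuse a free block or allocate.
+//   if (ref) { /* ... use the object at |ref| ... */ }
+//   allocator.ReleaseObjectReference(ref);  // Zero and cache for reuse.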
+
+
// This structure is the full contents recorded for every activity pushed
// onto the stack. The |activity_type| indicates what is actually stored in
// the |data| field. All fields must be explicitly sized types to ensure no
@@ -176,6 +228,9 @@ struct Activity {
// but when returned in a snapshot, it is "wall time".
int64_t time_internal;
+ // The address that pushed the activity onto the stack as a raw number.
+ uint64_t calling_address;
+
+ // The address that is the origin of the activity if it is not obvious from
// the call stack. This is useful for things like tasks that are posted
// from a completely different thread though most activities will leave
@@ -189,6 +244,9 @@ struct Activity {
// enabled.
uint64_t call_stack[kActivityCallStackSize];
+ // Reference to arbitrary user data within the persistent memory segment.
+ uint32_t user_data;
+
// The (enumerated) type of the activity. This defines what fields of the
// |data| record are valid.
uint8_t activity_type;
@@ -196,12 +254,13 @@ struct Activity {
// Padding to ensure that the next member begins on a 64-bit boundary
// even on 32-bit builds which ensures inter-operability between CPU
// architectures. New fields can be taken from this space.
- uint8_t padding[7];
+ uint8_t padding[3];
// Information specific to the |activity_type|.
ActivityData data;
static void FillFrom(Activity* activity,
+ const void* program_counter,
const void* origin,
Type type,
const ActivityData& data);
@@ -236,6 +295,114 @@ struct BASE_EXPORT ActivitySnapshot {
uint32_t activity_stack_depth = 0;
};
+// This class manages arbitrary user data that can be associated with the
+// activities of a thread, stored as key/value pairs of any supported type.
+// This can provide additional information during debugging. It is also used
+// to store arbitrary global data. All updates must be made from the same
+// thread.
+class BASE_EXPORT ActivityUserData {
+  // List of known value types. Each REFERENCE type must immediately follow
+  // its corresponding non-reference type.
+ enum ValueType : uint8_t {
+ END_OF_VALUES = 0,
+ RAW_VALUE,
+ RAW_VALUE_REFERENCE,
+ STRING_VALUE,
+ STRING_VALUE_REFERENCE,
+ CHAR_VALUE,
+ SIGNED_VALUE,
+ UNSIGNED_VALUE,
+ };
+
+ public:
+ ActivityUserData(void* memory, size_t size);
+ ~ActivityUserData();
+
+ // Writes a |value| (as part of a key/value pair) that will be included with
+ // the activity in any reports. The same |name| can be written multiple times
+ // with each successive call overwriting the previously stored |value|. For
+ // raw and string values, the maximum size of successive writes is limited by
+ // the first call. The length of "name" is limited to 255 characters.
+ //
+ // This information is stored on a "best effort" basis. It may be dropped if
+ // the memory buffer is full or the associated activity is beyond the maximum
+ // recording depth.
+ void Set(StringPiece name, const void* memory, size_t size) {
+ Set(name, RAW_VALUE, memory, size);
+ }
+ void SetString(StringPiece name, StringPiece value) {
+ Set(name, STRING_VALUE, value.data(), value.length());
+ }
+ void SetChar(StringPiece name, char value) {
+ Set(name, CHAR_VALUE, &value, sizeof(value));
+ }
+ void SetInt(StringPiece name, int64_t value) {
+ Set(name, SIGNED_VALUE, &value, sizeof(value));
+ }
+ void SetUint(StringPiece name, uint64_t value) {
+ Set(name, UNSIGNED_VALUE, &value, sizeof(value));
+ }
+
+ // These function as above but don't actually copy the data into the
+ // persistent memory. They store unaltered pointers along with a size. These
+  // can be used in conjunction with a memory dump to find certain large pieces
+ // of information.
+ void SetReference(StringPiece name, const void* memory, size_t size) {
+ SetReference(name, RAW_VALUE_REFERENCE, memory, size);
+ }
+ void SetStringReference(StringPiece name, StringPiece value) {
+ SetReference(name, STRING_VALUE_REFERENCE, value.data(), value.length());
+ }
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(ActivityTrackerTest, UserDataTest);
+
+ enum : size_t { kMemoryAlignment = sizeof(uint64_t) };
+
+ // A structure used to reference data held outside of persistent memory.
+ struct ReferenceRecord {
+ uint64_t address;
+ uint64_t size;
+ };
+
+ // Header to a key/value record held in persistent memory.
+ struct Header {
+ std::atomic<uint8_t> type; // Encoded ValueType
+ uint8_t name_size; // Length of "name" key.
+    std::atomic<uint16_t> value_size;  // Actual size of the stored value.
+ uint16_t record_size; // Total storage of name, value, header.
+ };
+
+  // This record is used to hold a known value in a map so that it can be
+  // found and overwritten later.
+ struct ValueInfo {
+ ValueInfo();
+ ValueInfo(ValueInfo&&);
+ ~ValueInfo();
+
+ StringPiece name; // The "key" of the record.
+ ValueType type; // The type of the value.
+ void* memory; // Where the "value" is held.
+ std::atomic<uint16_t>* size_ptr; // Address of the actual size of value.
+ size_t extent; // The total storage of the value,
+ }; // typically rounded up for alignment.
+
+ void Set(StringPiece name, ValueType type, const void* memory, size_t size);
+ void SetReference(StringPiece name,
+ ValueType type,
+ const void* memory,
+ size_t size);
+
+ // TODO(bcwhite): Add Get() methods for Analyzer to use.
+
+ std::map<StringPiece, ValueInfo> values_;
+
+ char* memory_;
+ size_t available_;
+
+ base::ThreadChecker thread_checker_;
+
+ DISALLOW_COPY_AND_ASSIGN(ActivityUserData);
+};
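+
+A minimal sketch of the key/value API above, against a caller-supplied buffer;
+the UserDataTest in the unit-test diff further down exercises the same calls
+with exact size assertions. All names in the sketch are illustrative:
+
+    char buffer[256] = {};
+    ActivityUserData user_data(buffer, sizeof(buffer));
+    user_data.SetInt("sequence", 42);  // Repeated Set*() calls overwrite.
+    user_data.SetString("url", "https://example.com/");
+    user_data.SetChar("phase", 'b');
+    // References copy only the pointer and size, so the referenced memory
+    // must remain valid for as long as the record may be read.
+    static const char kLargeBlob[4096] = {};
+    user_data.SetReference("blob", kLargeBlob, sizeof(kLargeBlob));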
// This class manages tracking a stack of activities for a single thread in
// a persistent manner, implementing a bounded-size stack in a fixed-size
@@ -248,6 +415,8 @@ struct BASE_EXPORT ActivitySnapshot {
// objects.
class BASE_EXPORT ThreadActivityTracker {
public:
+ using ActivityId = uint32_t;
+
// This is the base class for having the compiler manage an activity on the
// tracker's stack. It does nothing but call methods on the passed |tracker|
// if it is not null, making it safe (and cheap) to create these objects
@@ -255,29 +424,29 @@ class BASE_EXPORT ThreadActivityTracker {
class BASE_EXPORT ScopedActivity {
public:
ScopedActivity(ThreadActivityTracker* tracker,
+ const void* program_counter,
const void* origin,
Activity::Type type,
- const ActivityData& data)
- : tracker_(tracker) {
- if (tracker_)
- tracker_->PushActivity(origin, type, data);
- }
+ const ActivityData& data);
+ ~ScopedActivity();
- ~ScopedActivity() {
- if (tracker_)
- tracker_->PopActivity();
- }
+ // Changes some basic metadata about the activity.
+ void ChangeTypeAndData(Activity::Type type, const ActivityData& data);
- void ChangeTypeAndData(Activity::Type type, const ActivityData& data) {
- if (tracker_)
- tracker_->ChangeActivity(type, data);
- }
+ // Returns an object for manipulating user data.
+ ActivityUserData& user_data();
private:
// The thread tracker to which this object reports. It can be null if
// activity tracking is not (yet) enabled.
ThreadActivityTracker* const tracker_;
+ // An identifier that indicates a specific activity on the stack.
+ ActivityId activity_id_;
+
+ // An object that manages additional user data, created only upon request.
+ std::unique_ptr<ActivityUserData> user_data_;
+
DISALLOW_COPY_AND_ASSIGN(ScopedActivity);
};
@@ -289,10 +458,23 @@ class BASE_EXPORT ThreadActivityTracker {
// Indicates that an activity has started from a given |origin| address in
// the code, though it can be null if the creator's address is not known.
- // The |type| and |data| describe the activity.
- void PushActivity(const void* origin,
- Activity::Type type,
- const ActivityData& data);
+  // The |type| and |data| describe the activity. |program_counter| should be
+  // the result of GetProgramCounter() at the point of the push. Returns an
+  // ID that can be used to adjust or pop the pushed activity.
+ ActivityId PushActivity(const void* program_counter,
+ const void* origin,
+ Activity::Type type,
+ const ActivityData& data);
+
+ // An inlined version of the above that gets the program counter where it
+ // is called.
+ ALWAYS_INLINE
+ ActivityId PushActivity(const void* origin,
+ Activity::Type type,
+ const ActivityData& data) {
+ return PushActivity(::tracked_objects::GetProgramCounter(), origin, type,
+ data);
+ }
// Changes the activity |type| and |data| of the top-most entry on the stack.
// This is useful if the information has changed and it is desirable to
@@ -301,10 +483,15 @@ class BASE_EXPORT ThreadActivityTracker {
// unchanged. The type, if changed, must remain in the same category.
// Changing both is not atomic, so a snapshot operation could occur between
// the updates of |type| and |data|, or between updates of individual |data|
// fields.
- void ChangeActivity(Activity::Type type, const ActivityData& data);
+ void ChangeActivity(ActivityId id,
+ Activity::Type type,
+ const ActivityData& data);
// Indicates that an activity has completed.
- void PopActivity();
+ void PopActivity(ActivityId id);
+
+ // Returns an object capable of storing arbitrary user data.
+ std::unique_ptr<ActivityUserData> GetUserData(ActivityId id);
// Returns whether the current data is valid or not. It is not valid if
// corruption has been detected in the header or other data structures.
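The net effect of this hunk is that callers now thread an ActivityId from push
to pop. A minimal sketch of the revised flow, mirroring PushPopTest in the
unit-test diff below (|tracker| is assumed non-null):

    char origin;
    ThreadActivityTracker::ActivityId id = tracker->PushActivity(
        &origin, Activity::ACT_TASK, ActivityData::ForTask(11));
    // Optionally revise the top-most entry before it completes.
    tracker->ChangeActivity(id, Activity::ACT_TASK, ActivityData::ForTask(12));
    tracker->PopActivity(id);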
@@ -344,46 +531,6 @@ class BASE_EXPORT ThreadActivityTracker {
// the thread trackers is taken from a PersistentMemoryAllocator which allows
// for the data to be analyzed by a parallel process or even post-mortem.
class BASE_EXPORT GlobalActivityTracker {
- template <typename T>
- class ThreadSafeStack {
- public:
- ThreadSafeStack(size_t size)
- : size_(size), values_(new T[size]), used_(0) {}
- ~ThreadSafeStack() {}
-
- size_t size() { return size_; }
- size_t used() {
- base::AutoLock autolock(lock_);
- return used_;
- }
-
- bool push(T value) {
- base::AutoLock autolock(lock_);
- if (used_ == size_)
- return false;
- values_[used_++] = value;
- return true;
- }
-
- bool pop(T* out_value) {
- base::AutoLock autolock(lock_);
- if (used_ == 0)
- return false;
- *out_value = values_[--used_];
- return true;
- }
-
- private:
- const size_t size_;
-
- std::unique_ptr<T[]> values_;
- size_t used_;
- base::Lock lock_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(ThreadSafeStack);
- };
-
public:
// Type identifiers used when storing in persistent memory so they can be
// identified during extraction; the first 4 bytes of the SHA1 of the name
@@ -392,8 +539,12 @@ class BASE_EXPORT GlobalActivityTracker {
// will be safely ignored. These are public so that an external process
// can recognize records of this type within an allocator.
enum : uint32_t {
- kTypeIdActivityTracker = 0x5D7381AF + 1, // SHA1(ActivityTracker) v1
- kTypeIdActivityTrackerFree = 0x3F0272FB + 1, // SHA1(ActivityTrackerFree)
+ kTypeIdActivityTracker = 0x5D7381AF + 2, // SHA1(ActivityTracker) v2
+ kTypeIdUserDataRecord = 0x615EDDD7 + 1, // SHA1(UserDataRecord) v1
+ kTypeIdGlobalDataRecord = 0xAFE61ABE + 1, // SHA1(GlobalDataRecord) v1
+
+ kTypeIdActivityTrackerFree = ~kTypeIdActivityTracker,
+ kTypeIdUserDataRecordFree = ~kTypeIdUserDataRecord,
};
// This is a thin wrapper around the thread-tracker's ScopedActivity that
@@ -403,12 +554,14 @@ class BASE_EXPORT GlobalActivityTracker {
class BASE_EXPORT ScopedThreadActivity
: public ThreadActivityTracker::ScopedActivity {
public:
- ScopedThreadActivity(const void* origin,
+ ScopedThreadActivity(const void* program_counter,
+ const void* origin,
Activity::Type type,
const ActivityData& data,
bool lock_allowed)
: ThreadActivityTracker::ScopedActivity(
GetOrCreateTracker(lock_allowed),
+ program_counter,
origin,
type,
data) {}
@@ -492,6 +645,17 @@ class BASE_EXPORT GlobalActivityTracker {
// Releases the activity-tracker for the current thread (for testing only).
void ReleaseTrackerForCurrentThreadForTesting();
+  // Gets a reference to memory for holding user-defined activity data. If
+  // the reference is valid, its memory will be returned. If not, a new
+  // reference will be created (and stored) and that memory returned.
+ void* GetUserDataMemory(PersistentMemoryAllocator::Reference* reference);
+
+ // Releases memory for user-defined activity data.
+ void ReleaseUserDataMemory(PersistentMemoryAllocator::Reference* reference);
+
+ // Accesses the global data record for storing arbitrary key/value pairs.
+ ActivityUserData& user_data() { return user_data_; }
+
private:
friend class ActivityTrackerTest;
@@ -499,6 +663,8 @@ class BASE_EXPORT GlobalActivityTracker {
// The maximum number of threads that can be tracked within a process. If
// more than this number run concurrently, tracking of new ones may cease.
kMaxThreadCount = 100,
+ kCachedThreadMemories = 10,
+ kCachedUserDataMemories = 10,
};
// A thin wrapper around the main thread-tracker that keeps additional
@@ -550,9 +716,17 @@ class BASE_EXPORT GlobalActivityTracker {
// The number of thread trackers currently active.
std::atomic<int> thread_tracker_count_;
- // A cache of thread-tracker memories that have been previously freed and
- // thus can be re-used instead of allocating new ones.
- ThreadSafeStack<PersistentMemoryAllocator::Reference> available_memories_;
+ // A caching memory allocator for thread-tracker objects.
+ ActivityTrackerMemoryAllocator thread_tracker_allocator_;
+ base::Lock thread_tracker_allocator_lock_;
+
+ // A caching memory allocator for user data attached to activity data.
+ ActivityTrackerMemoryAllocator user_data_allocator_;
+ base::Lock user_data_allocator_lock_;
+
+  // An object for holding arbitrary global key/value pairs. Values must
+  // always be written from the main UI thread.
+ ActivityUserData user_data_;
// The active global activity tracker.
static GlobalActivityTracker* g_tracker_;
@@ -577,17 +751,16 @@ class BASE_EXPORT ScopedActivity
// echo -n "MayNeverExit" | sha1sum => e44873ccab21e2b71270da24aa1...
//
// void MayNeverExit(int32_t foo) {
- // base::debug::ScopedActivity track_me(FROM_HERE, 0, 0xE44873CC, foo);
+ // base::debug::ScopedActivity track_me(0, 0xE44873CC, foo);
// ...
// }
- ScopedActivity(const tracked_objects::Location& location,
- uint8_t action,
- uint32_t id,
- int32_t info);
-
- // Because this is inline, the FROM_HERE macro will resolve the current
- // program-counter as the location in the calling code.
- ScopedActivity() : ScopedActivity(FROM_HERE, 0, 0, 0) {}
+ ALWAYS_INLINE
+ ScopedActivity(uint8_t action, uint32_t id, int32_t info)
+ : ScopedActivity(::tracked_objects::GetProgramCounter(),
+ action,
+ id,
+ info) {}
+ ScopedActivity() : ScopedActivity(0, 0, 0) {}
// Changes the |action| and/or |info| of this activity on the stack. This
// is useful for tracking progress through a function, updating the action
@@ -599,6 +772,12 @@ class BASE_EXPORT ScopedActivity
void ChangeActionAndInfo(uint8_t action, int32_t info);
private:
+ // Constructs the object using a passed-in program-counter.
+ ScopedActivity(const void* program_counter,
+ uint8_t action,
+ uint32_t id,
+ int32_t info);
+
// A copy of the ID code so it doesn't have to be passed by the caller when
// changing the |info| field.
uint32_t id_;
@@ -612,32 +791,56 @@ class BASE_EXPORT ScopedActivity
class BASE_EXPORT ScopedTaskRunActivity
: public GlobalActivityTracker::ScopedThreadActivity {
public:
- explicit ScopedTaskRunActivity(const base::PendingTask& task);
+ ALWAYS_INLINE
+ explicit ScopedTaskRunActivity(const base::PendingTask& task)
+ : ScopedTaskRunActivity(::tracked_objects::GetProgramCounter(),
+ task) {}
+
private:
+ ScopedTaskRunActivity(const void* program_counter,
+ const base::PendingTask& task);
DISALLOW_COPY_AND_ASSIGN(ScopedTaskRunActivity);
};
class BASE_EXPORT ScopedLockAcquireActivity
: public GlobalActivityTracker::ScopedThreadActivity {
public:
- explicit ScopedLockAcquireActivity(const base::internal::LockImpl* lock);
+ ALWAYS_INLINE
+ explicit ScopedLockAcquireActivity(const base::internal::LockImpl* lock)
+ : ScopedLockAcquireActivity(::tracked_objects::GetProgramCounter(),
+ lock) {}
+
private:
+ ScopedLockAcquireActivity(const void* program_counter,
+ const base::internal::LockImpl* lock);
DISALLOW_COPY_AND_ASSIGN(ScopedLockAcquireActivity);
};
class BASE_EXPORT ScopedEventWaitActivity
: public GlobalActivityTracker::ScopedThreadActivity {
public:
- explicit ScopedEventWaitActivity(const base::WaitableEvent* event);
+ ALWAYS_INLINE
+ explicit ScopedEventWaitActivity(const base::WaitableEvent* event)
+ : ScopedEventWaitActivity(::tracked_objects::GetProgramCounter(),
+ event) {}
+
private:
+ ScopedEventWaitActivity(const void* program_counter,
+ const base::WaitableEvent* event);
DISALLOW_COPY_AND_ASSIGN(ScopedEventWaitActivity);
};
class BASE_EXPORT ScopedThreadJoinActivity
: public GlobalActivityTracker::ScopedThreadActivity {
public:
- explicit ScopedThreadJoinActivity(const base::PlatformThreadHandle* thread);
+ ALWAYS_INLINE
+ explicit ScopedThreadJoinActivity(const base::PlatformThreadHandle* thread)
+ : ScopedThreadJoinActivity(::tracked_objects::GetProgramCounter(),
+ thread) {}
+
private:
+ ScopedThreadJoinActivity(const void* program_counter,
+ const base::PlatformThreadHandle* thread);
DISALLOW_COPY_AND_ASSIGN(ScopedThreadJoinActivity);
};
@@ -646,8 +849,14 @@ class BASE_EXPORT ScopedThreadJoinActivity
class BASE_EXPORT ScopedProcessWaitActivity
: public GlobalActivityTracker::ScopedThreadActivity {
public:
- explicit ScopedProcessWaitActivity(const base::Process* process);
+ ALWAYS_INLINE
+ explicit ScopedProcessWaitActivity(const base::Process* process)
+ : ScopedProcessWaitActivity(::tracked_objects::GetProgramCounter(),
+ process) {}
+
private:
+ ScopedProcessWaitActivity(const void* program_counter,
+ const base::Process* process);
DISALLOW_COPY_AND_ASSIGN(ScopedProcessWaitActivity);
};
#endif
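
The wrappers above all share one pattern: a public ALWAYS_INLINE constructor
captures the caller's program counter and delegates to a private out-of-line
constructor. A generic sketch of that pattern, with hypothetical names (Foo,
ScopedFooActivity):

    class Foo;  // Hypothetical tracked object.

    class BASE_EXPORT ScopedFooActivity
        : public GlobalActivityTracker::ScopedThreadActivity {
     public:
      ALWAYS_INLINE
      explicit ScopedFooActivity(const Foo* foo)
          // Inlined into the caller, so GetProgramCounter() yields the
          // caller's address rather than this constructor's.
          : ScopedFooActivity(::tracked_objects::GetProgramCounter(), foo) {}

     private:
      ScopedFooActivity(const void* program_counter, const Foo* foo);
      DISALLOW_COPY_AND_ASSIGN(ScopedFooActivity);
    };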
diff --git a/chromium/base/debug/activity_tracker_unittest.cc b/chromium/base/debug/activity_tracker_unittest.cc
index f422e50fe26..6e7630e0628 100644
--- a/chromium/base/debug/activity_tracker_unittest.cc
+++ b/chromium/base/debug/activity_tracker_unittest.cc
@@ -44,9 +44,11 @@ class TestActivityTracker : public ThreadActivityTracker {
class ActivityTrackerTest : public testing::Test {
public:
- const int kMemorySize = 1 << 10; // 1MiB
+ const int kMemorySize = 1 << 20; // 1MiB
const int kStackSize = 1 << 10; // 1KiB
+ using ActivityId = ThreadActivityTracker::ActivityId;
+
ActivityTrackerTest() {}
~ActivityTrackerTest() override {
@@ -74,12 +76,48 @@ class ActivityTrackerTest : public testing::Test {
GlobalActivityTracker* global_tracker = GlobalActivityTracker::Get();
if (!global_tracker)
return 0;
- return global_tracker->available_memories_.used();
+ base::AutoLock autolock(global_tracker->thread_tracker_allocator_lock_);
+ return global_tracker->thread_tracker_allocator_.cache_used();
+ }
+
+ size_t GetGlobalUserDataMemoryCacheUsed() {
+ return GlobalActivityTracker::Get()->user_data_allocator_.cache_used();
}
static void DoNothing() {}
};
+TEST_F(ActivityTrackerTest, UserDataTest) {
+ char buffer[256];
+ memset(buffer, 0, sizeof(buffer));
+ ActivityUserData data(buffer, sizeof(buffer));
+ ASSERT_EQ(sizeof(buffer), data.available_);
+
+ data.SetInt("foo", 1);
+ ASSERT_EQ(sizeof(buffer) - 24, data.available_);
+
+ data.SetUint("b", 1U); // Small names fit beside header in a word.
+ ASSERT_EQ(sizeof(buffer) - 24 - 16, data.available_);
+
+ data.Set("c", buffer, 10);
+ ASSERT_EQ(sizeof(buffer) - 24 - 16 - 24, data.available_);
+
+ data.SetString("dear john", "it's been fun");
+ ASSERT_EQ(sizeof(buffer) - 24 - 16 - 24 - 32, data.available_);
+
+ data.Set("c", buffer, 20);
+ ASSERT_EQ(sizeof(buffer) - 24 - 16 - 24 - 32, data.available_);
+
+ data.SetString("dear john", "but we're done together");
+ ASSERT_EQ(sizeof(buffer) - 24 - 16 - 24 - 32, data.available_);
+
+ data.SetString("dear john", "bye");
+ ASSERT_EQ(sizeof(buffer) - 24 - 16 - 24 - 32, data.available_);
+
+ data.SetChar("d", 'x');
+ ASSERT_EQ(sizeof(buffer) - 24 - 16 - 24 - 32 - 16, data.available_);
+}
+
TEST_F(ActivityTrackerTest, PushPopTest) {
std::unique_ptr<ThreadActivityTracker> tracker = CreateActivityTracker();
ActivitySnapshot snapshot;
@@ -89,8 +127,8 @@ TEST_F(ActivityTrackerTest, PushPopTest) {
ASSERT_EQ(0U, snapshot.activity_stack.size());
char origin1;
- tracker->PushActivity(&origin1, Activity::ACT_TASK,
- ActivityData::ForTask(11));
+ ActivityId id1 = tracker->PushActivity(&origin1, Activity::ACT_TASK,
+ ActivityData::ForTask(11));
ASSERT_TRUE(tracker->Snapshot(&snapshot));
ASSERT_EQ(1U, snapshot.activity_stack_depth);
ASSERT_EQ(1U, snapshot.activity_stack.size());
@@ -102,8 +140,8 @@ TEST_F(ActivityTrackerTest, PushPopTest) {
char origin2;
char lock2;
- tracker->PushActivity(&origin2, Activity::ACT_LOCK,
- ActivityData::ForLock(&lock2));
+ ActivityId id2 = tracker->PushActivity(&origin2, Activity::ACT_LOCK,
+ ActivityData::ForLock(&lock2));
ASSERT_TRUE(tracker->Snapshot(&snapshot));
ASSERT_EQ(2U, snapshot.activity_stack_depth);
ASSERT_EQ(2U, snapshot.activity_stack.size());
@@ -115,7 +153,7 @@ TEST_F(ActivityTrackerTest, PushPopTest) {
EXPECT_EQ(reinterpret_cast<uintptr_t>(&lock2),
snapshot.activity_stack[1].data.lock.lock_address);
- tracker->PopActivity();
+ tracker->PopActivity(id2);
ASSERT_TRUE(tracker->Snapshot(&snapshot));
ASSERT_EQ(1U, snapshot.activity_stack_depth);
ASSERT_EQ(1U, snapshot.activity_stack.size());
@@ -124,7 +162,7 @@ TEST_F(ActivityTrackerTest, PushPopTest) {
snapshot.activity_stack[0].origin_address);
EXPECT_EQ(11U, snapshot.activity_stack[0].data.task.sequence_id);
- tracker->PopActivity();
+ tracker->PopActivity(id1);
ASSERT_TRUE(tracker->Snapshot(&snapshot));
ASSERT_EQ(0U, snapshot.activity_stack_depth);
ASSERT_EQ(0U, snapshot.activity_stack.size());
@@ -136,6 +174,7 @@ TEST_F(ActivityTrackerTest, ScopedTaskTest) {
ThreadActivityTracker* tracker =
GlobalActivityTracker::Get()->GetOrCreateTrackerForCurrentThread();
ActivitySnapshot snapshot;
+ ASSERT_EQ(0U, GetGlobalUserDataMemoryCacheUsed());
ASSERT_TRUE(tracker->Snapshot(&snapshot));
ASSERT_EQ(0U, snapshot.activity_stack_depth);
@@ -144,6 +183,8 @@ TEST_F(ActivityTrackerTest, ScopedTaskTest) {
{
PendingTask task1(FROM_HERE, base::Bind(&DoNothing));
ScopedTaskRunActivity activity1(task1);
+ ActivityUserData& user_data1 = activity1.user_data();
+ (void)user_data1; // Tell compiler it's been used.
ASSERT_TRUE(tracker->Snapshot(&snapshot));
ASSERT_EQ(1U, snapshot.activity_stack_depth);
@@ -153,6 +194,8 @@ TEST_F(ActivityTrackerTest, ScopedTaskTest) {
{
PendingTask task2(FROM_HERE, base::Bind(&DoNothing));
ScopedTaskRunActivity activity2(task2);
+ ActivityUserData& user_data2 = activity2.user_data();
+ (void)user_data2; // Tell compiler it's been used.
ASSERT_TRUE(tracker->Snapshot(&snapshot));
ASSERT_EQ(2U, snapshot.activity_stack_depth);
@@ -169,6 +212,7 @@ TEST_F(ActivityTrackerTest, ScopedTaskTest) {
ASSERT_TRUE(tracker->Snapshot(&snapshot));
ASSERT_EQ(0U, snapshot.activity_stack_depth);
ASSERT_EQ(0U, snapshot.activity_stack.size());
+ ASSERT_EQ(2U, GetGlobalUserDataMemoryCacheUsed());
}
TEST_F(ActivityTrackerTest, CreateWithFileTest) {
@@ -214,9 +258,10 @@ class SimpleActivityThread : public SimpleThread {
~SimpleActivityThread() override {}
void Run() override {
- GlobalActivityTracker::Get()
- ->GetOrCreateTrackerForCurrentThread()
- ->PushActivity(origin_, activity_, data_);
+ ThreadActivityTracker::ActivityId id =
+ GlobalActivityTracker::Get()
+ ->GetOrCreateTrackerForCurrentThread()
+ ->PushActivity(origin_, activity_, data_);
{
AutoLock auto_lock(lock_);
@@ -225,9 +270,7 @@ class SimpleActivityThread : public SimpleThread {
exit_condition_.Wait();
}
- GlobalActivityTracker::Get()
- ->GetOrCreateTrackerForCurrentThread()
- ->PopActivity();
+ GlobalActivityTracker::Get()->GetTrackerForCurrentThread()->PopActivity(id);
}
void Exit() {
diff --git a/chromium/base/debug/crash_logging.cc b/chromium/base/debug/crash_logging.cc
index 8f121a67832..1df2d0a0c47 100644
--- a/chromium/base/debug/crash_logging.cc
+++ b/chromium/base/debug/crash_logging.cc
@@ -5,7 +5,7 @@
#include "base/debug/crash_logging.h"
#include <cmath>
-#include <map>
+#include <unordered_map>
#include "base/debug/stack_trace.h"
#include "base/format_macros.h"
@@ -22,7 +22,8 @@ namespace debug {
namespace {
// Global map of crash key names to registration entries.
-typedef std::map<base::StringPiece, CrashKey> CrashKeyMap;
+typedef std::unordered_map<base::StringPiece, CrashKey, base::StringPieceHash>
+ CrashKeyMap;
CrashKeyMap* g_crash_keys_ = NULL;
// The maximum length of a single chunk.
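
The switch from std::map to std::unordered_map requires an explicit hash
functor, since std::hash is not specialized for StringPiece; StringPieceHash is
the functor the patch uses. A self-contained sketch of the pattern (ExampleMap
is illustrative):

    #include <unordered_map>
    #include "base/strings/string_piece.h"

    // Hash-map keyed by StringPiece with an explicitly named hash functor.
    using ExampleMap =
        std::unordered_map<base::StringPiece, int, base::StringPieceHash>;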
diff --git a/chromium/base/debug/debugger_posix.cc b/chromium/base/debug/debugger_posix.cc
index e92d5a5619a..3255552333a 100644
--- a/chromium/base/debug/debugger_posix.cc
+++ b/chromium/base/debug/debugger_posix.cc
@@ -18,6 +18,8 @@
#include <vector>
#include "base/macros.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
#include "build/build_config.h"
#if defined(__GLIBCXX__)
diff --git a/chromium/base/debug/gdi_debug_util_win.cc b/chromium/base/debug/gdi_debug_util_win.cc
index ce339559ead..bf9827c5757 100644
--- a/chromium/base/debug/gdi_debug_util_win.cc
+++ b/chromium/base/debug/gdi_debug_util_win.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "base/debug/gdi_debug_util_win.h"
+#include <algorithm>
#include <cmath>
#include <psapi.h>
@@ -12,6 +13,7 @@
#include "base/debug/alias.h"
#include "base/logging.h"
#include "base/win/scoped_handle.h"
+#include "base/win/win_util.h"
namespace {
@@ -19,6 +21,15 @@ void CollectChildGDIUsageAndDie(DWORD parent_pid) {
HANDLE snapshot = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0);
CHECK_NE(INVALID_HANDLE_VALUE, snapshot);
+ int total_process_count = 0;
+ base::debug::Alias(&total_process_count);
+ int total_peak_gdi_count = 0;
+ base::debug::Alias(&total_peak_gdi_count);
+ int total_gdi_count = 0;
+ base::debug::Alias(&total_gdi_count);
+ int total_user_count = 0;
+ base::debug::Alias(&total_user_count);
+
int child_count = 0;
base::debug::Alias(&child_count);
int peak_gdi_count = 0;
@@ -33,25 +44,30 @@ void CollectChildGDIUsageAndDie(DWORD parent_pid) {
CHECK(Process32First(snapshot, &proc_entry));
do {
- if (parent_pid != proc_entry.th32ParentProcessID)
- continue;
- // Got a child process. Compute GDI usage.
base::win::ScopedHandle process(
OpenProcess(PROCESS_QUERY_INFORMATION,
FALSE,
- proc_entry.th32ParentProcessID));
+ proc_entry.th32ProcessID));
if (!process.IsValid())
continue;
int num_gdi_handles = GetGuiResources(process.Get(), GR_GDIOBJECTS);
int num_user_handles = GetGuiResources(process.Get(), GR_USEROBJECTS);
- // Compute sum and peak counts.
+ // Compute sum and peak counts for all processes.
+ ++total_process_count;
+ total_user_count += num_user_handles;
+ total_gdi_count += num_gdi_handles;
+ total_peak_gdi_count = std::max(total_peak_gdi_count, num_gdi_handles);
+
+ if (parent_pid != proc_entry.th32ParentProcessID)
+ continue;
+
+ // Compute sum and peak counts for child processes.
++child_count;
sum_user_count += num_user_handles;
sum_gdi_count += num_gdi_handles;
- if (peak_gdi_count < num_gdi_handles)
- peak_gdi_count = num_gdi_handles;
+ peak_gdi_count = std::max(peak_gdi_count, num_gdi_handles);
} while (Process32Next(snapshot, &proc_entry));
@@ -64,16 +80,18 @@ void CollectChildGDIUsageAndDie(DWORD parent_pid) {
namespace base {
namespace debug {
-void GDIBitmapAllocFailure(BITMAPINFOHEADER* header, HANDLE shared_section) {
+void CollectGDIUsageAndDie(BITMAPINFOHEADER* header, HANDLE shared_section) {
// Make sure parameters are saved in the minidump.
DWORD last_error = GetLastError();
+ bool is_gdi_available = base::win::IsUser32AndGdi32Available();
- LONG width = header->biWidth;
- LONG heigth = header->biHeight;
+ LONG width = header ? header->biWidth : 0;
+ LONG height = header ? header->biHeight : 0;
base::debug::Alias(&last_error);
+ base::debug::Alias(&is_gdi_available);
base::debug::Alias(&width);
- base::debug::Alias(&heigth);
+ base::debug::Alias(&height);
base::debug::Alias(&shared_section);
DWORD num_user_handles = GetGuiResources(GetCurrentProcess(), GR_USEROBJECTS);
@@ -100,19 +118,19 @@ void GDIBitmapAllocFailure(BITMAPINFOHEADER* header, HANDLE shared_section) {
CHECK_LE(pmc.PagefileUsage, kLotsOfMemory);
CHECK_LE(pmc.PrivateUsage, kLotsOfMemory);
- void* small_data = NULL;
+ void* small_data = nullptr;
base::debug::Alias(&small_data);
- if (std::abs(heigth) * width > 100) {
+ if (std::abs(height) * width > 100) {
// Huh, that's weird. We don't have crazy handle count, we don't have
// ridiculous memory usage. Try to allocate a small bitmap and see if that
// fails too.
header->biWidth = 5;
header->biHeight = -5;
HBITMAP small_bitmap = CreateDIBSection(
- NULL, reinterpret_cast<BITMAPINFO*>(&header),
+      nullptr, reinterpret_cast<BITMAPINFO*>(header),
0, &small_data, shared_section, 0);
- CHECK(small_bitmap != NULL);
+ CHECK(small_bitmap != nullptr);
DeleteObject(small_bitmap);
}
// Maybe the child processes are the ones leaking GDI or USER resources.
diff --git a/chromium/base/debug/gdi_debug_util_win.h b/chromium/base/debug/gdi_debug_util_win.h
index 5887ecb8464..3383a4d522d 100644
--- a/chromium/base/debug/gdi_debug_util_win.h
+++ b/chromium/base/debug/gdi_debug_util_win.h
@@ -12,10 +12,12 @@
namespace base {
namespace debug {
-// Crashes the process leaving valuable information on the dump via
-// debug::alias so we can find what is causing the allocation failures.
-void BASE_EXPORT GDIBitmapAllocFailure(BITMAPINFOHEADER* header,
- HANDLE shared_section);
+// Crashes the process, using base::debug::Alias to leave valuable debugging
+// information in the crash dump. Pass values for |header| and |shared_section|
+// in the event of a bitmap allocation failure, to gather information about
+// those as well.
+void BASE_EXPORT CollectGDIUsageAndDie(BITMAPINFOHEADER* header = nullptr,
+ HANDLE shared_section = nullptr);
} // namespace debug
} // namespace base
diff --git a/chromium/base/debug/scoped_thread_heap_usage.h b/chromium/base/debug/scoped_thread_heap_usage.h
deleted file mode 100644
index a843fc93d0c..00000000000
--- a/chromium/base/debug/scoped_thread_heap_usage.h
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_DEBUG_SCOPED_THREAD_HEAP_USAGE_H_
-#define BASE_DEBUG_SCOPED_THREAD_HEAP_USAGE_H_
-
-#include <stdint.h>
-
-#include "base/allocator/features.h"
-#include "base/base_export.h"
-#include "base/threading/thread_checker.h"
-
-namespace base {
-namespace allocator {
-struct AllocatorDispatch;
-} // namespace allocator
-
-namespace debug {
-
-// By keeping a tally on heap operations, it's possible to track:
-// - the number of alloc/free operations, where a realloc is zero or one
-// of each, depending on the input parameters (see man realloc).
-// - the number of bytes allocated/freed.
-// - the number of estimated bytes of heap overhead used.
-// - the high-watermark amount of bytes allocated in the scope.
-// This in turn allows measuring the memory usage and memory usage churn over
-// a scope. Scopes must be cleanly nested, and each scope must be
-// destroyed on the thread where it's created.
-//
-// Note that this depends on the capabilities of the underlying heap shim. If
-// that shim can not yield a size estimate for an allocation, it's not possible
-// to keep track of overhead, freed bytes and the allocation high water mark.
-class BASE_EXPORT ScopedThreadHeapUsage {
- public:
- struct ThreadAllocatorUsage {
- // The cumulative number of allocation operations.
- uint64_t alloc_ops;
-
- // The cumulative number of allocated bytes. Where available, this is
- // inclusive heap padding and estimated or actual heap overhead.
- uint64_t alloc_bytes;
-
- // Where available, cumulative number of heap padding heap
- // and overhead bytes.
- uint64_t alloc_overhead_bytes;
-
- // The cumulative number of free operations.
- uint64_t free_ops;
-
- // The cumulative number of bytes freed.
- // Only recorded if the underlying heap shim can return the size of an
- // allocation.
- uint64_t free_bytes;
-
- // The maximal value of alloc_bytes - free_bytes seen for this thread.
- // Only recorded if the underlying heap shim supports returning the size of
- // an allocation.
- uint64_t max_allocated_bytes;
- };
-
- ScopedThreadHeapUsage();
- ~ScopedThreadHeapUsage();
-
- const ThreadAllocatorUsage& usage_at_creation() const {
- return usage_at_creation_;
- }
-
- // Returns this thread's allocator usage from the creation of the innermost
- // enclosing ScopedThreadHeapUsage instance, if any. Note that this is
- // inclusive allocator usage in all inner scopes.
- static ThreadAllocatorUsage CurrentUsage();
-
- // Initializes the TLS machinery this class uses. Must be called before
- // creating instances of this class.
- static void Initialize();
-
- // Enables the heap intercept. May only be called once, and only if the heap
- // shim is available, e.g. if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM) is
- // true.
- static void EnableHeapTracking();
-
- protected:
- // Exposed for testing only - note that it's safe to re-EnableHeapTracking()
- // after calling this function in tests.
- static void DisableHeapTrackingForTesting();
-
- // Exposed to allow testing the shim without inserting it in the allocator
- // shim chain.
- static base::allocator::AllocatorDispatch* GetDispatchForTesting();
-
- private:
- static void EnsureTLSInitialized();
-
- ThreadChecker thread_checker_;
- // The allocator usage captured at creation of this instance.
- ThreadAllocatorUsage usage_at_creation_;
-};
-
-} // namespace debug
-} // namespace base
-
-#endif  // BASE_DEBUG_SCOPED_THREAD_HEAP_USAGE_H_
\ No newline at end of file
diff --git a/chromium/base/debug/stack_trace.cc b/chromium/base/debug/stack_trace.cc
index f3ffef360f0..fb2f6b508f2 100644
--- a/chromium/base/debug/stack_trace.cc
+++ b/chromium/base/debug/stack_trace.cc
@@ -9,6 +9,7 @@
#include <algorithm>
#include <sstream>
+#include "base/logging.h"
#include "base/macros.h"
#if HAVE_TRACE_STACK_FRAME_POINTERS
@@ -176,6 +177,17 @@ uintptr_t ScanStackForNextFrame(uintptr_t fp, uintptr_t stack_end) {
return 0;
}
+// Links stack frame |fp| to |parent_fp|, so that during stack unwinding
+// TraceStackFramePointers() visits |parent_fp| after visiting |fp|.
+// Both frame pointers must come from __builtin_frame_address().
+// Returns the previous stack frame |fp| was linked to.
+void* LinkStackFrames(void* fpp, void* parent_fp) {
+ uintptr_t fp = reinterpret_cast<uintptr_t>(fpp) - kStackFrameAdjustment;
+ void* prev_parent_fp = reinterpret_cast<void**>(fp)[0];
+ reinterpret_cast<void**>(fp)[0] = parent_fp;
+ return prev_parent_fp;
+}
+
#endif // HAVE_TRACE_STACK_FRAME_POINTERS
} // namespace
@@ -245,6 +257,17 @@ size_t TraceStackFramePointers(const void** out_trace,
return depth;
}
+ScopedStackFrameLinker::ScopedStackFrameLinker(void* fp, void* parent_fp)
+ : fp_(fp),
+ parent_fp_(parent_fp),
+ original_parent_fp_(LinkStackFrames(fp, parent_fp)) {}
+
+ScopedStackFrameLinker::~ScopedStackFrameLinker() {
+ void* previous_parent_fp = LinkStackFrames(fp_, original_parent_fp_);
+ CHECK_EQ(parent_fp_, previous_parent_fp)
+ << "Stack frame's parent pointer has changed!";
+}
+
#endif // HAVE_TRACE_STACK_FRAME_POINTERS
} // namespace debug
diff --git a/chromium/base/debug/stack_trace.h b/chromium/base/debug/stack_trace.h
index 23e7b5164b6..d4918d60654 100644
--- a/chromium/base/debug/stack_trace.h
+++ b/chromium/base/debug/stack_trace.h
@@ -11,6 +11,7 @@
#include <string>
#include "base/base_export.h"
+#include "base/macros.h"
#include "build/build_config.h"
#if defined(OS_POSIX)
@@ -113,6 +114,57 @@ class BASE_EXPORT StackTrace {
BASE_EXPORT size_t TraceStackFramePointers(const void** out_trace,
size_t max_depth,
size_t skip_initial);
+
+// Links stack frame |fp| to |parent_fp|, so that during stack unwinding
+// TraceStackFramePointers() visits |parent_fp| after visiting |fp|.
+// Both frame pointers must come from __builtin_frame_address().
+// The destructor restores the original linkage of |fp| to avoid corrupting
+// the caller's frame register on return.
+//
+// This class can be used to repair a broken stack frame chain in cases
+// where execution flow goes into code built without frame pointers:
+//
+// void DoWork() {
+// Call_SomeLibrary();
+// }
+// static __thread void* g_saved_fp;
+// void Call_SomeLibrary() {
+// g_saved_fp = __builtin_frame_address(0);
+// some_library_call(...); // indirectly calls SomeLibrary_Callback()
+// }
+// void SomeLibrary_Callback() {
+// ScopedStackFrameLinker linker(__builtin_frame_address(0), g_saved_fp);
+// ...
+// TraceStackFramePointers(...);
+// }
+//
+// This produces the following trace:
+//
+// #0 SomeLibrary_Callback()
+// #1 <address of the code inside SomeLibrary that called #0>
+// #2 DoWork()
+// ...rest of the trace...
+//
+// SomeLibrary doesn't use frame pointers, so when SomeLibrary_Callback()
+// is called, the stack frame register contains a bogus value that becomes the
+// callback's parent frame address. Without ScopedStackFrameLinker, unwinding
+// would have stopped at that bogus frame address, yielding just the first two
+// frames (#0, #1). ScopedStackFrameLinker overwrites the callback's parent
+// frame address with Call_SomeLibrary()'s frame, so the unwinder produces a
+// full trace without even noticing that the stack frame chain was broken.
+class BASE_EXPORT ScopedStackFrameLinker {
+ public:
+ ScopedStackFrameLinker(void* fp, void* parent_fp);
+ ~ScopedStackFrameLinker();
+
+ private:
+ void* fp_;
+ void* parent_fp_;
+ void* original_parent_fp_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedStackFrameLinker);
+};
+
#endif // HAVE_TRACE_STACK_FRAME_POINTERS
namespace internal {
diff --git a/chromium/base/debug/task_annotator.cc b/chromium/base/debug/task_annotator.cc
index 2747d63c2bf..437d69a7f8b 100644
--- a/chromium/base/debug/task_annotator.cc
+++ b/chromium/base/debug/task_annotator.cc
@@ -28,34 +28,32 @@ void TaskAnnotator::DidQueueTask(const char* queue_function,
}
void TaskAnnotator::RunTask(const char* queue_function,
- const PendingTask& pending_task) {
- ScopedTaskRunActivity task_activity(pending_task);
+ PendingTask* pending_task) {
+ ScopedTaskRunActivity task_activity(*pending_task);
tracked_objects::TaskStopwatch stopwatch;
stopwatch.Start();
tracked_objects::Duration queue_duration =
- stopwatch.StartTime() - pending_task.EffectiveTimePosted();
+ stopwatch.StartTime() - pending_task->EffectiveTimePosted();
- TRACE_EVENT_WITH_FLOW1(TRACE_DISABLED_BY_DEFAULT("toplevel.flow"),
- queue_function,
- TRACE_ID_MANGLE(GetTaskTraceID(pending_task)),
- TRACE_EVENT_FLAG_FLOW_IN,
- "queue_duration",
- queue_duration.InMilliseconds());
+ TRACE_EVENT_WITH_FLOW1(
+ TRACE_DISABLED_BY_DEFAULT("toplevel.flow"), queue_function,
+ TRACE_ID_MANGLE(GetTaskTraceID(*pending_task)), TRACE_EVENT_FLAG_FLOW_IN,
+ "queue_duration", queue_duration.InMilliseconds());
// Before running the task, store the program counter where it was posted
// and deliberately alias it to ensure it is on the stack if the task
// crashes. Be careful not to assume that the variable itself will show the
// expected value when inspected in an optimized build; the optimizer may
// relocate it.
// Look at a memory dump of the stack.
- const void* program_counter = pending_task.posted_from.program_counter();
+ const void* program_counter = pending_task->posted_from.program_counter();
debug::Alias(&program_counter);
- pending_task.task.Run();
+ std::move(pending_task->task).Run();
stopwatch.Stop();
- tracked_objects::ThreadData::TallyRunOnNamedThreadIfTracking(
- pending_task, stopwatch);
+ tracked_objects::ThreadData::TallyRunOnNamedThreadIfTracking(*pending_task,
+ stopwatch);
}
uint64_t TaskAnnotator::GetTaskTraceID(const PendingTask& task) const {
diff --git a/chromium/base/debug/task_annotator.h b/chromium/base/debug/task_annotator.h
index 2687c5c930e..34115d8f3de 100644
--- a/chromium/base/debug/task_annotator.h
+++ b/chromium/base/debug/task_annotator.h
@@ -28,7 +28,7 @@ class BASE_EXPORT TaskAnnotator {
// Run a previously queued task. |queue_function| should match what was
// passed into |DidQueueTask| for this task.
- void RunTask(const char* queue_function, const PendingTask& pending_task);
+ void RunTask(const char* queue_function, PendingTask* pending_task);
private:
// Creates a process-wide unique ID to represent this task in trace events.
diff --git a/chromium/base/debug/task_annotator_unittest.cc b/chromium/base/debug/task_annotator_unittest.cc
index 9f5c442327e..8a1c8bdc872 100644
--- a/chromium/base/debug/task_annotator_unittest.cc
+++ b/chromium/base/debug/task_annotator_unittest.cc
@@ -24,7 +24,7 @@ TEST(TaskAnnotatorTest, QueueAndRunTask) {
TaskAnnotator annotator;
annotator.DidQueueTask("TaskAnnotatorTest::Queue", pending_task);
EXPECT_EQ(0, result);
- annotator.RunTask("TaskAnnotatorTest::Queue", pending_task);
+ annotator.RunTask("TaskAnnotatorTest::Queue", &pending_task);
EXPECT_EQ(123, result);
}
diff --git a/chromium/base/debug/scoped_thread_heap_usage.cc b/chromium/base/debug/thread_heap_usage_tracker.cc
index 2f5ed8c267e..b9018e0c705 100644
--- a/chromium/base/debug/scoped_thread_heap_usage.cc
+++ b/chromium/base/debug/thread_heap_usage_tracker.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "base/debug/scoped_thread_heap_usage.h"
+#include "base/debug/thread_heap_usage_tracker.h"
#include <stdint.h>
#include <algorithm>
@@ -29,14 +29,14 @@ using base::allocator::AllocatorDispatch;
ThreadLocalStorage::StaticSlot g_thread_allocator_usage = TLS_INITIALIZER;
-ScopedThreadHeapUsage::ThreadAllocatorUsage* const kInitializingSentinel =
- reinterpret_cast<ScopedThreadHeapUsage::ThreadAllocatorUsage*>(-1);
+ThreadHeapUsage* const kInitializingSentinel =
+ reinterpret_cast<ThreadHeapUsage*>(-1);
bool g_heap_tracking_enabled = false;
// Forward declared as it needs to delegate memory allocation to the next
// lower shim.
-ScopedThreadHeapUsage::ThreadAllocatorUsage* GetOrCreateThreadUsage();
+ThreadHeapUsage* GetOrCreateThreadUsage();
size_t GetAllocSizeEstimate(const AllocatorDispatch* next, void* ptr) {
if (ptr == nullptr)
@@ -46,28 +46,32 @@ size_t GetAllocSizeEstimate(const AllocatorDispatch* next, void* ptr) {
}
void RecordAlloc(const AllocatorDispatch* next, void* ptr, size_t size) {
- ScopedThreadHeapUsage::ThreadAllocatorUsage* usage = GetOrCreateThreadUsage();
+ ThreadHeapUsage* usage = GetOrCreateThreadUsage();
if (usage == nullptr)
return;
usage->alloc_ops++;
size_t estimate = GetAllocSizeEstimate(next, ptr);
if (size && estimate) {
+ // Only keep track of the net number of bytes allocated in the scope if the
+ // size estimate function returns sane values, e.g. non-zero.
usage->alloc_bytes += estimate;
usage->alloc_overhead_bytes += estimate - size;
- // Only keep track of the net number of bytes allocated in the scope if the
- // size estimate function returns sane values, e.g. non-zero.
- uint64_t allocated_bytes = usage->alloc_bytes - usage->free_bytes;
- if (allocated_bytes > usage->max_allocated_bytes)
- usage->max_allocated_bytes = allocated_bytes;
+    // Record the max outstanding number of bytes, but only if the difference
+    // is net positive (i.e. more bytes allocated than freed in the scope).
+ if (usage->alloc_bytes > usage->free_bytes) {
+ uint64_t allocated_bytes = usage->alloc_bytes - usage->free_bytes;
+ if (allocated_bytes > usage->max_allocated_bytes)
+ usage->max_allocated_bytes = allocated_bytes;
+ }
} else {
usage->alloc_bytes += size;
}
}
void RecordFree(const AllocatorDispatch* next, void* ptr) {
- ScopedThreadHeapUsage::ThreadAllocatorUsage* usage = GetOrCreateThreadUsage();
+ ThreadHeapUsage* usage = GetOrCreateThreadUsage();
if (usage == nullptr)
return;
@@ -130,10 +134,9 @@ AllocatorDispatch allocator_dispatch = {
&AllocFn, &AllocZeroInitializedFn, &AllocAlignedFn, &ReallocFn,
&FreeFn, &GetSizeEstimateFn, nullptr};
-ScopedThreadHeapUsage::ThreadAllocatorUsage* GetOrCreateThreadUsage() {
- ScopedThreadHeapUsage::ThreadAllocatorUsage* allocator_usage =
- static_cast<ScopedThreadHeapUsage::ThreadAllocatorUsage*>(
- g_thread_allocator_usage.Get());
+ThreadHeapUsage* GetOrCreateThreadUsage() {
+ ThreadHeapUsage* allocator_usage =
+ static_cast<ThreadHeapUsage*>(g_thread_allocator_usage.Get());
if (allocator_usage == kInitializingSentinel)
return nullptr; // Re-entrancy case.
@@ -141,7 +144,7 @@ ScopedThreadHeapUsage::ThreadAllocatorUsage* GetOrCreateThreadUsage() {
// Prevent reentrancy due to the allocation below.
g_thread_allocator_usage.Set(kInitializingSentinel);
- allocator_usage = new ScopedThreadHeapUsage::ThreadAllocatorUsage;
+ allocator_usage = new ThreadHeapUsage;
memset(allocator_usage, 0, sizeof(*allocator_usage));
g_thread_allocator_usage.Set(allocator_usage);
}
@@ -151,61 +154,75 @@ ScopedThreadHeapUsage::ThreadAllocatorUsage* GetOrCreateThreadUsage() {
} // namespace
-ScopedThreadHeapUsage::ScopedThreadHeapUsage() {
- // Initialize must be called before creating instances of this class.
- CHECK(g_thread_allocator_usage.initialized());
+ThreadHeapUsageTracker::ThreadHeapUsageTracker() : thread_usage_(nullptr) {
+ static_assert(std::is_pod<ThreadHeapUsage>::value, "Must be POD.");
+}
- ThreadAllocatorUsage* usage = GetOrCreateThreadUsage();
- usage_at_creation_ = *usage;
+ThreadHeapUsageTracker::~ThreadHeapUsageTracker() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+
+ if (thread_usage_ != nullptr) {
+ // If this tracker wasn't stopped, make it inclusive so that the
+ // usage isn't lost.
+ Stop(false);
+ }
+}
+
+void ThreadHeapUsageTracker::Start() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(g_thread_allocator_usage.initialized());
+
+ thread_usage_ = GetOrCreateThreadUsage();
+ usage_ = *thread_usage_;
// Reset the stats for our current scope.
// The per-thread usage instance now tracks this scope's usage, while this
// instance persists the outer scope's usage stats. On destruction, this
- // instance will restore the outer scope's usage stats with this scope's usage
- // added.
- memset(usage, 0, sizeof(*usage));
-
- static_assert(std::is_pod<ThreadAllocatorUsage>::value, "Must be POD.");
+ // instance will restore the outer scope's usage stats with this scope's
+ // usage added.
+ memset(thread_usage_, 0, sizeof(*thread_usage_));
}
-ScopedThreadHeapUsage::~ScopedThreadHeapUsage() {
+void ThreadHeapUsageTracker::Stop(bool usage_is_exclusive) {
DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK_NE(nullptr, thread_usage_);
- ThreadAllocatorUsage* usage = GetOrCreateThreadUsage();
-
- // Update the outer max.
- if (usage->max_allocated_bytes) {
- uint64_t outer_net_alloc_bytes =
- usage_at_creation_.alloc_bytes - usage_at_creation_.free_bytes;
-
- usage->max_allocated_bytes =
- std::max(usage_at_creation_.max_allocated_bytes,
- outer_net_alloc_bytes + usage->max_allocated_bytes);
+ ThreadHeapUsage current = *thread_usage_;
+ if (usage_is_exclusive) {
+ // Restore the outer scope.
+ *thread_usage_ = usage_;
+ } else {
+ // Update the outer scope with the accrued inner usage.
+ if (thread_usage_->max_allocated_bytes) {
+ uint64_t outer_net_alloc_bytes = usage_.alloc_bytes - usage_.free_bytes;
+
+ thread_usage_->max_allocated_bytes =
+ std::max(usage_.max_allocated_bytes,
+ outer_net_alloc_bytes + thread_usage_->max_allocated_bytes);
+ }
+
+ thread_usage_->alloc_ops += usage_.alloc_ops;
+ thread_usage_->alloc_bytes += usage_.alloc_bytes;
+ thread_usage_->alloc_overhead_bytes += usage_.alloc_overhead_bytes;
+ thread_usage_->free_ops += usage_.free_ops;
+ thread_usage_->free_bytes += usage_.free_bytes;
}
- usage->alloc_ops += usage_at_creation_.alloc_ops;
- usage->alloc_bytes += usage_at_creation_.alloc_bytes;
- usage->alloc_overhead_bytes += usage_at_creation_.alloc_overhead_bytes;
- usage->free_ops += usage_at_creation_.free_ops;
- usage->free_bytes += usage_at_creation_.free_bytes;
+ thread_usage_ = nullptr;
+ usage_ = current;
}
-ScopedThreadHeapUsage::ThreadAllocatorUsage
-ScopedThreadHeapUsage::CurrentUsage() {
- ThreadAllocatorUsage* usage = GetOrCreateThreadUsage();
+ThreadHeapUsage ThreadHeapUsageTracker::GetUsageSnapshot() {
+ DCHECK(g_thread_allocator_usage.initialized());
+
+ ThreadHeapUsage* usage = GetOrCreateThreadUsage();
+ DCHECK_NE(nullptr, usage);
return *usage;
}
-void ScopedThreadHeapUsage::Initialize() {
- if (!g_thread_allocator_usage.initialized()) {
- g_thread_allocator_usage.Initialize([](void* allocator_usage) {
- delete static_cast<ScopedThreadHeapUsage::ThreadAllocatorUsage*>(
- allocator_usage);
- });
- }
-}
+void ThreadHeapUsageTracker::EnableHeapTracking() {
+ EnsureTLSInitialized();
-void ScopedThreadHeapUsage::EnableHeapTracking() {
CHECK_EQ(false, g_heap_tracking_enabled) << "No double-enabling.";
g_heap_tracking_enabled = true;
#if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
@@ -215,7 +232,11 @@ void ScopedThreadHeapUsage::EnableHeapTracking() {
#endif // BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
}
-void ScopedThreadHeapUsage::DisableHeapTrackingForTesting() {
+bool ThreadHeapUsageTracker::IsHeapTrackingEnabled() {
+ return g_heap_tracking_enabled;
+}
+
+void ThreadHeapUsageTracker::DisableHeapTrackingForTesting() {
#if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
base::allocator::RemoveAllocatorDispatchForTesting(&allocator_dispatch);
#else
@@ -226,9 +247,17 @@ void ScopedThreadHeapUsage::DisableHeapTrackingForTesting() {
}
base::allocator::AllocatorDispatch*
-ScopedThreadHeapUsage::GetDispatchForTesting() {
+ThreadHeapUsageTracker::GetDispatchForTesting() {
return &allocator_dispatch;
}
+void ThreadHeapUsageTracker::EnsureTLSInitialized() {
+ if (!g_thread_allocator_usage.initialized()) {
+ g_thread_allocator_usage.Initialize([](void* allocator_usage) {
+ delete static_cast<ThreadHeapUsage*>(allocator_usage);
+ });
+ }
+}
+
} // namespace debug
} // namespace base
diff --git a/chromium/base/debug/thread_heap_usage_tracker.h b/chromium/base/debug/thread_heap_usage_tracker.h
new file mode 100644
index 00000000000..508a0a3973c
--- /dev/null
+++ b/chromium/base/debug/thread_heap_usage_tracker.h
@@ -0,0 +1,117 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DEBUG_THREAD_HEAP_USAGE_TRACKER_H_
+#define BASE_DEBUG_THREAD_HEAP_USAGE_TRACKER_H_
+
+#include <stdint.h>
+
+#include "base/allocator/features.h"
+#include "base/base_export.h"
+#include "base/threading/thread_checker.h"
+
+namespace base {
+namespace allocator {
+struct AllocatorDispatch;
+} // namespace allocator
+
+namespace debug {
+
+// Used to store the heap allocator usage in a scope.
+struct ThreadHeapUsage {
+ // The cumulative number of allocation operations.
+ uint64_t alloc_ops;
+
+  // The cumulative number of allocated bytes. Where available, this is
+  // inclusive of heap padding and estimated or actual heap overhead.
+ uint64_t alloc_bytes;
+
+ // Where available, cumulative number of heap padding and overhead bytes.
+ uint64_t alloc_overhead_bytes;
+
+ // The cumulative number of free operations.
+ uint64_t free_ops;
+
+ // The cumulative number of bytes freed.
+ // Only recorded if the underlying heap shim can return the size of an
+ // allocation.
+ uint64_t free_bytes;
+
+ // The maximal value of |alloc_bytes| - |free_bytes| seen for this thread.
+ // Only recorded if the underlying heap shim supports returning the size of
+ // an allocation.
+ uint64_t max_allocated_bytes;
+};
+
+// By keeping a tally on heap operations, it's possible to track:
+// - the number of alloc/free operations, where a realloc is zero or one
+// of each, depending on the input parameters (see man realloc).
+// - the number of bytes allocated/freed.
+// - the number of estimated bytes of heap overhead used.
+// - the high-watermark amount of bytes allocated in the scope.
+// This in turn allows measuring the memory usage and memory usage churn over
+// a scope. Scopes must be cleanly nested, and each scope must be
+// destroyed on the thread where it's created.
+//
+// Note that this depends on the capabilities of the underlying heap shim. If
+// that shim can not yield a size estimate for an allocation, it's not possible
+// to keep track of overhead, freed bytes and the allocation high water mark.
+class BASE_EXPORT ThreadHeapUsageTracker {
+ public:
+ ThreadHeapUsageTracker();
+ ~ThreadHeapUsageTracker();
+
+  // Starts tracking heap usage on this thread.
+  // This may only be called on the thread where the instance is created.
+  // Note that IsHeapTrackingEnabled() must return true.
+ void Start();
+
+  // Stops tracking heap usage on this thread and stores the usage tallied.
+ // If |usage_is_exclusive| is true, the usage tallied won't be added to the
+ // outer scope's usage. If |usage_is_exclusive| is false, the usage tallied
+ // in this scope will also tally to any outer scope.
+ // This may only be called on the thread where the instance is created.
+ void Stop(bool usage_is_exclusive);
+
+  // Returns the usage tallied between Start() and Stop(); only valid after
+  // Stop() has been called.
+ const ThreadHeapUsage& usage() const { return usage_; }
+
+ // Returns this thread's heap usage from the start of the innermost
+ // enclosing ThreadHeapUsageTracker instance, if any.
+ static ThreadHeapUsage GetUsageSnapshot();
+
+ // Enables the heap intercept. May only be called once, and only if the heap
+ // shim is available, e.g. if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM) is
+ // true.
+ static void EnableHeapTracking();
+
+ // Returns true iff heap tracking is enabled.
+ static bool IsHeapTrackingEnabled();
+
+ protected:
+ // Exposed for testing only - note that it's safe to re-EnableHeapTracking()
+ // after calling this function in tests.
+ static void DisableHeapTrackingForTesting();
+
+ // Exposed for testing only.
+ static void EnsureTLSInitialized();
+
+ // Exposed to allow testing the shim without inserting it in the allocator
+ // shim chain.
+ static base::allocator::AllocatorDispatch* GetDispatchForTesting();
+
+ private:
+ ThreadChecker thread_checker_;
+
+ // The heap usage at Start(), or the difference from Start() to Stop().
+ ThreadHeapUsage usage_;
+
+ // This thread's heap usage, non-null from Start() to Stop().
+ ThreadHeapUsage* thread_usage_;
+};
+
+} // namespace debug
+} // namespace base
+
+#endif  // BASE_DEBUG_THREAD_HEAP_USAGE_TRACKER_H_
\ No newline at end of file
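
A minimal sketch of the Start()/Stop() protocol declared above; the workload
function DoWorkThatAllocates() is hypothetical, and heap tracking is assumed to
have been enabled once at process start via EnableHeapTracking():

    ThreadHeapUsageTracker tracker;
    tracker.Start();
    DoWorkThatAllocates();             // Hypothetical allocating workload.
    tracker.Stop(false);               // false: also tally into outer scope.
    const ThreadHeapUsage& usage = tracker.usage();
    // Inspect usage.alloc_ops, usage.alloc_bytes, usage.max_allocated_bytes...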
diff --git a/chromium/base/debug/scoped_thread_heap_usage_unittest.cc b/chromium/base/debug/thread_heap_usage_tracker_unittest.cc
index da66a32b630..6aac4b594a3 100644
--- a/chromium/base/debug/scoped_thread_heap_usage_unittest.cc
+++ b/chromium/base/debug/thread_heap_usage_tracker_unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "base/debug/scoped_thread_heap_usage.h"
+#include "base/debug/thread_heap_usage_tracker.h"
#include <map>
@@ -15,16 +15,18 @@ namespace debug {
namespace {
-class TestingScopedThreadHeapUsage : public ScopedThreadHeapUsage {
+class TestingThreadHeapUsageTracker : public ThreadHeapUsageTracker {
public:
- using ScopedThreadHeapUsage::DisableHeapTrackingForTesting;
- using ScopedThreadHeapUsage::GetDispatchForTesting;
+ using ThreadHeapUsageTracker::DisableHeapTrackingForTesting;
+ using ThreadHeapUsageTracker::EnsureTLSInitialized;
+ using ThreadHeapUsageTracker::GetDispatchForTesting;
};
// A fixture class that allows testing the AllocatorDispatch associated with
-// the ScopedThreadHeapUsage class in isolation against a mocked underlying
+// the ThreadHeapUsageTracker class in isolation against a mocked underlying
// heap implementation.
-class ScopedThreadHeapUsageTest : public testing::Test {
+class ThreadHeapUsageTrackerTest : public testing::Test {
public:
using AllocatorDispatch = base::allocator::AllocatorDispatch;
@@ -35,12 +37,12 @@ class ScopedThreadHeapUsageTest : public testing::Test {
ZERO_SIZE_FUNCTION,
};
- ScopedThreadHeapUsageTest() : size_function_kind_(EXACT_SIZE_FUNCTION) {
+ ThreadHeapUsageTrackerTest() : size_function_kind_(EXACT_SIZE_FUNCTION) {
EXPECT_EQ(nullptr, g_self);
g_self = this;
}
- ~ScopedThreadHeapUsageTest() override {
+ ~ThreadHeapUsageTrackerTest() override {
EXPECT_EQ(this, g_self);
g_self = nullptr;
}
@@ -50,10 +52,10 @@ class ScopedThreadHeapUsageTest : public testing::Test {
}
void SetUp() override {
- ScopedThreadHeapUsage::Initialize();
+ TestingThreadHeapUsageTracker::EnsureTLSInitialized();
dispatch_under_test_ =
- TestingScopedThreadHeapUsage::GetDispatchForTesting();
+ TestingThreadHeapUsageTracker::GetDispatchForTesting();
ASSERT_EQ(nullptr, dispatch_under_test_->next);
dispatch_under_test_->next = &g_mock_dispatch;
@@ -186,35 +188,36 @@ class ScopedThreadHeapUsageTest : public testing::Test {
AllocatorDispatch* dispatch_under_test_;
static base::allocator::AllocatorDispatch g_mock_dispatch;
- static ScopedThreadHeapUsageTest* g_self;
+ static ThreadHeapUsageTrackerTest* g_self;
};
-const size_t ScopedThreadHeapUsageTest::kAllocationPadding = 23;
+const size_t ThreadHeapUsageTrackerTest::kAllocationPadding = 23;
-ScopedThreadHeapUsageTest* ScopedThreadHeapUsageTest::g_self = nullptr;
+ThreadHeapUsageTrackerTest* ThreadHeapUsageTrackerTest::g_self = nullptr;
-base::allocator::AllocatorDispatch ScopedThreadHeapUsageTest::g_mock_dispatch =
+base::allocator::AllocatorDispatch ThreadHeapUsageTrackerTest::g_mock_dispatch =
{
- &ScopedThreadHeapUsageTest::OnAllocFn, // alloc_function
- &ScopedThreadHeapUsageTest::
+ &ThreadHeapUsageTrackerTest::OnAllocFn, // alloc_function
+ &ThreadHeapUsageTrackerTest::
OnAllocZeroInitializedFn, // alloc_zero_initialized_function
- &ScopedThreadHeapUsageTest::OnAllocAlignedFn, // alloc_aligned_function
- &ScopedThreadHeapUsageTest::OnReallocFn, // realloc_function
- &ScopedThreadHeapUsageTest::OnFreeFn, // free_function
- &ScopedThreadHeapUsageTest::
+ &ThreadHeapUsageTrackerTest::
+ OnAllocAlignedFn, // alloc_aligned_function
+ &ThreadHeapUsageTrackerTest::OnReallocFn, // realloc_function
+ &ThreadHeapUsageTrackerTest::OnFreeFn, // free_function
+ &ThreadHeapUsageTrackerTest::
OnGetSizeEstimateFn, // get_size_estimate_function
nullptr, // next
};
} // namespace
-TEST_F(ScopedThreadHeapUsageTest, SimpleUsageWithExactSizeFunction) {
+TEST_F(ThreadHeapUsageTrackerTest, SimpleUsageWithExactSizeFunction) {
set_size_function_kind(EXACT_SIZE_FUNCTION);
- ScopedThreadHeapUsage scoped_usage;
+ ThreadHeapUsageTracker usage_tracker;
+ usage_tracker.Start();
- ScopedThreadHeapUsage::ThreadAllocatorUsage u1 =
- ScopedThreadHeapUsage::CurrentUsage();
+ ThreadHeapUsage u1 = ThreadHeapUsageTracker::GetUsageSnapshot();
EXPECT_EQ(0U, u1.alloc_ops);
EXPECT_EQ(0U, u1.alloc_bytes);
@@ -227,8 +230,8 @@ TEST_F(ScopedThreadHeapUsageTest, SimpleUsageWithExactSizeFunction) {
void* ptr = MockMalloc(kAllocSize);
MockFree(ptr);
- ScopedThreadHeapUsage::ThreadAllocatorUsage u2 =
- ScopedThreadHeapUsage::CurrentUsage();
+ usage_tracker.Stop(false);
+ ThreadHeapUsage u2 = usage_tracker.usage();
EXPECT_EQ(1U, u2.alloc_ops);
EXPECT_EQ(kAllocSize, u2.alloc_bytes);
@@ -238,13 +241,13 @@ TEST_F(ScopedThreadHeapUsageTest, SimpleUsageWithExactSizeFunction) {
EXPECT_EQ(kAllocSize, u2.max_allocated_bytes);
}
-TEST_F(ScopedThreadHeapUsageTest, SimpleUsageWithPaddingSizeFunction) {
+TEST_F(ThreadHeapUsageTrackerTest, SimpleUsageWithPaddingSizeFunction) {
set_size_function_kind(PADDING_SIZE_FUNCTION);
- ScopedThreadHeapUsage scoped_usage;
+ ThreadHeapUsageTracker usage_tracker;
+ usage_tracker.Start();
- ScopedThreadHeapUsage::ThreadAllocatorUsage u1 =
- ScopedThreadHeapUsage::CurrentUsage();
+ ThreadHeapUsage u1 = ThreadHeapUsageTracker::GetUsageSnapshot();
EXPECT_EQ(0U, u1.alloc_ops);
EXPECT_EQ(0U, u1.alloc_bytes);
@@ -257,8 +260,8 @@ TEST_F(ScopedThreadHeapUsageTest, SimpleUsageWithPaddingSizeFunction) {
void* ptr = MockMalloc(kAllocSize);
MockFree(ptr);
- ScopedThreadHeapUsage::ThreadAllocatorUsage u2 =
- ScopedThreadHeapUsage::CurrentUsage();
+ usage_tracker.Stop(false);
+ ThreadHeapUsage u2 = usage_tracker.usage();
EXPECT_EQ(1U, u2.alloc_ops);
EXPECT_EQ(kAllocSize + kAllocationPadding, u2.alloc_bytes);
@@ -268,13 +271,13 @@ TEST_F(ScopedThreadHeapUsageTest, SimpleUsageWithPaddingSizeFunction) {
EXPECT_EQ(kAllocSize + kAllocationPadding, u2.max_allocated_bytes);
}
-TEST_F(ScopedThreadHeapUsageTest, SimpleUsageWithZeroSizeFunction) {
+TEST_F(ThreadHeapUsageTrackerTest, SimpleUsageWithZeroSizeFunction) {
set_size_function_kind(ZERO_SIZE_FUNCTION);
- ScopedThreadHeapUsage scoped_usage;
+ ThreadHeapUsageTracker usage_tracker;
+ usage_tracker.Start();
- ScopedThreadHeapUsage::ThreadAllocatorUsage u1 =
- ScopedThreadHeapUsage::CurrentUsage();
+ ThreadHeapUsage u1 = ThreadHeapUsageTracker::GetUsageSnapshot();
EXPECT_EQ(0U, u1.alloc_ops);
EXPECT_EQ(0U, u1.alloc_bytes);
EXPECT_EQ(0U, u1.alloc_overhead_bytes);
@@ -286,8 +289,8 @@ TEST_F(ScopedThreadHeapUsageTest, SimpleUsageWithZeroSizeFunction) {
void* ptr = MockMalloc(kAllocSize);
MockFree(ptr);
- ScopedThreadHeapUsage::ThreadAllocatorUsage u2 =
- ScopedThreadHeapUsage::CurrentUsage();
+ usage_tracker.Stop(false);
+ ThreadHeapUsage u2 = usage_tracker.usage();
// With a get-size function that returns zero, there's no way to get the size
// of an allocation that's being freed, hence the shim can't tally freed bytes
@@ -300,16 +303,16 @@ TEST_F(ScopedThreadHeapUsageTest, SimpleUsageWithZeroSizeFunction) {
EXPECT_EQ(0U, u2.max_allocated_bytes);
}
-TEST_F(ScopedThreadHeapUsageTest, ReallocCorrectlyTallied) {
+TEST_F(ThreadHeapUsageTrackerTest, ReallocCorrectlyTallied) {
const size_t kAllocSize = 237U;
{
- ScopedThreadHeapUsage scoped_usage;
+ ThreadHeapUsageTracker usage_tracker;
+ usage_tracker.Start();
// Reallocating nullptr should count as a single alloc.
void* ptr = MockRealloc(nullptr, kAllocSize);
- ScopedThreadHeapUsage::ThreadAllocatorUsage usage =
- ScopedThreadHeapUsage::CurrentUsage();
+ ThreadHeapUsage usage = ThreadHeapUsageTracker::GetUsageSnapshot();
EXPECT_EQ(1U, usage.alloc_ops);
EXPECT_EQ(kAllocSize, usage.alloc_bytes);
EXPECT_EQ(0U, usage.alloc_overhead_bytes);
@@ -321,13 +324,13 @@ TEST_F(ScopedThreadHeapUsageTest, ReallocCorrectlyTallied) {
// free.
ptr = MockRealloc(ptr, 0U);
- usage = ScopedThreadHeapUsage::CurrentUsage();
- EXPECT_EQ(1U, usage.alloc_ops);
- EXPECT_EQ(kAllocSize, usage.alloc_bytes);
- EXPECT_EQ(0U, usage.alloc_overhead_bytes);
- EXPECT_EQ(1U, usage.free_ops);
- EXPECT_EQ(kAllocSize, usage.free_bytes);
- EXPECT_EQ(kAllocSize, usage.max_allocated_bytes);
+ usage_tracker.Stop(false);
+ EXPECT_EQ(1U, usage_tracker.usage().alloc_ops);
+ EXPECT_EQ(kAllocSize, usage_tracker.usage().alloc_bytes);
+ EXPECT_EQ(0U, usage_tracker.usage().alloc_overhead_bytes);
+ EXPECT_EQ(1U, usage_tracker.usage().free_ops);
+ EXPECT_EQ(kAllocSize, usage_tracker.usage().free_bytes);
+ EXPECT_EQ(kAllocSize, usage_tracker.usage().max_allocated_bytes);
// Realloc to zero size may or may not return a nullptr - make sure to
// free the zero-size alloc in the latter case.
@@ -336,11 +339,11 @@ TEST_F(ScopedThreadHeapUsageTest, ReallocCorrectlyTallied) {
}
{
- ScopedThreadHeapUsage scoped_usage;
+ ThreadHeapUsageTracker usage_tracker;
+ usage_tracker.Start();
void* ptr = MockMalloc(kAllocSize);
- ScopedThreadHeapUsage::ThreadAllocatorUsage usage =
- ScopedThreadHeapUsage::CurrentUsage();
+ ThreadHeapUsage usage = ThreadHeapUsageTracker::GetUsageSnapshot();
EXPECT_EQ(1U, usage.alloc_ops);
// Now try reallocating a valid pointer to a larger size, this should count
@@ -348,82 +351,171 @@ TEST_F(ScopedThreadHeapUsageTest, ReallocCorrectlyTallied) {
const size_t kLargerAllocSize = kAllocSize + 928U;
ptr = MockRealloc(ptr, kLargerAllocSize);
- usage = ScopedThreadHeapUsage::CurrentUsage();
- EXPECT_EQ(2U, usage.alloc_ops);
- EXPECT_EQ(kAllocSize + kLargerAllocSize, usage.alloc_bytes);
- EXPECT_EQ(0U, usage.alloc_overhead_bytes);
- EXPECT_EQ(1U, usage.free_ops);
- EXPECT_EQ(kAllocSize, usage.free_bytes);
- EXPECT_EQ(kLargerAllocSize, usage.max_allocated_bytes);
+ usage_tracker.Stop(false);
+ EXPECT_EQ(2U, usage_tracker.usage().alloc_ops);
+ EXPECT_EQ(kAllocSize + kLargerAllocSize, usage_tracker.usage().alloc_bytes);
+ EXPECT_EQ(0U, usage_tracker.usage().alloc_overhead_bytes);
+ EXPECT_EQ(1U, usage_tracker.usage().free_ops);
+ EXPECT_EQ(kAllocSize, usage_tracker.usage().free_bytes);
+ EXPECT_EQ(kLargerAllocSize, usage_tracker.usage().max_allocated_bytes);
MockFree(ptr);
}
}
-TEST_F(ScopedThreadHeapUsageTest, NestedMaxWorks) {
- ScopedThreadHeapUsage outer_scoped_usage;
+TEST_F(ThreadHeapUsageTrackerTest, NestedMaxWorks) {
+ ThreadHeapUsageTracker usage_tracker;
+ usage_tracker.Start();
const size_t kOuterAllocSize = 1029U;
void* ptr = MockMalloc(kOuterAllocSize);
MockFree(ptr);
EXPECT_EQ(kOuterAllocSize,
- ScopedThreadHeapUsage::CurrentUsage().max_allocated_bytes);
+ ThreadHeapUsageTracker::GetUsageSnapshot().max_allocated_bytes);
{
- ScopedThreadHeapUsage inner_scoped_usage;
+ ThreadHeapUsageTracker inner_usage_tracker;
+ inner_usage_tracker.Start();
const size_t kInnerAllocSize = 673U;
ptr = MockMalloc(kInnerAllocSize);
MockFree(ptr);
- EXPECT_EQ(kInnerAllocSize,
- ScopedThreadHeapUsage::CurrentUsage().max_allocated_bytes);
+ inner_usage_tracker.Stop(false);
+
+ EXPECT_EQ(kInnerAllocSize, inner_usage_tracker.usage().max_allocated_bytes);
}
// The greater, outer allocation size should have been restored.
EXPECT_EQ(kOuterAllocSize,
- ScopedThreadHeapUsage::CurrentUsage().max_allocated_bytes);
+ ThreadHeapUsageTracker::GetUsageSnapshot().max_allocated_bytes);
const size_t kLargerInnerAllocSize = kOuterAllocSize + 673U;
{
- ScopedThreadHeapUsage inner_scoped_usage;
+ ThreadHeapUsageTracker inner_usage_tracker;
+ inner_usage_tracker.Start();
ptr = MockMalloc(kLargerInnerAllocSize);
MockFree(ptr);
+ inner_usage_tracker.Stop(false);
EXPECT_EQ(kLargerInnerAllocSize,
- ScopedThreadHeapUsage::CurrentUsage().max_allocated_bytes);
+ inner_usage_tracker.usage().max_allocated_bytes);
}
// The greater, inner allocation size should have been preserved.
EXPECT_EQ(kLargerInnerAllocSize,
- ScopedThreadHeapUsage::CurrentUsage().max_allocated_bytes);
+ ThreadHeapUsageTracker::GetUsageSnapshot().max_allocated_bytes);
// Now try the case with an outstanding net alloc size when entering the
// inner scope.
void* outer_ptr = MockMalloc(kOuterAllocSize);
EXPECT_EQ(kLargerInnerAllocSize,
- ScopedThreadHeapUsage::CurrentUsage().max_allocated_bytes);
+ ThreadHeapUsageTracker::GetUsageSnapshot().max_allocated_bytes);
{
- ScopedThreadHeapUsage inner_scoped_usage;
+ ThreadHeapUsageTracker inner_usage_tracker;
+ inner_usage_tracker.Start();
ptr = MockMalloc(kLargerInnerAllocSize);
MockFree(ptr);
+ inner_usage_tracker.Stop(false);
EXPECT_EQ(kLargerInnerAllocSize,
- ScopedThreadHeapUsage::CurrentUsage().max_allocated_bytes);
+ inner_usage_tracker.usage().max_allocated_bytes);
}
// While the inner scope saw only the inner net outstanding allocation size,
// the outer scope saw both outstanding at the same time.
EXPECT_EQ(kOuterAllocSize + kLargerInnerAllocSize,
- ScopedThreadHeapUsage::CurrentUsage().max_allocated_bytes);
+ ThreadHeapUsageTracker::GetUsageSnapshot().max_allocated_bytes);
MockFree(outer_ptr);
+
+ // Test a net-negative scope.
+ ptr = MockMalloc(kLargerInnerAllocSize);
+ {
+ ThreadHeapUsageTracker inner_usage_tracker;
+ inner_usage_tracker.Start();
+
+ MockFree(ptr);
+
+ const size_t kInnerAllocSize = 1;
+ ptr = MockMalloc(kInnerAllocSize);
+
+ inner_usage_tracker.Stop(false);
+ // Since the scope is still net-negative, the max is clamped at zero.
+ EXPECT_EQ(0U, inner_usage_tracker.usage().max_allocated_bytes);
+ }
+
+ MockFree(ptr);
}
-TEST_F(ScopedThreadHeapUsageTest, AllShimFunctionsAreProvided) {
+TEST_F(ThreadHeapUsageTrackerTest, NoStopImpliesInclusive) {
+ ThreadHeapUsageTracker usage_tracker;
+ usage_tracker.Start();
+
+ const size_t kOuterAllocSize = 1029U;
+ void* ptr = MockMalloc(kOuterAllocSize);
+ MockFree(ptr);
+
+ ThreadHeapUsage usage = ThreadHeapUsageTracker::GetUsageSnapshot();
+ EXPECT_EQ(kOuterAllocSize, usage.max_allocated_bytes);
+
+ const size_t kInnerLargerAllocSize = kOuterAllocSize + 673U;
+
+ {
+ ThreadHeapUsageTracker inner_usage_tracker;
+ inner_usage_tracker.Start();
+
+ // Make a larger allocation than the outer scope.
+ ptr = MockMalloc(kInnerLargerAllocSize);
+ MockFree(ptr);
+
+ // inner_usage_tracker goes out of scope without a Stop().
+ }
+
+ ThreadHeapUsage current = ThreadHeapUsageTracker::GetUsageSnapshot();
+ EXPECT_EQ(usage.alloc_ops + 1, current.alloc_ops);
+ EXPECT_EQ(usage.alloc_bytes + kInnerLargerAllocSize, current.alloc_bytes);
+ EXPECT_EQ(usage.free_ops + 1, current.free_ops);
+ EXPECT_EQ(usage.free_bytes + kInnerLargerAllocSize, current.free_bytes);
+ EXPECT_EQ(kInnerLargerAllocSize, current.max_allocated_bytes);
+}
+
+TEST_F(ThreadHeapUsageTrackerTest, ExclusiveScopesWork) {
+ ThreadHeapUsageTracker usage_tracker;
+ usage_tracker.Start();
+
+ const size_t kOuterAllocSize = 1029U;
+ void* ptr = MockMalloc(kOuterAllocSize);
+ MockFree(ptr);
+
+ ThreadHeapUsage usage = ThreadHeapUsageTracker::GetUsageSnapshot();
+ EXPECT_EQ(kOuterAllocSize, usage.max_allocated_bytes);
+
+ {
+ ThreadHeapUsageTracker inner_usage_tracker;
+ inner_usage_tracker.Start();
+
+ // Make a larger allocation than the outer scope.
+ ptr = MockMalloc(kOuterAllocSize + 673U);
+ MockFree(ptr);
+
+ // This tracker is exclusive; all activity should be private to this scope.
+ inner_usage_tracker.Stop(true);
+ }
+
+ ThreadHeapUsage current = ThreadHeapUsageTracker::GetUsageSnapshot();
+ EXPECT_EQ(usage.alloc_ops, current.alloc_ops);
+ EXPECT_EQ(usage.alloc_bytes, current.alloc_bytes);
+ EXPECT_EQ(usage.alloc_overhead_bytes, current.alloc_overhead_bytes);
+ EXPECT_EQ(usage.free_ops, current.free_ops);
+ EXPECT_EQ(usage.free_bytes, current.free_bytes);
+ EXPECT_EQ(usage.max_allocated_bytes, current.max_allocated_bytes);
+}
+
+TEST_F(ThreadHeapUsageTrackerTest, AllShimFunctionsAreProvided) {
const size_t kAllocSize = 100;
void* alloc = MockMalloc(kAllocSize);
size_t estimate = MockGetSizeEstimate(alloc);
@@ -446,26 +538,29 @@ TEST_F(ScopedThreadHeapUsageTest, AllShimFunctionsAreProvided) {
}
#if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
-TEST(ScopedThreadHeapShimTest, HooksIntoMallocWhenShimAvailable) {
- ScopedThreadHeapUsage::Initialize();
- ScopedThreadHeapUsage::EnableHeapTracking();
+TEST(ThreadHeapUsageShimTest, HooksIntoMallocWhenShimAvailable) {
+ ASSERT_FALSE(ThreadHeapUsageTracker::IsHeapTrackingEnabled());
+
+ ThreadHeapUsageTracker::EnableHeapTracking();
+
+ ASSERT_TRUE(ThreadHeapUsageTracker::IsHeapTrackingEnabled());
const size_t kAllocSize = 9993;
// This test verifies that the scoped heap data is affected by malloc &
// free only when the shim is available.
- ScopedThreadHeapUsage scoped_usage;
+ ThreadHeapUsageTracker usage_tracker;
+ usage_tracker.Start();
- ScopedThreadHeapUsage::ThreadAllocatorUsage u1 =
- ScopedThreadHeapUsage::CurrentUsage();
+ ThreadHeapUsage u1 = ThreadHeapUsageTracker::GetUsageSnapshot();
void* ptr = malloc(kAllocSize);
// Prevent the compiler from optimizing out the malloc/free pair.
ASSERT_NE(nullptr, ptr);
- ScopedThreadHeapUsage::ThreadAllocatorUsage u2 =
- ScopedThreadHeapUsage::CurrentUsage();
+ ThreadHeapUsage u2 = ThreadHeapUsageTracker::GetUsageSnapshot();
free(ptr);
- ScopedThreadHeapUsage::ThreadAllocatorUsage u3 =
- ScopedThreadHeapUsage::CurrentUsage();
+
+ usage_tracker.Stop(false);
+ ThreadHeapUsage u3 = usage_tracker.usage();
// Verify that at least one allocation operation was recorded, and that free
// operations are at least monotonically growing.
@@ -479,7 +574,9 @@ TEST(ScopedThreadHeapShimTest, HooksIntoMallocWhenShimAvailable) {
// Verify that at least the one free operation above was recorded.
EXPECT_LE(u2.free_ops + 1, u3.free_ops);
- TestingScopedThreadHeapUsage::DisableHeapTrackingForTesting();
+ TestingThreadHeapUsageTracker::DisableHeapTrackingForTesting();
+
+ ASSERT_FALSE(ThreadHeapUsageTracker::IsHeapTrackingEnabled());
}
#endif // BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
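The tests above pin down the reworked contract: Start() begins accumulating the calling thread's heap activity, Stop() freezes the tally into usage(), and GetUsageSnapshot() reads the live counters while a tracker is running. A minimal sketch of the pattern outside the test fixture, assuming tracking was enabled once at startup via ThreadHeapUsageTracker::EnableHeapTracking():

    #include "base/debug/thread_heap_usage_tracker.h"

    void MeasureScope() {
      base::debug::ThreadHeapUsageTracker tracker;
      tracker.Start();
      // ... code whose per-thread heap activity should be measured ...
      tracker.Stop(false);  // false: fold this scope's activity into any
                            // outer tracker, per the NestedMaxWorks test.
      const base::debug::ThreadHeapUsage usage = tracker.usage();
      // usage.alloc_ops, usage.alloc_bytes, usage.free_bytes and
      // usage.max_allocated_bytes now describe the measured scope.
    }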
diff --git a/chromium/base/feature_list.h b/chromium/base/feature_list.h
index 698ecd53342..80209ba6588 100644
--- a/chromium/base/feature_list.h
+++ b/chromium/base/feature_list.h
@@ -163,6 +163,8 @@ class BASE_EXPORT FeatureList {
// Registers the given |instance| to be the singleton feature list for this
// process. This should only be called once and |instance| must not be null.
+ // Note: If you are considering using this for the purposes of testing, take
+ // a look at using base/test/scoped_feature_list.h instead.
static void SetInstance(std::unique_ptr<FeatureList> instance);
// Clears the previously-registered singleton instance for tests and returns
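For tests, the new note steers callers to base/test/scoped_feature_list.h instead of SetInstance(). A hedged sketch of that route, assuming this revision's ScopedFeatureList API and a hypothetical base::Feature named kMyFeature:

    #include "base/feature_list.h"
    #include "base/test/scoped_feature_list.h"

    const base::Feature kMyFeature{"MyFeature",
                                   base::FEATURE_DISABLED_BY_DEFAULT};

    TEST(MyFeatureTest, EnabledForTest) {
      base::test::ScopedFeatureList feature_list;
      feature_list.InitAndEnableFeature(kMyFeature);
      EXPECT_TRUE(base::FeatureList::IsEnabled(kMyFeature));
      // Prior feature state is restored when |feature_list| leaves scope.
    }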
diff --git a/chromium/base/files/file_path.cc b/chromium/base/files/file_path.cc
index 0b06493c883..cff862ae19d 100644
--- a/chromium/base/files/file_path.cc
+++ b/chromium/base/files/file_path.cc
@@ -560,6 +560,12 @@ FilePath FilePath::StripTrailingSeparators() const {
}
bool FilePath::ReferencesParent() const {
+ if (path_.find(kParentDirectory) == StringType::npos) {
+ // GetComponents is quite expensive, so avoid calling it in the majority
+ // of cases where there isn't a kParentDirectory anywhere in the path.
+ return false;
+ }
+
std::vector<StringType> components;
GetComponents(&components);
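The early return makes the common case cheap: when ".." never occurs anywhere in the path, ReferencesParent() now answers without building the component vector at all. Illustrative calls (a sketch; behavior per the code above):

    base::FilePath safe(FILE_PATH_LITERAL("foo/bar/baz"));
    safe.ReferencesParent();    // false -- fast path, no GetComponents().

    base::FilePath upward(FILE_PATH_LITERAL("foo/../secret"));
    upward.ReferencesParent();  // true -- falls through to the component scan.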
diff --git a/chromium/base/files/file_path_watcher.cc b/chromium/base/files/file_path_watcher.cc
index e370fcb1355..245bd8efe22 100644
--- a/chromium/base/files/file_path_watcher.cc
+++ b/chromium/base/files/file_path_watcher.cc
@@ -8,12 +8,12 @@
#include "base/files/file_path_watcher.h"
#include "base/logging.h"
-#include "base/message_loop/message_loop.h"
#include "build/build_config.h"
namespace base {
FilePathWatcher::~FilePathWatcher() {
+ DCHECK(sequence_checker_.CalledOnValidSequence());
impl_->Cancel();
}
@@ -38,6 +38,7 @@ FilePathWatcher::PlatformDelegate::~PlatformDelegate() {
bool FilePathWatcher::Watch(const FilePath& path,
bool recursive,
const Callback& callback) {
+ DCHECK(sequence_checker_.CalledOnValidSequence());
DCHECK(path.IsAbsolute());
return impl_->Watch(path, recursive, callback);
}
diff --git a/chromium/base/files/file_path_watcher.h b/chromium/base/files/file_path_watcher.h
index 7b1483a4043..267c03ae00b 100644
--- a/chromium/base/files/file_path_watcher.h
+++ b/chromium/base/files/file_path_watcher.h
@@ -12,6 +12,7 @@
#include "base/files/file_path.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
+#include "base/sequence_checker.h"
#include "base/single_thread_task_runner.h"
namespace base {
@@ -25,6 +26,8 @@ namespace base {
// detect the creation and deletion of files in a watched directory, but will
// not detect modifications to those files. See file_path_watcher_kqueue.cc for
// details.
+//
+// Must be destroyed on the sequence that invokes Watch().
class BASE_EXPORT FilePathWatcher {
public:
// Callback type for Watch(). |path| points to the file that was updated,
@@ -76,7 +79,7 @@ class BASE_EXPORT FilePathWatcher {
};
FilePathWatcher();
- virtual ~FilePathWatcher();
+ ~FilePathWatcher();
// A callback that always cleans up the PlatformDelegate, either when executed
// or when deleted without having been executed at all, as can happen during
@@ -98,6 +101,8 @@ class BASE_EXPORT FilePathWatcher {
private:
scoped_refptr<PlatformDelegate> impl_;
+ SequenceChecker sequence_checker_;
+
DISALLOW_COPY_AND_ASSIGN(FilePathWatcher);
};
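FilePathWatcher is now sequence-affine: Watch() and the destructor must run on the sequence that first uses the watcher, which is why the unit-test rewrite below drops its dedicated file thread. A minimal usage sketch under the new rules, assuming a free function matching FilePathWatcher::Callback:

    void OnFileChanged(const base::FilePath& path, bool error) {
      // React to the change; |error| signals the watch can no longer
      // be honored.
    }

    base::FilePathWatcher watcher;
    bool ok = watcher.Watch(base::FilePath(FILE_PATH_LITERAL("/tmp/watched")),
                            false /* recursive */,
                            base::Bind(&OnFileChanged));
    // |watcher| must be destroyed on this same sequence.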
diff --git a/chromium/base/files/file_path_watcher_linux.cc b/chromium/base/files/file_path_watcher_linux.cc
index 958e60cb9d6..3b0ff6236ec 100644
--- a/chromium/base/files/file_path_watcher_linux.cc
+++ b/chromium/base/files/file_path_watcher_linux.cc
@@ -657,6 +657,7 @@ bool FilePathWatcherImpl::HasValidWatchVector() const {
} // namespace
FilePathWatcher::FilePathWatcher() {
+ sequence_checker_.DetachFromSequence();
impl_ = new FilePathWatcherImpl();
}
diff --git a/chromium/base/files/file_path_watcher_mac.cc b/chromium/base/files/file_path_watcher_mac.cc
index b65591a22c9..d59ca2156be 100644
--- a/chromium/base/files/file_path_watcher_mac.cc
+++ b/chromium/base/files/file_path_watcher_mac.cc
@@ -49,6 +49,7 @@ class FilePathWatcherImpl : public FilePathWatcher::PlatformDelegate {
} // namespace
FilePathWatcher::FilePathWatcher() {
+ sequence_checker_.DetachFromSequence();
impl_ = new FilePathWatcherImpl();
}
diff --git a/chromium/base/files/file_path_watcher_stub.cc b/chromium/base/files/file_path_watcher_stub.cc
index c637e3c0fe1..c224e379f37 100644
--- a/chromium/base/files/file_path_watcher_stub.cc
+++ b/chromium/base/files/file_path_watcher_stub.cc
@@ -28,6 +28,7 @@ class FilePathWatcherImpl : public FilePathWatcher::PlatformDelegate {
} // namespace
FilePathWatcher::FilePathWatcher() {
+ sequence_checker_.DetachFromSequence();
impl_ = new FilePathWatcherImpl();
}
diff --git a/chromium/base/files/file_path_watcher_unittest.cc b/chromium/base/files/file_path_watcher_unittest.cc
index fc6f5a85fc9..d2ec37bbec1 100644
--- a/chromium/base/files/file_path_watcher_unittest.cc
+++ b/chromium/base/files/file_path_watcher_unittest.cc
@@ -28,7 +28,6 @@
#include "base/synchronization/waitable_event.h"
#include "base/test/test_file_util.h"
#include "base/test/test_timeouts.h"
-#include "base/threading/thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -37,6 +36,10 @@
#include "base/android/path_utils.h"
#endif // defined(OS_ANDROID)
+#if defined(OS_POSIX)
+#include "base/files/file_descriptor_watcher_posix.h"
+#endif // defined(OS_POSIX)
+
namespace base {
namespace {
@@ -131,30 +134,19 @@ class TestDelegate : public TestDelegateBase {
DISALLOW_COPY_AND_ASSIGN(TestDelegate);
};
-void SetupWatchCallback(const FilePath& target,
- FilePathWatcher* watcher,
- TestDelegateBase* delegate,
- bool recursive_watch,
- bool* result,
- base::WaitableEvent* completion) {
- *result = watcher->Watch(target, recursive_watch,
- base::Bind(&TestDelegateBase::OnFileChanged,
- delegate->AsWeakPtr()));
- completion->Signal();
-}
-
class FilePathWatcherTest : public testing::Test {
public:
FilePathWatcherTest()
- : file_thread_("FilePathWatcherTest") {}
+#if defined(OS_POSIX)
+ : file_descriptor_watcher_(&loop_)
+#endif
+ {
+ }
~FilePathWatcherTest() override {}
protected:
void SetUp() override {
- // Create a separate file thread in order to test proper thread usage.
- base::Thread::Options options(MessageLoop::TYPE_IO, 0);
- ASSERT_TRUE(file_thread_.StartWithOptions(options));
#if defined(OS_ANDROID)
// Watching files is only permitted when all parent directories are
// accessible, which is not the case for the default temp directory
@@ -171,10 +163,6 @@ class FilePathWatcherTest : public testing::Test {
void TearDown() override { RunLoop().RunUntilIdle(); }
- void DeleteDelegateOnFileThread(TestDelegate* delegate) {
- file_thread_.task_runner()->DeleteSoon(FROM_HERE, delegate);
- }
-
FilePath test_file() {
return temp_dir_.GetPath().AppendASCII("FilePathWatcherTest");
}
@@ -196,18 +184,23 @@ class FilePathWatcherTest : public testing::Test {
bool WaitForEvents() WARN_UNUSED_RESULT {
collector_->Reset();
+
+ RunLoop run_loop;
// Make sure we timeout if we don't get notified.
- loop_.task_runner()->PostDelayedTask(FROM_HERE,
- MessageLoop::QuitWhenIdleClosure(),
- TestTimeouts::action_timeout());
- RunLoop().Run();
+ ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+ FROM_HERE, run_loop.QuitWhenIdleClosure(),
+ TestTimeouts::action_timeout());
+ run_loop.Run();
return collector_->Success();
}
NotificationCollector* collector() { return collector_.get(); }
- MessageLoop loop_;
- base::Thread file_thread_;
+ MessageLoopForIO loop_;
+#if defined(OS_POSIX)
+ FileDescriptorWatcher file_descriptor_watcher_;
+#endif
+
ScopedTempDir temp_dir_;
scoped_refptr<NotificationCollector> collector_;
@@ -219,14 +212,9 @@ bool FilePathWatcherTest::SetupWatch(const FilePath& target,
FilePathWatcher* watcher,
TestDelegateBase* delegate,
bool recursive_watch) {
- base::WaitableEvent completion(WaitableEvent::ResetPolicy::AUTOMATIC,
- WaitableEvent::InitialState::NOT_SIGNALED);
- bool result;
- file_thread_.task_runner()->PostTask(
- FROM_HERE, base::Bind(SetupWatchCallback, target, watcher, delegate,
- recursive_watch, &result, &completion));
- completion.Wait();
- return result;
+ return watcher->Watch(
+ target, recursive_watch,
+ base::Bind(&TestDelegateBase::OnFileChanged, delegate->AsWeakPtr()));
}
// Basic test: Create the file and verify that we notice.
@@ -237,7 +225,6 @@ TEST_F(FilePathWatcherTest, NewFile) {
ASSERT_TRUE(WriteFile(test_file(), "content"));
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate.release());
}
// Verify that modifying the file is caught.
@@ -251,7 +238,6 @@ TEST_F(FilePathWatcherTest, ModifiedFile) {
// Now make sure we get notified if the file is modified.
ASSERT_TRUE(WriteFile(test_file(), "new content"));
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate.release());
}
// Verify that moving the file into place is caught.
@@ -266,7 +252,6 @@ TEST_F(FilePathWatcherTest, MovedFile) {
// Now make sure we get notified if the file is modified.
ASSERT_TRUE(base::Move(source_file, test_file()));
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate.release());
}
TEST_F(FilePathWatcherTest, DeletedFile) {
@@ -279,7 +264,6 @@ TEST_F(FilePathWatcherTest, DeletedFile) {
// Now make sure we get notified if the file is deleted.
base::DeleteFile(test_file(), false);
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate.release());
}
// Used by the DeleteDuringNotify test below.
@@ -327,11 +311,9 @@ TEST_F(FilePathWatcherTest, DeleteDuringNotify) {
// Flaky on MacOS (and ARM linux): http://crbug.com/85930
TEST_F(FilePathWatcherTest, DISABLED_DestroyWithPendingNotification) {
std::unique_ptr<TestDelegate> delegate(new TestDelegate(collector()));
- FilePathWatcher* watcher = new FilePathWatcher;
- ASSERT_TRUE(SetupWatch(test_file(), watcher, delegate.get(), false));
+ FilePathWatcher watcher;
+ ASSERT_TRUE(SetupWatch(test_file(), &watcher, delegate.get(), false));
ASSERT_TRUE(WriteFile(test_file(), "content"));
- file_thread_.task_runner()->DeleteSoon(FROM_HERE, watcher);
- DeleteDelegateOnFileThread(delegate.release());
}
TEST_F(FilePathWatcherTest, MultipleWatchersSingleFile) {
@@ -343,8 +325,6 @@ TEST_F(FilePathWatcherTest, MultipleWatchersSingleFile) {
ASSERT_TRUE(WriteFile(test_file(), "content"));
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate1.release());
- DeleteDelegateOnFileThread(delegate2.release());
}
// Verify that watching a file whose parent directory doesn't exist yet works if
@@ -370,7 +350,6 @@ TEST_F(FilePathWatcherTest, NonExistentDirectory) {
ASSERT_TRUE(base::DeleteFile(file, false));
VLOG(1) << "Waiting for file deletion";
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate.release());
}
// Exercises watch reconfiguration for the case that directories on the path
@@ -403,7 +382,6 @@ TEST_F(FilePathWatcherTest, DirectoryChain) {
ASSERT_TRUE(WriteFile(file, "content v2"));
VLOG(1) << "Waiting for file modification";
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate.release());
}
#if defined(OS_MACOSX)
@@ -421,7 +399,6 @@ TEST_F(FilePathWatcherTest, DisappearingDirectory) {
ASSERT_TRUE(base::DeleteFile(dir, true));
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate.release());
}
// Tests that a file that is deleted and reappears is tracked correctly.
@@ -438,7 +415,6 @@ TEST_F(FilePathWatcherTest, DeleteAndRecreate) {
ASSERT_TRUE(WriteFile(test_file(), "content"));
VLOG(1) << "Waiting for file creation";
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate.release());
}
TEST_F(FilePathWatcherTest, WatchDirectory) {
@@ -471,7 +447,6 @@ TEST_F(FilePathWatcherTest, WatchDirectory) {
ASSERT_TRUE(WriteFile(file2, "content"));
VLOG(1) << "Waiting for file2 creation";
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate.release());
}
TEST_F(FilePathWatcherTest, MoveParent) {
@@ -497,8 +472,6 @@ TEST_F(FilePathWatcherTest, MoveParent) {
base::Move(dir, dest);
VLOG(1) << "Waiting for directory move";
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(file_delegate.release());
- DeleteDelegateOnFileThread(subdir_delegate.release());
}
TEST_F(FilePathWatcherTest, RecursiveWatch) {
@@ -508,7 +481,6 @@ TEST_F(FilePathWatcherTest, RecursiveWatch) {
bool setup_result = SetupWatch(dir, &watcher, delegate.get(), true);
if (!FilePathWatcher::RecursiveWatchAvailable()) {
ASSERT_FALSE(setup_result);
- DeleteDelegateOnFileThread(delegate.release());
return;
}
ASSERT_TRUE(setup_result);
@@ -564,7 +536,6 @@ TEST_F(FilePathWatcherTest, RecursiveWatch) {
// Delete "$dir/subdir/subdir_child_dir/child_dir_file1".
ASSERT_TRUE(base::DeleteFile(child_dir_file1, false));
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate.release());
}
#if defined(OS_POSIX)
@@ -612,8 +583,6 @@ TEST_F(FilePathWatcherTest, RecursiveWithSymLink) {
FilePath target2_file(target2.AppendASCII("file"));
ASSERT_TRUE(WriteFile(target2_file, "content"));
ASSERT_TRUE(WaitForEvents());
-
- DeleteDelegateOnFileThread(delegate.release());
}
#endif // OS_POSIX
@@ -640,8 +609,6 @@ TEST_F(FilePathWatcherTest, MoveChild) {
// Move the directory into place, s.t. the watched file appears.
ASSERT_TRUE(base::Move(source_dir, dest_dir));
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(file_delegate.release());
- DeleteDelegateOnFileThread(subdir_delegate.release());
}
// Verify that changing attributes on a file is caught
@@ -662,7 +629,6 @@ TEST_F(FilePathWatcherTest, FileAttributesChanged) {
// Now make sure we get notified if the file is modified.
ASSERT_TRUE(base::MakeFileUnreadable(test_file()));
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate.release());
}
#if defined(OS_LINUX)
@@ -678,7 +644,6 @@ TEST_F(FilePathWatcherTest, CreateLink) {
// Note that test_file() doesn't have to exist.
ASSERT_TRUE(CreateSymbolicLink(test_file(), test_link()));
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate.release());
}
// Verify that deleting a symlink is caught.
@@ -694,7 +659,6 @@ TEST_F(FilePathWatcherTest, DeleteLink) {
// Now make sure we get notified if the link is deleted.
ASSERT_TRUE(base::DeleteFile(test_link(), false));
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate.release());
}
// Verify that modifying a target file that a link is pointing to
@@ -710,7 +674,6 @@ TEST_F(FilePathWatcherTest, ModifiedLinkedFile) {
// Now make sure we get notified if the file is modified.
ASSERT_TRUE(WriteFile(test_file(), "new content"));
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate.release());
}
// Verify that creating a target file that a link is pointing to
@@ -725,7 +688,6 @@ TEST_F(FilePathWatcherTest, CreateTargetLinkedFile) {
// Now make sure we get notified if the target file is created.
ASSERT_TRUE(WriteFile(test_file(), "content"));
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate.release());
}
// Verify that deleting a target file that a link is pointing to
@@ -741,7 +703,6 @@ TEST_F(FilePathWatcherTest, DeleteTargetLinkedFile) {
// Now make sure we get notified if the target file is deleted.
ASSERT_TRUE(base::DeleteFile(test_file(), false));
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate.release());
}
// Verify that watching a file whose parent directory is a link that
@@ -770,7 +731,6 @@ TEST_F(FilePathWatcherTest, LinkedDirectoryPart1) {
ASSERT_TRUE(base::DeleteFile(file, false));
VLOG(1) << "Waiting for file deletion";
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate.release());
}
// Verify that watching a file whose parent directory is a
@@ -800,7 +760,6 @@ TEST_F(FilePathWatcherTest, LinkedDirectoryPart2) {
ASSERT_TRUE(base::DeleteFile(file, false));
VLOG(1) << "Waiting for file deletion";
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate.release());
}
// Verify that watching a file with a symlink on the path
@@ -828,7 +787,6 @@ TEST_F(FilePathWatcherTest, LinkedDirectoryPart3) {
ASSERT_TRUE(base::DeleteFile(file, false));
VLOG(1) << "Waiting for file deletion";
ASSERT_TRUE(WaitForEvents());
- DeleteDelegateOnFileThread(delegate.release());
}
#endif // OS_LINUX
@@ -906,7 +864,6 @@ TEST_F(FilePathWatcherTest, DirAttributesChanged) {
ASSERT_TRUE(ChangeFilePermissions(test_dir1, Execute, false));
ASSERT_TRUE(WaitForEvents());
ASSERT_TRUE(ChangeFilePermissions(test_dir1, Execute, true));
- DeleteDelegateOnFileThread(delegate.release());
}
#endif // OS_MACOSX
diff --git a/chromium/base/files/file_path_watcher_win.cc b/chromium/base/files/file_path_watcher_win.cc
index 3bbf2fba503..48470b6b3db 100644
--- a/chromium/base/files/file_path_watcher_win.cc
+++ b/chromium/base/files/file_path_watcher_win.cc
@@ -296,6 +296,7 @@ void FilePathWatcherImpl::DestroyWatch() {
} // namespace
FilePathWatcher::FilePathWatcher() {
+ sequence_checker_.DetachFromSequence();
impl_ = new FilePathWatcherImpl();
}
diff --git a/chromium/base/files/file_posix.cc b/chromium/base/files/file_posix.cc
index 3d2d54a6632..a0f2328fe20 100644
--- a/chromium/base/files/file_posix.cc
+++ b/chromium/base/files/file_posix.cc
@@ -11,7 +11,7 @@
#include <unistd.h>
#include "base/logging.h"
-#include "base/metrics/sparse_histogram.h"
+#include "base/metrics/histogram_macros.h"
#include "base/posix/eintr_wrapper.h"
#include "base/strings/utf_string_conversions.h"
#include "base/threading/thread_restrictions.h"
diff --git a/chromium/base/files/file_proxy.h b/chromium/base/files/file_proxy.h
index 0dfa9bc5e20..f73815ce1c0 100644
--- a/chromium/base/files/file_proxy.h
+++ b/chromium/base/files/file_proxy.h
@@ -15,10 +15,6 @@
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
-namespace tracked_objects {
-class Location;
-};
-
namespace base {
class TaskRunner;
diff --git a/chromium/base/files/file_util.h b/chromium/base/files/file_util.h
index bf69ae796b0..77a87e7dade 100644
--- a/chromium/base/files/file_util.h
+++ b/chromium/base/files/file_util.h
@@ -361,6 +361,17 @@ BASE_EXPORT int GetUniquePathNumber(const FilePath& path,
BASE_EXPORT bool SetNonBlocking(int fd);
#if defined(OS_POSIX)
+// Creates a non-blocking, close-on-exec pipe.
+// This creates a non-blocking pipe that is not intended to be shared with any
+// child process. This will be done atomically if the operating system supports
+// it. Returns true if it was able to create the pipe, otherwise false.
+BASE_EXPORT bool CreateLocalNonBlockingPipe(int fds[2]);
+
+// Sets the given |fd| to close-on-exec mode.
+// Returns true if it was able to set close-on-exec mode, otherwise
+// false.
+BASE_EXPORT bool SetCloseOnExec(int fd);
+
// Test that |path| can only be changed by a given user and members of
// a given set of groups.
// Specifically, test that all parts of |path| under (and including) |base|:
diff --git a/chromium/base/files/file_util_posix.cc b/chromium/base/files/file_util_posix.cc
index 42de9316f1c..a8db2599c64 100644
--- a/chromium/base/files/file_util_posix.cc
+++ b/chromium/base/files/file_util_posix.cc
@@ -351,6 +351,29 @@ bool CopyDirectory(const FilePath& from_path,
}
#endif // !defined(OS_NACL_NONSFI)
+bool CreateLocalNonBlockingPipe(int fds[2]) {
+#if defined(OS_LINUX)
+ return pipe2(fds, O_CLOEXEC | O_NONBLOCK) == 0;
+#else
+ int raw_fds[2];
+ if (pipe(raw_fds) != 0)
+ return false;
+ ScopedFD fd_out(raw_fds[0]);
+ ScopedFD fd_in(raw_fds[1]);
+ if (!SetCloseOnExec(fd_out.get()))
+ return false;
+ if (!SetCloseOnExec(fd_in.get()))
+ return false;
+ if (!SetNonBlocking(fd_out.get()))
+ return false;
+ if (!SetNonBlocking(fd_in.get()))
+ return false;
+ fds[0] = fd_out.release();
+ fds[1] = fd_in.release();
+ return true;
+#endif
+}
+
bool SetNonBlocking(int fd) {
const int flags = fcntl(fd, F_GETFL);
if (flags == -1)
@@ -362,6 +385,21 @@ bool SetNonBlocking(int fd) {
return true;
}
+bool SetCloseOnExec(int fd) {
+#if defined(OS_NACL_NONSFI)
+ const int flags = 0;
+#else
+ const int flags = fcntl(fd, F_GETFD);
+ if (flags == -1)
+ return false;
+ if (flags & FD_CLOEXEC)
+ return true;
+#endif // defined(OS_NACL_NONSFI)
+ if (HANDLE_EINTR(fcntl(fd, F_SETFD, flags | FD_CLOEXEC)) == -1)
+ return false;
+ return true;
+}
+
bool PathExists(const FilePath& path) {
ThreadRestrictions::AssertIOAllowed();
#if defined(OS_ANDROID)
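On Linux the new helper maps directly onto pipe2(O_CLOEXEC | O_NONBLOCK); elsewhere it retrofits both flags onto a plain pipe() before exposing the descriptors. A hedged usage sketch (standard pipe semantics: fds[0] is the read end, fds[1] the write end):

    #include <unistd.h>

    #include "base/files/file_util.h"

    int fds[2];
    if (base::CreateLocalNonBlockingPipe(fds)) {
      // Both ends carry O_NONBLOCK and FD_CLOEXEC, so reads and writes
      // never block and the descriptors are not inherited across exec().
      // ... use fds[0] / fds[1] ...
      close(fds[0]);
      close(fds[1]);
    }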
diff --git a/chromium/base/files/file_win.cc b/chromium/base/files/file_win.cc
index b5bb232a104..20c8908375c 100644
--- a/chromium/base/files/file_win.cc
+++ b/chromium/base/files/file_win.cc
@@ -8,7 +8,7 @@
#include <stdint.h>
#include "base/logging.h"
-#include "base/metrics/sparse_histogram.h"
+#include "base/metrics/histogram_macros.h"
#include "base/threading/thread_restrictions.h"
namespace base {
diff --git a/chromium/base/files/important_file_writer.h b/chromium/base/files/important_file_writer.h
index 8823a9c7670..f154b043b23 100644
--- a/chromium/base/files/important_file_writer.h
+++ b/chromium/base/files/important_file_writer.h
@@ -20,7 +20,6 @@
namespace base {
class SequencedTaskRunner;
-class Thread;
// Helper for atomically writing a file to ensure that it won't be corrupted by
// *application* crash during write (implemented as create, flush, rename).
diff --git a/chromium/base/i18n/icu_util.h b/chromium/base/i18n/icu_util.h
index a0e49e56535..5f9948fa655 100644
--- a/chromium/base/i18n/icu_util.h
+++ b/chromium/base/i18n/icu_util.h
@@ -50,10 +50,10 @@ BASE_I18N_EXPORT const uint8_t* GetRawIcuMemory();
//
// This does nothing in component builds; this initialization should only be
// done in cases where there could be two copies of base in a single process in
-// non-component builds. (The big example is mojo: the shell will have a copy
-// of base linked in, and the majority of mojo applications will have base
-// linked in but in non-component builds, these will be separate copies of
-// base.)
+// non-component builds. (The big example is standalone service libraries: the
+// Service Manager will have a copy of base linked in, and the majority of
+// service libraries will have base linked in but in non-component builds,
+// these will be separate copies of base.)
BASE_I18N_EXPORT bool InitializeICUFromRawMemory(const uint8_t* raw_memory);
#endif // ICU_UTIL_DATA_IMPL == ICU_UTIL_DATA_FILE
#endif // !defined(OS_NACL)
diff --git a/chromium/base/i18n/number_formatting_unittest.cc b/chromium/base/i18n/number_formatting_unittest.cc
index a131bf3e7d7..baa7ba7d807 100644
--- a/chromium/base/i18n/number_formatting_unittest.cc
+++ b/chromium/base/i18n/number_formatting_unittest.cc
@@ -99,11 +99,18 @@ TEST(NumberFormattingTest, FormatPercent) {
int64_t number;
const char* expected_english;
const wchar_t* expected_german; // Note: Space before % isn't \x20.
- const wchar_t* expected_persian; // Note: Non-Arabic numbers and %.
+ // Note: Eastern Arabic-Indic digits (U+06Fx) for Persian and
+ // Arabic-Indic digits (U+066x) for Arabic.
+ // See http://unicode.org/cldr/trac/ticket/9040 for details.
+ const wchar_t* expected_persian;
+ const wchar_t* expected_arabic;
} cases[] = {
- {0, "0%", L"0\xa0%", L"\x6f0\x200f\x66a"},
- {42, "42%", L"42\xa0%", L"\x6f4\x6f2\x200f\x66a"},
- {1024, "1,024%", L"1.024\xa0%", L"\x6f1\x66c\x6f0\x6f2\x6f4\x200f\x66a"},
+ {0, "0%", L"0\xa0%", L"\x200e\x66a\xa0\x6f0", L"\x660\xa0\x66a\x61c"},
+ {42, "42%", L"42\xa0%", L"\x200e\x66a\xa0\x6f4\x6f2",
+ L"\x664\x662\xa0\x66a\x61c"},
+ {1024, "1,024%", L"1.024\xa0%",
+ L"\x200e\x66a\xa0\x6f1\x66c\x6f0\x6f2\x6f4",
+ L"\x661\x66c\x660\x662\x664\xa0\x66a\x61c"},
};
test::ScopedRestoreICUDefaultLocale restore_locale;
@@ -117,6 +124,9 @@ TEST(NumberFormattingTest, FormatPercent) {
i18n::SetICUDefaultLocale("fa");
EXPECT_EQ(WideToUTF16(cases[i].expected_persian),
FormatPercent(cases[i].number));
+ i18n::SetICUDefaultLocale("ar");
+ EXPECT_EQ(WideToUTF16(cases[i].expected_arabic),
+ FormatPercent(cases[i].number));
}
}
diff --git a/chromium/base/ios/OWNERS b/chromium/base/ios/OWNERS
index 06f5ff1549d..bdb59ec471e 100644
--- a/chromium/base/ios/OWNERS
+++ b/chromium/base/ios/OWNERS
@@ -1,3 +1,3 @@
-droger@chromium.org
-qsr@chromium.org
+eugenebut@chromium.org
rohitrao@chromium.org
+sdefresne@chromium.org
diff --git a/chromium/base/ios/ios_util.h b/chromium/base/ios/ios_util.h
index fe9ec202c5d..953a502d891 100644
--- a/chromium/base/ios/ios_util.h
+++ b/chromium/base/ios/ios_util.h
@@ -13,9 +13,6 @@
namespace base {
namespace ios {
-// Returns whether the operating system is iOS 8 or later.
-BASE_EXPORT bool IsRunningOnIOS8OrLater();
-
// Returns whether the operating system is iOS 9 or later.
BASE_EXPORT bool IsRunningOnIOS9OrLater();
diff --git a/chromium/base/ios/ios_util.mm b/chromium/base/ios/ios_util.mm
index 7af85444ada..4b702db3bdf 100644
--- a/chromium/base/ios/ios_util.mm
+++ b/chromium/base/ios/ios_util.mm
@@ -28,11 +28,6 @@ std::string* g_icudtl_path_override = nullptr;
namespace base {
namespace ios {
-// When dropping iOS7 support, also address issues listed in crbug.com/502968.
-bool IsRunningOnIOS8OrLater() {
- return IsRunningOnOrLater(8, 0, 0);
-}
-
bool IsRunningOnIOS9OrLater() {
return IsRunningOnOrLater(9, 0, 0);
}
diff --git a/chromium/base/json/correctness_fuzzer.cc b/chromium/base/json/correctness_fuzzer.cc
new file mode 100644
index 00000000000..57434341a2c
--- /dev/null
+++ b/chromium/base/json/correctness_fuzzer.cc
@@ -0,0 +1,57 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// A fuzzer that checks the correctness of the JSON parser/writer.
+// The fuzzer input is passed through parsing twice,
+// so that presumably valid JSON is parsed/written again.
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <string>
+
+#include "base/json/json_reader.h"
+#include "base/json/json_writer.h"
+#include "base/json/string_escape.h"
+#include "base/logging.h"
+#include "base/values.h"
+
+// Entry point for libFuzzer.
+// We will use the last byte of data as parsing options.
+// The rest will be used as text input to the parser.
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ if (size < 2)
+ return 0;
+
+ int error_code, error_line, error_column;
+ std::string error_message;
+
+ const std::string input_string(reinterpret_cast<const char*>(data), size - 1);
+ const int options = data[size - 1];
+ auto parsed_value = base::JSONReader::ReadAndReturnError(
+ input_string, options, &error_code, &error_message, &error_line,
+ &error_column);
+ if (!parsed_value)
+ return 0;
+
+ std::string parsed_output;
+ bool b = base::JSONWriter::Write(*parsed_value, &parsed_output);
+ LOG_ASSERT(b);
+
+ auto double_parsed_value = base::JSONReader::ReadAndReturnError(
+ parsed_output, options, &error_code, &error_message, &error_line,
+ &error_column);
+ LOG_ASSERT(double_parsed_value);
+ std::string double_parsed_output;
+ bool b2 =
+ base::JSONWriter::Write(*double_parsed_value, &double_parsed_output);
+ LOG_ASSERT(b2);
+
+ LOG_ASSERT(parsed_output == double_parsed_output)
+ << "Parser/Writer mismatch."
+ << "\nInput=" << base::GetQuotedJSONString(parsed_output)
+ << "\nOutput=" << base::GetQuotedJSONString(double_parsed_output);
+
+ return 0;
+}
diff --git a/chromium/base/json/json_file_value_serializer.cc b/chromium/base/json/json_file_value_serializer.cc
index 1a9b7a23b20..661d25d7984 100644
--- a/chromium/base/json/json_file_value_serializer.cc
+++ b/chromium/base/json/json_file_value_serializer.cc
@@ -53,11 +53,9 @@ bool JSONFileValueSerializer::SerializeInternal(const base::Value& root,
}
JSONFileValueDeserializer::JSONFileValueDeserializer(
- const base::FilePath& json_file_path)
- : json_file_path_(json_file_path),
- allow_trailing_comma_(false),
- last_read_size_(0U) {
-}
+ const base::FilePath& json_file_path,
+ int options)
+ : json_file_path_(json_file_path), options_(options), last_read_size_(0U) {}
JSONFileValueDeserializer::~JSONFileValueDeserializer() {
}
@@ -114,7 +112,6 @@ std::unique_ptr<base::Value> JSONFileValueDeserializer::Deserialize(
return NULL;
}
- JSONStringValueDeserializer deserializer(json_string);
- deserializer.set_allow_trailing_comma(allow_trailing_comma_);
+ JSONStringValueDeserializer deserializer(json_string, options_);
return deserializer.Deserialize(error_code, error_str);
}
diff --git a/chromium/base/json/json_file_value_serializer.h b/chromium/base/json/json_file_value_serializer.h
index 67d2342b4cc..a93950a6080 100644
--- a/chromium/base/json/json_file_value_serializer.h
+++ b/chromium/base/json/json_file_value_serializer.h
@@ -48,8 +48,9 @@ class BASE_EXPORT JSONFileValueSerializer : public base::ValueSerializer {
class BASE_EXPORT JSONFileValueDeserializer : public base::ValueDeserializer {
public:
// |json_file_path_| is the path of a file that will be the source of the
- // deserialization.
- explicit JSONFileValueDeserializer(const base::FilePath& json_file_path);
+ // deserialization. |options| is a bitmask of JSONParserOptions.
+ explicit JSONFileValueDeserializer(const base::FilePath& json_file_path,
+ int options = 0);
~JSONFileValueDeserializer() override;
@@ -82,10 +83,6 @@ class BASE_EXPORT JSONFileValueDeserializer : public base::ValueDeserializer {
// be a JsonFileError.
static const char* GetErrorMessageForCode(int error_code);
- void set_allow_trailing_comma(bool new_value) {
- allow_trailing_comma_ = new_value;
- }
-
// Returns the size (in bytes) of JSON string read from disk in the last
// successful |Deserialize()| call.
size_t get_last_read_size() const { return last_read_size_; }
@@ -96,7 +93,7 @@ class BASE_EXPORT JSONFileValueDeserializer : public base::ValueDeserializer {
int ReadFileToString(std::string* json_string);
const base::FilePath json_file_path_;
- bool allow_trailing_comma_;
+ const int options_;
size_t last_read_size_;
DISALLOW_IMPLICIT_CONSTRUCTORS(JSONFileValueDeserializer);
diff --git a/chromium/base/json/json_parser.cc b/chromium/base/json/json_parser.cc
index 9ce6a2c1e73..cd427da9ea2 100644
--- a/chromium/base/json/json_parser.cc
+++ b/chromium/base/json/json_parser.cc
@@ -188,6 +188,9 @@ class StackMarker {
} // namespace
+// This is U+FFFD.
+const char kUnicodeReplacementString[] = "\xEF\xBF\xBD";
+
JSONParser::JSONParser(int options)
: options_(options),
start_pos_(nullptr),
@@ -626,11 +629,18 @@ bool JSONParser::ConsumeStringRaw(StringBuilder* out) {
int32_t next_char = 0;
while (CanConsume(1)) {
+ int start_index = index_;
pos_ = start_pos_ + index_; // CBU8_NEXT is postcrement.
CBU8_NEXT(start_pos_, index_, length, next_char);
if (next_char < 0 || !IsValidCharacter(next_char)) {
- ReportError(JSONReader::JSON_UNSUPPORTED_ENCODING, 1);
- return false;
+ if ((options_ & JSON_REPLACE_INVALID_CHARACTERS) == 0) {
+ ReportError(JSONReader::JSON_UNSUPPORTED_ENCODING, 1);
+ return false;
+ }
+ CBU8_NEXT(start_pos_, start_index, length, next_char);
+ string.Convert();
+ string.AppendString(kUnicodeReplacementString);
+ continue;
}
if (next_char == '"') {
diff --git a/chromium/base/json/json_parser.h b/chromium/base/json/json_parser.h
index 11a51115de4..1cebb8da3ed 100644
--- a/chromium/base/json/json_parser.h
+++ b/chromium/base/json/json_parser.h
@@ -257,10 +257,14 @@ class BASE_EXPORT JSONParser {
FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ConsumeLiterals);
FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ConsumeNumbers);
FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ErrorMessages);
+ FRIEND_TEST_ALL_PREFIXES(JSONParserTest, ReplaceInvalidCharacters);
DISALLOW_COPY_AND_ASSIGN(JSONParser);
};
+// Used when an invalid UTF-8 sequence is encountered while decoding.
+BASE_EXPORT extern const char kUnicodeReplacementString[];
+
} // namespace internal
} // namespace base
diff --git a/chromium/base/json/json_parser_unittest.cc b/chromium/base/json/json_parser_unittest.cc
index eacd1024993..6023288925f 100644
--- a/chromium/base/json/json_parser_unittest.cc
+++ b/chromium/base/json/json_parser_unittest.cc
@@ -17,8 +17,9 @@ namespace internal {
class JSONParserTest : public testing::Test {
public:
- JSONParser* NewTestParser(const std::string& input) {
- JSONParser* parser = new JSONParser(JSON_PARSE_RFC);
+ JSONParser* NewTestParser(const std::string& input,
+ int options = JSON_PARSE_RFC) {
+ JSONParser* parser = new JSONParser(options);
parser->start_pos_ = input.data();
parser->pos_ = parser->start_pos_;
parser->end_pos_ = parser->start_pos_ + input.length();
@@ -328,5 +329,18 @@ TEST_F(JSONParserTest, DecodeNegativeEscapeSequence) {
EXPECT_FALSE(JSONReader::Read("[\"\\u-00A\"]"));
}
+// Verifies invalid UTF-8 characters are replaced.
+TEST_F(JSONParserTest, ReplaceInvalidCharacters) {
+ const std::string bogus_char = "󿿿";
+ const std::string quoted_bogus_char = "\"" + bogus_char + "\"";
+ std::unique_ptr<JSONParser> parser(
+ NewTestParser(quoted_bogus_char, JSON_REPLACE_INVALID_CHARACTERS));
+ std::unique_ptr<Value> value(parser->ConsumeString());
+ ASSERT_TRUE(value.get());
+ std::string str;
+ EXPECT_TRUE(value->GetAsString(&str));
+ EXPECT_EQ(kUnicodeReplacementString, str);
+}
+
} // namespace internal
} // namespace base
diff --git a/chromium/base/json/json_reader.h b/chromium/base/json/json_reader.h
index a954821a288..a39b37adeb1 100644
--- a/chromium/base/json/json_reader.h
+++ b/chromium/base/json/json_reader.h
@@ -55,6 +55,11 @@ enum JSONParserOptions {
// if the child is Remove()d from root, it would result in use-after-free
// unless it is DeepCopy()ed or this option is used.
JSON_DETACHABLE_CHILDREN = 1 << 1,
+
+ // If set, the parser replaces invalid characters with the Unicode replacement
+ // character (U+FFFD). If not set, invalid characters trigger a hard error and
+ // parsing fails.
+ JSON_REPLACE_INVALID_CHARACTERS = 1 << 2,
};
class BASE_EXPORT JSONReader {
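The new option downgrades mis-encoded input from a hard JSON_UNSUPPORTED_ENCODING failure to U+FFFD substitution, matching the parser change above. A hedged sketch against this revision's JSONReader interface; the byte sequence is an illustrative ill-formed UTF-8 string:

    #include "base/json/json_reader.h"

    // "\xED\xA0\x80" encodes a lone surrogate and is invalid UTF-8.
    std::unique_ptr<base::Value> value = base::JSONReader::Read(
        "\"ab\xED\xA0\x80\"", base::JSON_REPLACE_INVALID_CHARACTERS);
    // Parsing succeeds; the bad sequence comes back as U+FFFD
    // ("\xEF\xBF\xBD") inside the resulting string value.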
diff --git a/chromium/base/json/json_string_value_serializer.cc b/chromium/base/json/json_string_value_serializer.cc
index cd786db9e77..2e46ab387a2 100644
--- a/chromium/base/json/json_string_value_serializer.cc
+++ b/chromium/base/json/json_string_value_serializer.cc
@@ -41,18 +41,15 @@ bool JSONStringValueSerializer::SerializeInternal(const Value& root,
}
JSONStringValueDeserializer::JSONStringValueDeserializer(
- const base::StringPiece& json_string)
- : json_string_(json_string),
- allow_trailing_comma_(false) {
-}
+ const base::StringPiece& json_string,
+ int options)
+ : json_string_(json_string), options_(options) {}
JSONStringValueDeserializer::~JSONStringValueDeserializer() {}
std::unique_ptr<Value> JSONStringValueDeserializer::Deserialize(
int* error_code,
std::string* error_str) {
- return base::JSONReader::ReadAndReturnError(
- json_string_, allow_trailing_comma_ ? base::JSON_ALLOW_TRAILING_COMMAS
- : base::JSON_PARSE_RFC,
- error_code, error_str);
+ return base::JSONReader::ReadAndReturnError(json_string_, options_,
+ error_code, error_str);
}
diff --git a/chromium/base/json/json_string_value_serializer.h b/chromium/base/json/json_string_value_serializer.h
index a97da239202..55a53e207fe 100644
--- a/chromium/base/json/json_string_value_serializer.h
+++ b/chromium/base/json/json_string_value_serializer.h
@@ -47,8 +47,10 @@ class BASE_EXPORT JSONStringValueSerializer : public base::ValueSerializer {
class BASE_EXPORT JSONStringValueDeserializer : public base::ValueDeserializer {
public:
// This retains a reference to the contents of |json_string|, so the data
- // must outlive the JSONStringValueDeserializer.
- explicit JSONStringValueDeserializer(const base::StringPiece& json_string);
+ // must outlive the JSONStringValueDeserializer. |options| is a bitmask of
+ // JSONParserOptions.
+ explicit JSONStringValueDeserializer(const base::StringPiece& json_string,
+ int options = 0);
~JSONStringValueDeserializer() override;
@@ -62,15 +64,10 @@ class BASE_EXPORT JSONStringValueDeserializer : public base::ValueDeserializer {
std::unique_ptr<base::Value> Deserialize(int* error_code,
std::string* error_message) override;
- void set_allow_trailing_comma(bool new_value) {
- allow_trailing_comma_ = new_value;
- }
-
private:
// Data is owned by the caller of the constructor.
base::StringPiece json_string_;
- // If true, deserialization will allow trailing commas.
- bool allow_trailing_comma_;
+ const int options_;
DISALLOW_COPY_AND_ASSIGN(JSONStringValueDeserializer);
};
diff --git a/chromium/base/json/json_value_serializer_unittest.cc b/chromium/base/json/json_value_serializer_unittest.cc
index 5bf07fd386c..43ddc9c4574 100644
--- a/chromium/base/json/json_value_serializer_unittest.cc
+++ b/chromium/base/json/json_value_serializer_unittest.cc
@@ -134,9 +134,10 @@ TEST(JSONValueDeserializerTest, ReadJSONWithTrailingCommasFromString) {
ASSERT_FALSE(value);
ASSERT_NE(0, error_code);
ASSERT_FALSE(error_message.empty());
- // Now the flag is set and it must pass.
- str_deserializer.set_allow_trailing_comma(true);
- value = str_deserializer.Deserialize(&error_code, &error_message);
+ // Repeat with commas allowed.
+ JSONStringValueDeserializer str_deserializer2(kProperJSONWithCommas,
+ JSON_ALLOW_TRAILING_COMMAS);
+ value = str_deserializer2.Deserialize(&error_code, &error_message);
ASSERT_TRUE(value);
ASSERT_EQ(JSONReader::JSON_TRAILING_COMMA, error_code);
// Verify if the same JSON is still there.
@@ -187,9 +188,10 @@ TEST(JSONValueDeserializerTest, ReadJSONWithCommasFromFile) {
ASSERT_FALSE(value);
ASSERT_NE(0, error_code);
ASSERT_FALSE(error_message.empty());
- // Now the flag is set and it must pass.
- file_deserializer.set_allow_trailing_comma(true);
- value = file_deserializer.Deserialize(&error_code, &error_message);
+ // Repeat with commas allowed.
+ JSONFileValueDeserializer file_deserializer2(temp_file,
+ JSON_ALLOW_TRAILING_COMMAS);
+ value = file_deserializer2.Deserialize(&error_code, &error_message);
ASSERT_TRUE(value);
ASSERT_EQ(JSONReader::JSON_TRAILING_COMMA, error_code);
// Verify if the same JSON is still there.
@@ -200,8 +202,8 @@ TEST(JSONValueDeserializerTest, AllowTrailingComma) {
static const char kTestWithCommas[] = "{\"key\": [true,],}";
static const char kTestNoCommas[] = "{\"key\": [true]}";
- JSONStringValueDeserializer deserializer(kTestWithCommas);
- deserializer.set_allow_trailing_comma(true);
+ JSONStringValueDeserializer deserializer(kTestWithCommas,
+ JSON_ALLOW_TRAILING_COMMAS);
JSONStringValueDeserializer deserializer_expected(kTestNoCommas);
std::unique_ptr<Value> root = deserializer.Deserialize(nullptr, nullptr);
ASSERT_TRUE(root);
diff --git a/chromium/base/lazy_instance.h b/chromium/base/lazy_instance.h
index 9218bf3fb2a..1183806bef5 100644
--- a/chromium/base/lazy_instance.h
+++ b/chromium/base/lazy_instance.h
@@ -102,7 +102,7 @@ struct LeakyLazyInstanceTraits {
};
// Our AtomicWord doubles as a spinlock, where a value of
-// kBeingCreatedMarker means the spinlock is being held for creation.
+// kLazyInstanceStateCreating means the spinlock is being held for creation.
static const subtle::AtomicWord kLazyInstanceStateCreating = 1;
// Check if instance needs to be created. If so return true otherwise
diff --git a/chromium/base/logging.cc b/chromium/base/logging.cc
index 0771b47c182..e0ca19933e7 100644
--- a/chromium/base/logging.cc
+++ b/chromium/base/logging.cc
@@ -765,13 +765,12 @@ void LogMessage::Init(const char* file, int line) {
if (g_log_thread_id)
stream_ << base::PlatformThread::CurrentId() << ':';
if (g_log_timestamp) {
- time_t t = time(nullptr);
- struct tm local_time = {0};
-#ifdef _MSC_VER
- localtime_s(&local_time, &t);
-#else
+#if defined(OS_POSIX)
+ timeval tv;
+ gettimeofday(&tv, nullptr);
+ time_t t = tv.tv_sec;
+ struct tm local_time;
localtime_r(&t, &local_time);
-#endif
struct tm* tm_time = &local_time;
stream_ << std::setfill('0')
<< std::setw(2) << 1 + tm_time->tm_mon
@@ -780,7 +779,23 @@ void LogMessage::Init(const char* file, int line) {
<< std::setw(2) << tm_time->tm_hour
<< std::setw(2) << tm_time->tm_min
<< std::setw(2) << tm_time->tm_sec
+ << '.'
+ << std::setw(6) << tv.tv_usec
<< ':';
+#elif defined(OS_WIN)
+ SYSTEMTIME local_time;
+ GetLocalTime(&local_time);
+ stream_ << std::setfill('0')
+ << std::setw(2) << local_time.wMonth
+ << std::setw(2) << local_time.wDay
+ << '/'
+ << std::setw(2) << local_time.wHour
+ << std::setw(2) << local_time.wMinute
+ << std::setw(2) << local_time.wSecond
+ << '.'
+ << std::setw(3) << local_time.wMilliseconds
+ << ':';
+#endif
}
if (g_log_tickcount)
stream_ << TickCount() << ':';
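The timestamp in the log prefix thus gains sub-second precision: six fractional digits (microseconds) on POSIX, three (milliseconds) on Windows. Illustrative prefixes built from the code above, with invented values and the optional pid/tid and tick fields elided:

    [0308/102810.123456:WARNING:foo.cc(42)] message   (POSIX)
    [0308/102810.123:WARNING:foo.cc(42)] message      (Windows)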
diff --git a/chromium/base/mac/bundle_locations.h b/chromium/base/mac/bundle_locations.h
index 276290b0e64..5cc44ba9662 100644
--- a/chromium/base/mac/bundle_locations.h
+++ b/chromium/base/mac/bundle_locations.h
@@ -12,7 +12,6 @@
#import <Foundation/Foundation.h>
#else // __OBJC__
class NSBundle;
-class NSString;
#endif // __OBJC__
namespace base {
diff --git a/chromium/base/mac/mac_util.h b/chromium/base/mac/mac_util.h
index d87aa8a07e6..67d1880849e 100644
--- a/chromium/base/mac/mac_util.h
+++ b/chromium/base/mac/mac_util.h
@@ -5,10 +5,11 @@
#ifndef BASE_MAC_MAC_UTIL_H_
#define BASE_MAC_MAC_UTIL_H_
-#include <Carbon/Carbon.h>
#include <stdint.h>
#include <string>
+#import <CoreGraphics/CoreGraphics.h>
+
#include "base/base_export.h"
namespace base {
@@ -30,9 +31,6 @@ enum FullScreenMode {
kFullScreenModeNormal = 10,
};
-BASE_EXPORT std::string PathFromFSRef(const FSRef& ref);
-BASE_EXPORT bool FSRefFromPath(const std::string& path, FSRef* ref);
-
// Returns an sRGB color space. The return value is a static value; do not
// release it!
BASE_EXPORT CGColorSpaceRef GetSRGBColorSpace();
diff --git a/chromium/base/mac/mac_util.mm b/chromium/base/mac/mac_util.mm
index 2f54b2799b5..9615f9d2eeb 100644
--- a/chromium/base/mac/mac_util.mm
+++ b/chromium/base/mac/mac_util.mm
@@ -122,19 +122,6 @@ bool IsHiddenLoginItem(LSSharedFileListItemRef item) {
} // namespace
-std::string PathFromFSRef(const FSRef& ref) {
- ScopedCFTypeRef<CFURLRef> url(
- CFURLCreateFromFSRef(kCFAllocatorDefault, &ref));
- NSString *path_string = [(NSURL *)url.get() path];
- return [path_string fileSystemRepresentation];
-}
-
-bool FSRefFromPath(const std::string& path, FSRef* ref) {
- OSStatus status = FSPathMakeRef((const UInt8*)path.c_str(),
- ref, nil);
- return status == noErr;
-}
-
CGColorSpaceRef GetGenericRGBColorSpace() {
// Leaked. That's OK, it's scoped to the lifetime of the application.
static CGColorSpaceRef g_color_space_generic_rgb(
@@ -309,11 +296,21 @@ bool WasLaunchedAsLoginOrResumeItem() {
ProcessInfoRec info = {};
info.processInfoLength = sizeof(info);
+// GetProcessInformation has been deprecated since macOS 10.9, but there is no
+// replacement that provides the information we need. See
+// https://crbug.com/650854.
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
if (GetProcessInformation(&psn, &info) == noErr) {
+#pragma clang diagnostic pop
ProcessInfoRec parent_info = {};
parent_info.processInfoLength = sizeof(parent_info);
- if (GetProcessInformation(&info.processLauncher, &parent_info) == noErr)
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+ if (GetProcessInformation(&info.processLauncher, &parent_info) == noErr) {
+#pragma clang diagnostic pop
return parent_info.processSignature == 'lgnw';
+ }
}
return false;
}
diff --git a/chromium/base/mac/mac_util_unittest.mm b/chromium/base/mac/mac_util_unittest.mm
index ec58f002900..ff508f3336b 100644
--- a/chromium/base/mac/mac_util_unittest.mm
+++ b/chromium/base/mac/mac_util_unittest.mm
@@ -29,14 +29,6 @@ namespace {
typedef PlatformTest MacUtilTest;
-TEST_F(MacUtilTest, TestFSRef) {
- FSRef ref;
- std::string path("/System/Library");
-
- ASSERT_TRUE(FSRefFromPath(path, &ref));
- EXPECT_EQ(path, PathFromFSRef(ref));
-}
-
TEST_F(MacUtilTest, GetUserDirectoryTest) {
// Try a few keys, make sure they come back with non-empty paths.
FilePath caches_dir;
diff --git a/chromium/base/mac/mach_port_broker.mm b/chromium/base/mac/mach_port_broker.mm
index bd47017f154..6d9fec5ab6e 100644
--- a/chromium/base/mac/mach_port_broker.mm
+++ b/chromium/base/mac/mach_port_broker.mm
@@ -154,12 +154,7 @@ void MachPortBroker::HandleRequest() {
// Use the kernel audit information to make sure this message is from
// a task that this process spawned. The kernel audit token contains the
// unspoofable pid of the task that sent the message.
- //
- // TODO(rsesek): In the 10.7 SDK, there's audit_token_to_pid().
- pid_t child_pid;
- audit_token_to_au32(msg.trailer.msgh_audit,
- NULL, NULL, NULL, NULL, NULL, &child_pid, NULL, NULL);
-
+ pid_t child_pid = audit_token_to_pid(msg.trailer.msgh_audit);
mach_port_t child_task_port = msg.child_task_port.name;
// Take the lock and update the broker information.
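audit_token_to_pid() replaces the eight-out-parameter audit_token_to_au32() call with a dedicated accessor for the one field this code needs; both read the kernel-supplied, unspoofable audit token from the message trailer. A side-by-side sketch, assuming a received Mach message |msg| with an audit trailer:

#include <bsm/libbsm.h>

// Old: pull only the pid out of the catch-all accessor.
pid_t pid;
audit_token_to_au32(msg.trailer.msgh_audit, nullptr, nullptr, nullptr,
                    nullptr, nullptr, &pid, nullptr, nullptr);

// New: single-purpose accessor, available since the 10.7 SDK.
pid_t pid2 = audit_token_to_pid(msg.trailer.msgh_audit);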
diff --git a/chromium/base/mac/scoped_ionotificationportref.h b/chromium/base/mac/scoped_ionotificationportref.h
new file mode 100644
index 00000000000..93ebc98b65b
--- /dev/null
+++ b/chromium/base/mac/scoped_ionotificationportref.h
@@ -0,0 +1,33 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_SCOPED_IONOTIFICATIONPORTREF_H_
+#define BASE_MAC_SCOPED_IONOTIFICATIONPORTREF_H_
+
+#include <IOKit/IOKitLib.h>
+
+#include "base/scoped_generic.h"
+
+namespace base {
+namespace mac {
+
+namespace internal {
+
+struct ScopedIONotificationPortRefTraits {
+ static IONotificationPortRef InvalidValue() { return nullptr; }
+ static void Free(IONotificationPortRef object) {
+ IONotificationPortDestroy(object);
+ }
+};
+
+} // namespace internal
+
+using ScopedIONotificationPortRef =
+ ScopedGeneric<IONotificationPortRef,
+ internal::ScopedIONotificationPortRefTraits>;
+
+} // namespace mac
+} // namespace base
+
+#endif // BASE_MAC_SCOPED_IONOTIFICATIONPORTREF_H_
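The new header follows the base::ScopedGeneric traits pattern: InvalidValue() defines the empty state and Free() the teardown call. A hedged usage sketch (the surrounding IOKit setup is hypothetical):

#include <IOKit/IOKitLib.h>

#include "base/mac/scoped_ionotificationportref.h"

void WatchForDeviceEvents() {
  base::mac::ScopedIONotificationPortRef port(
      IONotificationPortCreate(kIOMasterPortDefault));
  if (!port.is_valid())
    return;
  // Use port.get() with the IOKit notification APIs;
  // IONotificationPortDestroy() runs automatically when |port| goes out of
  // scope.
}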
diff --git a/chromium/base/mac/scoped_nsobject.h b/chromium/base/mac/scoped_nsobject.h
index cc54aa0ca8c..ecd8e78f9d3 100644
--- a/chromium/base/mac/scoped_nsobject.h
+++ b/chromium/base/mac/scoped_nsobject.h
@@ -102,7 +102,7 @@ class scoped_nsprotocol
: ScopedTypeRef<NST, Traits>(that_as_subclass) {}
scoped_nsprotocol(scoped_nsprotocol<NST>&& that)
- : ScopedTypeRef<NST, Traits>(that) {}
+ : ScopedTypeRef<NST, Traits>(std::move(that)) {}
scoped_nsprotocol& operator=(const scoped_nsprotocol<NST>& that) {
ScopedTypeRef<NST, Traits>::operator=(that);
@@ -166,7 +166,7 @@ class scoped_nsobject : public scoped_nsprotocol<NST*> {
: scoped_nsprotocol<NST*>(that_as_subclass) {}
scoped_nsobject(scoped_nsobject<NST>&& that)
- : scoped_nsprotocol<NST*>(that) {}
+ : scoped_nsprotocol<NST*>(std::move(that)) {}
scoped_nsobject& operator=(const scoped_nsobject<NST>& that) {
scoped_nsprotocol<NST*>::operator=(that);
@@ -214,7 +214,8 @@ class scoped_nsobject<id> : public scoped_nsprotocol<id> {
explicit scoped_nsobject(const scoped_nsobject<NSR>& that_as_subclass)
: scoped_nsprotocol<id>(that_as_subclass) {}
- scoped_nsobject(scoped_nsobject<id>&& that) : scoped_nsprotocol<id>(that) {}
+ scoped_nsobject(scoped_nsobject<id>&& that)
+ : scoped_nsprotocol<id>(std::move(that)) {}
scoped_nsobject& operator=(const scoped_nsobject<id>& that) {
scoped_nsprotocol<id>::operator=(that);
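The three one-line changes above fix move constructors that delegated to their base with a named parameter: a named rvalue reference is an lvalue, so without std::move the base *copy* constructor was selected, which for these Objective-C wrappers meant an extra retain/release pair instead of a pointer transfer. A self-contained illustration of the rule (generic C++, not the scoped_nsobject types):

#include <iostream>
#include <utility>

struct Base {
  Base() = default;
  Base(const Base&) { std::cout << "copy\n"; }
  Base(Base&&) { std::cout << "move\n"; }
};

struct Derived : Base {
  Derived() = default;
  // |that| has a name, so it is an lvalue here; std::move casts it back to
  // an rvalue so that Base's move constructor is chosen.
  Derived(Derived&& that) : Base(std::move(that)) {}
};

int main() {
  Derived a;
  Derived b(std::move(a));  // prints "move"
}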
diff --git a/chromium/base/mac/scoped_nsobject_unittest.mm b/chromium/base/mac/scoped_nsobject_unittest.mm
index c202287c7d7..72d52422582 100644
--- a/chromium/base/mac/scoped_nsobject_unittest.mm
+++ b/chromium/base/mac/scoped_nsobject_unittest.mm
@@ -53,6 +53,12 @@ TEST(ScopedNSObjectTest, ScopedNSObject) {
ASSERT_EQ(3u, [p1 retainCount]);
}
ASSERT_EQ(2u, [p1 retainCount]);
+
+ base::scoped_nsobject<NSObject> p7([NSObject new]);
+ base::scoped_nsobject<NSObject> p8(std::move(p7));
+ ASSERT_TRUE(p8);
+ ASSERT_EQ(1u, [p8 retainCount]);
+ ASSERT_FALSE(p7.get());
}
// Instantiating scoped_nsobject<> with T=NSAutoreleasePool should trip a
diff --git a/chromium/base/mac/sdk_forward_declarations.h b/chromium/base/mac/sdk_forward_declarations.h
index 3174d0ad0f2..9f60262c836 100644
--- a/chromium/base/mac/sdk_forward_declarations.h
+++ b/chromium/base/mac/sdk_forward_declarations.h
@@ -25,21 +25,6 @@
// OSX SDK being compiled against.
// ----------------------------------------------------------------------------
-#if !defined(MAC_OS_X_VERSION_10_12) || \
- MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_12
-
-// The protocol was formalized by the 10.12 SDK, but it was informally used
-// before.
-@protocol CAAnimationDelegate
-- (void)animationDidStart:(CAAnimation*)animation;
-- (void)animationDidStop:(CAAnimation*)animation finished:(BOOL)finished;
-@end
-
-@protocol CALayerDelegate
-@end
-
-#endif // MAC_OS_X_VERSION_10_12
-
#if !defined(MAC_OS_X_VERSION_10_11) || \
MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_11
@@ -67,19 +52,27 @@ typedef NSUInteger NSSpringLoadingHighlight;
#endif // MAC_OS_X_VERSION_10_11
+#if !defined(MAC_OS_X_VERSION_10_12) || \
+ MAC_OS_X_VERSION_MAX_ALLOWED < MAC_OS_X_VERSION_10_12
+
+// The protocol was formalized by the 10.12 SDK, but it was informally used
+// before.
+@protocol CAAnimationDelegate
+- (void)animationDidStart:(CAAnimation*)animation;
+- (void)animationDidStop:(CAAnimation*)animation finished:(BOOL)finished;
+@end
+
+@protocol CALayerDelegate
+@end
+
+#endif // MAC_OS_X_VERSION_10_12
+
// ----------------------------------------------------------------------------
// Define NSStrings only available in newer versions of the OSX SDK to force
// them to be statically linked.
// ----------------------------------------------------------------------------
extern "C" {
-#if !defined(MAC_OS_X_VERSION_10_9) || \
- MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_9
-BASE_EXPORT extern NSString* const NSWindowDidChangeOcclusionStateNotification;
-BASE_EXPORT extern NSString* const CBAdvertisementDataOverflowServiceUUIDsKey;
-BASE_EXPORT extern NSString* const CBAdvertisementDataIsConnectable;
-#endif // MAC_OS_X_VERSION_10_9
-
#if !defined(MAC_OS_X_VERSION_10_10) || \
MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_10
BASE_EXPORT extern NSString* const NSUserActivityTypeBrowsingWeb;
@@ -95,122 +88,18 @@ BASE_EXPORT extern NSString* const NSAppearanceNameVibrantLight;
// functions to suppress -Wpartial-availability warnings.
// ----------------------------------------------------------------------------
-// Once Chrome no longer supports OSX 10.7, everything within this preprocessor
-// block can be removed.
-#if !defined(MAC_OS_X_VERSION_10_8) || \
- MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_8
-
-@interface NSColor (MountainLionSDK)
-- (CGColorRef)CGColor;
-@end
-
-@interface NSUUID (MountainLionSDK)
-- (NSString*)UUIDString;
-@end
-
-@interface NSControl (MountainLionSDK)
-@property BOOL allowsExpansionToolTips;
-@end
-
-@interface NSNib (MountainLionSDK)
-- (BOOL)instantiateWithOwner:(id)owner
- topLevelObjects:(NSArray**)topLevelObjects;
-@end
-
-@interface NSArray (MountainLionSDK)
-- (id)objectAtIndexedSubscript:(NSUInteger)idx;
-@end
-
-@interface NSDictionary (MountainLionSDK)
-- (id)objectForKeyedSubscript:(id)key;
-@end
-
-@interface NSMutableDictionary (MountainLionSDK)
-- (void)setObject:(id)obj forKeyedSubscript:(id<NSCopying>)key;
-@end
-
-#endif // MAC_OS_X_VERSION_10_8
-
-// Once Chrome no longer supports OSX 10.8, everything within this preprocessor
-// block can be removed.
-#if !defined(MAC_OS_X_VERSION_10_9) || \
- MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_9
-
-// NSProgress is public API in 10.9, but a version of it exists and is usable
-// in 10.8.
-@class NSProgress;
-@class NSAppearance;
-
-@interface NSProgress (MavericksSDK)
-
-- (instancetype)initWithParent:(NSProgress*)parentProgressOrNil
- userInfo:(NSDictionary*)userInfoOrNil;
-@property(copy) NSString* kind;
-
-@property int64_t totalUnitCount;
-@property int64_t completedUnitCount;
-
-@property(getter=isCancellable) BOOL cancellable;
-@property(getter=isPausable) BOOL pausable;
-@property(readonly, getter=isCancelled) BOOL cancelled;
-@property(readonly, getter=isPaused) BOOL paused;
-@property(copy) void (^cancellationHandler)(void);
-@property(copy) void (^pausingHandler)(void);
-- (void)cancel;
-- (void)pause;
-
-- (void)setUserInfoObject:(id)objectOrNil forKey:(NSString*)key;
-- (NSDictionary*)userInfo;
-
-@property(readonly, getter=isIndeterminate) BOOL indeterminate;
-@property(readonly) double fractionCompleted;
-
-- (void)publish;
-- (void)unpublish;
-
-@end
-
-@interface NSScreen (MavericksSDK)
-+ (BOOL)screensHaveSeparateSpaces;
-@end
-
-@interface NSView (MavericksSDK)
-- (void)setCanDrawSubviewsIntoLayer:(BOOL)flag;
-- (void)setAppearance:(NSAppearance*)appearance;
-- (NSAppearance*)effectiveAppearance;
-@end
-
-@interface NSWindow (MavericksSDK)
-- (NSWindowOcclusionState)occlusionState;
-@end
-
-@interface NSAppearance (MavericksSDK)
-+ (id<NSObject>)appearanceNamed:(NSString*)name;
-@end
-
-@interface CBPeripheral (MavericksSDK)
-@property(readonly, nonatomic) NSUUID* identifier;
-@end
-
-@class NSUserActivity;
-
-#endif // MAC_OS_X_VERSION_10_9
-
// Once Chrome no longer supports OSX 10.9, everything within this preprocessor
// block can be removed.
#if !defined(MAC_OS_X_VERSION_10_10) || \
MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_10
@interface NSUserActivity (YosemiteSDK)
-
@property(readonly, copy) NSString* activityType;
@property(copy) NSDictionary* userInfo;
@property(copy) NSURL* webpageURL;
-
- (instancetype)initWithActivityType:(NSString*)activityType;
- (void)becomeCurrent;
- (void)invalidate;
-
@end
@interface CBUUID (YosemiteSDK)
@@ -229,6 +118,10 @@ BASE_EXPORT extern NSString* const NSAppearanceNameVibrantLight;
@property(readonly) NSOperatingSystemVersion operatingSystemVersion;
@end
+@interface NSLayoutConstraint (YosemiteSDK)
+@property(getter=isActive) BOOL active;
+@end
+
@interface NSVisualEffectView (YosemiteSDK)
- (void)setState:(NSVisualEffectState)state;
@end
@@ -252,6 +145,26 @@ BASE_EXPORT extern NSString* const NSAppearanceNameVibrantLight;
#endif // MAC_OS_X_VERSION_10_10
+// Once Chrome no longer supports OSX 10.10, everything within this
+// preprocessor block can be removed.
+#if !defined(MAC_OS_X_VERSION_10_11) || \
+ MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_11
+
+@class NSLayoutXAxisAnchor;
+@class NSLayoutYAxisAnchor;
+
+@interface NSView (ElCapitanSDK)
+@property(readonly, strong) NSLayoutXAxisAnchor* leftAnchor;
+@property(readonly, strong) NSLayoutXAxisAnchor* rightAnchor;
+@property(readonly, strong) NSLayoutYAxisAnchor* bottomAnchor;
+@end
+
+@interface NSWindow (ElCapitanSDK)
+- (void)performWindowDragWithEvent:(NSEvent*)event;
+@end
+
+#endif // MAC_OS_X_VERSION_10_11
+
// Once Chrome no longer supports OSX 10.11, everything within this
// preprocessor block can be removed.
#if !defined(MAC_OS_X_VERSION_10_12) || \
@@ -263,6 +176,20 @@ BASE_EXPORT extern NSString* const NSAppearanceNameVibrantLight;
#endif // MAC_OS_X_VERSION_10_12
+// Once Chrome no longer supports OSX 10.12.0, everything within this
+// preprocessor block can be removed.
+#if !defined(MAC_OS_X_VERSION_10_12_1) || \
+ MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_12_1
+
+@interface NSButton (SierraPointOneSDK)
+@property(copy) NSColor* bezelColor;
++ (instancetype)buttonWithTitle:(NSString*)title
+ target:(id)target
+ action:(SEL)action;
+@end
+
+#endif // MAC_OS_X_VERSION_10_12_1
+
// ----------------------------------------------------------------------------
// The symbol for kCWSSIDDidChangeNotification is available in the
// CoreWLAN.framework for OSX versions 10.6 through 10.10. The symbol is not
diff --git a/chromium/base/mac/sdk_forward_declarations.mm b/chromium/base/mac/sdk_forward_declarations.mm
index 4e1d7ec6704..61f6b52d501 100644
--- a/chromium/base/mac/sdk_forward_declarations.mm
+++ b/chromium/base/mac/sdk_forward_declarations.mm
@@ -4,39 +4,6 @@
#include "base/mac/sdk_forward_declarations.h"
-#if !defined(MAC_OS_X_VERSION_10_7) || \
- MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_7
-NSString* const NSWindowWillEnterFullScreenNotification =
- @"NSWindowWillEnterFullScreenNotification";
-
-NSString* const NSWindowWillExitFullScreenNotification =
- @"NSWindowWillExitFullScreenNotification";
-
-NSString* const NSWindowDidEnterFullScreenNotification =
- @"NSWindowDidEnterFullScreenNotification";
-
-NSString* const NSWindowDidExitFullScreenNotification =
- @"NSWindowDidExitFullScreenNotification";
-
-NSString* const NSWindowDidChangeBackingPropertiesNotification =
- @"NSWindowDidChangeBackingPropertiesNotification";
-
-NSString* const CBAdvertisementDataServiceDataKey = @"kCBAdvDataServiceData";
-
-NSString* const CBAdvertisementDataServiceUUIDsKey = @"kCBAdvDataServiceUUIDs";
-#endif // MAC_OS_X_VERSION_10_7
-
-#if !defined(MAC_OS_X_VERSION_10_9) || \
- MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_9
-NSString* const NSWindowDidChangeOcclusionStateNotification =
- @"NSWindowDidChangeOcclusionStateNotification";
-
-NSString* const CBAdvertisementDataOverflowServiceUUIDsKey =
- @"kCBAdvDataOverflowServiceUUIDs";
-
-NSString* const CBAdvertisementDataIsConnectable = @"kCBAdvDataIsConnectable";
-#endif // MAC_OS_X_VERSION_10_9
-
#if !defined(MAC_OS_X_VERSION_10_10) || \
MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_X_VERSION_10_10
NSString* const NSUserActivityTypeBrowsingWeb =
diff --git a/chromium/base/memory/memory_coordinator_client.cc b/chromium/base/memory/memory_coordinator_client.cc
new file mode 100644
index 00000000000..7fa62321771
--- /dev/null
+++ b/chromium/base/memory/memory_coordinator_client.cc
@@ -0,0 +1,27 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/memory_coordinator_client.h"
+
+#include "base/logging.h"
+
+namespace base {
+
+const char* MemoryStateToString(MemoryState state) {
+ switch (state) {
+ case MemoryState::UNKNOWN:
+ return "unknown";
+ case MemoryState::NORMAL:
+ return "normal";
+ case MemoryState::THROTTLED:
+ return "throttled";
+ case MemoryState::SUSPENDED:
+ return "suspended";
+ default:
+ NOTREACHED();
+ }
+ return "";
+}
+
+} // namespace base
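The string form is intended for logging and tracing state transitions. A short hypothetical call site (the proxy used here is introduced later in this patch):

base::MemoryState state =
    base::MemoryCoordinatorProxy::GetInstance()->GetCurrentMemoryState();
VLOG(1) << "memory state: " << base::MemoryStateToString(state);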
diff --git a/chromium/base/memory/memory_coordinator_client.h b/chromium/base/memory/memory_coordinator_client.h
index d63b17cc1f8..148f4c17534 100644
--- a/chromium/base/memory/memory_coordinator_client.h
+++ b/chromium/base/memory/memory_coordinator_client.h
@@ -22,7 +22,9 @@ namespace base {
// MemoryState is an indicator that processes can use to guide their memory
// allocation policies. For example, a process that receives the suspended
// state can use that as a signal to drop memory caches.
-enum class MemoryState {
+// NOTE: This enum is used to back an UMA histogram, and therefore should be
+// treated as append-only.
+enum class MemoryState : int {
// The state is unknown.
UNKNOWN = -1,
// No memory constraints.
@@ -38,6 +40,11 @@ enum class MemoryState {
SUSPENDED = 2,
};
+const int kMemoryStateMax = static_cast<int>(MemoryState::SUSPENDED) + 1;
+
+// Returns a string representation of MemoryState.
+BASE_EXPORT const char* MemoryStateToString(MemoryState state);
+
// This is an interface for components which can respond to memory status
// changes. An initial state is NORMAL. See MemoryCoordinatorClientRegistry for
// threading guarantees and ownership management.
diff --git a/chromium/base/memory/memory_coordinator_proxy.cc b/chromium/base/memory/memory_coordinator_proxy.cc
new file mode 100644
index 00000000000..24b6d66655e
--- /dev/null
+++ b/chromium/base/memory/memory_coordinator_proxy.cc
@@ -0,0 +1,41 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/memory_coordinator_proxy.h"
+
+namespace base {
+
+MemoryCoordinatorProxy::MemoryCoordinatorProxy() {
+}
+
+MemoryCoordinatorProxy::~MemoryCoordinatorProxy() {
+}
+
+MemoryCoordinatorProxy* MemoryCoordinatorProxy::GetInstance() {
+ return Singleton<base::MemoryCoordinatorProxy>::get();
+}
+
+MemoryState MemoryCoordinatorProxy::GetCurrentMemoryState() const {
+ if (!getter_callback_)
+ return MemoryState::NORMAL;
+ return getter_callback_.Run();
+}
+
+void MemoryCoordinatorProxy::SetCurrentMemoryStateForTesting(
+ MemoryState memory_state) {
+ DCHECK(setter_callback_);
+ setter_callback_.Run(memory_state);
+}
+
+void MemoryCoordinatorProxy::SetGetCurrentMemoryStateCallback(
+ GetCurrentMemoryStateCallback callback) {
+ getter_callback_ = callback;
+}
+
+void MemoryCoordinatorProxy::SetSetCurrentMemoryStateForTestingCallback(
+ SetCurrentMemoryStateCallback callback) {
+ setter_callback_ = callback;
+}
+
+} // namespace base
diff --git a/chromium/base/memory/memory_coordinator_proxy.h b/chromium/base/memory/memory_coordinator_proxy.h
new file mode 100644
index 00000000000..4148da5dcee
--- /dev/null
+++ b/chromium/base/memory/memory_coordinator_proxy.h
@@ -0,0 +1,51 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_MEMORY_COORDINATOR_PROXY_H_
+#define BASE_MEMORY_MEMORY_COORDINATOR_PROXY_H_
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/memory/memory_coordinator_client.h"
+#include "base/memory/singleton.h"
+
+namespace base {
+
+// The proxy of MemoryCoordinator to be accessed from components that are not
+// in content/browser, e.g. net.
+class BASE_EXPORT MemoryCoordinatorProxy {
+ public:
+ using GetCurrentMemoryStateCallback = base::Callback<MemoryState()>;
+ using SetCurrentMemoryStateCallback = base::Callback<void(MemoryState)>;
+
+ static MemoryCoordinatorProxy* GetInstance();
+
+ // Returns the current memory state.
+ MemoryState GetCurrentMemoryState() const;
+
+ // Sets the current memory state. This function is for testing only.
+ void SetCurrentMemoryStateForTesting(MemoryState memory_state);
+
+ // Sets state-getter callback.
+ void SetGetCurrentMemoryStateCallback(GetCurrentMemoryStateCallback callback);
+
+ // Sets state-setter callback.
+ void SetSetCurrentMemoryStateForTestingCallback(
+ SetCurrentMemoryStateCallback callback);
+
+ private:
+ friend struct base::DefaultSingletonTraits<MemoryCoordinatorProxy>;
+
+ MemoryCoordinatorProxy();
+ virtual ~MemoryCoordinatorProxy();
+
+ GetCurrentMemoryStateCallback getter_callback_;
+ SetCurrentMemoryStateCallback setter_callback_;
+
+ DISALLOW_COPY_AND_ASSIGN(MemoryCoordinatorProxy);
+};
+
+} // namespace base
+
+#endif // BASE_MEMORY_MEMORY_COORDINATOR_PROXY_H_
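The proxy is pure plumbing: whichever component owns the real coordinator (content/browser) installs callbacks, and components that must not depend on content/, such as net, query through the singleton. A hedged wiring sketch (MyCoordinator and DropCaches() are hypothetical stand-ins):

// Producer side (the process that owns the real coordinator), run once:
base::MemoryCoordinatorProxy::GetInstance()->SetGetCurrentMemoryStateCallback(
    base::Bind(&MyCoordinator::GetCurrentMemoryState,
               base::Unretained(my_coordinator)));

// Consumer side (e.g. net); NORMAL is returned while no callback is set:
base::MemoryState state =
    base::MemoryCoordinatorProxy::GetInstance()->GetCurrentMemoryState();
if (state == base::MemoryState::SUSPENDED)
  DropCaches();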
diff --git a/chromium/base/memory/memory_pressure_listener.cc b/chromium/base/memory/memory_pressure_listener.cc
index 11859ada613..27e700189bd 100644
--- a/chromium/base/memory/memory_pressure_listener.cc
+++ b/chromium/base/memory/memory_pressure_listener.cc
@@ -38,8 +38,8 @@ class MemoryPressureObserver {
async_observers_->Notify(FROM_HERE,
&MemoryPressureListener::Notify, memory_pressure_level);
AutoLock lock(sync_observers_lock_);
- FOR_EACH_OBSERVER(MemoryPressureListener, *sync_observers_,
- MemoryPressureListener::SyncNotify(memory_pressure_level));
+ for (auto& observer : *sync_observers_)
+ observer.MemoryPressureListener::SyncNotify(memory_pressure_level);
}
private:
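This is part of the tree-wide migration away from the FOR_EACH_OBSERVER macro: ObserverList is directly iterable, so a range-based for loop expresses the same notification. The general shape of the rewrite, as a sketch:

#include "base/observer_list.h"

class Foo {
 public:
  virtual ~Foo() {}
  virtual void OnEvent(int arg) = 0;
};

void NotifyAll(base::ObserverList<Foo>* observers, int arg) {
  // Replaces: FOR_EACH_OBSERVER(Foo, *observers, OnEvent(arg));
  for (Foo& observer : *observers)
    observer.OnEvent(arg);
}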
diff --git a/chromium/base/memory/memory_pressure_monitor.cc b/chromium/base/memory/memory_pressure_monitor.cc
index 00633f1dd61..ed350b81b98 100644
--- a/chromium/base/memory/memory_pressure_monitor.cc
+++ b/chromium/base/memory/memory_pressure_monitor.cc
@@ -5,12 +5,39 @@
#include "base/memory/memory_pressure_monitor.h"
#include "base/logging.h"
+#include "base/metrics/histogram_macros.h"
namespace base {
namespace {
MemoryPressureMonitor* g_monitor = nullptr;
+// Enumeration of UMA memory pressure levels. This needs to be kept in sync with
+// histograms.xml and the memory pressure levels defined in
+// MemoryPressureListener.
+enum MemoryPressureLevelUMA {
+ UMA_MEMORY_PRESSURE_LEVEL_NONE = 0,
+ UMA_MEMORY_PRESSURE_LEVEL_MODERATE = 1,
+ UMA_MEMORY_PRESSURE_LEVEL_CRITICAL = 2,
+ // This must be the last value in the enum.
+ UMA_MEMORY_PRESSURE_LEVEL_COUNT,
+};
+
+// Converts a memory pressure level to an UMA enumeration value.
+MemoryPressureLevelUMA MemoryPressureLevelToUmaEnumValue(
+ base::MemoryPressureListener::MemoryPressureLevel level) {
+ switch (level) {
+ case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE:
+ return UMA_MEMORY_PRESSURE_LEVEL_NONE;
+ case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE:
+ return UMA_MEMORY_PRESSURE_LEVEL_MODERATE;
+ case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL:
+ return UMA_MEMORY_PRESSURE_LEVEL_CRITICAL;
+ }
+ NOTREACHED();
+ return UMA_MEMORY_PRESSURE_LEVEL_NONE;
+}
+
} // namespace
MemoryPressureMonitor::MemoryPressureMonitor() {
@@ -27,5 +54,18 @@ MemoryPressureMonitor::~MemoryPressureMonitor() {
MemoryPressureMonitor* MemoryPressureMonitor::Get() {
return g_monitor;
}
+void MemoryPressureMonitor::RecordMemoryPressure(
+ base::MemoryPressureListener::MemoryPressureLevel level,
+ int ticks) {
+ // Use the more primitive STATIC_HISTOGRAM_POINTER_BLOCK macro because the
+ // simple UMA_HISTOGRAM macros don't expose 'AddCount' functionality.
+ STATIC_HISTOGRAM_POINTER_BLOCK(
+ "Memory.PressureLevel",
+ AddCount(MemoryPressureLevelToUmaEnumValue(level), ticks),
+ base::LinearHistogram::FactoryGet(
+ "Memory.PressureLevel", 1, UMA_MEMORY_PRESSURE_LEVEL_COUNT,
+ UMA_MEMORY_PRESSURE_LEVEL_COUNT + 1,
+ base::HistogramBase::kUmaTargetedHistogramFlag));
+}
} // namespace base
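RecordMemoryPressure() centralizes the histogram logic previously open-coded in memory_pressure_monitor_win.cc; STATIC_HISTOGRAM_POINTER_BLOCK is needed because AddCount() records several samples at once, which the simple UMA_HISTOGRAM_ENUMERATION macro cannot. A usage sketch mirroring the unit test added later in this patch:

// Report that the process sat at MODERATE pressure for two 5-second ticks.
base::MemoryPressureMonitor::RecordMemoryPressure(
    base::MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE, 2);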
diff --git a/chromium/base/memory/memory_pressure_monitor.h b/chromium/base/memory/memory_pressure_monitor.h
index a4adba4f3ad..033379cdb8d 100644
--- a/chromium/base/memory/memory_pressure_monitor.h
+++ b/chromium/base/memory/memory_pressure_monitor.h
@@ -31,6 +31,9 @@ class BASE_EXPORT MemoryPressureMonitor {
// Return the singleton MemoryPressureMonitor.
static MemoryPressureMonitor* Get();
+ // Records the memory pressure UMA statistic. A tick is 5 seconds.
+ static void RecordMemoryPressure(MemoryPressureLevel level, int ticks);
+
// Returns the currently observed memory pressure.
virtual MemoryPressureLevel GetCurrentPressureLevel() const = 0;
diff --git a/chromium/base/memory/memory_pressure_monitor_chromeos.cc b/chromium/base/memory/memory_pressure_monitor_chromeos.cc
index 0e1aa815ab0..b90075d903e 100644
--- a/chromium/base/memory/memory_pressure_monitor_chromeos.cc
+++ b/chromium/base/memory/memory_pressure_monitor_chromeos.cc
@@ -108,6 +108,7 @@ MemoryPressureMonitor::MemoryPressureMonitor(
: current_memory_pressure_level_(
MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE),
moderate_pressure_repeat_count_(0),
+ seconds_since_reporting_(0),
moderate_pressure_threshold_percent_(
GetModerateMemoryThresholdInPercent(thresholds)),
critical_pressure_threshold_percent_(
@@ -158,8 +159,13 @@ void MemoryPressureMonitor::StopObserving() {
void MemoryPressureMonitor::CheckMemoryPressureAndRecordStatistics() {
CheckMemoryPressure();
-
+ if (seconds_since_reporting_++ == 5) {
+ seconds_since_reporting_ = 0;
+ RecordMemoryPressure(current_memory_pressure_level_, 1);
+ }
// Record UMA histogram statistics for the current memory pressure level.
+ // TODO(lgrey): Remove this once there's a usable history for the
+ // "Memory.PressureLevel" statistic
MemoryPressureLevelUMA memory_pressure_level_uma(MEMORY_PRESSURE_LEVEL_NONE);
switch (current_memory_pressure_level_) {
case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE:
diff --git a/chromium/base/memory/memory_pressure_monitor_chromeos.h b/chromium/base/memory/memory_pressure_monitor_chromeos.h
index 1000f300987..88dccc9df8e 100644
--- a/chromium/base/memory/memory_pressure_monitor_chromeos.h
+++ b/chromium/base/memory/memory_pressure_monitor_chromeos.h
@@ -101,6 +101,13 @@ class BASE_EXPORT MemoryPressureMonitor : public base::MemoryPressureMonitor {
// gets used to count the number of events since the last event occurred.
int moderate_pressure_repeat_count_;
+ // The "Memory.PressureLevel" statistic is recorded every
+ // 5 seconds, but the timer to report "ChromeOS.MemoryPressureLevel"
+ // fires every second. This counter is used to allow reporting
+ // "Memory.PressureLevel" correctly without adding another
+ // timer.
+ int seconds_since_reporting_;
+
// The thresholds for moderate and critical pressure.
const int moderate_pressure_threshold_percent_;
const int critical_pressure_threshold_percent_;
diff --git a/chromium/base/memory/memory_pressure_monitor_mac.cc b/chromium/base/memory/memory_pressure_monitor_mac.cc
index d3430e94ca4..5ea381fd3bd 100644
--- a/chromium/base/memory/memory_pressure_monitor_mac.cc
+++ b/chromium/base/memory/memory_pressure_monitor_mac.cc
@@ -8,6 +8,8 @@
#include <stddef.h>
#include <sys/sysctl.h>
+#include <cmath>
+
#include "base/bind.h"
#include "base/logging.h"
#include "base/mac/mac_util.h"
@@ -33,29 +35,21 @@ MemoryPressureMonitor::MemoryPressureLevelForMacMemoryPressure(
return MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE;
}
-void MemoryPressureMonitor::NotifyMemoryPressureChanged(
- dispatch_source_s* event_source,
- const MemoryPressureMonitor::DispatchCallback& dispatch_callback) {
- int mac_memory_pressure = dispatch_source_get_data(event_source);
- MemoryPressureListener::MemoryPressureLevel memory_pressure_level =
- MemoryPressureLevelForMacMemoryPressure(mac_memory_pressure);
- dispatch_callback.Run(memory_pressure_level);
-}
-
MemoryPressureMonitor::MemoryPressureMonitor()
- // The MemoryPressureListener doesn't want to know about transitions to
- // MEMORY_PRESSURE_LEVEL_NONE so don't watch for
- // DISPATCH_MEMORYPRESSURE_NORMAL notifications.
: memory_level_event_source_(dispatch_source_create(
DISPATCH_SOURCE_TYPE_MEMORYPRESSURE,
0,
- DISPATCH_MEMORYPRESSURE_WARN | DISPATCH_MEMORYPRESSURE_CRITICAL,
+ DISPATCH_MEMORYPRESSURE_WARN | DISPATCH_MEMORYPRESSURE_CRITICAL |
+ DISPATCH_MEMORYPRESSURE_NORMAL,
dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0))),
dispatch_callback_(
- base::Bind(&MemoryPressureListener::NotifyMemoryPressure)) {
+ base::Bind(&MemoryPressureListener::NotifyMemoryPressure)),
+ last_pressure_change_(CFAbsoluteTimeGetCurrent()),
+ reporting_error_(0) {
+ last_pressure_level_ = GetCurrentPressureLevel();
dispatch_source_set_event_handler(memory_level_event_source_, ^{
- NotifyMemoryPressureChanged(memory_level_event_source_.get(),
- dispatch_callback_);
+ OnMemoryPressureChanged(memory_level_event_source_.get(),
+ dispatch_callback_);
});
dispatch_resume(memory_level_event_source_);
}
@@ -72,6 +66,35 @@ MemoryPressureMonitor::GetCurrentPressureLevel() const {
&length, nullptr, 0);
return MemoryPressureLevelForMacMemoryPressure(mac_memory_pressure);
}
+void MemoryPressureMonitor::OnMemoryPressureChanged(
+ dispatch_source_s* event_source,
+ const MemoryPressureMonitor::DispatchCallback& dispatch_callback) {
+ int mac_memory_pressure = dispatch_source_get_data(event_source);
+ MemoryPressureListener::MemoryPressureLevel memory_pressure_level =
+ MemoryPressureLevelForMacMemoryPressure(mac_memory_pressure);
+ CFTimeInterval now = CFAbsoluteTimeGetCurrent();
+ CFTimeInterval since_last_change = now - last_pressure_change_;
+ last_pressure_change_ = now;
+
+ double ticks_to_report;
+ reporting_error_ =
+ modf(since_last_change + reporting_error_, &ticks_to_report);
+
+ // Sierra fails to call the handler when pressure returns to normal,
+ // which would skew our data. For example, if pressure went to 'warn'
+ // at T0, back to 'normal' at T1, then to 'critical' at T10, we would
+ // report 10 ticks of 'warn' instead of 1 tick of 'warn' and 9 ticks
+ // of 'normal'.
+ // This is rdar://29114314
+ if (mac::IsAtMostOS10_11())
+ RecordMemoryPressure(last_pressure_level_,
+ static_cast<int>(ticks_to_report));
+
+ last_pressure_level_ = memory_pressure_level;
+ if (memory_pressure_level !=
+ MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE)
+ dispatch_callback.Run(memory_pressure_level);
+}
void MemoryPressureMonitor::SetDispatchCallback(
const DispatchCallback& callback) {
diff --git a/chromium/base/memory/memory_pressure_monitor_mac.h b/chromium/base/memory/memory_pressure_monitor_mac.h
index 89794f67bdd..9f89b1661a6 100644
--- a/chromium/base/memory/memory_pressure_monitor_mac.h
+++ b/chromium/base/memory/memory_pressure_monitor_mac.h
@@ -5,6 +5,7 @@
#ifndef BASE_MEMORY_MEMORY_PRESSURE_MONITOR_MAC_H_
#define BASE_MEMORY_MEMORY_PRESSURE_MONITOR_MAC_H_
+#include <CoreFoundation/CFDate.h>
#include <dispatch/dispatch.h>
#include "base/base_export.h"
@@ -35,14 +36,22 @@ class BASE_EXPORT MemoryPressureMonitor : public base::MemoryPressureMonitor {
static MemoryPressureLevel
MemoryPressureLevelForMacMemoryPressure(int mac_memory_pressure);
- static void NotifyMemoryPressureChanged(
- dispatch_source_s* event_source,
- const DispatchCallback& dispatch_callback);
+ void OnMemoryPressureChanged(dispatch_source_s* event_source,
+ const DispatchCallback& dispatch_callback);
ScopedDispatchObject<dispatch_source_t> memory_level_event_source_;
DispatchCallback dispatch_callback_;
+ CFTimeInterval last_pressure_change_;
+
+ MemoryPressureLevel last_pressure_level_;
+
+ // The UMA statistic is recorded in 5-second increments. This
+ // accumulates the remaining time to be rolled into the next
+ // call.
+ CFTimeInterval reporting_error_;
+
DISALLOW_COPY_AND_ASSIGN(MemoryPressureMonitor);
};
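The reporting_error_ bookkeeping above relies on modf(): each callback splits the accumulated elapsed time into an integral tick count reported now and a fractional remainder carried into the next call, so no partial tick is dropped or double-counted. A self-contained numeric walk-through (the values are invented):

#include <cmath>
#include <cstdio>

int main() {
  double reporting_error = 0.0;
  const double elapsed[] = {2.4, 2.4, 0.3};  // time between pressure changes
  for (double since_last_change : elapsed) {
    double ticks_to_report;
    reporting_error =
        std::modf(since_last_change + reporting_error, &ticks_to_report);
    std::printf("report %d tick(s), carry %.1f\n",
                static_cast<int>(ticks_to_report), reporting_error);
  }
  // Output: 2 ticks carry 0.4, 2 ticks carry 0.8, 1 tick carry 0.1 --
  // 5 ticks total for 5.1 units of elapsed time, remainder preserved.
}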
diff --git a/chromium/base/memory/memory_pressure_monitor_unittest.cc b/chromium/base/memory/memory_pressure_monitor_unittest.cc
new file mode 100644
index 00000000000..e9747418cc8
--- /dev/null
+++ b/chromium/base/memory/memory_pressure_monitor_unittest.cc
@@ -0,0 +1,33 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/memory_pressure_monitor.h"
+
+#include "base/macros.h"
+#include "base/memory/memory_pressure_listener.h"
+#include "base/test/histogram_tester.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(MemoryPressureMonitorTest, RecordMemoryPressure) {
+ base::HistogramTester tester;
+ const char* kHistogram = "Memory.PressureLevel";
+
+ MemoryPressureMonitor::RecordMemoryPressure(
+ MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE, 3);
+ tester.ExpectTotalCount(kHistogram, 3);
+ tester.ExpectBucketCount(kHistogram, 0, 3);
+
+ MemoryPressureMonitor::RecordMemoryPressure(
+ MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE, 2);
+ tester.ExpectTotalCount(kHistogram, 5);
+ tester.ExpectBucketCount(kHistogram, 1, 2);
+
+ MemoryPressureMonitor::RecordMemoryPressure(
+ MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL, 1);
+ tester.ExpectTotalCount(kHistogram, 6);
+ tester.ExpectBucketCount(kHistogram, 2, 1);
+}
+} // namespace base
diff --git a/chromium/base/memory/memory_pressure_monitor_win.cc b/chromium/base/memory/memory_pressure_monitor_win.cc
index a93b425e822..cad1fcb8d05 100644
--- a/chromium/base/memory/memory_pressure_monitor_win.cc
+++ b/chromium/base/memory/memory_pressure_monitor_win.cc
@@ -6,7 +6,6 @@
#include <windows.h>
-#include "base/metrics/histogram_macros.h"
#include "base/single_thread_task_runner.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
@@ -18,32 +17,6 @@ namespace {
static const DWORDLONG kMBBytes = 1024 * 1024;
-// Enumeration of UMA memory pressure levels. This needs to be kept in sync with
-// histograms.xml and the memory pressure levels defined in
-// MemoryPressureListener.
-enum MemoryPressureLevelUMA {
- UMA_MEMORY_PRESSURE_LEVEL_NONE = 0,
- UMA_MEMORY_PRESSURE_LEVEL_MODERATE = 1,
- UMA_MEMORY_PRESSURE_LEVEL_CRITICAL = 2,
- // This must be the last value in the enum.
- UMA_MEMORY_PRESSURE_LEVEL_COUNT,
-};
-
-// Converts a memory pressure level to an UMA enumeration value.
-MemoryPressureLevelUMA MemoryPressureLevelToUmaEnumValue(
- MemoryPressureListener::MemoryPressureLevel level) {
- switch (level) {
- case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE:
- return UMA_MEMORY_PRESSURE_LEVEL_NONE;
- case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE:
- return UMA_MEMORY_PRESSURE_LEVEL_MODERATE;
- case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL:
- return UMA_MEMORY_PRESSURE_LEVEL_CRITICAL;
- }
- NOTREACHED();
- return UMA_MEMORY_PRESSURE_LEVEL_NONE;
-}
-
} // namespace
// The following constants have been lifted from similar values in the ChromeOS
@@ -210,10 +183,7 @@ void MemoryPressureMonitor::CheckMemoryPressureAndRecordStatistics() {
CheckMemoryPressure();
- UMA_HISTOGRAM_ENUMERATION(
- "Memory.PressureLevel",
- MemoryPressureLevelToUmaEnumValue(current_memory_pressure_level_),
- UMA_MEMORY_PRESSURE_LEVEL_COUNT);
+ RecordMemoryPressure(current_memory_pressure_level_, 1);
}
MemoryPressureListener::MemoryPressureLevel
diff --git a/chromium/base/memory/ref_counted.cc b/chromium/base/memory/ref_counted.cc
index f5924d0fe76..cd6181b1473 100644
--- a/chromium/base/memory/ref_counted.cc
+++ b/chromium/base/memory/ref_counted.cc
@@ -15,32 +15,32 @@ bool RefCountedThreadSafeBase::HasOneRef() const {
}
RefCountedThreadSafeBase::RefCountedThreadSafeBase() : ref_count_(0) {
-#ifndef NDEBUG
+#if DCHECK_IS_ON()
in_dtor_ = false;
#endif
}
RefCountedThreadSafeBase::~RefCountedThreadSafeBase() {
-#ifndef NDEBUG
+#if DCHECK_IS_ON()
DCHECK(in_dtor_) << "RefCountedThreadSafe object deleted without "
"calling Release()";
#endif
}
void RefCountedThreadSafeBase::AddRef() const {
-#ifndef NDEBUG
+#if DCHECK_IS_ON()
DCHECK(!in_dtor_);
#endif
AtomicRefCountInc(&ref_count_);
}
bool RefCountedThreadSafeBase::Release() const {
-#ifndef NDEBUG
+#if DCHECK_IS_ON()
DCHECK(!in_dtor_);
DCHECK(!AtomicRefCountIsZero(&ref_count_));
#endif
if (!AtomicRefCountDec(&ref_count_)) {
-#ifndef NDEBUG
+#if DCHECK_IS_ON()
in_dtor_ = true;
#endif
return true;
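Switching these guards from #ifndef NDEBUG to DCHECK_IS_ON() keeps the in_dtor_ bookkeeping aligned with the DCHECKs that read it: DCHECK_IS_ON() is also true in release builds compiled with dcheck_always_on, where NDEBUG is still defined. The general pattern, as a sketch:

#include "base/logging.h"

class Resource {
 public:
  ~Resource() {
#if DCHECK_IS_ON()
    DCHECK(released_) << "Resource destroyed without Release()";
#endif
  }
  void Release() {
#if DCHECK_IS_ON()
    released_ = true;
#endif
  }

 private:
#if DCHECK_IS_ON()
  bool released_ = false;
#endif
};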
diff --git a/chromium/base/memory/ref_counted.h b/chromium/base/memory/ref_counted.h
index 20fe49c926d..960c8a28453 100644
--- a/chromium/base/memory/ref_counted.h
+++ b/chromium/base/memory/ref_counted.h
@@ -14,10 +14,8 @@
#include "base/atomic_ref_count.h"
#include "base/base_export.h"
#include "base/compiler_specific.h"
-#include "base/macros.h"
-#ifndef NDEBUG
#include "base/logging.h"
-#endif
+#include "base/macros.h"
#include "base/threading/thread_collision_warner.h"
#include "build/build_config.h"
@@ -32,16 +30,16 @@ class BASE_EXPORT RefCountedBase {
protected:
RefCountedBase()
: ref_count_(0)
- #ifndef NDEBUG
- , in_dtor_(false)
- #endif
- {
+#if DCHECK_IS_ON()
+ , in_dtor_(false)
+#endif
+ {
}
~RefCountedBase() {
- #ifndef NDEBUG
+#if DCHECK_IS_ON()
DCHECK(in_dtor_) << "RefCounted object deleted without calling Release()";
- #endif
+#endif
}
@@ -50,9 +48,9 @@ class BASE_EXPORT RefCountedBase {
// Current thread books the critical section "AddRelease"
// without release it.
// DFAKE_SCOPED_LOCK_THREAD_LOCKED(add_release_);
- #ifndef NDEBUG
+#if DCHECK_IS_ON()
DCHECK(!in_dtor_);
- #endif
+#endif
++ref_count_;
}
@@ -62,13 +60,13 @@ class BASE_EXPORT RefCountedBase {
// Current thread books the critical section "AddRelease"
// without release it.
// DFAKE_SCOPED_LOCK_THREAD_LOCKED(add_release_);
- #ifndef NDEBUG
+#if DCHECK_IS_ON()
DCHECK(!in_dtor_);
- #endif
+#endif
if (--ref_count_ == 0) {
- #ifndef NDEBUG
+#if DCHECK_IS_ON()
in_dtor_ = true;
- #endif
+#endif
return true;
}
return false;
@@ -76,7 +74,7 @@ class BASE_EXPORT RefCountedBase {
private:
mutable size_t ref_count_;
-#ifndef NDEBUG
+#if DCHECK_IS_ON()
mutable bool in_dtor_;
#endif
@@ -100,7 +98,7 @@ class BASE_EXPORT RefCountedThreadSafeBase {
private:
mutable AtomicRefCount ref_count_;
-#ifndef NDEBUG
+#if DCHECK_IS_ON()
mutable bool in_dtor_;
#endif
@@ -126,7 +124,7 @@ class BASE_EXPORT RefCountedThreadSafeBase {
template <class T>
class RefCounted : public subtle::RefCountedBase {
public:
- RefCounted() {}
+ RefCounted() = default;
void AddRef() const {
subtle::RefCountedBase::AddRef();
@@ -139,7 +137,7 @@ class RefCounted : public subtle::RefCountedBase {
}
protected:
- ~RefCounted() {}
+ ~RefCounted() = default;
private:
DISALLOW_COPY_AND_ASSIGN(RefCounted<T>);
@@ -176,7 +174,7 @@ struct DefaultRefCountedThreadSafeTraits {
template <class T, typename Traits = DefaultRefCountedThreadSafeTraits<T> >
class RefCountedThreadSafe : public subtle::RefCountedThreadSafeBase {
public:
- RefCountedThreadSafe() {}
+ RefCountedThreadSafe() = default;
void AddRef() const {
subtle::RefCountedThreadSafeBase::AddRef();
@@ -189,7 +187,7 @@ class RefCountedThreadSafe : public subtle::RefCountedThreadSafeBase {
}
protected:
- ~RefCountedThreadSafe() {}
+ ~RefCountedThreadSafe() = default;
private:
friend struct DefaultRefCountedThreadSafeTraits<T>;
@@ -213,7 +211,7 @@ class RefCountedData
private:
friend class base::RefCountedThreadSafe<base::RefCountedData<T> >;
- ~RefCountedData() {}
+ ~RefCountedData() = default;
};
} // namespace base
@@ -237,7 +235,7 @@ class RefCountedData
// void some_other_function() {
// scoped_refptr<MyFoo> foo = new MyFoo();
// ...
-// foo = NULL; // explicitly releases |foo|
+// foo = nullptr; // explicitly releases |foo|
// ...
// if (foo)
// foo->Method(param);
@@ -252,7 +250,7 @@ class RefCountedData
// scoped_refptr<MyFoo> b;
//
// b.swap(a);
-// // now, |b| references the MyFoo object, and |a| references NULL.
+// // now, |b| references the MyFoo object, and |a| references nullptr.
// }
//
// To make both |a| and |b| in the above example reference the same MyFoo
@@ -271,8 +269,7 @@ class scoped_refptr {
public:
typedef T element_type;
- scoped_refptr() : ptr_(NULL) {
- }
+ scoped_refptr() {}
scoped_refptr(T* p) : ptr_(p) {
if (ptr_)
@@ -314,12 +311,12 @@ class scoped_refptr {
T* get() const { return ptr_; }
T& operator*() const {
- assert(ptr_ != NULL);
+ assert(ptr_ != nullptr);
return *ptr_;
}
T* operator->() const {
- assert(ptr_ != NULL);
+ assert(ptr_ != nullptr);
return ptr_;
}
@@ -382,7 +379,7 @@ class scoped_refptr {
}
protected:
- T* ptr_;
+ T* ptr_ = nullptr;
private:
// Friend required for move constructors that set r.ptr_ to null.
@@ -397,11 +394,13 @@ class scoped_refptr {
static void Release(T* ptr);
};
+// static
template <typename T>
void scoped_refptr<T>::AddRef(T* ptr) {
ptr->AddRef();
}
+// static
template <typename T>
void scoped_refptr<T>::Release(T* ptr) {
ptr->Release();
diff --git a/chromium/base/memory/ref_counted_unittest.cc b/chromium/base/memory/ref_counted_unittest.cc
index 7c4e07af49e..65c15d26ab1 100644
--- a/chromium/base/memory/ref_counted_unittest.cc
+++ b/chromium/base/memory/ref_counted_unittest.cc
@@ -4,6 +4,8 @@
#include "base/memory/ref_counted.h"
+#include <utility>
+
#include "base/test/opaque_ref_counted.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -156,13 +158,34 @@ TEST(RefCountedUnitTest, ScopedRefPtrToSelfMoveAssignment) {
}
TEST(RefCountedUnitTest, ScopedRefPtrToOpaque) {
- scoped_refptr<base::OpaqueRefCounted> p = base::MakeOpaqueRefCounted();
- base::TestOpaqueRefCounted(p);
+ scoped_refptr<base::OpaqueRefCounted> initial = base::MakeOpaqueRefCounted();
+ base::TestOpaqueRefCounted(initial);
+
+ scoped_refptr<base::OpaqueRefCounted> assigned;
+ assigned = initial;
+
+ scoped_refptr<base::OpaqueRefCounted> copied(initial);
+
+ scoped_refptr<base::OpaqueRefCounted> moved(std::move(initial));
+
+ scoped_refptr<base::OpaqueRefCounted> move_assigned;
+ move_assigned = std::move(moved);
+}
+
+TEST(RefCountedUnitTest, ScopedRefPtrToOpaqueThreadSafe) {
+ scoped_refptr<base::OpaqueRefCountedThreadSafe> initial =
+ base::MakeOpaqueRefCountedThreadSafe();
+ base::TestOpaqueRefCountedThreadSafe(initial);
+
+ scoped_refptr<base::OpaqueRefCountedThreadSafe> assigned;
+ assigned = initial;
+
+ scoped_refptr<base::OpaqueRefCountedThreadSafe> copied(initial);
+
+ scoped_refptr<base::OpaqueRefCountedThreadSafe> moved(std::move(initial));
- scoped_refptr<base::OpaqueRefCounted> q;
- q = p;
- base::TestOpaqueRefCounted(p);
- base::TestOpaqueRefCounted(q);
+ scoped_refptr<base::OpaqueRefCountedThreadSafe> move_assigned;
+ move_assigned = std::move(moved);
}
TEST(RefCountedUnitTest, BooleanTesting) {
diff --git a/chromium/base/memory/scoped_vector.h b/chromium/base/memory/scoped_vector.h
index ebc2617cfb2..a320b1e5d1f 100644
--- a/chromium/base/memory/scoped_vector.h
+++ b/chromium/base/memory/scoped_vector.h
@@ -12,7 +12,6 @@
#include "base/logging.h"
#include "base/macros.h"
-#include "base/stl_util.h"
// ScopedVector wraps a vector deleting the elements from its
// destructor.
@@ -88,8 +87,10 @@ class ScopedVector {
// Resize, deleting elements in the disappearing range if we are shrinking.
void resize(size_t new_size) {
- if (v_.size() > new_size)
- base::STLDeleteContainerPointers(v_.begin() + new_size, v_.end());
+ if (v_.size() > new_size) {
+ for (auto it = v_.begin() + new_size; it != v_.end(); ++it)
+ delete *it;
+ }
v_.resize(new_size);
}
@@ -98,7 +99,11 @@ class ScopedVector {
v_.assign(begin, end);
}
- void clear() { base::STLDeleteElements(&v_); }
+ void clear() {
+ for (auto* item : *this)
+ delete item;
+ v_.clear();
+ }
// Like |clear()|, but doesn't delete any elements.
void weak_clear() { v_.clear(); }
@@ -124,7 +129,8 @@ class ScopedVector {
}
iterator erase(iterator first, iterator last) {
- base::STLDeleteContainerPointers(first, last);
+ for (auto it = first; it != last; ++it)
+ delete *it;
return v_.erase(first, last);
}
diff --git a/chromium/base/memory/shared_memory.h b/chromium/base/memory/shared_memory.h
index e1c9fa70bd7..f68c861647c 100644
--- a/chromium/base/memory/shared_memory.h
+++ b/chromium/base/memory/shared_memory.h
@@ -34,31 +34,29 @@ class FilePath;
// Options for creating a shared memory object.
struct BASE_EXPORT SharedMemoryCreateOptions {
- SharedMemoryCreateOptions();
-
#if !(defined(OS_MACOSX) && !defined(OS_IOS))
// DEPRECATED (crbug.com/345734):
// If NULL, the object is anonymous. This pointer is owned by the caller
// and must live through the call to Create().
- const std::string* name_deprecated;
+ const std::string* name_deprecated = nullptr;
// DEPRECATED (crbug.com/345734):
// If true, and the shared memory already exists, Create() will open the
// existing shared memory and ignore the size parameter. If false,
// shared memory must not exist. This flag is meaningless unless
// name_deprecated is non-NULL.
- bool open_existing_deprecated;
+ bool open_existing_deprecated = false;
#endif // !(defined(OS_MACOSX) && !defined(OS_IOS))
// Size of the shared memory object to be created.
// When opening an existing object, this has no effect.
- size_t size;
+ size_t size = 0;
// If true, mappings might need to be made executable later.
- bool executable;
+ bool executable = false;
// If true, the file can be shared read-only to a process.
- bool share_read_only;
+ bool share_read_only = false;
};
// Platform abstraction for shared memory. Provides a C++ wrapper
@@ -194,6 +192,13 @@ class BASE_EXPORT SharedMemory {
// identifier is not portable.
SharedMemoryHandle handle() const;
+ // Returns the underlying OS handle for this segment. The caller also gets
+ // ownership of the handle. This is logically equivalent to:
+ // SharedMemoryHandle dup = DuplicateHandle(handle());
+ // Close();
+ // return dup;
+ SharedMemoryHandle TakeHandle();
+
// Closes the open shared memory segment. The memory will remain mapped if
// it was previously mapped.
// It is safe to call Close repeatedly.
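TakeHandle() exists so a segment can be handed off wholesale, typically across IPC: the returned handle owns the OS resource and the SharedMemory object is left closed, so exactly one side remains responsible for the lifetime. A hedged usage sketch:

bool CreateAndDetach(base::SharedMemoryHandle* out) {
  base::SharedMemory shm;
  if (!shm.CreateAndMapAnonymous(4096))
    return false;
  // ... write into shm.memory() ...
  *out = shm.TakeHandle();
  // |shm| owns nothing now; |*out| must be passed on (e.g. over IPC) or
  // closed by the receiver, or the segment leaks.
  return true;
}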
diff --git a/chromium/base/memory/shared_memory_handle.h b/chromium/base/memory/shared_memory_handle.h
index 8eff26b9dca..c3fd7ae34bf 100644
--- a/chromium/base/memory/shared_memory_handle.h
+++ b/chromium/base/memory/shared_memory_handle.h
@@ -24,8 +24,6 @@
namespace base {
-class Pickle;
-
// SharedMemoryHandle is a platform specific type which represents
// the underlying OS handle to a shared memory segment.
#if defined(OS_POSIX) && !(defined(OS_MACOSX) && !defined(OS_IOS))
diff --git a/chromium/base/memory/shared_memory_mac.cc b/chromium/base/memory/shared_memory_mac.cc
index 97ce94ea1b2..a8f09555d96 100644
--- a/chromium/base/memory/shared_memory_mac.cc
+++ b/chromium/base/memory/shared_memory_mac.cc
@@ -69,11 +69,6 @@ bool MakeMachSharedMemoryHandleReadOnly(SharedMemoryHandle* new_handle,
} // namespace
-SharedMemoryCreateOptions::SharedMemoryCreateOptions()
- : size(0),
- executable(false),
- share_read_only(false) {}
-
SharedMemory::SharedMemory()
: mapped_size_(0), memory_(NULL), read_only_(false), requested_size_(0) {}
@@ -182,6 +177,12 @@ SharedMemoryHandle SharedMemory::handle() const {
return shm_;
}
+SharedMemoryHandle SharedMemory::TakeHandle() {
+ SharedMemoryHandle dup = DuplicateHandle(handle());
+ Close();
+ return dup;
+}
+
void SharedMemory::Close() {
shm_.Close();
shm_ = SharedMemoryHandle();
diff --git a/chromium/base/memory/shared_memory_nacl.cc b/chromium/base/memory/shared_memory_nacl.cc
index a329cd19837..945fc61375a 100644
--- a/chromium/base/memory/shared_memory_nacl.cc
+++ b/chromium/base/memory/shared_memory_nacl.cc
@@ -17,13 +17,6 @@
namespace base {
-SharedMemoryCreateOptions::SharedMemoryCreateOptions()
- : name_deprecated(nullptr),
- open_existing_deprecated(false),
- size(0),
- executable(false),
- share_read_only(false) {}
-
SharedMemory::SharedMemory()
: mapped_file_(-1),
mapped_size_(0),
@@ -129,6 +122,14 @@ SharedMemoryHandle SharedMemory::handle() const {
return FileDescriptor(mapped_file_, false);
}
+SharedMemoryHandle SharedMemory::TakeHandle() {
+ FileDescriptor handle(mapped_file_, true);
+ mapped_file_ = -1;
+ memory_ = nullptr;
+ mapped_size_ = 0;
+ return handle;
+}
+
void SharedMemory::Close() {
if (mapped_file_ > 0) {
if (close(mapped_file_) < 0)
diff --git a/chromium/base/memory/shared_memory_posix.cc b/chromium/base/memory/shared_memory_posix.cc
index d55c2df28bd..783bdfce86c 100644
--- a/chromium/base/memory/shared_memory_posix.cc
+++ b/chromium/base/memory/shared_memory_posix.cc
@@ -100,13 +100,6 @@ bool CreateAnonymousSharedMemory(const SharedMemoryCreateOptions& options,
#endif // !defined(OS_ANDROID)
}
-SharedMemoryCreateOptions::SharedMemoryCreateOptions()
- : name_deprecated(nullptr),
- open_existing_deprecated(false),
- size(0),
- executable(false),
- share_read_only(false) {}
-
SharedMemory::SharedMemory()
: mapped_file_(-1),
readonly_mapped_file_(-1),
@@ -386,6 +379,14 @@ SharedMemoryHandle SharedMemory::handle() const {
return FileDescriptor(mapped_file_, false);
}
+SharedMemoryHandle SharedMemory::TakeHandle() {
+ FileDescriptor handle(mapped_file_, true);
+ mapped_file_ = -1;
+ memory_ = nullptr;
+ mapped_size_ = 0;
+ return handle;
+}
+
void SharedMemory::Close() {
if (mapped_file_ > 0) {
if (IGNORE_EINTR(close(mapped_file_)) < 0)
diff --git a/chromium/base/memory/shared_memory_win.cc b/chromium/base/memory/shared_memory_win.cc
index 4e1b63c3cf7..1eba23b846b 100644
--- a/chromium/base/memory/shared_memory_win.cc
+++ b/chromium/base/memory/shared_memory_win.cc
@@ -32,10 +32,14 @@ enum CreateError {
CREATE_ERROR_LAST = ALREADY_EXISTS
};
-// Emits an UMA metric.
-void LogError(CreateError error) {
+// Emits UMA metrics about encountered errors. Pass zero (0) for |winerror|
+// if there is no associated Windows error.
+void LogError(CreateError error, DWORD winerror) {
UMA_HISTOGRAM_ENUMERATION("SharedMemory.CreateError", error,
CREATE_ERROR_LAST + 1);
+ static_assert(ERROR_SUCCESS == 0, "Windows error code changed!");
+ if (winerror != ERROR_SUCCESS)
+ UMA_HISTOGRAM_SPARSE_SLOWLY("SharedMemory.CreateWinError", winerror);
}
typedef enum _SECTION_INFORMATION_CLASS {
@@ -108,7 +112,7 @@ HANDLE CreateFileMappingWithReducedPermissions(SECURITY_ATTRIBUTES* sa,
HANDLE h = CreateFileMapping(INVALID_HANDLE_VALUE, sa, PAGE_READWRITE, 0,
static_cast<DWORD>(rounded_size), name);
if (!h) {
- LogError(CREATE_FILE_MAPPING_FAILURE);
+ LogError(CREATE_FILE_MAPPING_FAILURE, GetLastError());
return nullptr;
}
@@ -120,7 +124,7 @@ HANDLE CreateFileMappingWithReducedPermissions(SECURITY_ATTRIBUTES* sa,
DCHECK(rv);
if (!success) {
- LogError(REDUCE_PERMISSIONS_FAILURE);
+ LogError(REDUCE_PERMISSIONS_FAILURE, GetLastError());
return nullptr;
}
@@ -131,13 +135,6 @@ HANDLE CreateFileMappingWithReducedPermissions(SECURITY_ATTRIBUTES* sa,
namespace base {
-SharedMemoryCreateOptions::SharedMemoryCreateOptions()
- : name_deprecated(nullptr),
- open_existing_deprecated(false),
- size(0),
- executable(false),
- share_read_only(false) {}
-
SharedMemory::SharedMemory()
: external_section_(false),
mapped_size_(0),
@@ -218,14 +215,14 @@ bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
DCHECK(!options.executable);
DCHECK(!mapped_file_.Get());
if (options.size == 0) {
- LogError(SIZE_ZERO);
+ LogError(SIZE_ZERO, 0);
return false;
}
// Check maximum accounting for overflow.
if (options.size >
static_cast<size_t>(std::numeric_limits<int>::max()) - kSectionMask) {
- LogError(SIZE_TOO_LARGE);
+ LogError(SIZE_TOO_LARGE, 0);
return false;
}
@@ -240,15 +237,15 @@ bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
// Add an empty DACL to enforce anonymous read-only sections.
sa.lpSecurityDescriptor = &sd;
if (!InitializeAcl(&dacl, sizeof(dacl), ACL_REVISION)) {
- LogError(INITIALIZE_ACL_FAILURE);
+ LogError(INITIALIZE_ACL_FAILURE, GetLastError());
return false;
}
if (!InitializeSecurityDescriptor(&sd, SECURITY_DESCRIPTOR_REVISION)) {
- LogError(INITIALIZE_SECURITY_DESC_FAILURE);
+ LogError(INITIALIZE_SECURITY_DESC_FAILURE, GetLastError());
return false;
}
if (!SetSecurityDescriptorDacl(&sd, TRUE, &dacl, FALSE)) {
- LogError(SET_SECURITY_DESC_FAILURE);
+ LogError(SET_SECURITY_DESC_FAILURE, GetLastError());
return false;
}
@@ -277,12 +274,13 @@ bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
external_section_ = true;
if (!options.open_existing_deprecated) {
Close();
- LogError(ALREADY_EXISTS);
+ // From "if" above: GetLastError() == ERROR_ALREADY_EXISTS.
+ LogError(ALREADY_EXISTS, ERROR_ALREADY_EXISTS);
return false;
}
}
- LogError(SUCCESS);
+ LogError(SUCCESS, ERROR_SUCCESS);
return true;
}
@@ -386,4 +384,12 @@ SharedMemoryHandle SharedMemory::handle() const {
return SharedMemoryHandle(mapped_file_.Get(), base::GetCurrentProcId());
}
+SharedMemoryHandle SharedMemory::TakeHandle() {
+ SharedMemoryHandle handle(mapped_file_.Take(), base::GetCurrentProcId());
+ handle.SetOwnershipPassesToIPC(true);
+ memory_ = nullptr;
+ mapped_size_ = 0;
+ return handle;
+}
+
} // namespace base
diff --git a/chromium/base/message_loop/incoming_task_queue.h b/chromium/base/message_loop/incoming_task_queue.h
index aff71d20bf8..157e47fa14b 100644
--- a/chromium/base/message_loop/incoming_task_queue.h
+++ b/chromium/base/message_loop/incoming_task_queue.h
@@ -16,7 +16,6 @@
namespace base {
class MessageLoop;
-class WaitableEvent;
namespace internal {
diff --git a/chromium/base/message_loop/message_loop.cc b/chromium/base/message_loop/message_loop.cc
index 74287b148e6..1581f6cfb4b 100644
--- a/chromium/base/message_loop/message_loop.cc
+++ b/chromium/base/message_loop/message_loop.cc
@@ -124,8 +124,8 @@ MessageLoop::~MessageLoop() {
DCHECK(!did_work);
// Let interested parties have one last shot at accessing this.
- FOR_EACH_OBSERVER(DestructionObserver, destruction_observers_,
- WillDestroyCurrentMessageLoop());
+ for (auto& observer : destruction_observers_)
+ observer.WillDestroyCurrentMessageLoop();
thread_task_runner_handle_.reset();
@@ -216,11 +216,13 @@ void MessageLoop::RemoveDestructionObserver(
void MessageLoop::AddNestingObserver(NestingObserver* observer) {
DCHECK_EQ(this, current());
+ CHECK(allow_nesting_);
nesting_observers_.AddObserver(observer);
}
void MessageLoop::RemoveNestingObserver(NestingObserver* observer) {
DCHECK_EQ(this, current());
+ CHECK(allow_nesting_);
nesting_observers_.RemoveObserver(observer);
}
@@ -257,6 +259,8 @@ Closure MessageLoop::QuitWhenIdleClosure() {
void MessageLoop::SetNestableTasksAllowed(bool allowed) {
if (allowed) {
+ CHECK(allow_nesting_);
+
// Kick the native pump just in case we enter a OS-driven nested message
// loop.
pump_->ScheduleWork();
@@ -274,11 +278,13 @@ bool MessageLoop::IsNested() {
void MessageLoop::AddTaskObserver(TaskObserver* task_observer) {
DCHECK_EQ(this, current());
+ CHECK(allow_task_observers_);
task_observers_.AddObserver(task_observer);
}
void MessageLoop::RemoveTaskObserver(TaskObserver* task_observer) {
DCHECK_EQ(this, current());
+ CHECK(allow_task_observers_);
task_observers_.RemoveObserver(task_observer);
}
@@ -367,6 +373,8 @@ void MessageLoop::SetThreadTaskRunnerHandle() {
void MessageLoop::RunHandler() {
DCHECK_EQ(this, current());
+ DCHECK(run_loop_);
+ CHECK(allow_nesting_ || run_loop_->run_depth_ == 1);
pump_->Run(this);
}
@@ -381,15 +389,15 @@ bool MessageLoop::ProcessNextDelayedNonNestableTask() {
std::move(deferred_non_nestable_work_queue_.front());
deferred_non_nestable_work_queue_.pop();
- RunTask(pending_task);
+ RunTask(&pending_task);
return true;
}
-void MessageLoop::RunTask(const PendingTask& pending_task) {
+void MessageLoop::RunTask(PendingTask* pending_task) {
DCHECK(nestable_tasks_allowed_);
#if defined(OS_WIN)
- if (pending_task.is_high_res) {
+ if (pending_task->is_high_res) {
pending_high_res_tasks_--;
CHECK_GE(pending_high_res_tasks_, 0);
}
@@ -398,20 +406,20 @@ void MessageLoop::RunTask(const PendingTask& pending_task) {
// Execute the task and assume the worst: It is probably not reentrant.
nestable_tasks_allowed_ = false;
- TRACE_TASK_EXECUTION("MessageLoop::RunTask", pending_task);
+ TRACE_TASK_EXECUTION("MessageLoop::RunTask", *pending_task);
- FOR_EACH_OBSERVER(TaskObserver, task_observers_,
- WillProcessTask(pending_task));
+ for (auto& observer : task_observers_)
+ observer.WillProcessTask(*pending_task);
task_annotator_.RunTask("MessageLoop::PostTask", pending_task);
- FOR_EACH_OBSERVER(TaskObserver, task_observers_,
- DidProcessTask(pending_task));
+ for (auto& observer : task_observers_)
+ observer.DidProcessTask(*pending_task);
nestable_tasks_allowed_ = true;
}
bool MessageLoop::DeferOrRunPendingTask(PendingTask pending_task) {
if (pending_task.nestable || run_loop_->run_depth_ == 1) {
- RunTask(pending_task);
+ RunTask(&pending_task);
// Show that we ran a task (Note: a new one might arrive as a
// consequence!).
return true;
@@ -476,15 +484,9 @@ void MessageLoop::ScheduleWork() {
pump_->ScheduleWork();
}
-#if defined(OS_WIN)
-bool MessageLoop::MessagePumpWasSignaled() {
- return pump_->WasSignaled();
-}
-#endif
-
void MessageLoop::NotifyBeginNestedLoop() {
- FOR_EACH_OBSERVER(NestingObserver, nesting_observers_,
- OnBeginNestedMessageLoop());
+ for (auto& observer : nesting_observers_)
+ observer.OnBeginNestedMessageLoop();
}
bool MessageLoop::DoWork() {
diff --git a/chromium/base/message_loop/message_loop.h b/chromium/base/message_loop/message_loop.h
index 5b1728e4393..ac7a3035636 100644
--- a/chromium/base/message_loop/message_loop.h
+++ b/chromium/base/message_loop/message_loop.h
@@ -314,16 +314,15 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate {
debug::TaskAnnotator* task_annotator() { return &task_annotator_; }
// Runs the specified PendingTask.
- void RunTask(const PendingTask& pending_task);
+ void RunTask(PendingTask* pending_task);
-#if defined(OS_WIN)
- // TODO (stanisc): crbug.com/596190: Remove this after the signaling issue
- // has been investigated.
- // This should be used for diagnostic only. If message pump wake-up mechanism
- // is based on auto-reset event this call would reset the event to unset
- // state.
- bool MessagePumpWasSignaled();
-#endif
+ // Disallow nesting. After this is called, running a nested RunLoop or calling
+ // Add/RemoveNestingObserver() on this MessageLoop will crash.
+ void DisallowNesting() { allow_nesting_ = false; }
+
+ // Disallow task observers. After this is called, calling
+ // Add/RemoveTaskObserver() on this MessageLoop will crash.
+ void DisallowTaskObservers() { allow_task_observers_ = false; }
//----------------------------------------------------------------------------
protected:
@@ -460,6 +459,12 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate {
// MessageLoop is bound to its thread and constant forever after.
PlatformThreadId thread_id_;
+ // Whether nesting is allowed.
+ bool allow_nesting_ = true;
+
+ // Whether task observers are allowed.
+ bool allow_task_observers_ = true;
+
DISALLOW_COPY_AND_ASSIGN(MessageLoop);
};
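
The two new switches above turn implicit single-thread assumptions into hard
CHECKs. A minimal sketch of a call site (hypothetical, not part of this
patch):

    MessageLoop loop;
    loop.DisallowNesting();        // a nested RunLoop now CHECKs in RunHandler()
    loop.DisallowTaskObservers();  // Add/RemoveTaskObserver() now CHECKs
    RunLoop run_loop;
    run_loop.Run();                // fine: run_depth_ == 1 satisfies the CHECK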
diff --git a/chromium/base/message_loop/message_pump.cc b/chromium/base/message_loop/message_pump.cc
index 2f740f24239..3d85b9b5643 100644
--- a/chromium/base/message_loop/message_pump.cc
+++ b/chromium/base/message_loop/message_pump.cc
@@ -15,11 +15,4 @@ MessagePump::~MessagePump() {
void MessagePump::SetTimerSlack(TimerSlack) {
}
-#if defined(OS_WIN)
-bool MessagePump::WasSignaled() {
- NOTREACHED();
- return false;
-}
-#endif
-
} // namespace base
diff --git a/chromium/base/message_loop/message_pump.h b/chromium/base/message_loop/message_pump.h
index af8ed41f278..c53be804109 100644
--- a/chromium/base/message_loop/message_pump.h
+++ b/chromium/base/message_loop/message_pump.h
@@ -124,15 +124,6 @@ class BASE_EXPORT MessagePump : public NonThreadSafe {
// Sets the timer slack to the specified value.
virtual void SetTimerSlack(TimerSlack timer_slack);
-
-#if defined(OS_WIN)
- // TODO (stanisc): crbug.com/596190: Remove this after the signaling issue
- // has been investigated.
- // This should be used for diagnostic only. If message pump wake-up mechanism
- // is based on auto-reset event this call would reset the event to unset
- // state.
- virtual bool WasSignaled();
-#endif
};
} // namespace base
diff --git a/chromium/base/message_loop/message_pump_io_ios.h b/chromium/base/message_loop/message_pump_io_ios.h
index 6c2a6c37a86..bb6cd504d16 100644
--- a/chromium/base/message_loop/message_pump_io_ios.h
+++ b/chromium/base/message_loop/message_pump_io_ios.h
@@ -106,9 +106,6 @@ class BASE_EXPORT MessagePumpIOSForIO : public MessagePumpNSRunLoop {
private:
friend class MessagePumpIOSForIOTest;
- void WillProcessIOEvent();
- void DidProcessIOEvent();
-
static void HandleFdIOEvent(CFFileDescriptorRef fdref,
CFOptionFlags callback_types,
void* context);
diff --git a/chromium/base/message_loop/message_pump_libevent.cc b/chromium/base/message_loop/message_pump_libevent.cc
index fef01da9a65..bc7f14db99a 100644
--- a/chromium/base/message_loop/message_pump_libevent.cc
+++ b/chromium/base/message_loop/message_pump_libevent.cc
@@ -73,21 +73,22 @@ bool MessagePumpLibevent::FileDescriptorWatcher::StopWatchingFileDescriptor() {
return (rv == 0);
}
-void MessagePumpLibevent::FileDescriptorWatcher::Init(event *e) {
+void MessagePumpLibevent::FileDescriptorWatcher::Init(event* e) {
DCHECK(e);
DCHECK(!event_);
event_ = e;
}
-event *MessagePumpLibevent::FileDescriptorWatcher::ReleaseEvent() {
- struct event *e = event_;
+event* MessagePumpLibevent::FileDescriptorWatcher::ReleaseEvent() {
+ struct event* e = event_;
event_ = NULL;
return e;
}
void MessagePumpLibevent::FileDescriptorWatcher::OnFileCanReadWithoutBlocking(
- int fd, MessagePumpLibevent* pump) {
+ int fd,
+ MessagePumpLibevent* pump) {
// Since OnFileCanWriteWithoutBlocking() gets called first, it can stop
// watching the file descriptor.
if (!watcher_)
@@ -96,7 +97,8 @@ void MessagePumpLibevent::FileDescriptorWatcher::OnFileCanReadWithoutBlocking(
}
void MessagePumpLibevent::FileDescriptorWatcher::OnFileCanWriteWithoutBlocking(
- int fd, MessagePumpLibevent* pump) {
+ int fd,
+ MessagePumpLibevent* pump) {
DCHECK(watcher_);
watcher_->OnFileCanWriteWithoutBlocking(fd);
}
@@ -109,7 +111,7 @@ MessagePumpLibevent::MessagePumpLibevent()
wakeup_pipe_in_(-1),
wakeup_pipe_out_(-1) {
if (!Init())
- NOTREACHED();
+ NOTREACHED();
}
MessagePumpLibevent::~MessagePumpLibevent() {
@@ -131,8 +133,8 @@ MessagePumpLibevent::~MessagePumpLibevent() {
bool MessagePumpLibevent::WatchFileDescriptor(int fd,
bool persistent,
int mode,
- FileDescriptorWatcher *controller,
- Watcher *delegate) {
+ FileDescriptorWatcher* controller,
+ Watcher* delegate) {
DCHECK_GE(fd, 0);
DCHECK(controller);
DCHECK(delegate);
@@ -195,9 +197,8 @@ bool MessagePumpLibevent::WatchFileDescriptor(int fd,
}
// Tell libevent to break out of inner loop.
-static void timer_callback(int fd, short events, void *context)
-{
- event_base_loopbreak((struct event_base *)context);
+static void timer_callback(int fd, short events, void* context) {
+ event_base_loopbreak((struct event_base*)context);
}
// Reentrant!
@@ -290,16 +291,8 @@ void MessagePumpLibevent::ScheduleDelayedWork(
bool MessagePumpLibevent::Init() {
int fds[2];
- if (pipe(fds)) {
- DLOG(ERROR) << "pipe() failed, errno: " << errno;
- return false;
- }
- if (!SetNonBlocking(fds[0])) {
- DLOG(ERROR) << "SetNonBlocking for pipe fd[0] failed, errno: " << errno;
- return false;
- }
- if (!SetNonBlocking(fds[1])) {
- DLOG(ERROR) << "SetNonBlocking for pipe fd[1] failed, errno: " << errno;
+ if (!CreateLocalNonBlockingPipe(fds)) {
+ DPLOG(ERROR) << "pipe creation failed";
return false;
}
wakeup_pipe_out_ = fds[0];
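
The three-step pipe setup above is folded into a single
CreateLocalNonBlockingPipe() call, and DPLOG (unlike DLOG) appends the errno
description automatically. A rough POSIX sketch of what such a helper must
guarantee, assuming pipe2() is available on Linux (the actual base/
implementation may differ):

    #include <fcntl.h>
    #include <unistd.h>

    bool CreateLocalNonBlockingPipeSketch(int fds[2]) {
    #if defined(__linux__)
      // pipe2() sets both flags atomically at creation time.
      return pipe2(fds, O_NONBLOCK | O_CLOEXEC) == 0;
    #else
      if (pipe(fds) != 0)
        return false;
      for (int i = 0; i < 2; ++i) {
        // Both ends non-blocking (so a full pipe never stalls ScheduleWork())
        // and close-on-exec (so children don't inherit the wakeup pipe).
        if (fcntl(fds[i], F_SETFL, O_NONBLOCK) == -1 ||
            fcntl(fds[i], F_SETFD, FD_CLOEXEC) == -1)
          return false;
      }
      return true;
    #endif
    }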
diff --git a/chromium/base/message_loop/message_pump_libevent.h b/chromium/base/message_loop/message_pump_libevent.h
index 76f882f680e..752dc929a04 100644
--- a/chromium/base/message_loop/message_pump_libevent.h
+++ b/chromium/base/message_loop/message_pump_libevent.h
@@ -100,8 +100,8 @@ class BASE_EXPORT MessagePumpLibevent : public MessagePump {
bool WatchFileDescriptor(int fd,
bool persistent,
int mode,
- FileDescriptorWatcher *controller,
- Watcher *delegate);
+ FileDescriptorWatcher* controller,
+ Watcher* delegate);
// MessagePump methods:
void Run(Delegate* delegate) override;
@@ -112,15 +112,11 @@ class BASE_EXPORT MessagePumpLibevent : public MessagePump {
private:
friend class MessagePumpLibeventTest;
- void WillProcessIOEvent();
- void DidProcessIOEvent();
-
// Risky part of constructor. Returns true on success.
bool Init();
// Called by libevent to tell us a registered FD can be read/written to.
- static void OnLibeventNotification(int fd, short flags,
- void* context);
+ static void OnLibeventNotification(int fd, short flags, void* context);
// Unix pipe used to implement ScheduleWork()
// ... callback; called by libevent inside Run() when pipe is ready to read
diff --git a/chromium/base/message_loop/message_pump_mac.mm b/chromium/base/message_loop/message_pump_mac.mm
index b50ea687881..d924ead7cbd 100644
--- a/chromium/base/message_loop/message_pump_mac.mm
+++ b/chromium/base/message_loop/message_pump_mac.mm
@@ -4,7 +4,6 @@
#import "base/message_loop/message_pump_mac.h"
-#include <dlfcn.h>
#import <Foundation/Foundation.h>
#include <limits>
@@ -72,33 +71,6 @@ const CFTimeInterval kCFTimeIntervalMax =
bool g_not_using_cr_app = false;
#endif
-// Call through to CFRunLoopTimerSetTolerance(), which is only available on
-// OS X 10.9.
-void SetTimerTolerance(CFRunLoopTimerRef timer, CFTimeInterval tolerance) {
- typedef void (*CFRunLoopTimerSetTolerancePtr)(CFRunLoopTimerRef timer,
- CFTimeInterval tolerance);
-
- static CFRunLoopTimerSetTolerancePtr settimertolerance_function_ptr;
-
- static dispatch_once_t get_timer_tolerance_function_ptr_once;
- dispatch_once(&get_timer_tolerance_function_ptr_once, ^{
- NSBundle* bundle =[NSBundle
- bundleWithPath:@"/System/Library/Frameworks/CoreFoundation.framework"];
- const char* path = [[bundle executablePath] fileSystemRepresentation];
- CHECK(path);
- void* library_handle = dlopen(path, RTLD_LAZY | RTLD_LOCAL);
- CHECK(library_handle) << dlerror();
- settimertolerance_function_ptr =
- reinterpret_cast<CFRunLoopTimerSetTolerancePtr>(
- dlsym(library_handle, "CFRunLoopTimerSetTolerance"));
-
- dlclose(library_handle);
- });
-
- if (settimertolerance_function_ptr)
- settimertolerance_function_ptr(timer, tolerance);
-}
-
} // namespace
// static
@@ -273,9 +245,9 @@ void MessagePumpCFRunLoopBase::ScheduleDelayedWork(
delayed_work_fire_time_ = CFAbsoluteTimeGetCurrent() + delta.InSecondsF();
CFRunLoopTimerSetNextFireDate(delayed_work_timer_, delayed_work_fire_time_);
if (timer_slack_ == TIMER_SLACK_MAXIMUM) {
- SetTimerTolerance(delayed_work_timer_, delta.InSecondsF() * 0.5);
+ CFRunLoopTimerSetTolerance(delayed_work_timer_, delta.InSecondsF() * 0.5);
} else {
- SetTimerTolerance(delayed_work_timer_, 0);
+ CFRunLoopTimerSetTolerance(delayed_work_timer_, 0);
}
}
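
With the dlopen()/dlsym() shim removed, the pump calls
CFRunLoopTimerSetTolerance() directly; the deleted wrapper existed only
because the symbol was 10.9+. For illustration only, a standalone timer with
50 ms of slack (hypothetical, not part of this patch):

    // Repeating 100 ms timer; the OS may coalesce wakeups within the
    // tolerance to save power.
    CFRunLoopTimerRef timer = CFRunLoopTimerCreateWithHandler(
        kCFAllocatorDefault, CFAbsoluteTimeGetCurrent() + 0.1,
        /*interval=*/0.1, /*flags=*/0, /*order=*/0,
        ^(CFRunLoopTimerRef t) { /* periodic work */ });
    CFRunLoopTimerSetTolerance(timer, 0.05);  // up to 50 ms of slack
    CFRunLoopAddTimer(CFRunLoopGetCurrent(), timer, kCFRunLoopDefaultMode);
    CFRelease(timer);  // the run loop retains it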
diff --git a/chromium/base/message_loop/message_pump_perftest.cc b/chromium/base/message_loop/message_pump_perftest.cc
index 3baa6147fe1..04a98c23e55 100644
--- a/chromium/base/message_loop/message_pump_perftest.cc
+++ b/chromium/base/message_loop/message_pump_perftest.cc
@@ -272,7 +272,7 @@ class PostTaskTest : public testing::Test {
while (!loop_local_queue.empty()) {
PendingTask t = std::move(loop_local_queue.front());
loop_local_queue.pop();
- loop.RunTask(t);
+ loop.RunTask(&t);
}
}
diff --git a/chromium/base/message_loop/message_pump_win.cc b/chromium/base/message_loop/message_pump_win.cc
index b9b2c8460c0..f1ec727e7c6 100644
--- a/chromium/base/message_loop/message_pump_win.cc
+++ b/chromium/base/message_loop/message_pump_win.cc
@@ -29,101 +29,16 @@ enum MessageLoopProblems {
MESSAGE_LOOP_PROBLEM_MAX,
};
-// The following define pointers to user32 API's for the API's which are used
-// in this file. These are added to avoid directly depending on user32 from
-// base as there are users of base who don't want this.
-decltype(::TranslateMessage)* g_translate_message = nullptr;
-decltype(::DispatchMessageW)* g_dispatch_message = nullptr;
-decltype(::PeekMessageW)* g_peek_message = nullptr;
-decltype(::PostMessageW)* g_post_message = nullptr;
-decltype(::DefWindowProcW)* g_def_window_proc = nullptr;
-decltype(::PostQuitMessage)* g_post_quit = nullptr;
-decltype(::UnregisterClassW)* g_unregister_class = nullptr;
-decltype(::RegisterClassExW)* g_register_class = nullptr;
-decltype(::CreateWindowExW)* g_create_window_ex = nullptr;
-decltype(::DestroyWindow)* g_destroy_window = nullptr;
-decltype(::CallMsgFilterW)* g_call_msg_filter = nullptr;
-decltype(::GetQueueStatus)* g_get_queue_status = nullptr;
-decltype(::MsgWaitForMultipleObjectsEx)* g_msg_wait_for_multiple_objects_ex =
- nullptr;
-decltype(::SetTimer)* g_set_timer = nullptr;
-decltype(::KillTimer)* g_kill_timer = nullptr;
-
-#define GET_USER32_API(module, name) \
- reinterpret_cast<decltype(name)*>(::GetProcAddress(module, #name))
-
-// Initializes the global pointers to user32 APIs for the API's used in this
-// file.
-void InitUser32APIs() {
- if (g_translate_message)
- return;
-
- HMODULE user32_module = ::GetModuleHandle(L"user32.dll");
- CHECK(user32_module);
-
- g_translate_message = GET_USER32_API(user32_module, TranslateMessage);
- CHECK(g_translate_message);
-
- g_dispatch_message = GET_USER32_API(user32_module, DispatchMessageW);
- CHECK(g_dispatch_message);
-
- g_peek_message = GET_USER32_API(user32_module, PeekMessageW);
- CHECK(g_peek_message);
-
- g_post_message = GET_USER32_API(user32_module, PostMessageW);
- CHECK(g_post_message);
-
- g_def_window_proc = GET_USER32_API(user32_module, DefWindowProcW);
- CHECK(g_def_window_proc);
-
- g_post_quit = GET_USER32_API(user32_module, PostQuitMessage);
- CHECK(g_post_quit);
-
- g_unregister_class = GET_USER32_API(user32_module, UnregisterClassW);
- CHECK(g_unregister_class);
-
- g_register_class = GET_USER32_API(user32_module, RegisterClassExW);
- CHECK(g_register_class);
-
- g_create_window_ex = GET_USER32_API(user32_module, CreateWindowExW);
- CHECK(g_create_window_ex);
-
- g_destroy_window = GET_USER32_API(user32_module, DestroyWindow);
- CHECK(g_destroy_window);
-
- g_call_msg_filter = GET_USER32_API(user32_module, CallMsgFilterW);
- CHECK(g_call_msg_filter);
-
- g_get_queue_status = GET_USER32_API(user32_module, GetQueueStatus);
- CHECK(g_get_queue_status);
-
- g_msg_wait_for_multiple_objects_ex =
- GET_USER32_API(user32_module, MsgWaitForMultipleObjectsEx);
- CHECK(g_msg_wait_for_multiple_objects_ex);
-
- g_set_timer = GET_USER32_API(user32_module, SetTimer);
- CHECK(g_set_timer);
-
- g_kill_timer = GET_USER32_API(user32_module, KillTimer);
- CHECK(g_kill_timer);
-}
-
} // namespace
-static const wchar_t kWndClassFormat[] = L"Chrome_MessagePumpWindow_%p";
-
// Message sent to get an additional time slice for pumping (processing) another
// task (a series of such messages creates a continuous task pump).
static const int kMsgHaveWork = WM_USER + 1;
-// The application-defined code passed to the hook procedure.
-static const int kMessageFilterCode = 0x5001;
-
//-----------------------------------------------------------------------------
// MessagePumpWin public:
-MessagePumpWin::MessagePumpWin() {
-}
+MessagePumpWin::MessagePumpWin() = default;
void MessagePumpWin::Run(Delegate* delegate) {
RunState s;
@@ -173,24 +88,20 @@ int MessagePumpWin::GetCurrentDelay() const {
//-----------------------------------------------------------------------------
// MessagePumpForUI public:
-MessagePumpForUI::MessagePumpForUI()
- : atom_(0) {
- InitUser32APIs();
- InitMessageWnd();
+MessagePumpForUI::MessagePumpForUI() {
+ bool succeeded = message_window_.Create(
+ Bind(&MessagePumpForUI::MessageCallback, Unretained(this)));
+ DCHECK(succeeded);
}
-MessagePumpForUI::~MessagePumpForUI() {
- g_destroy_window(message_hwnd_);
- g_unregister_class(MAKEINTATOM(atom_), CURRENT_MODULE());
-}
+MessagePumpForUI::~MessagePumpForUI() = default;
void MessagePumpForUI::ScheduleWork() {
if (InterlockedExchange(&work_state_, HAVE_WORK) != READY)
return; // Someone else continued the pumping.
// Make sure the MessagePump does some work for us.
- BOOL ret = g_post_message(message_hwnd_, kMsgHaveWork,
- reinterpret_cast<WPARAM>(this), 0);
+ BOOL ret = PostMessage(message_window_.hwnd(), kMsgHaveWork, 0, 0);
if (ret)
return; // There was room in the Window Message queue.
@@ -219,18 +130,17 @@ void MessagePumpForUI::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
//-----------------------------------------------------------------------------
// MessagePumpForUI private:
-// static
-LRESULT CALLBACK MessagePumpForUI::WndProcThunk(
- HWND hwnd, UINT message, WPARAM wparam, LPARAM lparam) {
+bool MessagePumpForUI::MessageCallback(
+ UINT message, WPARAM wparam, LPARAM lparam, LRESULT* result) {
switch (message) {
case kMsgHaveWork:
- reinterpret_cast<MessagePumpForUI*>(wparam)->HandleWorkMessage();
+ HandleWorkMessage();
break;
case WM_TIMER:
- reinterpret_cast<MessagePumpForUI*>(wparam)->HandleTimerMessage();
+ HandleTimerMessage();
break;
}
- return g_def_window_proc(hwnd, message, wparam, lparam);
+ return false;
}
void MessagePumpForUI::DoRunLoop() {
@@ -271,7 +181,7 @@ void MessagePumpForUI::DoRunLoop() {
// don't want to disturb that timer if it is already in flight. However,
// if we did do all remaining delayed work, then lets kill the WM_TIMER.
if (more_work_is_plausible && delayed_work_time_.is_null())
- g_kill_timer(message_hwnd_, reinterpret_cast<UINT_PTR>(this));
+ KillTimer(message_window_.hwnd(), reinterpret_cast<UINT_PTR>(this));
if (state_->should_quit)
break;
@@ -289,24 +199,6 @@ void MessagePumpForUI::DoRunLoop() {
}
}
-void MessagePumpForUI::InitMessageWnd() {
- // Generate a unique window class name.
- string16 class_name = StringPrintf(kWndClassFormat, this);
-
- HINSTANCE instance = CURRENT_MODULE();
- WNDCLASSEX wc = {0};
- wc.cbSize = sizeof(wc);
- wc.lpfnWndProc = base::win::WrappedWindowProc<WndProcThunk>;
- wc.hInstance = instance;
- wc.lpszClassName = class_name.c_str();
- atom_ = g_register_class(&wc);
- DCHECK(atom_);
-
- message_hwnd_ = g_create_window_ex(0, MAKEINTATOM(atom_), 0, 0, 0, 0, 0, 0,
- HWND_MESSAGE, 0, instance, 0);
- DCHECK(message_hwnd_);
-}
-
void MessagePumpForUI::WaitForWork() {
// Wait until a message is available, up to the time needed by the timer
// manager to fire the next set of timers.
@@ -317,8 +209,8 @@ void MessagePumpForUI::WaitForWork() {
if (delay < 0) // Negative value means no timers waiting.
delay = INFINITE;
- DWORD result = g_msg_wait_for_multiple_objects_ex(0, nullptr, delay,
- QS_ALLINPUT, wait_flags);
+ DWORD result = MsgWaitForMultipleObjectsEx(0, nullptr, delay, QS_ALLINPUT,
+ wait_flags);
if (WAIT_OBJECT_0 == result) {
// A WM_* message is available.
@@ -336,9 +228,9 @@ void MessagePumpForUI::WaitForWork() {
// current thread.
MSG msg = {0};
bool has_pending_sent_message =
- (HIWORD(g_get_queue_status(QS_SENDMESSAGE)) & QS_SENDMESSAGE) != 0;
+ (HIWORD(GetQueueStatus(QS_SENDMESSAGE)) & QS_SENDMESSAGE) != 0;
if (has_pending_sent_message ||
- g_peek_message(&msg, nullptr, 0, 0, PM_NOREMOVE)) {
+ PeekMessage(&msg, nullptr, 0, 0, PM_NOREMOVE)) {
return;
}
@@ -376,7 +268,7 @@ void MessagePumpForUI::HandleWorkMessage() {
}
void MessagePumpForUI::HandleTimerMessage() {
- g_kill_timer(message_hwnd_, reinterpret_cast<UINT_PTR>(this));
+ KillTimer(message_window_.hwnd(), reinterpret_cast<UINT_PTR>(this));
// If we are being called outside of the context of Run, then don't do
// anything. This could correspond to a MessageBox call or something of
@@ -421,8 +313,7 @@ void MessagePumpForUI::RescheduleTimer() {
// Create a WM_TIMER event that will wake us up to check for any pending
// timers (in case we are running within a nested, external sub-pump).
- BOOL ret = g_set_timer(message_hwnd_, reinterpret_cast<UINT_PTR>(this),
- delay_msec, nullptr);
+ BOOL ret = SetTimer(message_window_.hwnd(), 0, delay_msec, nullptr);
if (ret)
return;
// If we can't set timers, we are in big trouble... but cross our fingers
@@ -439,12 +330,12 @@ bool MessagePumpForUI::ProcessNextWindowsMessage() {
// case to ensure that the message loop peeks again instead of calling
// MsgWaitForMultipleObjectsEx again.
bool sent_messages_in_queue = false;
- DWORD queue_status = g_get_queue_status(QS_SENDMESSAGE);
+ DWORD queue_status = GetQueueStatus(QS_SENDMESSAGE);
if (HIWORD(queue_status) & QS_SENDMESSAGE)
sent_messages_in_queue = true;
MSG msg;
- if (g_peek_message(&msg, nullptr, 0, 0, PM_REMOVE) != FALSE)
+ if (PeekMessage(&msg, nullptr, 0, 0, PM_REMOVE) != FALSE)
return ProcessMessageHelper(msg);
return sent_messages_in_queue;
@@ -460,19 +351,16 @@ bool MessagePumpForUI::ProcessMessageHelper(const MSG& msg) {
// Repost the QUIT message so that it will be retrieved by the primary
// GetMessage() loop.
state_->should_quit = true;
- g_post_quit(static_cast<int>(msg.wParam));
+ PostQuitMessage(static_cast<int>(msg.wParam));
return false;
}
// While running our main message pump, we discard kMsgHaveWork messages.
- if (msg.message == kMsgHaveWork && msg.hwnd == message_hwnd_)
+ if (msg.message == kMsgHaveWork && msg.hwnd == message_window_.hwnd())
return ProcessPumpReplacementMessage();
- if (g_call_msg_filter(const_cast<MSG*>(&msg), kMessageFilterCode))
- return true;
-
- g_translate_message(&msg);
- g_dispatch_message(&msg);
+ TranslateMessage(&msg);
+ DispatchMessage(&msg);
return true;
}
@@ -489,11 +377,11 @@ bool MessagePumpForUI::ProcessPumpReplacementMessage() {
MSG msg;
const bool have_message =
- g_peek_message(&msg, nullptr, 0, 0, PM_REMOVE) != FALSE;
+ PeekMessage(&msg, nullptr, 0, 0, PM_REMOVE) != FALSE;
// Expect no message or a message different than kMsgHaveWork.
DCHECK(!have_message || kMsgHaveWork != msg.message ||
- msg.hwnd != message_hwnd_);
+ msg.hwnd != message_window_.hwnd());
// Since we discarded a kMsgHaveWork message, we must update the flag.
int old_work_state_ = InterlockedExchange(&work_state_, READY);
@@ -516,10 +404,9 @@ bool MessagePumpForUI::ProcessPumpReplacementMessage() {
MessagePumpForGpu::MessagePumpForGpu() {
event_.Set(CreateEvent(nullptr, FALSE, FALSE, nullptr));
- InitUser32APIs();
}
-MessagePumpForGpu::~MessagePumpForGpu() {}
+MessagePumpForGpu::~MessagePumpForGpu() = default;
// static
void MessagePumpForGpu::InitFactory() {
@@ -553,11 +440,6 @@ void MessagePumpForGpu::ScheduleDelayedWork(
delayed_work_time_ = delayed_work_time;
}
-bool MessagePumpForGpu::WasSignaled() {
- // If |event_| was set this would reset it back to unset state.
- return WaitForSingleObject(event_.Get(), 0) == WAIT_OBJECT_0;
-}
-
//-----------------------------------------------------------------------------
// MessagePumpForGpu private:
@@ -625,7 +507,7 @@ void MessagePumpForGpu::WaitForWork() {
HANDLE handle = event_.Get();
DWORD result =
- g_msg_wait_for_multiple_objects_ex(1, &handle, delay, QS_ALLINPUT, 0);
+ MsgWaitForMultipleObjectsEx(1, &handle, delay, QS_ALLINPUT, 0);
DCHECK_NE(WAIT_FAILED, result) << GetLastError();
if (result != WAIT_TIMEOUT) {
// Either work or message available.
@@ -636,7 +518,7 @@ void MessagePumpForGpu::WaitForWork() {
bool MessagePumpForGpu::ProcessNextMessage() {
MSG msg;
- if (!g_peek_message(&msg, nullptr, 0, 0, PM_REMOVE))
+ if (!PeekMessage(&msg, nullptr, 0, 0, PM_REMOVE))
return false;
if (msg.message == WM_QUIT) {
@@ -648,10 +530,8 @@ bool MessagePumpForGpu::ProcessNextMessage() {
return true;
}
- if (!g_call_msg_filter(const_cast<MSG*>(&msg), kMessageFilterCode)) {
- g_translate_message(&msg);
- g_dispatch_message(&msg);
- }
+ TranslateMessage(&msg);
+ DispatchMessage(&msg);
return true;
}
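
base::win::MessageWindow replaces the hand-rolled window-class registration
and message-only HWND. Its callback contract: return true after fully
handling a message (writing |*result| as needed), or false to fall through to
DefWindowProc, which is why MessageCallback() above returns false even after
dispatching kMsgHaveWork and WM_TIMER. A minimal hypothetical use with the
same signature:

    bool HandleMessage(UINT message, WPARAM wparam, LPARAM lparam,
                       LRESULT* result) {
      if (message == WM_USER + 1) {
        // Private wake-up message: do the work here.
        return true;   // handled; DefWindowProc is skipped
      }
      return false;    // everything else falls through to DefWindowProc
    }

    base::win::MessageWindow window;
    bool created = window.Create(base::Bind(&HandleMessage));
    // On success, window.hwnd() can be targeted with PostMessage().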
diff --git a/chromium/base/message_loop/message_pump_win.h b/chromium/base/message_loop/message_pump_win.h
index b5f96b89239..f6683e7c547 100644
--- a/chromium/base/message_loop/message_pump_win.h
+++ b/chromium/base/message_loop/message_pump_win.h
@@ -13,6 +13,7 @@
#include "base/base_export.h"
#include "base/message_loop/message_pump.h"
#include "base/time/time.h"
+#include "base/win/message_window.h"
#include "base/win/scoped_handle.h"
namespace base {
@@ -124,12 +125,9 @@ class BASE_EXPORT MessagePumpForUI : public MessagePumpWin {
void ScheduleDelayedWork(const TimeTicks& delayed_work_time) override;
private:
- static LRESULT CALLBACK WndProcThunk(HWND window_handle,
- UINT message,
- WPARAM wparam,
- LPARAM lparam);
+ bool MessageCallback(
+ UINT message, WPARAM wparam, LPARAM lparam, LRESULT* result);
void DoRunLoop() override;
- void InitMessageWnd();
void WaitForWork();
void HandleWorkMessage();
void HandleTimerMessage();
@@ -138,11 +136,7 @@ class BASE_EXPORT MessagePumpForUI : public MessagePumpWin {
bool ProcessMessageHelper(const MSG& msg);
bool ProcessPumpReplacementMessage();
- // Atom representing the registered window class.
- ATOM atom_;
-
- // A hidden message-only window.
- HWND message_hwnd_;
+ base::win::MessageWindow message_window_;
};
//-----------------------------------------------------------------------------
@@ -164,13 +158,6 @@ class BASE_EXPORT MessagePumpForGpu : public MessagePumpWin {
void ScheduleWork() override;
void ScheduleDelayedWork(const TimeTicks& delayed_work_time) override;
- // TODO (stanisc): crbug.com/596190: Remove this after the signaling issue
- // has been investigated.
- // This should be used for diagnostic only. If message pump wake-up mechanism
- // is based on auto-reset event this call would reset the event to unset
- // state.
- bool WasSignaled() override;
-
private:
// MessagePumpWin methods:
void DoRunLoop() override;
diff --git a/chromium/base/metrics/field_trial.cc b/chromium/base/metrics/field_trial.cc
index 45ee22d0b5d..c3c353dd67c 100644
--- a/chromium/base/metrics/field_trial.cc
+++ b/chromium/base/metrics/field_trial.cc
@@ -7,8 +7,14 @@
#include <algorithm>
#include <utility>
+#include "base/base_switches.h"
#include "base/build_time.h"
+#include "base/command_line.h"
+#include "base/debug/alias.h"
+#include "base/feature_list.h"
#include "base/logging.h"
+#include "base/pickle.h"
+#include "base/process/memory.h"
#include "base/rand_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
@@ -28,6 +34,59 @@ const char kPersistentStringSeparator = '/'; // Currently a slash.
// command line which forces its activation.
const char kActivationMarker = '*';
+// Use shared memory to communicate field trial (experiment) state. Set to false
+// for now while the implementation is fleshed out (e.g. data format, single
+// shared memory segment). See https://codereview.chromium.org/2365273004/ and
+// crbug.com/653874
+const bool kUseSharedMemoryForFieldTrials = false;
+
+// Constants for the field trial allocator.
+const char kAllocatorName[] = "FieldTrialAllocator";
+const uint32_t kFieldTrialType = 0xABA17E13 + 2; // SHA1(FieldTrialEntry) v2
+
+// We allocate 64 KiB to hold all the field trial data. This should be enough,
+// as currently we use ~8KiB for the field trials, and ~10KiB for experiment
+// parameters (as of 9/11/2016). This also doesn't allocate all 64 KiB at once
+// -- the pages only get mapped to physical memory when they are touched. If the
+// size of the allocated field trials does get larger than 64 KiB, then we will
+// drop some field trials in child processes, leading to an inconsistent view
+// between browser and child processes and possibly causing crashes (see
+// crbug.com/661617).
+#if !defined(OS_NACL)
+const size_t kFieldTrialAllocationSize = 64 << 10; // 64 KiB
+#endif
+
+// We create one FieldTrialEntry per field trial in shared memory, via
+// AddToAllocatorWhileLocked. The FieldTrialEntry is followed by a base::Pickle
+// object that we unpickle and read from. Any change to this structure requires
+// a bump in the kFieldTrialType id defined above.
+struct FieldTrialEntry {
+ // Whether or not this field trial is activated. This is really just a boolean
+ // but marked as a uint32_t for portability reasons.
+ uint32_t activated;
+
+ // Size of the pickled structure, NOT the total size of this entry.
+ uint32_t size;
+
+ // Calling this is only valid when the entry is initialized. That is, it
+ // resides in shared memory and has a pickle containing the trial name and
+ // group name following it.
+ bool GetTrialAndGroupName(StringPiece* trial_name,
+ StringPiece* group_name) const {
+ char* src = reinterpret_cast<char*>(const_cast<FieldTrialEntry*>(this)) +
+ sizeof(FieldTrialEntry);
+
+ Pickle pickle(src, size);
+ PickleIterator pickle_iter(pickle);
+
+ if (!pickle_iter.ReadStringPiece(trial_name))
+ return false;
+ if (!pickle_iter.ReadStringPiece(group_name))
+ return false;
+ return true;
+ }
+};
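+
+// The per-entry shared-memory layout this implies (matching
+// GetTrialAndGroupName() above and the write path in
+// AddToAllocatorWhileLocked() later in this diff):
+//
+//   +---------------------------+------------------------------------------+
+//   | FieldTrialEntry           | base::Pickle payload                     |
+//   |   activated : uint32_t    |   trial_name (length-prefixed string)    |
+//   |   size      : uint32_t    |   group_name (length-prefixed string)    |
+//   +---------------------------+------------------------------------------+
+//
+// |size| counts only the pickle payload, so a full entry occupies
+// sizeof(FieldTrialEntry) + size bytes in the allocator.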
+
// Creates a time value based on the |year|, |month| and |day_of_month| parameters.
Time CreateTimeFromParams(int year, int month, int day_of_month) {
DCHECK_GT(year, 1970);
@@ -113,6 +172,27 @@ bool ParseFieldTrialsString(const std::string& trials_string,
return true;
}
+void AddForceFieldTrialsFlag(CommandLine* cmd_line) {
+ std::string field_trial_states;
+ FieldTrialList::AllStatesToString(&field_trial_states);
+ if (!field_trial_states.empty()) {
+ cmd_line->AppendSwitchASCII(switches::kForceFieldTrials,
+ field_trial_states);
+ }
+}
+
+#if defined(OS_WIN)
+HANDLE CreateReadOnlyHandle(SharedPersistentMemoryAllocator* allocator) {
+ HANDLE src = allocator->shared_memory()->handle().GetHandle();
+ ProcessHandle process = GetCurrentProcess();
+ DWORD access = SECTION_MAP_READ | SECTION_QUERY;
+ HANDLE dst;
+ if (!::DuplicateHandle(process, src, process, &dst, access, true, 0))
+ return nullptr;
+ return dst;
+}
+#endif
+
} // namespace
// statics
@@ -244,7 +324,8 @@ FieldTrial::FieldTrial(const std::string& trial_name,
enable_field_trial_(true),
forced_(false),
group_reported_(false),
- trial_registered_(false) {
+ trial_registered_(false),
+ ref_(SharedPersistentMemoryAllocator::kReferenceNull) {
DCHECK_GT(total_probability, 0);
DCHECK(!trial_name_.empty());
DCHECK(!default_group_name_.empty());
@@ -268,6 +349,10 @@ void FieldTrial::SetGroupChoice(const std::string& group_name, int number) {
}
void FieldTrial::FinalizeGroupChoice() {
+ FinalizeGroupChoiceImpl(false);
+}
+
+void FieldTrial::FinalizeGroupChoiceImpl(bool is_locked) {
if (group_ != kNotFinalized)
return;
accumulated_group_probability_ = divisor_;
@@ -275,6 +360,10 @@ void FieldTrial::FinalizeGroupChoice() {
// finalized.
DCHECK(!forced_);
SetGroupChoice(default_group_name_, kDefaultGroupNumber);
+
+ // Add the field trial to shared memory.
+ if (kUseSharedMemoryForFieldTrials)
+ FieldTrialList::OnGroupFinalized(is_locked, this);
}
bool FieldTrial::GetActiveGroup(ActiveGroup* active_group) const {
@@ -296,6 +385,16 @@ bool FieldTrial::GetState(State* field_trial_state) {
return true;
}
+bool FieldTrial::GetStateWhileLocked(State* field_trial_state) {
+ if (!enable_field_trial_)
+ return false;
+ FinalizeGroupChoiceImpl(true);
+ field_trial_state->trial_name = trial_name_;
+ field_trial_state->group_name = group_name_;
+ field_trial_state->activated = group_reported_;
+ return true;
+}
+
//------------------------------------------------------------------------------
// FieldTrialList methods and members.
@@ -476,7 +575,7 @@ void FieldTrialList::AllStatesToString(std::string* output) {
for (const auto& registered : global_->registered_) {
FieldTrial::State trial;
- if (!registered.second->GetState(&trial))
+ if (!registered.second->GetStateWhileLocked(&trial))
continue;
DCHECK_EQ(std::string::npos,
trial.trial_name.find(kPersistentStringSeparator));
@@ -558,6 +657,99 @@ bool FieldTrialList::CreateTrialsFromString(
}
// static
+void FieldTrialList::CreateTrialsFromCommandLine(
+ const CommandLine& cmd_line,
+ const char* field_trial_handle_switch) {
+ DCHECK(global_);
+
+#if defined(OS_WIN) && !defined(OS_NACL)
+ if (cmd_line.HasSwitch(field_trial_handle_switch)) {
+ std::string arg = cmd_line.GetSwitchValueASCII(field_trial_handle_switch);
+ size_t token = arg.find(",");
+ int field_trial_handle = std::stoi(arg.substr(0, token));
+ size_t field_trial_length = std::stoi(arg.substr(token + 1, arg.length()));
+
+ HANDLE handle = reinterpret_cast<HANDLE>(field_trial_handle);
+ SharedMemoryHandle shm_handle =
+ SharedMemoryHandle(handle, GetCurrentProcId());
+
+ // Gets deleted when it goes out of scope, but that's OK because we need it
+ // only for the duration of this method.
+ std::unique_ptr<SharedMemory> shm(new SharedMemory(shm_handle, true));
+ if (!shm.get()->Map(field_trial_length))
+ TerminateBecauseOutOfMemory(field_trial_length);
+
+ bool result = FieldTrialList::CreateTrialsFromSharedMemory(std::move(shm));
+ DCHECK(result);
+ return;
+ }
+#endif
+
+ if (cmd_line.HasSwitch(switches::kForceFieldTrials)) {
+ bool result = FieldTrialList::CreateTrialsFromString(
+ cmd_line.GetSwitchValueASCII(switches::kForceFieldTrials),
+ std::set<std::string>());
+ DCHECK(result);
+ }
+}
+
+#if defined(OS_WIN)
+// static
+void FieldTrialList::AppendFieldTrialHandleIfNeeded(
+ HandlesToInheritVector* handles) {
+ if (!global_)
+ return;
+ if (kUseSharedMemoryForFieldTrials) {
+ InstantiateFieldTrialAllocatorIfNeeded();
+ if (global_->readonly_allocator_handle_)
+ handles->push_back(global_->readonly_allocator_handle_);
+ }
+}
+#endif
+
+// static
+void FieldTrialList::CopyFieldTrialStateToFlags(
+ const char* field_trial_handle_switch,
+ CommandLine* cmd_line) {
+ // TODO(lawrencewu): Ideally, having the global would be guaranteed. However,
+ // content browser tests currently don't create a FieldTrialList because they
+ // don't run ChromeBrowserMainParts code where it's done for Chrome.
+ if (!global_)
+ return;
+
+#if defined(OS_WIN)
+ // Use shared memory to pass the state if the feature is enabled, otherwise
+ // fallback to passing it via the command line as a string.
+ if (kUseSharedMemoryForFieldTrials) {
+ InstantiateFieldTrialAllocatorIfNeeded();
+ // If the readonly handle didn't get duplicated properly, then fallback to
+ // original behavior.
+ if (!global_->readonly_allocator_handle_) {
+ AddForceFieldTrialsFlag(cmd_line);
+ return;
+ }
+
+ // HANDLE is just typedef'd to void *. We basically cast the handle into an
+ // int (uintptr_t, to be exact), stringify the int, and pass it as a
+ // command-line flag. The child process will do the reverse conversions to
+ // retrieve the handle. See http://stackoverflow.com/a/153077
+ auto uintptr_handle =
+ reinterpret_cast<uintptr_t>(global_->readonly_allocator_handle_);
+ size_t field_trial_length =
+ global_->field_trial_allocator_->shared_memory()->mapped_size();
+ std::string field_trial_handle = std::to_string(uintptr_handle) + "," +
+ std::to_string(field_trial_length);
+
+ cmd_line->AppendSwitchASCII(field_trial_handle_switch, field_trial_handle);
+ global_->field_trial_allocator_->UpdateTrackingHistograms();
+ return;
+ }
+#endif
+
+ AddForceFieldTrialsFlag(cmd_line);
+}
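
CopyFieldTrialStateToFlags() and CreateTrialsFromCommandLine() above form an
encode/decode pair that round-trips the HANDLE through a "<handle>,<length>"
string. Condensed to a sketch (variable names illustrative); the int
round-trip is safe because Windows kernel handles fit in 32 bits even in
64-bit processes:

    // Browser side: HANDLE -> "handle,length".
    uintptr_t raw = reinterpret_cast<uintptr_t>(readonly_handle);
    std::string value = std::to_string(raw) + "," + std::to_string(mapped_size);

    // Child side: "handle,length" -> HANDLE and mapped size.
    size_t comma = value.find(',');
    HANDLE handle = reinterpret_cast<HANDLE>(std::stoi(value.substr(0, comma)));
    size_t length = std::stoi(value.substr(comma + 1));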
+
+// static
FieldTrial* FieldTrialList::CreateFieldTrial(
const std::string& name,
const std::string& group_name) {
@@ -598,6 +790,18 @@ void FieldTrialList::RemoveObserver(Observer* observer) {
}
// static
+void FieldTrialList::OnGroupFinalized(bool is_locked, FieldTrial* field_trial) {
+ if (!global_)
+ return;
+ if (is_locked) {
+ AddToAllocatorWhileLocked(field_trial);
+ } else {
+ AutoLock auto_lock(global_->lock_);
+ AddToAllocatorWhileLocked(field_trial);
+ }
+}
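
The |is_locked| flag exists because FinalizeGroupChoiceImpl(true) can be
reached while AllStatesToString() already holds |global_->lock_|, and base's
Lock is not reentrant, so re-acquiring it would deadlock. The pattern reduced
to a sketch:

    void DoWork(bool is_locked) {
      if (is_locked) {
        WorkWhileLocked();          // caller already holds lock_
      } else {
        AutoLock auto_lock(lock_);  // acquire lock_ ourselves
        WorkWhileLocked();
      }
    }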
+
+// static
void FieldTrialList::NotifyFieldTrialGroupSelection(FieldTrial* field_trial) {
if (!global_)
return;
@@ -607,10 +811,13 @@ void FieldTrialList::NotifyFieldTrialGroupSelection(FieldTrial* field_trial) {
if (field_trial->group_reported_)
return;
field_trial->group_reported_ = true;
- }
- if (!field_trial->enable_field_trial_)
- return;
+ if (!field_trial->enable_field_trial_)
+ return;
+
+ if (kUseSharedMemoryForFieldTrials)
+ ActivateFieldTrialEntryWhileLocked(field_trial);
+ }
global_->observer_list_->Notify(
FROM_HERE, &FieldTrialList::Observer::OnFieldTrialGroupFinalized,
@@ -626,6 +833,164 @@ size_t FieldTrialList::GetFieldTrialCount() {
}
// static
+bool FieldTrialList::CreateTrialsFromSharedMemory(
+ std::unique_ptr<SharedMemory> shm) {
+ global_->field_trial_allocator_.reset(new SharedPersistentMemoryAllocator(
+ std::move(shm), 0, kAllocatorName, true));
+ SharedPersistentMemoryAllocator* shalloc =
+ global_->field_trial_allocator_.get();
+ PersistentMemoryAllocator::Iterator mem_iter(shalloc);
+
+ SharedPersistentMemoryAllocator::Reference ref;
+ while ((ref = mem_iter.GetNextOfType(kFieldTrialType)) !=
+ SharedPersistentMemoryAllocator::kReferenceNull) {
+ const FieldTrialEntry* entry =
+ shalloc->GetAsObject<const FieldTrialEntry>(ref, kFieldTrialType);
+
+ StringPiece trial_name;
+ StringPiece group_name;
+ if (!entry->GetTrialAndGroupName(&trial_name, &group_name))
+ return false;
+
+ // TODO(lawrencewu): Convert the API for CreateFieldTrial to take
+ // StringPieces.
+ FieldTrial* trial =
+ CreateFieldTrial(trial_name.as_string(), group_name.as_string());
+
+ // If we failed to create the field trial, crash with debug info.
+ // TODO(665129): Remove this when the crash is resolved.
+ if (!trial) {
+ std::string trial_name_string = trial_name.as_string();
+ std::string group_name_string = group_name.as_string();
+ FieldTrial* existing_field_trial =
+ FieldTrialList::Find(trial_name_string);
+ if (existing_field_trial)
+ debug::Alias(existing_field_trial->group_name_internal().c_str());
+ debug::Alias(trial_name_string.c_str());
+ debug::Alias(group_name_string.c_str());
+ CHECK(!trial_name_string.empty());
+ CHECK(!group_name_string.empty());
+ CHECK_EQ(existing_field_trial->group_name_internal(),
+ group_name.as_string());
+ return false;
+ }
+
+ trial->ref_ = ref;
+ if (entry->activated) {
+ // Call |group()| to mark the trial as "used" and notify observers, if
+ // any. This is useful to ensure that field trials created in child
+ // processes are properly reported in crash reports.
+ trial->group();
+ }
+ }
+ return true;
+}
+
+#if !defined(OS_NACL)
+// static
+void FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded() {
+ if (!global_)
+ return;
+ AutoLock auto_lock(global_->lock_);
+ // Create the allocator if not already created and add all existing trials.
+ if (global_->field_trial_allocator_ != nullptr)
+ return;
+
+ std::unique_ptr<SharedMemory> shm(new SharedMemory());
+ if (!shm->CreateAndMapAnonymous(kFieldTrialAllocationSize))
+ TerminateBecauseOutOfMemory(kFieldTrialAllocationSize);
+
+ global_->field_trial_allocator_.reset(new SharedPersistentMemoryAllocator(
+ std::move(shm), 0, kAllocatorName, false));
+ global_->field_trial_allocator_->CreateTrackingHistograms(kAllocatorName);
+
+ // Add all existing field trials.
+ for (const auto& registered : global_->registered_) {
+ AddToAllocatorWhileLocked(registered.second);
+ }
+
+#if defined(OS_WIN)
+ // Set |readonly_allocator_handle_| so it can be inherited by child processes
+ // and passed to them via the command line.
+ global_->readonly_allocator_handle_ =
+ CreateReadOnlyHandle(global_->field_trial_allocator_.get());
+#endif
+}
+#endif
+
+// static
+void FieldTrialList::AddToAllocatorWhileLocked(FieldTrial* field_trial) {
+ SharedPersistentMemoryAllocator* allocator =
+ global_->field_trial_allocator_.get();
+
+ // Don't do anything if the allocator hasn't been instantiated yet.
+ if (allocator == nullptr)
+ return;
+
+ // Or if the allocator is read-only, which means we are in a child process and
+ // shouldn't be writing to it.
+ if (allocator->IsReadonly())
+ return;
+
+ FieldTrial::State trial_state;
+ if (!field_trial->GetStateWhileLocked(&trial_state))
+ return;
+
+ // Or if we've already added it. We must check after GetState since it can
+ // also add to the allocator.
+ if (field_trial->ref_)
+ return;
+
+ Pickle pickle;
+ pickle.WriteString(trial_state.trial_name);
+ pickle.WriteString(trial_state.group_name);
+
+ size_t total_size = sizeof(FieldTrialEntry) + pickle.size();
+ SharedPersistentMemoryAllocator::Reference ref =
+ allocator->Allocate(total_size, kFieldTrialType);
+ if (ref == SharedPersistentMemoryAllocator::kReferenceNull)
+ return;
+
+ FieldTrialEntry* entry =
+ allocator->GetAsObject<FieldTrialEntry>(ref, kFieldTrialType);
+ entry->activated = trial_state.activated;
+ entry->size = pickle.size();
+
+ // TODO(lawrencewu): Modify base::Pickle to be able to write over a section in
+ // memory, so we can avoid this memcpy.
+ char* dst = reinterpret_cast<char*>(entry) + sizeof(FieldTrialEntry);
+ memcpy(dst, pickle.data(), pickle.size());
+
+ allocator->MakeIterable(ref);
+ field_trial->ref_ = ref;
+}
+
+// static
+void FieldTrialList::ActivateFieldTrialEntryWhileLocked(
+ FieldTrial* field_trial) {
+ SharedPersistentMemoryAllocator* allocator =
+ global_->field_trial_allocator_.get();
+
+ // Check if we're in the child process and return early if so.
+ if (allocator && allocator->IsReadonly())
+ return;
+
+ SharedPersistentMemoryAllocator::Reference ref = field_trial->ref_;
+ if (ref == SharedPersistentMemoryAllocator::kReferenceNull) {
+ // It's fine to do this even if the allocator hasn't been instantiated
+ // yet -- it'll just return early.
+ AddToAllocatorWhileLocked(field_trial);
+ } else {
+ // It's also okay to do this even though the callee doesn't have a lock --
+ // the only thing that happens on a stale read here is a slight performance
+ // hit from the child re-synchronizing activation state.
+ FieldTrialEntry* entry =
+ allocator->GetAsObject<FieldTrialEntry>(ref, kFieldTrialType);
+ entry->activated = true;
+ }
+}
+
+// static
const FieldTrial::EntropyProvider*
FieldTrialList::GetEntropyProviderForOneTimeRandomization() {
if (!global_) {
diff --git a/chromium/base/metrics/field_trial.h b/chromium/base/metrics/field_trial.h
index 2b8894904dd..7fd067ad74c 100644
--- a/chromium/base/metrics/field_trial.h
+++ b/chromium/base/metrics/field_trial.h
@@ -64,10 +64,14 @@
#include <vector>
#include "base/base_export.h"
+#include "base/command_line.h"
#include "base/gtest_prod_util.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
+#include "base/memory/shared_memory.h"
+#include "base/metrics/persistent_memory_allocator.h"
#include "base/observer_list_threadsafe.h"
+#include "base/process/launch.h"
#include "base/strings/string_piece.h"
#include "base/synchronization/lock.h"
#include "base/time/time.h"
@@ -247,6 +251,10 @@ class BASE_EXPORT FieldTrial : public RefCounted<FieldTrial> {
// status.
void FinalizeGroupChoice();
+ // Implements FinalizeGroupChoice() with the added flexibility of being
+ // deadlock-free if |is_locked| is true and the caller is holding a lock.
+ void FinalizeGroupChoiceImpl(bool is_locked);
+
// Returns the trial name and selected group name for this field trial via
// the output parameter |active_group|, but only if the group has already
// been chosen and has been externally observed via |group()| and the trial
@@ -262,6 +270,10 @@ class BASE_EXPORT FieldTrial : public RefCounted<FieldTrial> {
// untouched.
bool GetState(State* field_trial_state);
+ // Does the same thing as above, but is deadlock-free if the caller is holding
+ // a lock.
+ bool GetStateWhileLocked(State* field_trial_state);
+
// Returns the group_name. A winner need not have been chosen.
std::string group_name_internal() const { return group_name_; }
@@ -309,6 +321,9 @@ class BASE_EXPORT FieldTrial : public RefCounted<FieldTrial> {
// should notify it when its group is queried.
bool trial_registered_;
+ // Reference to related field trial struct and data in shared memory.
+ SharedPersistentMemoryAllocator::Reference ref_;
+
// When benchmarking is enabled, field trials all revert to the 'default'
// group.
static bool enable_benchmarking_;
@@ -386,7 +401,7 @@ class BASE_EXPORT FieldTrialList {
// PermutedEntropyProvider (which is used when UMA is not enabled). If
// |override_entropy_provider| is not null, then it will be used for
// randomization instead of the provider given when the FieldTrialList was
- // instanciated.
+ // instantiated.
static FieldTrial* FactoryGetFieldTrialWithRandomizationSeed(
const std::string& trial_name,
FieldTrial::Probability total_probability,
@@ -464,6 +479,35 @@ class BASE_EXPORT FieldTrialList {
const std::string& trials_string,
const std::set<std::string>& ignored_trial_names);
+ // Achieves the same thing as CreateTrialsFromString(), except that it takes
+ // the trials from the command line, either via a shared memory handle or a
+ // command-line argument.
+ // If shared memory is used to pass the list of field trials around, the
+ // |field_trial_handle_switch| command-line argument is expected to contain
+ // the shared memory handle.
+ // If not, the trials are created as before (using the kForceFieldTrials
+ // switch). The |field_trial_handle_switch| argument must be passed in since
+ // base/ can't depend on content/.
+ static void CreateTrialsFromCommandLine(
+ const base::CommandLine& cmd_line,
+ const char* field_trial_handle_switch);
+
+#if defined(OS_WIN)
+ // On Windows, we need to explicitly pass down any handles to be inherited.
+ // This function adds the shared memory handle for the field trial state to
+ // the list of handles to be inherited.
+ static void AppendFieldTrialHandleIfNeeded(
+ base::HandlesToInheritVector* handles);
+#endif
+
+ // Adds a switch to the command line containing the field trial state as a
+ // string (if not using shared memory to share field trial state), or the
+ // shared memory handle + length.
+ // Needs the |field_trial_handle_switch| argument to be passed in since base/
+ // can't depend on content/.
+ static void CopyFieldTrialStateToFlags(const char* field_trial_handle_switch,
+ base::CommandLine* cmd_line);
+
// Create a FieldTrial with the given |name| and using 100% probability for
// the FieldTrial, force FieldTrial to have the same group string as
// |group_name|. This is commonly used in a non-browser process, to carry
@@ -481,6 +525,10 @@ class BASE_EXPORT FieldTrialList {
// Remove an observer.
static void RemoveObserver(Observer* observer);
+ // Grabs the lock if necessary and adds the field trial to the allocator. This
+ // should only be called from FinalizeGroupChoice().
+ static void OnGroupFinalized(bool is_locked, FieldTrial* field_trial);
+
// Notify all observers that a group has been finalized for |field_trial|.
static void NotifyFieldTrialGroupSelection(FieldTrial* field_trial);
@@ -488,6 +536,30 @@ class BASE_EXPORT FieldTrialList {
static size_t GetFieldTrialCount();
private:
+ // Allow tests to access our innards for testing purposes.
+ FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, InstantiateAllocator);
+ FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, AddTrialsToAllocator);
+
+ // Expects a mapped piece of shared memory |shm| that was created from the
+ // browser process's field_trial_allocator and shared via the command line.
+ // This function recreates the allocator, iterates through all the field
+ // trials in it, and creates them via CreateFieldTrial(). Returns true if
+ // successful and false otherwise.
+ static bool CreateTrialsFromSharedMemory(
+ std::unique_ptr<base::SharedMemory> shm);
+
+ // Instantiates the field trial allocator, adds all existing field trials to
+ // it, and duplicates its handle to a read-only handle, which gets stored in
+ // |readonly_allocator_handle_|.
+ static void InstantiateFieldTrialAllocatorIfNeeded();
+
+ // Adds the field trial to the allocator. Caller must hold a lock before
+ // calling this.
+ static void AddToAllocatorWhileLocked(FieldTrial* field_trial);
+
+ // Activate the corresponding field trial entry struct in shared memory.
+ static void ActivateFieldTrialEntryWhileLocked(FieldTrial* field_trial);
+
// A map from FieldTrial names to the actual instances.
typedef std::map<std::string, FieldTrial*> RegistrationMap;
@@ -525,6 +597,20 @@ class BASE_EXPORT FieldTrialList {
// List of observers to be notified when a group is selected for a FieldTrial.
scoped_refptr<ObserverListThreadSafe<Observer> > observer_list_;
+ // Allocator in shared memory containing field trial data. Used in both
+ // browser and child processes, but readonly in the child.
+ // In the future, we may want to move this to a more generic place if we want
+ // to start passing more data other than field trials.
+ std::unique_ptr<SharedPersistentMemoryAllocator> field_trial_allocator_ =
+ nullptr;
+
+#if defined(OS_WIN)
+ // Readonly copy of the handle to the allocator. Needs to be a member variable
+ // because it's needed from both CopyFieldTrialStateToFlags() and
+ // AppendFieldTrialHandleIfNeeded().
+ HANDLE readonly_allocator_handle_ = nullptr;
+#endif
+
DISALLOW_COPY_AND_ASSIGN(FieldTrialList);
};
diff --git a/chromium/base/metrics/field_trial_param_associator.cc b/chromium/base/metrics/field_trial_param_associator.cc
new file mode 100644
index 00000000000..50619ea2bc4
--- /dev/null
+++ b/chromium/base/metrics/field_trial_param_associator.cc
@@ -0,0 +1,55 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/field_trial_param_associator.h"
+
+#include "base/metrics/field_trial.h"
+
+namespace base {
+
+FieldTrialParamAssociator::FieldTrialParamAssociator() {}
+FieldTrialParamAssociator::~FieldTrialParamAssociator() {}
+
+// static
+FieldTrialParamAssociator* FieldTrialParamAssociator::GetInstance() {
+ return Singleton<FieldTrialParamAssociator,
+ LeakySingletonTraits<FieldTrialParamAssociator>>::get();
+}
+
+bool FieldTrialParamAssociator::AssociateFieldTrialParams(
+ const std::string& trial_name,
+ const std::string& group_name,
+ const FieldTrialParams& params) {
+ if (FieldTrialList::IsTrialActive(trial_name))
+ return false;
+
+ AutoLock scoped_lock(lock_);
+ const FieldTrialKey key(trial_name, group_name);
+ if (ContainsKey(field_trial_params_, key))
+ return false;
+
+ field_trial_params_[key] = params;
+ return true;
+}
+
+bool FieldTrialParamAssociator::GetFieldTrialParams(
+ const std::string& trial_name,
+ FieldTrialParams* params) {
+ AutoLock scoped_lock(lock_);
+
+ const std::string group_name = FieldTrialList::FindFullName(trial_name);
+ const FieldTrialKey key(trial_name, group_name);
+ if (!ContainsKey(field_trial_params_, key))
+ return false;
+
+ *params = field_trial_params_[key];
+ return true;
+}
+
+void FieldTrialParamAssociator::ClearAllParamsForTesting() {
+ AutoLock scoped_lock(lock_);
+ field_trial_params_.clear();
+}
+
+} // namespace base
diff --git a/chromium/base/metrics/field_trial_param_associator.h b/chromium/base/metrics/field_trial_param_associator.h
new file mode 100644
index 00000000000..214e146966e
--- /dev/null
+++ b/chromium/base/metrics/field_trial_param_associator.h
@@ -0,0 +1,57 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_FIELD_TRIAL_PARAM_ASSOCIATOR_H_
+#define BASE_METRICS_FIELD_TRIAL_PARAM_ASSOCIATOR_H_
+
+#include <map>
+#include <string>
+#include <utility>
+
+#include "base/base_export.h"
+#include "base/memory/singleton.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+
+// Keeps track of the parameters of all field trials and ensures access to them
+// is thread-safe.
+class BASE_EXPORT FieldTrialParamAssociator {
+ public:
+ FieldTrialParamAssociator();
+ ~FieldTrialParamAssociator();
+
+ // Key-value mapping type for field trial parameters.
+ typedef std::map<std::string, std::string> FieldTrialParams;
+
+ // Retrieve the singleton.
+ static FieldTrialParamAssociator* GetInstance();
+
+ // Sets parameters for the given field trial name and group.
+ bool AssociateFieldTrialParams(const std::string& trial_name,
+ const std::string& group_name,
+ const FieldTrialParams& params);
+
+ // Gets the parameters for a field trial and its chosen group.
+ bool GetFieldTrialParams(const std::string& trial_name,
+ FieldTrialParams* params);
+
+ // Clears the internal field_trial_params_ mapping.
+ void ClearAllParamsForTesting();
+
+ private:
+ friend struct DefaultSingletonTraits<FieldTrialParamAssociator>;
+
+ // (field_trial_name, field_trial_group)
+ typedef std::pair<std::string, std::string> FieldTrialKey;
+
+ Lock lock_;
+ std::map<FieldTrialKey, FieldTrialParams> field_trial_params_;
+
+ DISALLOW_COPY_AND_ASSIGN(FieldTrialParamAssociator);
+};
+
+} // namespace base
+
+#endif // BASE_METRICS_FIELD_TRIAL_PARAM_ASSOCIATOR_H_
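
Typical use of the new singleton (trial, group and parameter names below are
hypothetical): parameters must be associated before the trial becomes active,
and lookups key off whichever group FieldTrialList actually chose:

    using Params = base::FieldTrialParamAssociator::FieldTrialParams;

    Params params;
    params["snooze_ms"] = "250";
    bool associated = base::FieldTrialParamAssociator::GetInstance()
                          ->AssociateFieldTrialParams("MyStudy", "Enabled",
                                                      params);
    // false if "MyStudy" is already active or params were already set.

    Params out;
    if (base::FieldTrialParamAssociator::GetInstance()->GetFieldTrialParams(
            "MyStudy", &out)) {
      // |out| holds the params for FieldTrialList::FindFullName("MyStudy").
    }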
diff --git a/chromium/base/metrics/field_trial_unittest.cc b/chromium/base/metrics/field_trial_unittest.cc
index caa00e814cb..0ad13a22be7 100644
--- a/chromium/base/metrics/field_trial_unittest.cc
+++ b/chromium/base/metrics/field_trial_unittest.cc
@@ -6,14 +6,18 @@
#include <stddef.h>
+#include "base/base_switches.h"
#include "base/build_time.h"
+#include "base/feature_list.h"
#include "base/macros.h"
+#include "base/memory/ptr_util.h"
#include "base/message_loop/message_loop.h"
#include "base/rand_util.h"
#include "base/run_loop.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/stringprintf.h"
#include "base/test/gtest_util.h"
+#include "base/test/mock_entropy_provider.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
@@ -1132,4 +1136,62 @@ TEST(FieldTrialDeathTest, OneTimeRandomizedTrialWithoutFieldTrialList) {
"");
}
+TEST(FieldTrialListTest, TestCopyFieldTrialStateToFlags) {
+ base::FieldTrialList field_trial_list(
+ base::MakeUnique<base::MockEntropyProvider>());
+ base::FieldTrialList::CreateFieldTrial("Trial1", "Group1");
+ base::FilePath test_file_path = base::FilePath(FILE_PATH_LITERAL("Program"));
+ base::CommandLine cmd_line = base::CommandLine(test_file_path);
+ const char field_trial_handle[] = "test-field-trial-handle";
+
+ base::FieldTrialList::CopyFieldTrialStateToFlags(field_trial_handle,
+ &cmd_line);
+#if defined(OS_WIN)
+ EXPECT_TRUE(cmd_line.HasSwitch(field_trial_handle) ||
+ cmd_line.HasSwitch(switches::kForceFieldTrials));
+#else
+ EXPECT_TRUE(cmd_line.HasSwitch(switches::kForceFieldTrials));
+#endif
+}
+
+TEST(FieldTrialListTest, InstantiateAllocator) {
+ FieldTrialList field_trial_list(nullptr);
+ FieldTrialList::CreateFieldTrial("Trial1", "Group1");
+
+ FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded();
+ void* memory = field_trial_list.field_trial_allocator_->shared_memory();
+ size_t used = field_trial_list.field_trial_allocator_->used();
+
+ // Ensure that the function is idempotent.
+ FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded();
+ void* new_memory = field_trial_list.field_trial_allocator_->shared_memory();
+ size_t new_used = field_trial_list.field_trial_allocator_->used();
+ EXPECT_EQ(memory, new_memory);
+ EXPECT_EQ(used, new_used);
+}
+
+TEST(FieldTrialListTest, AddTrialsToAllocator) {
+ std::string save_string;
+ base::SharedMemoryHandle handle;
+
+ // Scoping the first FieldTrialList, as we need another one to test that it
+ // matches.
+ {
+ FieldTrialList field_trial_list(nullptr);
+ FieldTrialList::CreateFieldTrial("Trial1", "Group1");
+ FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded();
+ FieldTrialList::AllStatesToString(&save_string);
+ handle = base::SharedMemory::DuplicateHandle(
+ field_trial_list.field_trial_allocator_->shared_memory()->handle());
+ }
+
+ FieldTrialList field_trial_list2(nullptr);
+ std::unique_ptr<base::SharedMemory> shm(new SharedMemory(handle, true));
+ shm.get()->Map(64 << 10); // Hardcoded, equal to kFieldTrialAllocationSize.
+ FieldTrialList::CreateTrialsFromSharedMemory(std::move(shm));
+ std::string check_string;
+ FieldTrialList::AllStatesToString(&check_string);
+ EXPECT_EQ(save_string, check_string);
+}
+
} // namespace base
diff --git a/chromium/base/metrics/histogram.h b/chromium/base/metrics/histogram.h
index 2283a4d80fd..0f059457aaa 100644
--- a/chromium/base/metrics/histogram.h
+++ b/chromium/base/metrics/histogram.h
@@ -92,7 +92,6 @@ class BooleanHistogram;
class CustomHistogram;
class Histogram;
class LinearHistogram;
-class PersistentMemoryAllocator;
class Pickle;
class PickleIterator;
class SampleVector;
diff --git a/chromium/base/metrics/histogram_base.h b/chromium/base/metrics/histogram_base.h
index 9f836073de1..4f5ba049bc6 100644
--- a/chromium/base/metrics/histogram_base.h
+++ b/chromium/base/metrics/histogram_base.h
@@ -21,7 +21,6 @@
namespace base {
-class BucketRanges;
class DictionaryValue;
class HistogramBase;
class HistogramSamples;
diff --git a/chromium/base/metrics/histogram_functions.cc b/chromium/base/metrics/histogram_functions.cc
new file mode 100644
index 00000000000..67dd2f8c6d1
--- /dev/null
+++ b/chromium/base/metrics/histogram_functions.cc
@@ -0,0 +1,99 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/histogram_functions.h"
+
+#include "base/metrics/histogram.h"
+#include "base/metrics/histogram_base.h"
+#include "base/time/time.h"
+
+namespace base {
+
+void UmaHistogramBoolean(const std::string& name, bool sample) {
+ HistogramBase* histogram = BooleanHistogram::FactoryGet(
+ name, base::HistogramBase::kUmaTargetedHistogramFlag);
+ histogram->Add(sample);
+}
+
+void UmaHistogramExactLinear(const std::string& name,
+ int sample,
+ int value_max) {
+ HistogramBase* histogram =
+ LinearHistogram::FactoryGet(name, 1, value_max, value_max + 1,
+ HistogramBase::kUmaTargetedHistogramFlag);
+ histogram->Add(sample);
+}
+
+void UmaHistogramPercentage(const std::string& name, int percent) {
+ UmaHistogramExactLinear(name, percent, 100);
+}
+
+void UmaHistogramCustomCounts(const std::string& name,
+ int sample,
+ int min,
+ int max,
+ int buckets) {
+ HistogramBase* histogram = Histogram::FactoryGet(
+ name, min, max, buckets, HistogramBase::kUmaTargetedHistogramFlag);
+ histogram->Add(sample);
+}
+
+void UmaHistogramCounts100(const std::string& name, int sample) {
+ UmaHistogramCustomCounts(name, sample, 1, 100, 50);
+}
+
+void UmaHistogramCounts1000(const std::string& name, int sample) {
+ UmaHistogramCustomCounts(name, sample, 1, 1000, 50);
+}
+
+void UmaHistogramCounts10000(const std::string& name, int sample) {
+ UmaHistogramCustomCounts(name, sample, 1, 10000, 50);
+}
+
+void UmaHistogramCounts100000(const std::string& name, int sample) {
+ UmaHistogramCustomCounts(name, sample, 1, 100000, 50);
+}
+
+void UmaHistogramCounts1M(const std::string& name, int sample) {
+ UmaHistogramCustomCounts(name, sample, 1, 1000000, 50);
+}
+
+void UmaHistogramCounts10M(const std::string& name, int sample) {
+ UmaHistogramCustomCounts(name, sample, 1, 10000000, 50);
+}
+
+void UmaHistogramCustomTimes(const std::string& name,
+ TimeDelta sample,
+ TimeDelta min,
+ TimeDelta max,
+ int buckets) {
+ HistogramBase* histogram = Histogram::FactoryTimeGet(
+ name, min, max, buckets, HistogramBase::kUmaTargetedHistogramFlag);
+ histogram->AddTime(sample);
+}
+
+void UmaHistogramTimes(const std::string& name, TimeDelta sample) {
+ UmaHistogramCustomTimes(name, sample, TimeDelta::FromMilliseconds(1),
+ TimeDelta::FromSeconds(10), 50);
+}
+
+void UmaHistogramMediumTimes(const std::string& name, TimeDelta sample) {
+ UmaHistogramCustomTimes(name, sample, TimeDelta::FromMilliseconds(1),
+ TimeDelta::FromMinutes(3), 50);
+}
+
+void UmaHistogramLongTimes(const std::string& name, TimeDelta sample) {
+ UmaHistogramCustomTimes(name, sample, TimeDelta::FromMilliseconds(1),
+ TimeDelta::FromHours(1), 50);
+}
+
+void UmaHistogramMemoryKB(const std::string& name, int sample) {
+ UmaHistogramCustomCounts(name, sample, 1000, 500000, 50);
+}
+
+void UmaHistogramMemoryLargeMB(const std::string& name, int sample) {
+ UmaHistogramCustomCounts(name, sample, 1, 64000, 100);
+}
+
+} // namespace base
diff --git a/chromium/base/metrics/histogram_functions.h b/chromium/base/metrics/histogram_functions.h
new file mode 100644
index 00000000000..a18c464aadb
--- /dev/null
+++ b/chromium/base/metrics/histogram_functions.h
@@ -0,0 +1,100 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_HISTOGRAM_FUNCTIONS_H_
+#define BASE_METRICS_HISTOGRAM_FUNCTIONS_H_
+
+#include "base/metrics/histogram.h"
+#include "base/metrics/histogram_base.h"
+#include "base/time/time.h"
+
+// Functions for recording metrics.
+//
+// For best practices on deciding when to emit to a histogram and what form
+// the histogram should take, see
+// https://chromium.googlesource.com/chromium/src.git/+/HEAD/tools/metrics/histograms/README.md
+
+// Functions for recording UMA histograms. These can be used when the
+// histogram name is generated at runtime. The functionality is equivalent to
+// the macros defined in histogram_macros.h, but these functions allow
+// non-constant histogram names. They are slower than their macro equivalents
+// because the histogram objects are not cached between calls, so they
+// shouldn't be used in performance-critical code.
+namespace base {
+
+// For histograms with linear buckets.
+// Used for capturing integer data with a linear bucketing scheme. This can be
+// used when you want the exact value of some small numeric count, with a max of
+// 100 or less. If you need to capture a range greater than 100, we recommend
+// using the COUNTS histograms below.
+// Sample usage:
+// base::UmaHistogramExactLinear("Histogram.Linear", some_value, 10);
+BASE_EXPORT void UmaHistogramExactLinear(const std::string& name,
+ int sample,
+ int value_max);
+
+// For adding a sample to an enum histogram.
+// Sample usage:
+// base::UmaHistogramEnumeration("My.Enumeration", VALUE, EVENT_MAX_VALUE);
+// Note that new enum values can be added, but existing values must never be
+// renumbered or deleted and reused.
+template <typename T>
+void UmaHistogramEnumeration(const std::string& name, T sample, T max) {
+ static_assert(std::is_enum<T>::value,
+ "Non enum passed to UmaHistogramEnumeration");
+ return UmaHistogramExactLinear(name, static_cast<int>(sample), max);
+}
+
+// For adding a boolean sample to a histogram.
+// Sample usage:
+// base::UmaHistogramBoolean("My.Boolean", true)
+BASE_EXPORT void UmaHistogramBoolean(const std::string& name, bool sample);
+
+// For adding a percentage sample to a histogram.
+// Percents are integers between 1 and 100.
+// Sample usage:
+// base::UmaHistogramPercentage("My.Percent", 69)
+BASE_EXPORT void UmaHistogramPercentage(const std::string& name, int percent);
+
+// For adding a counts histogram.
+// Sample usage:
+// base::UmaHistogramCustomCounts("My.Counts", some_value, 1, 600, 30)
+BASE_EXPORT void UmaHistogramCustomCounts(const std::string& name,
+ int sample,
+ int min,
+ int max,
+ int buckets);
+
+// Counts specializations for maximum counts of 100, 1000, 10k, 100k, 1M, and 10M.
+BASE_EXPORT void UmaHistogramCounts100(const std::string& name, int sample);
+BASE_EXPORT void UmaHistogramCounts1000(const std::string& name, int sample);
+BASE_EXPORT void UmaHistogramCounts10000(const std::string& name, int sample);
+BASE_EXPORT void UmaHistogramCounts100000(const std::string& name, int sample);
+BASE_EXPORT void UmaHistogramCounts1M(const std::string& name, int sample);
+BASE_EXPORT void UmaHistogramCounts10M(const std::string& name, int sample);
+
+// For histograms storing times.
+BASE_EXPORT void UmaHistogramCustomTimes(const std::string& name,
+ TimeDelta sample,
+ TimeDelta min,
+ TimeDelta max,
+ int buckets);
+// For short timings from 1 ms up to 10 seconds (50 buckets).
+BASE_EXPORT void UmaHistogramTimes(const std::string& name, TimeDelta sample);
+// For medium timings up to 3 minutes (50 buckets).
+BASE_EXPORT void UmaHistogramMediumTimes(const std::string& name,
+ TimeDelta sample);
+// For time intervals up to 1 hr (50 buckets).
+BASE_EXPORT void UmaHistogramLongTimes(const std::string& name,
+ TimeDelta sample);
+
+// For recording memory related histograms.
+// Used to measure common KB-granularity memory stats. Range is up to 500M.
+BASE_EXPORT void UmaHistogramMemoryKB(const std::string& name, int sample);
+// Used to measure common MB-granularity memory stats. Range is up to ~64G.
+BASE_EXPORT void UmaHistogramMemoryLargeMB(const std::string& name, int sample);
+
+} // namespace base
+
+#endif // BASE_METRICS_HISTOGRAM_FUNCTIONS_H_
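A minimal usage sketch for the helpers declared above, assuming an illustrative enum and a name suffix chosen at runtime (none of these names are real UMA metrics):

  #include <string>

  #include "base/metrics/histogram_functions.h"

  enum MyEvent { EVENT_A, EVENT_B, EVENT_MAX };  // Illustrative only.

  void RecordForSuffix(const std::string& suffix, MyEvent event) {
    // The name is assembled at runtime, so the histogram_macros.h macros
    // (which cache the histogram and require a constant name) don't apply.
    base::UmaHistogramEnumeration("MyFeature." + suffix, event, EVENT_MAX);
    base::UmaHistogramCounts100("MyFeature." + suffix + ".Count", 42);
  }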
diff --git a/chromium/base/metrics/histogram_functions_unittest.cc b/chromium/base/metrics/histogram_functions_unittest.cc
new file mode 100644
index 00000000000..77a584bb12c
--- /dev/null
+++ b/chromium/base/metrics/histogram_functions_unittest.cc
@@ -0,0 +1,109 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/histogram_functions.h"
+
+#include "base/metrics/histogram_macros.h"
+#include "base/test/histogram_tester.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+enum UmaHistogramTestingEnum {
+ UMA_HISTOGRAM_TESTING_ENUM_FIRST,
+ UMA_HISTOGRAM_TESTING_ENUM_SECOND,
+ UMA_HISTOGRAM_TESTING_ENUM_THIRD
+};
+
+TEST(HistogramFunctionsTest, HistogramExactLinear) {
+ std::string histogram("Testing.UMA.HistogramExactLinear");
+ HistogramTester tester;
+ UmaHistogramExactLinear(histogram, 10, 100);
+ tester.ExpectUniqueSample(histogram, 10, 1);
+ UmaHistogramExactLinear(histogram, 20, 100);
+ UmaHistogramExactLinear(histogram, 10, 100);
+ tester.ExpectBucketCount(histogram, 10, 2);
+ tester.ExpectBucketCount(histogram, 20, 1);
+ tester.ExpectTotalCount(histogram, 3);
+ // Test linear buckets overflow.
+ UmaHistogramExactLinear(histogram, 200, 100);
+ tester.ExpectBucketCount(histogram, 101, 1);
+ tester.ExpectTotalCount(histogram, 4);
+ // Test linear buckets underflow.
+ UmaHistogramExactLinear(histogram, 0, 100);
+ tester.ExpectBucketCount(histogram, 0, 1);
+ tester.ExpectTotalCount(histogram, 5);
+}
+
+TEST(HistogramFunctionsTest, HistogramEnumeration) {
+ std::string histogram("Testing.UMA.HistogramEnumeration");
+ HistogramTester tester;
+ UmaHistogramEnumeration(histogram, UMA_HISTOGRAM_TESTING_ENUM_FIRST,
+ UMA_HISTOGRAM_TESTING_ENUM_THIRD);
+ tester.ExpectUniqueSample(histogram, UMA_HISTOGRAM_TESTING_ENUM_FIRST, 1);
+
+ // Verify the overflow bucket exists.
+ UMA_HISTOGRAM_ENUMERATION(
+ histogram, static_cast<int>(UMA_HISTOGRAM_TESTING_ENUM_THIRD) + 10,
+ UMA_HISTOGRAM_TESTING_ENUM_THIRD);
+ tester.ExpectBucketCount(
+ histogram, static_cast<int>(UMA_HISTOGRAM_TESTING_ENUM_THIRD) + 1, 1);
+ tester.ExpectTotalCount(histogram, 2);
+}
+
+TEST(HistogramFunctionsTest, HistogramBoolean) {
+ std::string histogram("Testing.UMA.HistogramBoolean");
+ HistogramTester tester;
+ UmaHistogramBoolean(histogram, true);
+ tester.ExpectUniqueSample(histogram, 1, 1);
+ UmaHistogramBoolean(histogram, false);
+ tester.ExpectBucketCount(histogram, 0, 1);
+ tester.ExpectTotalCount(histogram, 2);
+}
+
+TEST(HistogramFunctionsTest, HistogramPercentage) {
+ std::string histogram("Testing.UMA.HistogramPercentage");
+ HistogramTester tester;
+ UmaHistogramPercentage(histogram, 50);
+ tester.ExpectUniqueSample(histogram, 50, 1);
+ // Test overflows.
+ UmaHistogramPercentage(histogram, 110);
+ tester.ExpectBucketCount(histogram, 101, 1);
+ tester.ExpectTotalCount(histogram, 2);
+}
+
+TEST(HistogramFunctionsTest, HistogramCounts) {
+ std::string histogram("Testing.UMA.HistogramCount.Custom");
+ HistogramTester tester;
+ UmaHistogramCustomCounts(histogram, 10, 1, 100, 10);
+ tester.ExpectUniqueSample(histogram, 10, 1);
+ UmaHistogramCustomCounts(histogram, 20, 1, 100, 10);
+ UmaHistogramCustomCounts(histogram, 20, 1, 100, 10);
+ UmaHistogramCustomCounts(histogram, 20, 1, 100, 10);
+ tester.ExpectBucketCount(histogram, 20, 3);
+ tester.ExpectTotalCount(histogram, 4);
+ UmaHistogramCustomCounts(histogram, 110, 1, 100, 10);
+ tester.ExpectBucketCount(histogram, 101, 1);
+ tester.ExpectTotalCount(histogram, 5);
+}
+
+TEST(HistogramFunctionsTest, HistogramTimes) {
+ std::string histogram("Testing.UMA.HistogramTimes");
+ HistogramTester tester;
+ UmaHistogramTimes(histogram, TimeDelta::FromSeconds(1));
+ tester.ExpectTimeBucketCount(histogram, TimeDelta::FromSeconds(1), 1);
+ tester.ExpectTotalCount(histogram, 1);
+ UmaHistogramTimes(histogram, TimeDelta::FromSeconds(9));
+ tester.ExpectTimeBucketCount(histogram, TimeDelta::FromSeconds(9), 1);
+ tester.ExpectTotalCount(histogram, 2);
+ UmaHistogramTimes(histogram, TimeDelta::FromSeconds(10)); // Overflows
+ tester.ExpectTimeBucketCount(histogram, TimeDelta::FromSeconds(10), 1);
+ UmaHistogramTimes(histogram, TimeDelta::FromSeconds(20)); // Overflows.
+ // Check the value by picking any overflow time.
+ tester.ExpectTimeBucketCount(histogram, TimeDelta::FromSeconds(11), 2);
+ tester.ExpectTotalCount(histogram, 4);
+}
+
+} // namespace base
diff --git a/chromium/base/metrics/histogram_macros.h b/chromium/base/metrics/histogram_macros.h
index ade89591fcf..9e3caec3a34 100644
--- a/chromium/base/metrics/histogram_macros.h
+++ b/chromium/base/metrics/histogram_macros.h
@@ -10,16 +10,12 @@
#include "base/metrics/histogram_macros_local.h"
#include "base/time/time.h"
+
// Macros for efficient use of histograms.
//
// For best practices on deciding when to emit to a histogram and what form
// the histogram should take, see
// https://chromium.googlesource.com/chromium/src.git/+/HEAD/tools/metrics/histograms/README.md
-//
-// TODO(nikunjb): Move sparse macros to this file.
-//
-// UMA_HISTOGRAM_SPARSE_SLOWLY is defined in sparse_histogram.h as it has
-// different #include dependencies.
// TODO(rkaplow): Link to proper documentation on metric creation once we have
// it in a good state.
@@ -71,8 +67,8 @@
// Sample usage:
// UMA_HISTOGRAM_EXACT_LINEAR("Histogram.Linear", count, 10);
-#define UMA_HISTOGRAM_EXACT_LINEAR(name, sample, value_max) \
- UMA_HISTOGRAM_ENUMERATION(name, sample, value_max+1)
+#define UMA_HISTOGRAM_EXACT_LINEAR(name, sample, value_max) \
+ UMA_HISTOGRAM_ENUMERATION(name, sample, value_max)
// Used for capturing basic percentages. This will be 100 buckets of size 1.
@@ -243,6 +239,21 @@
name, sample, enum_max, \
base::HistogramBase::kUmaStabilityHistogramFlag)
+//------------------------------------------------------------------------------
+// Sparse histograms.
+
+// Sparse histograms are well suited for recording counts of exact sample values
+// that are sparsely distributed over a large range.
+//
+// UMA_HISTOGRAM_SPARSE_SLOWLY is good for sparsely distributed and/or
+// infrequently recorded values since the implementation is slower
+// and takes more memory.
+//
+// For instance, Sqlite.Version.* are sparse because for any given database,
+// there's going to be exactly one version logged.
+// The |sample| value can be any integer, negative or non-negative.
+#define UMA_HISTOGRAM_SPARSE_SLOWLY(name, sample) \
+ INTERNAL_HISTOGRAM_SPARSE_SLOWLY(name, sample)
//------------------------------------------------------------------------------
// Deprecated histogram macros. Not recommended for current use.
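A short sketch of how the relocated macro is typically invoked, with an illustrative metric name and error value (this mirrors the Sqlite example in the comment above rather than documenting a real call site):

  #include "base/metrics/histogram_macros.h"

  void LogDatabaseError(int sqlite_error_code) {
    // Exact error codes are sparse over a large range, so a sparse
    // histogram avoids pre-allocating a vector of mostly-empty buckets.
    UMA_HISTOGRAM_SPARSE_SLOWLY("Illustrative.Sqlite.Error", sqlite_error_code);
  }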
diff --git a/chromium/base/metrics/histogram_macros_internal.h b/chromium/base/metrics/histogram_macros_internal.h
index 8181ead9adf..2deb9284a17 100644
--- a/chromium/base/metrics/histogram_macros_internal.h
+++ b/chromium/base/metrics/histogram_macros_internal.h
@@ -8,6 +8,7 @@
#include "base/atomicops.h"
#include "base/logging.h"
#include "base/metrics/histogram.h"
+#include "base/metrics/sparse_histogram.h"
#include "base/time/time.h"
// This is for macros internal to base/metrics. They should not be used outside
@@ -99,10 +100,17 @@
// values >= boundary_value so that mistakes in calling the UMA enumeration
// macros can be detected.
#define INTERNAL_HISTOGRAM_ENUMERATION_WITH_FLAG(name, sample, boundary, flag) \
+ do { \
+ static_assert( \
+ !std::is_enum<decltype(sample)>::value || \
+ !std::is_enum<decltype(boundary)>::value || \
+ std::is_same<std::remove_const<decltype(sample)>::type, \
+ std::remove_const<decltype(boundary)>::type>::value, \
+ "|sample| and |boundary| shouldn't be of different enums"); \
STATIC_HISTOGRAM_POINTER_BLOCK( \
- name, Add(sample), \
- base::LinearHistogram::FactoryGet( \
- name, 1, boundary, boundary + 1, flag))
+ name, Add(sample), base::LinearHistogram::FactoryGet( \
+ name, 1, boundary, boundary + 1, flag)); \
+ } while (0)
// This is a helper macro used by other macros and shouldn't be used directly.
// This is necessary to expand __COUNTER__ to an actual value.
@@ -126,4 +134,16 @@
base::TimeTicks constructed_; \
} scoped_histogram_timer_##key
+// Macro for sparse histograms.
+// The implementation is more costly to add values to, and each value
+// stored has more overhead, compared to the other histogram types. However,
+// it may be more efficient in memory if the total number of sample values is
+// small compared to the range of their values.
+#define INTERNAL_HISTOGRAM_SPARSE_SLOWLY(name, sample) \
+ do { \
+ base::HistogramBase* histogram = base::SparseHistogram::FactoryGet( \
+ name, base::HistogramBase::kUmaTargetedHistogramFlag); \
+ histogram->Add(sample); \
+ } while (0)
+
#endif // BASE_METRICS_HISTOGRAM_MACROS_INTERNAL_H_
diff --git a/chromium/base/metrics/histogram_unittest.nc b/chromium/base/metrics/histogram_unittest.nc
new file mode 100644
index 00000000000..0dfe1af050a
--- /dev/null
+++ b/chromium/base/metrics/histogram_unittest.nc
@@ -0,0 +1,22 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This is a "No Compile Test" suite.
+// http://dev.chromium.org/developers/testing/no-compile-tests
+
+#include "base/metrics/histogram_macros.h"
+
+namespace base {
+
+#if defined(NCTEST_DIFFERENT_ENUM) // [r"\|sample\| and \|boundary\| shouldn't be of different enums"]
+
+void WontCompile() {
+ enum TypeA { A };
+ enum TypeB { B };
+ UMA_HISTOGRAM_ENUMERATION("", A, B);
+}
+
+#endif
+
+} // namespace base
diff --git a/chromium/base/metrics/persistent_histogram_allocator.cc b/chromium/base/metrics/persistent_histogram_allocator.cc
index ae1af09dca8..dab2564b10e 100644
--- a/chromium/base/metrics/persistent_histogram_allocator.cc
+++ b/chromium/base/metrics/persistent_histogram_allocator.cc
@@ -495,14 +495,9 @@ PersistentHistogramAllocator::GetCreateHistogramResultHistogram() {
if (!initialized) {
initialized = true;
if (g_allocator) {
-// Don't log in release-with-asserts builds, otherwise the test_installer step
-// fails because this code writes to a log file before the installer code had a
-// chance to set the log file's location.
-#if !defined(DCHECK_ALWAYS_ON)
- DLOG(WARNING) << "Creating the results-histogram inside persistent"
- << " memory can cause future allocations to crash if"
- << " that memory is ever released (for testing).";
-#endif
+ DVLOG(1) << "Creating the results-histogram inside persistent"
+ << " memory can cause future allocations to crash if"
+ << " that memory is ever released (for testing).";
}
histogram_pointer = LinearHistogram::FactoryGet(
diff --git a/chromium/base/metrics/persistent_histogram_allocator.h b/chromium/base/metrics/persistent_histogram_allocator.h
index 1ced35f3392..4c36e35c2f4 100644
--- a/chromium/base/metrics/persistent_histogram_allocator.h
+++ b/chromium/base/metrics/persistent_histogram_allocator.h
@@ -19,6 +19,7 @@
namespace base {
+class BucketRanges;
class FilePath;
class PersistentSampleMapRecords;
class PersistentSparseHistogramDataManager;
diff --git a/chromium/base/metrics/persistent_memory_allocator.cc b/chromium/base/metrics/persistent_memory_allocator.cc
index fd8a73d0ad2..2b3bf17ede2 100644
--- a/chromium/base/metrics/persistent_memory_allocator.cc
+++ b/chromium/base/metrics/persistent_memory_allocator.cc
@@ -133,7 +133,19 @@ PersistentMemoryAllocator::Iterator::Iterator(
PersistentMemoryAllocator::Iterator::Iterator(
const PersistentMemoryAllocator* allocator,
Reference starting_after)
- : allocator_(allocator), last_record_(starting_after), record_count_(0) {
+ : allocator_(allocator), last_record_(0), record_count_(0) {
+ Reset(starting_after);
+}
+
+void PersistentMemoryAllocator::Iterator::Reset() {
+ last_record_.store(kReferenceQueue, std::memory_order_relaxed);
+ record_count_.store(0, std::memory_order_relaxed);
+}
+
+void PersistentMemoryAllocator::Iterator::Reset(Reference starting_after) {
+ last_record_.store(starting_after, std::memory_order_relaxed);
+ record_count_.store(0, std::memory_order_relaxed);
+
// Ensure that the starting point is a valid, iterable block (meaning it can
// be read and has a non-zero "next" pointer).
const volatile BlockHeader* block =
@@ -145,6 +157,14 @@ PersistentMemoryAllocator::Iterator::Iterator(
}
PersistentMemoryAllocator::Reference
+PersistentMemoryAllocator::Iterator::GetLast() {
+ Reference last = last_record_.load(std::memory_order_relaxed);
+ if (last == kReferenceQueue)
+ return kReferenceNull;
+ return last;
+}
+
+PersistentMemoryAllocator::Reference
PersistentMemoryAllocator::Iterator::GetNext(uint32_t* type_return) {
// Make a copy of the existing count of found-records, acquiring all changes
// made to the allocator, notably "freeptr" (see comment in loop for why
diff --git a/chromium/base/metrics/persistent_memory_allocator.h b/chromium/base/metrics/persistent_memory_allocator.h
index 7004afecc02..ae5d2d7caf7 100644
--- a/chromium/base/metrics/persistent_memory_allocator.h
+++ b/chromium/base/metrics/persistent_memory_allocator.h
@@ -56,6 +56,11 @@ class BASE_EXPORT PersistentMemoryAllocator {
// That means that multiple threads can share an iterator and the same
// reference will not be returned twice.
//
+ // The order of the items returned by an iterator matches the order in which
+ // MakeIterable() was called on them. Once an allocation is made iterable,
+ // it remains so; the only possible difference between successive
+ // iterations is that more items may have been added to the end.
+ //
// Iteration, in general, is tolerant of corrupted memory. It will return
// what it can and stop only when corruption forces it to. Bad corruption
// could cause the same object to be returned many times but it will
@@ -76,6 +81,17 @@ class BASE_EXPORT PersistentMemoryAllocator {
Iterator(const PersistentMemoryAllocator* allocator,
Reference starting_after);
+ // Resets the iterator back to the beginning.
+ void Reset();
+
+ // Resets the iterator, resuming from the |starting_after| reference.
+ void Reset(Reference starting_after);
+
+ // Returns the previously retrieved reference, or kReferenceNull if none.
+ // If the iterator was constructed or reset with a |starting_after|
+ // reference, this will return that value.
+ Reference GetLast();
+
// Gets the next iterable, storing that type in |type_return|. The actual
// return value is a reference to the allocation inside the allocator or
// zero if there are no more. GetNext() may still be called again at a
@@ -208,6 +224,19 @@ class BASE_EXPORT PersistentMemoryAllocator {
// TIME before accessing it or risk crashing! Once dereferenced, the pointer
// is safe to reuse forever.
//
+ // IMPORTANT: If there is any possibility that this allocator will be shared
+ // across different CPU architectures (perhaps because it is being persisted
+ // to disk), then it is essential that the object be of a fixed size. All
+ // fields must be of a defined type that does not change across CPU
+ // architectures or natural word sizes (i.e. 32/64 bit). Acceptable types
+ // are char and (u)intXX_t. Unacceptable are int, bool, and wchar_t, whose
+ // sizes are implementation-defined.
+ //
+ // ALSO: Alignment must be consistent. A uint64_t after a uint32_t will pad
+ // differently between 32- and 64-bit architectures. Either put the bigger
+ // elements first, group smaller elements into blocks the size of larger
+ // elements, or manually insert padding fields as appropriate.
+ //
// NOTE: Though this method will guarantee that an object of the specified
// type can be accessed without going outside the bounds of the memory
// segment, it makes no guarantees of the validity of the data within the
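The cross-architecture guidance above is easiest to see in a concrete layout. A hypothetical record type (not part of the allocator API) that keeps a stable size and alignment on both 32-bit and 64-bit builds:

  #include <stdint.h>

  struct MyPersistentRecord {
    uint64_t timestamp_us;  // Largest-aligned member first.
    uint32_t id;            // Fixed-width types only; never int/bool/wchar_t.
    uint8_t flags;
    uint8_t padding[3];     // Explicit padding instead of compiler padding.
  };
  static_assert(sizeof(MyPersistentRecord) == 16,
                "layout must not vary across architectures");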
diff --git a/chromium/base/metrics/persistent_memory_allocator_unittest.cc b/chromium/base/metrics/persistent_memory_allocator_unittest.cc
index 6039d3e2512..977d85fd430 100644
--- a/chromium/base/metrics/persistent_memory_allocator_unittest.cc
+++ b/chromium/base/metrics/persistent_memory_allocator_unittest.cc
@@ -121,12 +121,15 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
// Ensure that the test-object can be made iterable.
PersistentMemoryAllocator::Iterator iter1a(allocator_.get());
+ EXPECT_EQ(0U, iter1a.GetLast());
uint32_t type;
EXPECT_EQ(0U, iter1a.GetNext(&type));
allocator_->MakeIterable(block1);
EXPECT_EQ(block1, iter1a.GetNext(&type));
EXPECT_EQ(1U, type);
+ EXPECT_EQ(block1, iter1a.GetLast());
EXPECT_EQ(0U, iter1a.GetNext(&type));
+ EXPECT_EQ(block1, iter1a.GetLast());
// Create second test-object and ensure everything is good and it cannot
// be confused with test-object of another type.
@@ -146,6 +149,24 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
allocator_->MakeIterable(block2);
EXPECT_EQ(block2, iter1a.GetNext(&type));
EXPECT_EQ(2U, type);
+ EXPECT_EQ(block2, iter1a.GetLast());
+ EXPECT_EQ(0U, iter1a.GetNext(&type));
+ EXPECT_EQ(block2, iter1a.GetLast());
+
+ // Check that the iterator can be reset to the beginning.
+ iter1a.Reset();
+ EXPECT_EQ(0U, iter1a.GetLast());
+ EXPECT_EQ(block1, iter1a.GetNext(&type));
+ EXPECT_EQ(block1, iter1a.GetLast());
+ EXPECT_EQ(block2, iter1a.GetNext(&type));
+ EXPECT_EQ(block2, iter1a.GetLast());
+ EXPECT_EQ(0U, iter1a.GetNext(&type));
+
+ // Check that the iterator can be reset to an arbitrary location.
+ iter1a.Reset(block1);
+ EXPECT_EQ(block1, iter1a.GetLast());
+ EXPECT_EQ(block2, iter1a.GetNext(&type));
+ EXPECT_EQ(block2, iter1a.GetLast());
EXPECT_EQ(0U, iter1a.GetNext(&type));
// Check that iteration can begin after an arbitrary location.
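Beyond the test coverage above, the GetLast()/Reset(starting_after) pair enables a suspend-and-resume pattern. A sketch, assuming |allocator| is a const PersistentMemoryAllocator*:

  uint32_t type;
  PersistentMemoryAllocator::Reference ref;
  PersistentMemoryAllocator::Iterator iter(allocator);
  while ((ref = iter.GetNext(&type)) != 0) {
    // ... process |ref| ...
  }
  // Remember how far iteration got.
  PersistentMemoryAllocator::Reference checkpoint = iter.GetLast();
  // Later: resume without revisiting earlier records.
  PersistentMemoryAllocator::Iterator resumed(allocator);
  resumed.Reset(checkpoint);
  while ((ref = resumed.GetNext(&type)) != 0) {
    // ... process only records made iterable after |checkpoint| ...
  }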
diff --git a/chromium/base/metrics/persistent_sample_map.h b/chromium/base/metrics/persistent_sample_map.h
index 3c175db5420..853f8621821 100644
--- a/chromium/base/metrics/persistent_sample_map.h
+++ b/chromium/base/metrics/persistent_sample_map.h
@@ -24,7 +24,6 @@ namespace base {
class PersistentHistogramAllocator;
class PersistentSampleMapRecords;
-class PersistentSparseHistogramDataManager;
// The logic here is similar to that of SampleMap but with different data
// structures. Changes here likely need to be duplicated there.
diff --git a/chromium/base/metrics/sparse_histogram.h b/chromium/base/metrics/sparse_histogram.h
index 3b302d6f223..97709ba18f0 100644
--- a/chromium/base/metrics/sparse_histogram.h
+++ b/chromium/base/metrics/sparse_histogram.h
@@ -13,45 +13,17 @@
#include <string>
#include "base/base_export.h"
-#include "base/compiler_specific.h"
-#include "base/logging.h"
#include "base/macros.h"
#include "base/metrics/histogram_base.h"
-#include "base/metrics/sample_map.h"
+#include "base/metrics/histogram_samples.h"
#include "base/synchronization/lock.h"
namespace base {
-// Sparse histograms are well suited for recording counts of exact sample values
-// that are sparsely distributed over a large range.
-//
-// The implementation uses a lock and a map, whereas other histogram types use a
-// vector and no lock. It is thus more costly to add values to, and each value
-// stored has more overhead, compared to the other histogram types. However it
-// may be more efficient in memory if the total number of sample values is small
-// compared to the range of their values.
-//
-// UMA_HISTOGRAM_ENUMERATION would be better suited for a smaller range of
-// enumerations that are (nearly) contiguous. Also for code that is expected to
-// run often or in a tight loop.
-//
-// UMA_HISTOGRAM_SPARSE_SLOWLY is good for sparsely distributed and or
-// infrequently recorded values.
-//
-// For instance, Sqlite.Version.* are SPARSE because for any given database,
-// there's going to be exactly one version logged, meaning no gain to having a
-// pre-allocated vector of slots once the fleet gets to version 4 or 5 or 10.
-// Likewise Sqlite.Error.* are SPARSE, because most databases generate few or no
-// errors and there are large gaps in the set of possible errors.
-#define UMA_HISTOGRAM_SPARSE_SLOWLY(name, sample) \
- do { \
- base::HistogramBase* histogram = base::SparseHistogram::FactoryGet( \
- name, base::HistogramBase::kUmaTargetedHistogramFlag); \
- histogram->Add(sample); \
- } while (0)
-
class HistogramSamples;
class PersistentHistogramAllocator;
+class Pickle;
+class PickleIterator;
class BASE_EXPORT SparseHistogram : public HistogramBase {
public:
diff --git a/chromium/base/metrics/sparse_histogram_unittest.cc b/chromium/base/metrics/sparse_histogram_unittest.cc
index eab77902765..f4a7c9495ed 100644
--- a/chromium/base/metrics/sparse_histogram_unittest.cc
+++ b/chromium/base/metrics/sparse_histogram_unittest.cc
@@ -8,6 +8,7 @@
#include <string>
#include "base/metrics/histogram_base.h"
+#include "base/metrics/histogram_macros.h"
#include "base/metrics/histogram_samples.h"
#include "base/metrics/persistent_histogram_allocator.h"
#include "base/metrics/persistent_memory_allocator.h"
diff --git a/chromium/base/metrics/statistics_recorder.h b/chromium/base/metrics/statistics_recorder.h
index c3c6aceffd7..b4dae877099 100644
--- a/chromium/base/metrics/statistics_recorder.h
+++ b/chromium/base/metrics/statistics_recorder.h
@@ -26,8 +26,6 @@
#include "base/metrics/histogram_base.h"
#include "base/strings/string_piece.h"
-class SubprocessMetricsProviderTest;
-
namespace base {
class BucketRanges;
diff --git a/chromium/base/numerics/safe_conversions.h b/chromium/base/numerics/safe_conversions.h
index 6b558afde40..4f45caf9c52 100644
--- a/chromium/base/numerics/safe_conversions.h
+++ b/chromium/base/numerics/safe_conversions.h
@@ -41,28 +41,33 @@ constexpr typename std::enable_if<!std::numeric_limits<T>::is_signed,
return false;
}
-// checked_cast<> is analogous to static_cast<> for numeric types,
-// except that it CHECKs that the specified numeric conversion will not
-// overflow or underflow. NaN source will always trigger a CHECK.
-template <typename Dst, typename Src>
-inline Dst checked_cast(Src value) {
- CHECK(IsValueInRangeForNumericType<Dst>(value));
- return static_cast<Dst>(value);
-}
-
-// HandleNaN will cause this class to CHECK(false).
-struct SaturatedCastNaNBehaviorCheck {
+// Just fires a CHECK(false). Used for numeric boundary errors.
+struct CheckOnFailure {
template <typename T>
- static T HandleNaN() {
+ static T HandleFailure() {
CHECK(false);
return T();
}
};
+// checked_cast<> is analogous to static_cast<> for numeric types,
+// except that it CHECKs that the specified numeric conversion will not
+// overflow or underflow. A NaN source will always trigger a CHECK.
+template <typename Dst,
+ class CheckHandler = CheckOnFailure,
+ typename Src>
+constexpr Dst checked_cast(Src value) {
+ // This produces a compile-time error if the constexpr evaluation can be
+ // determined at compile time to fail; otherwise it will CHECK at runtime.
+ return IsValueInRangeForNumericType<Dst>(value)
+ ? static_cast<Dst>(value)
+ : CheckHandler::template HandleFailure<Dst>();
+}
+
// HandleNaN will return 0 in this case.
struct SaturatedCastNaNBehaviorReturnZero {
template <typename T>
- static constexpr T HandleNaN() {
+ static constexpr T HandleFailure() {
return T();
}
};
@@ -80,7 +85,7 @@ constexpr Dst saturated_cast_impl(const Src value,
: (constraint == RANGE_OVERFLOW
? std::numeric_limits<Dst>::max()
: (constraint == RANGE_INVALID
- ? NaNHandler::template HandleNaN<Dst>()
+ ? NaNHandler::template HandleFailure<Dst>()
: (NOTREACHED(), static_cast<Dst>(value)))));
}
} // namespace internal
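A sketch of the reworked cast behavior (values illustrative; the commented-out line would CHECK at runtime, or fail to compile in a constant context):

  #include <stdint.h>

  #include "base/numerics/safe_conversions.h"

  void CastSketch() {
    uint8_t a = base::checked_cast<uint8_t>(200);    // Fits: a == 200.
    // uint8_t b = base::checked_cast<uint8_t>(300); // Would CHECK(false).
    uint8_t c = base::saturated_cast<uint8_t>(300);  // Clamps: c == 255.
    (void)a;
    (void)c;
  }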
diff --git a/chromium/base/numerics/safe_math.h b/chromium/base/numerics/safe_math.h
index d0003b79db0..511eb23f787 100644
--- a/chromium/base/numerics/safe_math.h
+++ b/chromium/base/numerics/safe_math.h
@@ -59,11 +59,10 @@ class CheckedNumeric {
// Copy constructor.
template <typename Src>
CheckedNumeric(const CheckedNumeric<Src>& rhs)
- : state_(rhs.ValueUnsafe(), rhs.validity()) {}
+ : state_(rhs.ValueUnsafe(), rhs.IsValid()) {}
template <typename Src>
- CheckedNumeric(Src value, RangeConstraint validity)
- : state_(value, validity) {}
+ CheckedNumeric(Src value, bool is_valid) : state_(value, is_valid) {}
// This is not an explicit constructor because we implicitly upgrade regular
// numerics to CheckedNumerics to make them easier to use.
@@ -82,7 +81,7 @@ class CheckedNumeric {
}
// IsValid() is the public API to test if a CheckedNumeric is currently valid.
- bool IsValid() const { return validity() == RANGE_VALID; }
+ bool IsValid() const { return state_.is_valid(); }
// ValueOrDie() The primary accessor for the underlying value. If the current
// state is not valid it will CHECK and crash.
@@ -106,51 +105,42 @@ class CheckedNumeric {
return CheckedNumeric<T>::cast(*this).ValueUnsafe();
}
- // validity() - DO NOT USE THIS IN EXTERNAL CODE - It is public right now for
- // tests and to avoid a big matrix of friend operator overloads. But the
- // values it returns are likely to change in the future.
- // Returns: current validity state (i.e. valid, overflow, underflow, nan).
- // TODO(jschuh): crbug.com/332611 Figure out and implement semantics for
- // saturation/wrapping so we can expose this state consistently and implement
- // saturated arithmetic.
- RangeConstraint validity() const { return state_.validity(); }
-
// ValueUnsafe() - DO NOT USE THIS IN EXTERNAL CODE - It is public right now
// for tests and to avoid a big matrix of friend operator overloads. But the
- // values it returns are likely to change in the future.
+ // values it returns are unintuitive and likely to change in the future.
// Returns: the raw numeric value, regardless of the current state.
- // TODO(jschuh): crbug.com/332611 Figure out and implement semantics for
- // saturation/wrapping so we can expose this state consistently and implement
- // saturated arithmetic.
T ValueUnsafe() const { return state_.value(); }
// Prototypes for the supported arithmetic operator overloads.
- template <typename Src> CheckedNumeric& operator+=(Src rhs);
- template <typename Src> CheckedNumeric& operator-=(Src rhs);
- template <typename Src> CheckedNumeric& operator*=(Src rhs);
- template <typename Src> CheckedNumeric& operator/=(Src rhs);
- template <typename Src> CheckedNumeric& operator%=(Src rhs);
+ template <typename Src>
+ CheckedNumeric& operator+=(Src rhs);
+ template <typename Src>
+ CheckedNumeric& operator-=(Src rhs);
+ template <typename Src>
+ CheckedNumeric& operator*=(Src rhs);
+ template <typename Src>
+ CheckedNumeric& operator/=(Src rhs);
+ template <typename Src>
+ CheckedNumeric& operator%=(Src rhs);
+ template <typename Src>
+ CheckedNumeric& operator<<=(Src rhs);
+ template <typename Src>
+ CheckedNumeric& operator>>=(Src rhs);
CheckedNumeric operator-() const {
- RangeConstraint validity;
- T value = CheckedNeg(state_.value(), &validity);
// Negation is always valid for floating point.
- if (std::numeric_limits<T>::is_iec559)
- return CheckedNumeric<T>(value);
-
- validity = GetRangeConstraint(state_.validity() | validity);
- return CheckedNumeric<T>(value, validity);
+ T value = 0;
+ bool is_valid = (std::numeric_limits<T>::is_iec559 || IsValid()) &&
+ CheckedNeg(state_.value(), &value);
+ return CheckedNumeric<T>(value, is_valid);
}
CheckedNumeric Abs() const {
- RangeConstraint validity;
- T value = CheckedAbs(state_.value(), &validity);
// Absolute value is always valid for floating point.
- if (std::numeric_limits<T>::is_iec559)
- return CheckedNumeric<T>(value);
-
- validity = GetRangeConstraint(state_.validity() | validity);
- return CheckedNumeric<T>(value, validity);
+ T value = 0;
+ bool is_valid = (std::numeric_limits<T>::is_iec559 || IsValid()) &&
+ CheckedAbs(state_.value(), &value);
+ return CheckedNumeric<T>(value, is_valid);
}
// This function is available only for integral types. It returns an unsigned
@@ -158,7 +148,7 @@ class CheckedNumeric {
// of the source, and properly handling signed min.
CheckedNumeric<typename UnsignedOrFloatForSize<T>::type> UnsignedAbs() const {
return CheckedNumeric<typename UnsignedOrFloatForSize<T>::type>(
- CheckedUnsignedAbs(state_.value()), state_.validity());
+ SafeUnsignedAbs(state_.value()), state_.is_valid());
}
CheckedNumeric& operator++() {
@@ -204,16 +194,6 @@ class CheckedNumeric {
static const CheckedNumeric<T>& cast(const CheckedNumeric<T>& u) { return u; }
private:
- template <typename NumericType>
- struct UnderlyingType {
- using type = NumericType;
- };
-
- template <typename NumericType>
- struct UnderlyingType<CheckedNumeric<NumericType>> {
- using type = NumericType;
- };
-
CheckedNumericState<T> state_;
};
@@ -224,79 +204,53 @@ class CheckedNumeric {
// * We skip range checks for floating points.
// * We skip range checks for destination integers with sufficient range.
// TODO(jschuh): extract these out into templates.
-#define BASE_NUMERIC_ARITHMETIC_OPERATORS(NAME, OP, COMPOUND_OP) \
+#define BASE_NUMERIC_ARITHMETIC_OPERATORS(NAME, OP, COMPOUND_OP, PROMOTION) \
/* Binary arithmetic operator for CheckedNumerics of the same type. */ \
- template <typename T> \
- CheckedNumeric<typename ArithmeticPromotion<T>::type> operator OP( \
- const CheckedNumeric<T>& lhs, const CheckedNumeric<T>& rhs) { \
- typedef typename ArithmeticPromotion<T>::type Promotion; \
+ template <typename L, typename R> \
+ CheckedNumeric<typename ArithmeticPromotion<PROMOTION, L, R>::type> \
+ operator OP(const CheckedNumeric<L>& lhs, const CheckedNumeric<R>& rhs) { \
+ using P = typename ArithmeticPromotion<PROMOTION, L, R>::type; \
+ if (!rhs.IsValid() || !lhs.IsValid()) \
+ return CheckedNumeric<P>(0, false); \
/* Floating point always takes the fast path */ \
- if (std::numeric_limits<T>::is_iec559) \
- return CheckedNumeric<T>(lhs.ValueUnsafe() OP rhs.ValueUnsafe()); \
- if (IsIntegerArithmeticSafe<Promotion, T, T>::value) \
- return CheckedNumeric<Promotion>( \
- lhs.ValueUnsafe() OP rhs.ValueUnsafe(), \
- GetRangeConstraint(rhs.validity() | lhs.validity())); \
- RangeConstraint validity = RANGE_VALID; \
- T result = static_cast<T>( \
- Checked##NAME(static_cast<Promotion>(lhs.ValueUnsafe()), \
- static_cast<Promotion>(rhs.ValueUnsafe()), &validity)); \
- return CheckedNumeric<Promotion>( \
- result, \
- GetRangeConstraint(validity | lhs.validity() | rhs.validity())); \
+ if (std::is_floating_point<L>::value || std::is_floating_point<R>::value) \
+ return CheckedNumeric<P>(lhs.ValueUnsafe() OP rhs.ValueUnsafe()); \
+ P result = 0; \
+ bool is_valid = \
+ Checked##NAME(lhs.ValueUnsafe(), rhs.ValueUnsafe(), &result); \
+ return CheckedNumeric<P>(result, is_valid); \
} \
/* Assignment arithmetic operator implementation from CheckedNumeric. */ \
- template <typename T> \
- template <typename Src> \
- CheckedNumeric<T>& CheckedNumeric<T>::operator COMPOUND_OP(Src rhs) { \
- *this = CheckedNumeric<T>::cast(*this) \
- OP CheckedNumeric<typename UnderlyingType<Src>::type>::cast(rhs); \
+ template <typename L> \
+ template <typename R> \
+ CheckedNumeric<L>& CheckedNumeric<L>::operator COMPOUND_OP(R rhs) { \
+ *this = *this OP rhs; \
return *this; \
} \
- /* Binary arithmetic operator for CheckedNumeric of different type. */ \
- template <typename T, typename Src> \
- CheckedNumeric<typename ArithmeticPromotion<T, Src>::type> operator OP( \
- const CheckedNumeric<Src>& lhs, const CheckedNumeric<T>& rhs) { \
- typedef typename ArithmeticPromotion<T, Src>::type Promotion; \
- if (IsIntegerArithmeticSafe<Promotion, T, Src>::value) \
- return CheckedNumeric<Promotion>( \
- lhs.ValueUnsafe() OP rhs.ValueUnsafe(), \
- GetRangeConstraint(rhs.validity() | lhs.validity())); \
- return CheckedNumeric<Promotion>::cast(lhs) \
- OP CheckedNumeric<Promotion>::cast(rhs); \
- } \
/* Binary arithmetic operator for left CheckedNumeric and right numeric. */ \
- template <typename T, typename Src, \
- typename std::enable_if<std::is_arithmetic<Src>::value>::type* = \
+ template <typename L, typename R, \
+ typename std::enable_if<std::is_arithmetic<R>::value>::type* = \
nullptr> \
- CheckedNumeric<typename ArithmeticPromotion<T, Src>::type> operator OP( \
- const CheckedNumeric<T>& lhs, Src rhs) { \
- typedef typename ArithmeticPromotion<T, Src>::type Promotion; \
- if (IsIntegerArithmeticSafe<Promotion, T, Src>::value) \
- return CheckedNumeric<Promotion>(lhs.ValueUnsafe() OP rhs, \
- lhs.validity()); \
- return CheckedNumeric<Promotion>::cast(lhs) \
- OP CheckedNumeric<Promotion>::cast(rhs); \
+ CheckedNumeric<typename ArithmeticPromotion<PROMOTION, L, R>::type> \
+ operator OP(const CheckedNumeric<L>& lhs, R rhs) { \
+ return lhs OP CheckedNumeric<R>(rhs); \
} \
/* Binary arithmetic operator for left numeric and right CheckedNumeric. */ \
- template <typename T, typename Src, \
- typename std::enable_if<std::is_arithmetic<Src>::value>::type* = \
+ template <typename L, typename R, \
+ typename std::enable_if<std::is_arithmetic<L>::value>::type* = \
nullptr> \
- CheckedNumeric<typename ArithmeticPromotion<T, Src>::type> operator OP( \
- Src lhs, const CheckedNumeric<T>& rhs) { \
- typedef typename ArithmeticPromotion<T, Src>::type Promotion; \
- if (IsIntegerArithmeticSafe<Promotion, T, Src>::value) \
- return CheckedNumeric<Promotion>(lhs OP rhs.ValueUnsafe(), \
- rhs.validity()); \
- return CheckedNumeric<Promotion>::cast(lhs) \
- OP CheckedNumeric<Promotion>::cast(rhs); \
+ CheckedNumeric<typename ArithmeticPromotion<PROMOTION, L, R>::type> \
+ operator OP(L lhs, const CheckedNumeric<R>& rhs) { \
+ return CheckedNumeric<L>(lhs) OP rhs; \
}
-BASE_NUMERIC_ARITHMETIC_OPERATORS(Add, +, += )
-BASE_NUMERIC_ARITHMETIC_OPERATORS(Sub, -, -= )
-BASE_NUMERIC_ARITHMETIC_OPERATORS(Mul, *, *= )
-BASE_NUMERIC_ARITHMETIC_OPERATORS(Div, /, /= )
-BASE_NUMERIC_ARITHMETIC_OPERATORS(Mod, %, %= )
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Add, +, +=, MAX_EXPONENT_PROMOTION)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Sub, -, -=, MAX_EXPONENT_PROMOTION)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Mul, *, *=, MAX_EXPONENT_PROMOTION)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Div, /, /=, MAX_EXPONENT_PROMOTION)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Mod, %, %=, MAX_EXPONENT_PROMOTION)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(LeftShift, <<, <<=, LEFT_PROMOTION)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(RightShift, >>, >>=, LEFT_PROMOTION)
#undef BASE_NUMERIC_ARITHMETIC_OPERATORS
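A sketch of what the regenerated operator set permits, including the new shift operators and mixed-type operands (values illustrative):

  #include <stdint.h>

  #include "base/numerics/safe_math.h"

  void CheckedSketch() {
    base::CheckedNumeric<int32_t> value = 1;
    value <<= 30;  // Still valid.
    value <<= 2;   // 2^32 overflows int32_t; IsValid() is now false.
    int32_t out = 0;
    if (value.IsValid())
      out = value.ValueOrDie();  // Not reached: the shift overflowed.
    // Mixed-type operands promote to the type with the larger exponent.
    base::CheckedNumeric<int32_t> small = 100;
    auto wide = small + int64_t{1};  // CheckedNumeric<int64_t>, valid.
    (void)out;
    (void)wide;
  }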
diff --git a/chromium/base/numerics/safe_math_impl.h b/chromium/base/numerics/safe_math_impl.h
index 94ae89494d4..71fc278b297 100644
--- a/chromium/base/numerics/safe_math_impl.h
+++ b/chromium/base/numerics/safe_math_impl.h
@@ -95,7 +95,7 @@ struct PositionOfSignBit {
// This is used for UnsignedAbs, where we need to support floating-point
// template instantiations even though we don't actually support the operations.
-// However, there is no corresponding implementation of e.g. CheckedUnsignedAbs,
+// However, there is no corresponding implementation of e.g. SafeUnsignedAbs,
// so the float versions will not compile.
template <typename Numeric,
bool IsInteger = std::numeric_limits<Numeric>::is_integer,
@@ -127,56 +127,222 @@ constexpr T BinaryComplement(T x) {
return static_cast<T>(~x);
}
+// Returns whether a numeric value is negative, regardless of type.
+template <typename T,
+ typename std::enable_if<std::is_arithmetic<T>::value &&
+ std::is_signed<T>::value>::type* = nullptr>
+constexpr bool IsNegative(T x) {
+ return x < 0;
+}
+
+template <typename T,
+ typename std::enable_if<std::is_arithmetic<T>::value &&
+ !std::is_signed<T>::value>::type* = nullptr>
+constexpr bool IsNegative(T x) {
+ return false;
+}
+
+enum ArithmeticPromotionCategory {
+ LEFT_PROMOTION, // Use the type of the left-hand argument.
+ RIGHT_PROMOTION, // Use the type of the right-hand argument.
+ MAX_EXPONENT_PROMOTION, // Use the type supporting the largest exponent.
+ BIG_ENOUGH_PROMOTION // Attempt to find a big enough type.
+};
+
+template <ArithmeticPromotionCategory Promotion,
+ typename Lhs,
+ typename Rhs = Lhs>
+struct ArithmeticPromotion;
+
+template <typename Lhs,
+ typename Rhs,
+ ArithmeticPromotionCategory Promotion =
+ (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value)
+ ? LEFT_PROMOTION
+ : RIGHT_PROMOTION>
+struct MaxExponentPromotion;
+
+template <typename Lhs, typename Rhs>
+struct MaxExponentPromotion<Lhs, Rhs, LEFT_PROMOTION> {
+ using type = Lhs;
+};
+
+template <typename Lhs, typename Rhs>
+struct MaxExponentPromotion<Lhs, Rhs, RIGHT_PROMOTION> {
+ using type = Rhs;
+};
+
+template <typename Lhs,
+ typename Rhs = Lhs,
+ bool is_intmax_type =
+ std::is_integral<
+ typename MaxExponentPromotion<Lhs, Rhs>::type>::value &&
+ sizeof(typename MaxExponentPromotion<Lhs, Rhs>::type) ==
+ sizeof(intmax_t),
+ bool is_max_exponent =
+ StaticDstRangeRelationToSrcRange<
+ typename MaxExponentPromotion<Lhs, Rhs>::type,
+ Lhs>::value ==
+ NUMERIC_RANGE_CONTAINED&& StaticDstRangeRelationToSrcRange<
+ typename MaxExponentPromotion<Lhs, Rhs>::type,
+ Rhs>::value == NUMERIC_RANGE_CONTAINED>
+struct BigEnoughPromotion;
+
+// The side with the max exponent is big enough.
+template <typename Lhs, typename Rhs, bool is_intmax_type>
+struct BigEnoughPromotion<Lhs, Rhs, is_intmax_type, true> {
+ using type = typename MaxExponentPromotion<Lhs, Rhs>::type;
+ static const bool is_contained = true;
+};
+
+// We can use a type twice as wide to fit.
+template <typename Lhs, typename Rhs>
+struct BigEnoughPromotion<Lhs, Rhs, false, false> {
+ using type = typename IntegerForSizeAndSign<
+ sizeof(typename MaxExponentPromotion<Lhs, Rhs>::type) * 2,
+ std::is_signed<Lhs>::value || std::is_signed<Rhs>::value>::type;
+ static const bool is_contained = true;
+};
+
+// No type is large enough.
+template <typename Lhs, typename Rhs>
+struct BigEnoughPromotion<Lhs, Rhs, true, false> {
+ using type = typename MaxExponentPromotion<Lhs, Rhs>::type;
+ static const bool is_contained = false;
+};
+
+// These are the four supported promotion types.
+
+// Use the type of the left-hand argument.
+template <typename Lhs, typename Rhs>
+struct ArithmeticPromotion<LEFT_PROMOTION, Lhs, Rhs> {
+ using type = Lhs;
+ static const bool is_contained = true;
+};
+
+// Use the type of the right-hand argument.
+template <typename Lhs, typename Rhs>
+struct ArithmeticPromotion<RIGHT_PROMOTION, Lhs, Rhs> {
+ using type = Rhs;
+ static const bool is_contained = true;
+};
+
+// Use the type supporting the largest exponent.
+template <typename Lhs, typename Rhs>
+struct ArithmeticPromotion<MAX_EXPONENT_PROMOTION, Lhs, Rhs> {
+ using type = typename MaxExponentPromotion<Lhs, Rhs>::type;
+ static const bool is_contained = true;
+};
+
+// Attempt to find a big enough type.
+template <typename Lhs, typename Rhs>
+struct ArithmeticPromotion<BIG_ENOUGH_PROMOTION, Lhs, Rhs> {
+ using type = typename BigEnoughPromotion<Lhs, Rhs>::type;
+ static const bool is_contained = BigEnoughPromotion<Lhs, Rhs>::is_contained;
+};
+
+// We can statically check if operations on the provided types can wrap, so we
+// can skip the checked operations if they're not needed. So, for an integer we
+// care whether the destination type preserves the sign and is at least twice
+// the width of the source.
+template <typename T, typename Lhs, typename Rhs>
+struct IsIntegerArithmeticSafe {
+ static const bool value = !std::numeric_limits<T>::is_iec559 &&
+ StaticDstRangeRelationToSrcRange<T, Lhs>::value ==
+ NUMERIC_RANGE_CONTAINED &&
+ sizeof(T) >= (2 * sizeof(Lhs)) &&
+ StaticDstRangeRelationToSrcRange<T, Rhs>::value ==
+ NUMERIC_RANGE_CONTAINED &&
+ sizeof(T) >= (2 * sizeof(Rhs));
+};
+
// Here are the actual portable checked integer math implementations.
// TODO(jschuh): Break this code out from the enable_if pattern and find a clean
// way to coalesce things into the CheckedNumericState specializations below.
template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_integer, T>::type
-CheckedAdd(T x, T y, RangeConstraint* validity) {
+typename std::enable_if<std::numeric_limits<T>::is_integer, bool>::type
+CheckedAddImpl(T x, T y, T* result) {
// Since the value of x+y is undefined if we have a signed type, we compute
// it using the unsigned type of the same size.
typedef typename UnsignedIntegerForSize<T>::type UnsignedDst;
UnsignedDst ux = static_cast<UnsignedDst>(x);
UnsignedDst uy = static_cast<UnsignedDst>(y);
UnsignedDst uresult = static_cast<UnsignedDst>(ux + uy);
+ *result = static_cast<T>(uresult);
// Addition is valid if the sign of (x + y) is equal to either that of x or
// that of y.
- if (std::numeric_limits<T>::is_signed) {
- if (HasSignBit(BinaryComplement(
- static_cast<UnsignedDst>((uresult ^ ux) & (uresult ^ uy))))) {
- *validity = RANGE_VALID;
- } else { // Direction of wrap is inverse of result sign.
- *validity = HasSignBit(uresult) ? RANGE_OVERFLOW : RANGE_UNDERFLOW;
- }
- } else { // Unsigned is either valid or overflow.
- *validity = BinaryComplement(x) >= y ? RANGE_VALID : RANGE_OVERFLOW;
+ return (std::numeric_limits<T>::is_signed)
+ ? HasSignBit(BinaryComplement(
+ static_cast<UnsignedDst>((uresult ^ ux) & (uresult ^ uy))))
+ : (BinaryComplement(x) >=
+ y); // Unsigned is either valid or overflow.
+}
+
+template <typename T, typename U, typename V>
+typename std::enable_if<std::numeric_limits<T>::is_integer &&
+ std::numeric_limits<U>::is_integer &&
+ std::numeric_limits<V>::is_integer,
+ bool>::type
+CheckedAdd(T x, U y, V* result) {
+ using Promotion =
+ typename ArithmeticPromotion<BIG_ENOUGH_PROMOTION, T, U>::type;
+ Promotion presult;
+ // Fail if either operand is out of range for the promoted type.
+ // TODO(jschuh): This could be made to work for a broader range of values.
+ bool is_valid = IsValueInRangeForNumericType<Promotion>(x) &&
+ IsValueInRangeForNumericType<Promotion>(y);
+
+ if (IsIntegerArithmeticSafe<Promotion, T, U>::value) {
+ presult = static_cast<Promotion>(x) + static_cast<Promotion>(y);
+ } else {
+ is_valid &= CheckedAddImpl(static_cast<Promotion>(x),
+ static_cast<Promotion>(y), &presult);
}
- return static_cast<T>(uresult);
+ *result = static_cast<V>(presult);
+ return is_valid && IsValueInRangeForNumericType<V>(presult);
}
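Call sites of the reworked primitives now read like the sketch below: validity comes back as the return value and the (possibly narrower) result goes through the out-param. Types and values here are illustrative:

  int32_t sum = 0;
  // int64_t and int are promoted internally (BIG_ENOUGH_PROMOTION); the
  // return value reports whether |sum| holds the true mathematical result.
  if (base::internal::CheckedAdd(int64_t{1} << 40, 5, &sum)) {
    // Not reached: 2^40 + 5 does not fit in an int32_t.
  }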
template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_integer, T>::type
-CheckedSub(T x, T y, RangeConstraint* validity) {
+typename std::enable_if<std::numeric_limits<T>::is_integer, bool>::type
+CheckedSubImpl(T x, T y, T* result) {
// Since the value of x-y is undefined if we have a signed type, we compute
// it using the unsigned type of the same size.
typedef typename UnsignedIntegerForSize<T>::type UnsignedDst;
UnsignedDst ux = static_cast<UnsignedDst>(x);
UnsignedDst uy = static_cast<UnsignedDst>(y);
UnsignedDst uresult = static_cast<UnsignedDst>(ux - uy);
+ *result = static_cast<T>(uresult);
// Subtraction is valid if either x and y have same sign, or (x-y) and x have
// the same sign.
- if (std::numeric_limits<T>::is_signed) {
- if (HasSignBit(BinaryComplement(
- static_cast<UnsignedDst>((uresult ^ ux) & (ux ^ uy))))) {
- *validity = RANGE_VALID;
- } else { // Direction of wrap is inverse of result sign.
- *validity = HasSignBit(uresult) ? RANGE_OVERFLOW : RANGE_UNDERFLOW;
- }
- } else { // Unsigned is either valid or underflow.
- *validity = x >= y ? RANGE_VALID : RANGE_UNDERFLOW;
+ return (std::numeric_limits<T>::is_signed)
+ ? HasSignBit(BinaryComplement(
+ static_cast<UnsignedDst>((uresult ^ ux) & (ux ^ uy))))
+ : (x >= y);
+}
+
+template <typename T, typename U, typename V>
+typename std::enable_if<std::numeric_limits<T>::is_integer &&
+ std::numeric_limits<U>::is_integer &&
+ std::numeric_limits<V>::is_integer,
+ bool>::type
+CheckedSub(T x, U y, V* result) {
+ using Promotion =
+ typename ArithmeticPromotion<BIG_ENOUGH_PROMOTION, T, U>::type;
+ Promotion presult;
+ // Fail if either operand is out of range for the promoted type.
+ // TODO(jschuh): This could be made to work for a broader range of values.
+ bool is_valid = IsValueInRangeForNumericType<Promotion>(x) &&
+ IsValueInRangeForNumericType<Promotion>(y);
+
+ if (IsIntegerArithmeticSafe<Promotion, T, U>::value) {
+ presult = static_cast<Promotion>(x) - static_cast<Promotion>(y);
+ } else {
+ is_valid &= CheckedSubImpl(static_cast<Promotion>(x),
+ static_cast<Promotion>(y), &presult);
}
- return static_cast<T>(uresult);
+ *result = static_cast<V>(presult);
+ return is_valid && IsValueInRangeForNumericType<V>(presult);
}
// Integer multiplication is a bit complicated. In the fast case we just
@@ -186,140 +352,243 @@ CheckedSub(T x, T y, RangeConstraint* validity) {
template <typename T>
typename std::enable_if<std::numeric_limits<T>::is_integer &&
sizeof(T) * 2 <= sizeof(uintmax_t),
- T>::type
-CheckedMul(T x, T y, RangeConstraint* validity) {
+ bool>::type
+CheckedMulImpl(T x, T y, T* result) {
typedef typename TwiceWiderInteger<T>::type IntermediateType;
IntermediateType tmp =
static_cast<IntermediateType>(x) * static_cast<IntermediateType>(y);
- *validity = DstRangeRelationToSrcRange<T>(tmp);
- return static_cast<T>(tmp);
+ *result = static_cast<T>(tmp);
+ return DstRangeRelationToSrcRange<T>(tmp) == RANGE_VALID;
}
template <typename T>
typename std::enable_if<std::numeric_limits<T>::is_integer &&
std::numeric_limits<T>::is_signed &&
(sizeof(T) * 2 > sizeof(uintmax_t)),
- T>::type
-CheckedMul(T x, T y, RangeConstraint* validity) {
- // If either side is zero then the result will be zero.
- if (!x || !y) {
- *validity = RANGE_VALID;
- return static_cast<T>(0);
-
- } else if (x > 0) {
- if (y > 0)
- *validity =
- x <= std::numeric_limits<T>::max() / y ? RANGE_VALID : RANGE_OVERFLOW;
- else
- *validity = y >= std::numeric_limits<T>::min() / x ? RANGE_VALID
- : RANGE_UNDERFLOW;
-
- } else {
- if (y > 0)
- *validity = x >= std::numeric_limits<T>::min() / y ? RANGE_VALID
- : RANGE_UNDERFLOW;
- else
- *validity =
- y >= std::numeric_limits<T>::max() / x ? RANGE_VALID : RANGE_OVERFLOW;
+ bool>::type
+CheckedMulImpl(T x, T y, T* result) {
+ if (x && y) {
+ if (x > 0) {
+ if (y > 0) {
+ if (x > std::numeric_limits<T>::max() / y)
+ return false;
+ } else {
+ if (y < std::numeric_limits<T>::min() / x)
+ return false;
+ }
+ } else {
+ if (y > 0) {
+ if (x < std::numeric_limits<T>::min() / y)
+ return false;
+ } else {
+ if (y < std::numeric_limits<T>::max() / x)
+ return false;
+ }
+ }
}
-
- return static_cast<T>(*validity == RANGE_VALID ? x * y : 0);
+ *result = x * y;
+ return true;
}
template <typename T>
typename std::enable_if<std::numeric_limits<T>::is_integer &&
!std::numeric_limits<T>::is_signed &&
(sizeof(T) * 2 > sizeof(uintmax_t)),
- T>::type
-CheckedMul(T x, T y, RangeConstraint* validity) {
- *validity = (y == 0 || x <= std::numeric_limits<T>::max() / y)
- ? RANGE_VALID
- : RANGE_OVERFLOW;
- return static_cast<T>(*validity == RANGE_VALID ? x * y : 0);
+ bool>::type
+CheckedMulImpl(T x, T y, T* result) {
+ *result = x * y;
+ return (y == 0 || x <= std::numeric_limits<T>::max() / y);
+}
+
+template <typename T, typename U, typename V>
+typename std::enable_if<std::numeric_limits<T>::is_integer &&
+ std::numeric_limits<U>::is_integer &&
+ std::numeric_limits<V>::is_integer,
+ bool>::type
+CheckedMul(T x, U y, V* result) {
+ using Promotion =
+ typename ArithmeticPromotion<BIG_ENOUGH_PROMOTION, T, U>::type;
+ Promotion presult;
+ // Fail if either operand is out of range for the promoted type.
+ // TODO(jschuh): This could be made to work for a broader range of values.
+ bool is_valid = IsValueInRangeForNumericType<Promotion>(x) &&
+ IsValueInRangeForNumericType<Promotion>(y);
+
+ if (IsIntegerArithmeticSafe<Promotion, T, U>::value) {
+ presult = static_cast<Promotion>(x) * static_cast<Promotion>(y);
+ } else {
+ is_valid &= CheckedMulImpl(static_cast<Promotion>(x),
+ static_cast<Promotion>(y), &presult);
+ }
+ *result = static_cast<V>(presult);
+ return is_valid && IsValueInRangeForNumericType<V>(presult);
}
-// Division just requires a check for an invalid negation on signed min/-1.
+// Division just requires a check for a zero denominator or an invalid negation
+// on signed min/-1.
template <typename T>
-T CheckedDiv(T x,
- T y,
- RangeConstraint* validity,
- typename std::enable_if<std::numeric_limits<T>::is_integer,
- int>::type = 0) {
- if (std::numeric_limits<T>::is_signed && x == std::numeric_limits<T>::min() &&
- y == static_cast<T>(-1)) {
- *validity = RANGE_OVERFLOW;
- return std::numeric_limits<T>::min();
+typename std::enable_if<std::numeric_limits<T>::is_integer, bool>::type
+CheckedDivImpl(T x, T y, T* result) {
+ if (y && (!std::numeric_limits<T>::is_signed ||
+ x != std::numeric_limits<T>::min() || y != static_cast<T>(-1))) {
+ *result = x / y;
+ return true;
}
+ return false;
+}
- *validity = RANGE_VALID;
- return static_cast<T>(x / y);
+template <typename T, typename U, typename V>
+typename std::enable_if<std::numeric_limits<T>::is_integer &&
+ std::numeric_limits<U>::is_integer &&
+ std::numeric_limits<V>::is_integer,
+ bool>::type
+CheckedDiv(T x, U y, V* result) {
+ using Promotion =
+ typename ArithmeticPromotion<BIG_ENOUGH_PROMOTION, T, U>::type;
+ Promotion presult;
+ // Fail if either operand is out of range for the promoted type.
+ // TODO(jschuh): This could be made to work for a broader range of values.
+ bool is_valid = IsValueInRangeForNumericType<Promotion>(x) &&
+ IsValueInRangeForNumericType<Promotion>(y);
+ is_valid &= CheckedDivImpl(static_cast<Promotion>(x),
+ static_cast<Promotion>(y), &presult);
+ *result = static_cast<V>(presult);
+ return is_valid && IsValueInRangeForNumericType<V>(presult);
}
template <typename T>
typename std::enable_if<std::numeric_limits<T>::is_integer &&
std::numeric_limits<T>::is_signed,
- T>::type
-CheckedMod(T x, T y, RangeConstraint* validity) {
- *validity = y > 0 ? RANGE_VALID : RANGE_INVALID;
- return static_cast<T>(*validity == RANGE_VALID ? x % y: 0);
+ bool>::type
+CheckedModImpl(T x, T y, T* result) {
+ if (y > 0) {
+ *result = static_cast<T>(x % y);
+ return true;
+ }
+ return false;
}
template <typename T>
typename std::enable_if<std::numeric_limits<T>::is_integer &&
!std::numeric_limits<T>::is_signed,
- T>::type
-CheckedMod(T x, T y, RangeConstraint* validity) {
- *validity = RANGE_VALID;
- return static_cast<T>(x % y);
+ bool>::type
+CheckedModImpl(T x, T y, T* result) {
+ if (y != 0) {
+ *result = static_cast<T>(x % y);
+ return true;
+ }
+ return false;
+}
+
+template <typename T, typename U, typename V>
+typename std::enable_if<std::numeric_limits<T>::is_integer &&
+ std::numeric_limits<U>::is_integer &&
+ std::numeric_limits<V>::is_integer,
+ bool>::type
+CheckedMod(T x, U y, V* result) {
+ using Promotion =
+ typename ArithmeticPromotion<BIG_ENOUGH_PROMOTION, T, U>::type;
+ Promotion presult;
+ bool is_valid = CheckedModImpl(static_cast<Promotion>(x),
+ static_cast<Promotion>(y), &presult);
+ *result = static_cast<V>(presult);
+ return is_valid && IsValueInRangeForNumericType<V>(presult);
+}
+
+// Left shift. Shifts less than 0 or greater than or equal to the number
+// of bits in the promoted type are undefined. Shifts of negative values
+// are undefined. Otherwise, the shift is defined when the result fits.
+template <typename T, typename U, typename V>
+typename std::enable_if<std::numeric_limits<T>::is_integer &&
+ std::numeric_limits<U>::is_integer &&
+ std::numeric_limits<V>::is_integer,
+ bool>::type
+CheckedLeftShift(T x, U shift, V* result) {
+ using ShiftType = typename UnsignedIntegerForSize<T>::type;
+ static const ShiftType kBitWidth = CHAR_BIT * sizeof(T);
+ const ShiftType real_shift = static_cast<ShiftType>(shift);
+ // Signed shift is not legal on negative values.
+ if (!IsNegative(x) && real_shift < kBitWidth) {
+ // Just use a multiplication because it's easy.
+ // TODO(jschuh): This could probably be made more efficient.
+ if (!std::is_signed<T>::value || real_shift != kBitWidth - 1)
+ return CheckedMul(x, static_cast<T>(1) << shift, result);
+ return !x; // Special case zero for a full width signed shift.
+ }
+ return false;
+}
+
+// Right shift. Shifts less than 0 or greater than or equal to the number
+// of bits in the promoted type are undefined. Otherwise, it is always defined,
+// but a right shift of a negative value is implementation-dependent.
+template <typename T, typename U, typename V>
+typename std::enable_if<std::numeric_limits<T>::is_integer &&
+ std::numeric_limits<U>::is_integer &&
+ std::numeric_limits<V>::is_integer,
+ bool>::type
+CheckedRightShift(T x, U shift, V* result) {
+ // Use the type conversion to push negative values out of range.
+ using ShiftType = typename UnsignedIntegerForSize<T>::type;
+ if (static_cast<ShiftType>(shift) < (CHAR_BIT * sizeof(T))) {
+ T tmp = x >> shift;
+ *result = static_cast<V>(tmp);
+ return IsValueInRangeForNumericType<V>(tmp);
+ }
+ return false;
}
template <typename T>
typename std::enable_if<std::numeric_limits<T>::is_integer &&
std::numeric_limits<T>::is_signed,
- T>::type
-CheckedNeg(T value, RangeConstraint* validity) {
- *validity =
- value != std::numeric_limits<T>::min() ? RANGE_VALID : RANGE_OVERFLOW;
+ bool>::type
+CheckedNeg(T value, T* result) {
// The negation of signed min is min, so catch that one.
- return static_cast<T>(*validity == RANGE_VALID ? -value : 0);
+ if (value != std::numeric_limits<T>::min()) {
+ *result = static_cast<T>(-value);
+ return true;
+ }
+ return false;
}
template <typename T>
typename std::enable_if<std::numeric_limits<T>::is_integer &&
!std::numeric_limits<T>::is_signed,
- T>::type
-CheckedNeg(T value, RangeConstraint* validity) {
- // The only legal unsigned negation is zero.
- *validity = value ? RANGE_UNDERFLOW : RANGE_VALID;
- return static_cast<T>(*validity == RANGE_VALID ?
- -static_cast<typename SignedIntegerForSize<T>::type>(value) : 0);
+ bool>::type
+CheckedNeg(T value, T* result) {
+ if (!value) { // The only legal unsigned negation is zero.
+ *result = static_cast<T>(0);
+ return true;
+ }
+ return false;
}
template <typename T>
typename std::enable_if<std::numeric_limits<T>::is_integer &&
std::numeric_limits<T>::is_signed,
- T>::type
-CheckedAbs(T value, RangeConstraint* validity) {
- *validity =
- value != std::numeric_limits<T>::min() ? RANGE_VALID : RANGE_OVERFLOW;
- return static_cast<T>(*validity == RANGE_VALID ? std::abs(value) : 0);
+ bool>::type
+CheckedAbs(T value, T* result) {
+ if (value != std::numeric_limits<T>::min()) {
+ *result = std::abs(value);
+ return true;
+ }
+ return false;
}
template <typename T>
typename std::enable_if<std::numeric_limits<T>::is_integer &&
!std::numeric_limits<T>::is_signed,
- T>::type
-CheckedAbs(T value, RangeConstraint* validity) {
+ bool>::type
+CheckedAbs(T value, T* result) {
// T is unsigned, so |value| must already be positive.
- *validity = RANGE_VALID;
- return value;
+ *result = value;
+ return true;
}
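The single value these helpers must reject deserves a concrete sketch (hypothetical helper; int8_t chosen for readability):

  #include <cstdint>
  #include <limits>

  bool CheckedNegSketch(int8_t value, int8_t* result) {
    // -(-128) == +128, which is not representable in int8_t.
    if (value == std::numeric_limits<int8_t>::min())
      return false;
    *result = static_cast<int8_t>(-value);
    return true;
  }

  // int8_t r;
  // CheckedNegSketch(5, &r);    // true, r == -5
  // CheckedNegSketch(-128, &r); // false, r is untouched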
template <typename T>
typename std::enable_if<std::numeric_limits<T>::is_integer &&
std::numeric_limits<T>::is_signed,
typename UnsignedIntegerForSize<T>::type>::type
-CheckedUnsignedAbs(T value) {
+SafeUnsignedAbs(T value) {
typedef typename UnsignedIntegerForSize<T>::type UnsignedT;
return value == std::numeric_limits<T>::min()
? static_cast<UnsignedT>(std::numeric_limits<T>::max()) + 1
@@ -330,19 +599,36 @@ template <typename T>
typename std::enable_if<std::numeric_limits<T>::is_integer &&
!std::numeric_limits<T>::is_signed,
T>::type
-CheckedUnsignedAbs(T value) {
+SafeUnsignedAbs(T value) {
// T is unsigned, so |value| must already be positive.
return static_cast<T>(value);
}
-// These are the floating point stubs that the compiler needs to see. Only the
-// negation operation is ever called.
-#define BASE_FLOAT_ARITHMETIC_STUBS(NAME) \
- template <typename T> \
- typename std::enable_if<std::numeric_limits<T>::is_iec559, T>::type \
- Checked##NAME(T, T, RangeConstraint*) { \
- NOTREACHED(); \
- return static_cast<T>(0); \
+template <typename T>
+typename std::enable_if<std::numeric_limits<T>::is_iec559, T>::type CheckedNeg(
+ T value,
+ bool*) {
+ NOTREACHED();
+ return static_cast<T>(-value);
+}
+
+template <typename T>
+typename std::enable_if<std::numeric_limits<T>::is_iec559, T>::type CheckedAbs(
+ T value,
+ bool*) {
+ NOTREACHED();
+ return static_cast<T>(std::abs(value));
+}
+
+// These are the floating point stubs that the compiler needs to see.
+#define BASE_FLOAT_ARITHMETIC_STUBS(NAME) \
+ template <typename T, typename U, typename V> \
+ typename std::enable_if<std::numeric_limits<T>::is_iec559 || \
+ std::numeric_limits<U>::is_iec559 || \
+ std::numeric_limits<V>::is_iec559, \
+ bool>::type Checked##NAME(T, U, V*) { \
+ NOTREACHED(); \
+ return static_cast<T>(false); \
}
BASE_FLOAT_ARITHMETIC_STUBS(Add)
@@ -354,17 +640,17 @@ BASE_FLOAT_ARITHMETIC_STUBS(Mod)
#undef BASE_FLOAT_ARITHMETIC_STUBS
template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_iec559, T>::type CheckedNeg(
- T value,
- RangeConstraint*) {
- return static_cast<T>(-value);
+typename std::enable_if<std::numeric_limits<T>::is_iec559, bool>::type
+CheckedNeg(T value, T* result) {
+ *result = static_cast<T>(-value);
+ return true;
}
template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_iec559, T>::type CheckedAbs(
- T value,
- RangeConstraint*) {
- return static_cast<T>(std::abs(value));
+typename std::enable_if<std::numeric_limits<T>::is_iec559, bool>::type
+CheckedAbs(T value, T* result) {
+ *result = static_cast<T>(std::abs(value));
+ return true;
}
// Floats carry around their validity state with them, but integers do not. So,
@@ -394,19 +680,19 @@ template <typename T>
class CheckedNumericState<T, NUMERIC_INTEGER> {
private:
T value_;
- RangeConstraint validity_ : CHAR_BIT; // Actually requires only two bits.
+ bool is_valid_;
public:
template <typename Src, NumericRepresentation type>
friend class CheckedNumericState;
- CheckedNumericState() : value_(0), validity_(RANGE_VALID) {}
+ CheckedNumericState() : value_(0), is_valid_(true) {}
template <typename Src>
- CheckedNumericState(Src value, RangeConstraint validity)
+ CheckedNumericState(Src value, bool is_valid)
: value_(static_cast<T>(value)),
- validity_(GetRangeConstraint(validity |
- DstRangeRelationToSrcRange<T>(value))) {
+ is_valid_(is_valid &&
+ (DstRangeRelationToSrcRange<T>(value) == RANGE_VALID)) {
static_assert(std::numeric_limits<Src>::is_specialized,
"Argument must be numeric.");
}
@@ -414,9 +700,7 @@ class CheckedNumericState<T, NUMERIC_INTEGER> {
// Copy constructor.
template <typename Src>
CheckedNumericState(const CheckedNumericState<Src>& rhs)
- : value_(static_cast<T>(rhs.value())),
- validity_(GetRangeConstraint(
- rhs.validity() | DstRangeRelationToSrcRange<T>(rhs.value()))) {}
+ : value_(static_cast<T>(rhs.value())), is_valid_(rhs.IsValid()) {}
template <typename Src>
explicit CheckedNumericState(
@@ -424,9 +708,9 @@ class CheckedNumericState<T, NUMERIC_INTEGER> {
typename std::enable_if<std::numeric_limits<Src>::is_specialized,
int>::type = 0)
: value_(static_cast<T>(value)),
- validity_(DstRangeRelationToSrcRange<T>(value)) {}
+ is_valid_(DstRangeRelationToSrcRange<T>(value) == RANGE_VALID) {}
- RangeConstraint validity() const { return validity_; }
+ bool is_valid() const { return is_valid_; }
T value() const { return value_; }
};
@@ -445,29 +729,12 @@ class CheckedNumericState<T, NUMERIC_FLOATING> {
template <typename Src>
CheckedNumericState(
Src value,
- RangeConstraint validity,
+ bool is_valid,
typename std::enable_if<std::numeric_limits<Src>::is_integer, int>::type =
0) {
- switch (DstRangeRelationToSrcRange<T>(value)) {
- case RANGE_VALID:
- value_ = static_cast<T>(value);
- break;
-
- case RANGE_UNDERFLOW:
- value_ = -std::numeric_limits<T>::infinity();
- break;
-
- case RANGE_OVERFLOW:
- value_ = std::numeric_limits<T>::infinity();
- break;
-
- case RANGE_INVALID:
- value_ = std::numeric_limits<T>::quiet_NaN();
- break;
-
- default:
- NOTREACHED();
- }
+ value_ = (is_valid && (DstRangeRelationToSrcRange<T>(value) == RANGE_VALID))
+ ? static_cast<T>(value)
+ : std::numeric_limits<T>::quiet_NaN();
}
template <typename Src>
@@ -482,50 +749,10 @@ class CheckedNumericState<T, NUMERIC_FLOATING> {
CheckedNumericState(const CheckedNumericState<Src>& rhs)
: value_(static_cast<T>(rhs.value())) {}
- RangeConstraint validity() const {
- return GetRangeConstraint(value_ <= std::numeric_limits<T>::max(),
- value_ >= -std::numeric_limits<T>::max());
- }
+ bool is_valid() const { return std::isfinite(value_); }
T value() const { return value_; }
};
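A standalone sketch of the convention the floating-point specialization relies on (illustrative function, not the class above): invalid states are encoded once as NaN, after which validity reduces to std::isfinite().

  #include <cmath>
  #include <limits>

  double ToCheckedDouble(long long value, bool source_is_valid) {
    // Poison an invalid source with NaN; is_valid() is then just isfinite().
    return source_is_valid ? static_cast<double>(value)
                           : std::numeric_limits<double>::quiet_NaN();
  }

  // std::isfinite(ToCheckedDouble(42, true));  // true: value carried over.
  // std::isfinite(ToCheckedDouble(0, false));  // false: poisoned with NaN.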
-// For integers less than 128-bit and floats 32-bit or larger, we have the type
-// with the larger maximum exponent take precedence.
-enum ArithmeticPromotionCategory { LEFT_PROMOTION, RIGHT_PROMOTION };
-
-template <typename Lhs,
- typename Rhs = Lhs,
- ArithmeticPromotionCategory Promotion =
- (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value)
- ? LEFT_PROMOTION
- : RIGHT_PROMOTION>
-struct ArithmeticPromotion;
-
-template <typename Lhs, typename Rhs>
-struct ArithmeticPromotion<Lhs, Rhs, LEFT_PROMOTION> {
- typedef Lhs type;
-};
-
-template <typename Lhs, typename Rhs>
-struct ArithmeticPromotion<Lhs, Rhs, RIGHT_PROMOTION> {
- typedef Rhs type;
-};
-
-// We can statically check if operations on the provided types can wrap, so we
-// can skip the checked operations if they're not needed. So, for an integer we
-// care if the destination type preserves the sign and is twice the width of
-// the source.
-template <typename T, typename Lhs, typename Rhs>
-struct IsIntegerArithmeticSafe {
- static const bool value = !std::numeric_limits<T>::is_iec559 &&
- StaticDstRangeRelationToSrcRange<T, Lhs>::value ==
- NUMERIC_RANGE_CONTAINED &&
- sizeof(T) >= (2 * sizeof(Lhs)) &&
- StaticDstRangeRelationToSrcRange<T, Rhs>::value !=
- NUMERIC_RANGE_CONTAINED &&
- sizeof(T) >= (2 * sizeof(Rhs));
-};
-
} // namespace internal
} // namespace base
diff --git a/chromium/base/numerics/safe_numerics_unittest.cc b/chromium/base/numerics/safe_numerics_unittest.cc
index b804d36f7af..ac7a5ba5f80 100644
--- a/chromium/base/numerics/safe_numerics_unittest.cc
+++ b/chromium/base/numerics/safe_numerics_unittest.cc
@@ -35,11 +35,11 @@ using base::internal::RANGE_OVERFLOW;
using base::internal::RANGE_UNDERFLOW;
using base::internal::SignedIntegerForSize;
-// These tests deliberately cause arithmetic overflows. If the compiler is
-// aggressive enough, it can const fold these overflows. Disable warnings about
-// overflows for const expressions.
+// These tests deliberately cause arithmetic boundary errors. If the compiler
+// is aggressive enough, it can detect these errors during constant folding,
+// so we disable the relevant warnings.
#if defined(OS_WIN)
-#pragma warning(disable:4756)
+#pragma warning(disable : 4756) // Arithmetic overflow.
+#pragma warning(disable : 4293) // Invalid shift.
#endif
// This is a helper function for finding the maximum value in Src that can be
@@ -66,16 +66,16 @@ Dst GetMaxConvertibleToFloat() {
#define TEST_EXPECTED_VALIDITY(expected, actual) \
EXPECT_EQ(expected, CheckedNumeric<Dst>(actual).IsValid()) \
<< "Result test: Value " << +(actual).ValueUnsafe() << " as " << dst \
- << " on line " << line;
+ << " on line " << line
#define TEST_EXPECTED_SUCCESS(actual) TEST_EXPECTED_VALIDITY(true, actual)
#define TEST_EXPECTED_FAILURE(actual) TEST_EXPECTED_VALIDITY(false, actual)
#define TEST_EXPECTED_VALUE(expected, actual) \
EXPECT_EQ(static_cast<Dst>(expected), \
- CheckedNumeric<Dst>(actual).ValueUnsafe()) \
+ CheckedNumeric<Dst>(actual).ValueOrDie()) \
<< "Result test: Value " << +((actual).ValueUnsafe()) << " as " << dst \
- << " on line " << line;
+ << " on line " << line
// Signed integer arithmetic.
template <typename Dst>
@@ -106,6 +106,7 @@ static void TestSpecializedArithmetic(
TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) / -1);
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(-1) / 2);
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) * -1);
// Modulus is legal only for integers.
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>() % 1);
@@ -120,6 +121,24 @@ static void TestSpecializedArithmetic(
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) % 1);
CheckedNumeric<Dst> checked_dst = 1;
TEST_EXPECTED_VALUE(0, checked_dst %= 1);
+ // Test that mod by 0 is avoided but returns an invalid result.
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) % 0);
+ // Test bit shifts.
+ volatile Dst negative_one = -1;
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) << negative_one);
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) << (sizeof(Dst) * CHAR_BIT - 1));
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(0) << (sizeof(Dst) * CHAR_BIT));
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) << 1);
+ TEST_EXPECTED_VALUE(static_cast<Dst>(1) << (sizeof(Dst) * CHAR_BIT - 2),
+ CheckedNumeric<Dst>(1) << (sizeof(Dst) * CHAR_BIT - 2));
+ TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(0)
+ << (sizeof(Dst) * CHAR_BIT - 1));
+ TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) << 0);
+ TEST_EXPECTED_VALUE(2, CheckedNumeric<Dst>(1) << 1);
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) >> (sizeof(Dst) * CHAR_BIT));
+ TEST_EXPECTED_VALUE(0,
+ CheckedNumeric<Dst>(1) >> (sizeof(Dst) * CHAR_BIT - 1));
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) >> negative_one);
}
// Unsigned integer arithmetic.
@@ -155,6 +174,23 @@ static void TestSpecializedArithmetic(
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) % 1);
CheckedNumeric<Dst> checked_dst = 1;
TEST_EXPECTED_VALUE(0, checked_dst %= 1);
+ // Test that mod by 0 is avoided but returns an invalid result.
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) % 0);
+ // Test bit shifts.
+ volatile int negative_one = -1;
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) << negative_one);
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) << (sizeof(Dst) * CHAR_BIT));
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(0) << (sizeof(Dst) * CHAR_BIT));
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) << 1);
+ TEST_EXPECTED_VALUE(static_cast<Dst>(1) << (sizeof(Dst) * CHAR_BIT - 1),
+ CheckedNumeric<Dst>(1) << (sizeof(Dst) * CHAR_BIT - 1));
+ TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) << 0);
+ TEST_EXPECTED_VALUE(2, CheckedNumeric<Dst>(1) << 1);
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) >> (sizeof(Dst) * CHAR_BIT));
+ TEST_EXPECTED_VALUE(0,
+ CheckedNumeric<Dst>(1) >> (sizeof(Dst) * CHAR_BIT - 1));
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) >> negative_one);
}
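A usage sketch of the shift support these tests exercise, assuming the CheckedNumeric API from this patch (base/numerics/safe_math.h):

  #include <cstdint>

  #include "base/numerics/safe_math.h"

  void ShiftUsageSketch() {
    auto ok = base::CheckedNumeric<uint8_t>(1) << 7;
    // ok.IsValid() is true and ok.ValueOrDie() is 128.
    auto bad = base::CheckedNumeric<uint8_t>(1) << 8;
    // bad.IsValid() is false: the count equals the bit width, so
    // ValueOrDie() would CHECK-fail.
  }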
// Floating point arithmetic.
@@ -240,28 +276,37 @@ static void TestArithmetic(const char* dst, int line) {
// Generic addition.
TEST_EXPECTED_VALUE(1, (CheckedNumeric<Dst>() + 1));
TEST_EXPECTED_VALUE(2, (CheckedNumeric<Dst>(1) + 1));
- TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(-1) + 1));
+ if (numeric_limits<Dst>::is_signed)
+ TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(-1) + 1));
TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()) + 1);
TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) +
DstLimits::max());
// Generic subtraction.
- TEST_EXPECTED_VALUE(-1, (CheckedNumeric<Dst>() - 1));
TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(1) - 1));
- TEST_EXPECTED_VALUE(-2, (CheckedNumeric<Dst>(-1) - 1));
TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::max()) - 1);
+ if (numeric_limits<Dst>::is_signed) {
+ TEST_EXPECTED_VALUE(-1, (CheckedNumeric<Dst>() - 1));
+ TEST_EXPECTED_VALUE(-2, (CheckedNumeric<Dst>(-1) - 1));
+ } else {
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) - -1);
+ }
// Generic multiplication.
TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>() * 1));
TEST_EXPECTED_VALUE(1, (CheckedNumeric<Dst>(1) * 1));
TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(0) * 0));
- TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(-1) * 0));
- TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(0) * -1));
- TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) *
- DstLimits::max());
- if (DstLimits::is_signed) {
+ if (numeric_limits<Dst>::is_signed) {
+ TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(-1) * 0));
+ TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(0) * -1));
TEST_EXPECTED_VALUE(-2, (CheckedNumeric<Dst>(-1) * 2));
+ } else {
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) * -2);
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) *
+ CheckedNumeric<uintmax_t>(-2));
}
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) *
+ DstLimits::max());
// Generic division.
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>() / 1);
@@ -313,7 +358,7 @@ struct TestNumericConversion {};
#define TEST_EXPECTED_RANGE(expected, actual) \
EXPECT_EQ(expected, base::internal::DstRangeRelationToSrcRange<Dst>(actual)) \
<< "Conversion test: " << src << " value " << actual << " to " << dst \
- << " on line " << line;
+ << " on line " << line
template <typename Dst, typename Src>
struct TestNumericConversion<Dst, Src, SIGN_PRESERVING_VALUE_PRESERVING> {
@@ -660,7 +705,7 @@ TEST(SafeNumerics, SaturatedCastChecks) {
std::numeric_limits<float>::infinity();
EXPECT_TRUE(std::isnan(not_a_number));
EXPECT_DEATH_IF_SUPPORTED(
- (saturated_cast<int, base::SaturatedCastNaNBehaviorCheck>(not_a_number)),
+ (saturated_cast<int, base::CheckOnFailure>(not_a_number)),
"");
}
diff --git a/chromium/base/observer_list.h b/chromium/base/observer_list.h
index afe1f46cd6e..0572ba6500d 100644
--- a/chromium/base/observer_list.h
+++ b/chromium/base/observer_list.h
@@ -11,6 +11,7 @@
#include <limits>
#include <vector>
+#include "base/gtest_prod_util.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
@@ -46,11 +47,14 @@
// }
//
// void NotifyFoo() {
-// FOR_EACH_OBSERVER(Observer, observer_list_, OnFoo(this));
+// for (auto& observer : observer_list_)
+// observer.OnFoo(this);
// }
//
// void NotifyBar(int x, int y) {
-// FOR_EACH_OBSERVER(Observer, observer_list_, OnBar(this, x, y));
+// for (ObserverList<Observer>::iterator i = observer_list_.begin(),
+// e = observer_list_.end(); i != e; ++i)
+// i->OnBar(this, x, y);
// }
//
// private:
@@ -80,20 +84,66 @@ class ObserverListBase
NOTIFY_EXISTING_ONLY
};
- // An iterator class that can be used to access the list of observers. See
- // also the FOR_EACH_OBSERVER macro defined below.
- class Iterator {
+ // An iterator class that can be used to access the list of observers.
+ template <class ContainerType>
+ class Iter {
public:
- explicit Iterator(ObserverListBase<ObserverType>* list);
- ~Iterator();
- ObserverType* GetNext();
+ Iter();
+ explicit Iter(ContainerType* list);
+ ~Iter();
+
+ // A workaround for C2244. MSVC requires fully qualified type name for
+ // return type on a function definition to match a function declaration.
+ using ThisType =
+ typename ObserverListBase<ObserverType>::template Iter<ContainerType>;
+
+ bool operator==(const Iter& other) const;
+ bool operator!=(const Iter& other) const;
+ ThisType& operator++();
+ ObserverType* operator->() const;
+ ObserverType& operator*() const;
private:
+ FRIEND_TEST_ALL_PREFIXES(ObserverListTest, BasicStdIterator);
+ FRIEND_TEST_ALL_PREFIXES(ObserverListTest, StdIteratorRemoveFront);
+
+ ObserverType* GetCurrent() const;
+ void EnsureValidIndex();
+
+ size_t clamped_max_index() const {
+ return std::min(max_index_, list_->observers_.size());
+ }
+
+ bool is_end() const { return !list_ || index_ == clamped_max_index(); }
+
WeakPtr<ObserverListBase<ObserverType>> list_;
+ // When initially constructed and each time the iterator is incremented,
+ // |index_| is guaranteed to point to a non-null observer if the iterator
+ // has not reached the end of the ObserverList.
size_t index_;
size_t max_index_;
};
+ using Iterator = Iter<ObserverListBase<ObserverType>>;
+
+ using iterator = Iter<ObserverListBase<ObserverType>>;
+ iterator begin() {
+ // An optimization: do not involve weak pointers for an empty list.
+ // Note: can't use the ?: operator here due to an MSVC bug (unit tests fail).
+ if (observers_.empty())
+ return iterator();
+ return iterator(this);
+ }
+ iterator end() { return iterator(); }
+
+ using const_iterator = Iter<const ObserverListBase<ObserverType>>;
+ const_iterator begin() const {
+ if (observers_.empty())
+ return const_iterator();
+ return const_iterator(this);
+ }
+ const_iterator end() const { return const_iterator(); }
+
ObserverListBase() : notify_depth_(0), type_(NOTIFY_ALL) {}
explicit ObserverListBase(NotificationType type)
: notify_depth_(0), type_(type) {}
@@ -124,37 +174,99 @@ class ObserverListBase
int notify_depth_;
NotificationType type_;
- friend class ObserverListBase::Iterator;
+ template <class ContainerType>
+ friend class Iter;
DISALLOW_COPY_AND_ASSIGN(ObserverListBase);
};
template <class ObserverType>
-ObserverListBase<ObserverType>::Iterator::Iterator(
- ObserverListBase<ObserverType>* list)
- : list_(list->AsWeakPtr()),
+template <class ContainerType>
+ObserverListBase<ObserverType>::Iter<ContainerType>::Iter()
+ : index_(0), max_index_(0) {}
+
+template <class ObserverType>
+template <class ContainerType>
+ObserverListBase<ObserverType>::Iter<ContainerType>::Iter(ContainerType* list)
+ : list_(const_cast<ObserverListBase<ObserverType>*>(list)->AsWeakPtr()),
index_(0),
max_index_(list->type_ == NOTIFY_ALL ? std::numeric_limits<size_t>::max()
: list->observers_.size()) {
+ EnsureValidIndex();
+ DCHECK(list_);
++list_->notify_depth_;
}
template <class ObserverType>
-ObserverListBase<ObserverType>::Iterator::~Iterator() {
- if (list_.get() && --list_->notify_depth_ == 0)
+template <class ContainerType>
+ObserverListBase<ObserverType>::Iter<ContainerType>::~Iter() {
+ if (list_ && --list_->notify_depth_ == 0)
list_->Compact();
}
template <class ObserverType>
-ObserverType* ObserverListBase<ObserverType>::Iterator::GetNext() {
- if (!list_.get())
+template <class ContainerType>
+bool ObserverListBase<ObserverType>::Iter<ContainerType>::operator==(
+ const Iter& other) const {
+ if (is_end() && other.is_end())
+ return true;
+ return list_.get() == other.list_.get() && index_ == other.index_;
+}
+
+template <class ObserverType>
+template <class ContainerType>
+bool ObserverListBase<ObserverType>::Iter<ContainerType>::operator!=(
+ const Iter& other) const {
+ return !operator==(other);
+}
+
+template <class ObserverType>
+template <class ContainerType>
+typename ObserverListBase<ObserverType>::template Iter<ContainerType>&
+ ObserverListBase<ObserverType>::Iter<ContainerType>::operator++() {
+ if (list_) {
+ ++index_;
+ EnsureValidIndex();
+ }
+ return *this;
+}
+
+template <class ObserverType>
+template <class ContainerType>
+ObserverType* ObserverListBase<ObserverType>::Iter<ContainerType>::operator->()
+ const {
+ ObserverType* current = GetCurrent();
+ DCHECK(current);
+ return current;
+}
+
+template <class ObserverType>
+template <class ContainerType>
+ObserverType& ObserverListBase<ObserverType>::Iter<ContainerType>::operator*()
+ const {
+ ObserverType* current = GetCurrent();
+ DCHECK(current);
+ return *current;
+}
+
+template <class ObserverType>
+template <class ContainerType>
+ObserverType* ObserverListBase<ObserverType>::Iter<ContainerType>::GetCurrent()
+ const {
+ if (!list_)
return nullptr;
- ListType& observers = list_->observers_;
- // Advance if the current element is null
- size_t max_index = std::min(max_index_, observers.size());
- while (index_ < max_index && !observers[index_])
+ return index_ < clamped_max_index() ? list_->observers_[index_] : nullptr;
+}
+
+template <class ObserverType>
+template <class ContainerType>
+void ObserverListBase<ObserverType>::Iter<ContainerType>::EnsureValidIndex() {
+ if (!list_)
+ return;
+
+ size_t max_index = clamped_max_index();
+ while (index_ < max_index && !list_->observers_[index_])
++index_;
- return index_ < max_index ? observers[index_++] : nullptr;
}
template <class ObserverType>
@@ -205,9 +317,8 @@ void ObserverListBase<ObserverType>::Clear() {
template <class ObserverType>
void ObserverListBase<ObserverType>::Compact() {
- observers_.erase(
- std::remove(observers_.begin(), observers_.end(), nullptr),
- observers_.end());
+ observers_.erase(std::remove(observers_.begin(), observers_.end(), nullptr),
+ observers_.end());
}
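Compact() is the classic erase-remove idiom; a standalone equivalent for clarity (illustrative element type):

  #include <algorithm>
  #include <vector>

  void CompactSketch(std::vector<int*>* observers) {
    // std::remove shifts surviving pointers forward; erase trims the tail.
    observers->erase(
        std::remove(observers->begin(), observers->end(), nullptr),
        observers->end());
  }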
template <class ObserverType, bool check_empty = false>
@@ -233,17 +344,6 @@ class ObserverList : public ObserverListBase<ObserverType> {
}
};
-#define FOR_EACH_OBSERVER(ObserverType, observer_list, func) \
- do { \
- if ((observer_list).might_have_observers()) { \
- typename base::ObserverListBase<ObserverType>::Iterator \
- it_inside_observer_macro(&observer_list); \
- ObserverType* obs; \
- while ((obs = it_inside_observer_macro.GetNext()) != nullptr) \
- obs->func; \
- } \
- } while (0)
-
} // namespace base
#endif // BASE_OBSERVER_LIST_H_
diff --git a/chromium/base/observer_list_threadsafe.h b/chromium/base/observer_list_threadsafe.h
index 49dc859f8f9..dc683d40812 100644
--- a/chromium/base/observer_list_threadsafe.h
+++ b/chromium/base/observer_list_threadsafe.h
@@ -7,16 +7,17 @@
#include <algorithm>
#include <map>
+#include <memory>
#include <tuple>
#include "base/bind.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/macros.h"
+#include "base/memory/ptr_util.h"
#include "base/memory/ref_counted.h"
#include "base/observer_list.h"
#include "base/single_thread_task_runner.h"
-#include "base/stl_util.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_task_runner_handle.h"
@@ -96,8 +97,8 @@ class ObserverListThreadSafe
ObserverListThreadSafe<ObserverType>,
ObserverListThreadSafeTraits<ObserverType>> {
public:
- typedef typename ObserverList<ObserverType>::NotificationType
- NotificationType;
+ using NotificationType =
+ typename ObserverList<ObserverType>::NotificationType;
ObserverListThreadSafe()
: type_(ObserverListBase<ObserverType>::NOTIFY_ALL) {}
@@ -115,8 +116,10 @@ class ObserverListThreadSafe
PlatformThreadId thread_id = PlatformThread::CurrentId();
{
AutoLock lock(list_lock_);
- if (observer_lists_.find(thread_id) == observer_lists_.end())
- observer_lists_[thread_id] = new ObserverListContext(type_);
+ if (observer_lists_.find(thread_id) == observer_lists_.end()) {
+ observer_lists_[thread_id] =
+ base::MakeUnique<ObserverListContext>(type_);
+ }
list = &(observer_lists_[thread_id]->list);
}
list->AddObserver(obs);
@@ -128,32 +131,24 @@ class ObserverListThreadSafe
// If the observer to be removed is in the list, RemoveObserver MUST
// be called from the same thread which called AddObserver.
void RemoveObserver(ObserverType* obs) {
- ObserverListContext* context = nullptr;
- ObserverList<ObserverType>* list = nullptr;
PlatformThreadId thread_id = PlatformThread::CurrentId();
{
AutoLock lock(list_lock_);
- typename ObserversListMap::iterator it = observer_lists_.find(thread_id);
+ auto it = observer_lists_.find(thread_id);
if (it == observer_lists_.end()) {
// This will happen if we try to remove an observer on a thread
// we never added an observer for.
return;
}
- context = it->second;
- list = &context->list;
+ ObserverList<ObserverType>& list = it->second->list;
- // If we're about to remove the last observer from the list,
- // then we can remove this observer_list entirely.
- if (list->HasObserver(obs) && list->size() == 1)
+ list.RemoveObserver(obs);
+
+ // If that was the last observer in the list, remove the ObserverList
+ // entirely.
+ if (list.size() == 0)
observer_lists_.erase(it);
}
- list->RemoveObserver(obs);
-
- // If RemoveObserver is called from a notification, the size will be
- // nonzero. Instead of deleting here, the NotifyWrapper will delete
- // when it finishes iterating.
- if (list->size() == 0)
- delete context;
}
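The ownership simplification in this hunk, in miniature (hypothetical Context type): once the map holds std::unique_ptr, erase() frees the entry itself and the old manual delete path disappears.

  #include <map>
  #include <memory>

  struct Context {};

  void EraseAndFree(std::map<int, std::unique_ptr<Context>>* contexts,
                    int key) {
    contexts->erase(key);  // The unique_ptr destructor deletes the Context.
  }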
// Verifies that the list is currently empty (i.e. there are no observers).
@@ -176,7 +171,7 @@ class ObserverListThreadSafe
AutoLock lock(list_lock_);
for (const auto& entry : observer_lists_) {
- ObserverListContext* context = entry.second;
+ ObserverListContext* context = entry.second.get();
context->task_runner->PostTask(
from_here,
Bind(&ObserverListThreadSafe<ObserverType>::NotifyWrapper,
@@ -200,7 +195,6 @@ class ObserverListThreadSafe
};
~ObserverListThreadSafe() {
- STLDeleteValues(&observer_lists_);
}
// Wrapper which is called to fire the notifications for each thread's
@@ -211,22 +205,18 @@ class ObserverListThreadSafe
// Check that this list still needs notifications.
{
AutoLock lock(list_lock_);
- typename ObserversListMap::iterator it =
- observer_lists_.find(PlatformThread::CurrentId());
+ auto it = observer_lists_.find(PlatformThread::CurrentId());
// The ObserverList could have been removed already. In fact, it could
// have been removed and then re-added! If the master list's loop
// does not match this one, then we do not need to finish this
// notification.
- if (it == observer_lists_.end() || it->second != context)
+ if (it == observer_lists_.end() || it->second.get() != context)
return;
}
- {
- typename ObserverList<ObserverType>::Iterator it(&context->list);
- ObserverType* obs;
- while ((obs = it.GetNext()) != nullptr)
- method.Run(obs);
+ for (auto& observer : context->list) {
+ method.Run(&observer);
}
// If there are no more observers on the list, we can now delete it.
@@ -236,24 +226,22 @@ class ObserverListThreadSafe
// Remove |list| if it's not already removed.
// This can happen if multiple observers got removed in a notification.
// See http://crbug.com/55725.
- typename ObserversListMap::iterator it =
- observer_lists_.find(PlatformThread::CurrentId());
- if (it != observer_lists_.end() && it->second == context)
+ auto it = observer_lists_.find(PlatformThread::CurrentId());
+ if (it != observer_lists_.end() && it->second.get() == context)
observer_lists_.erase(it);
}
- delete context;
}
}
+ mutable Lock list_lock_; // Protects the observer_lists_.
+
// Key by PlatformThreadId because in tests, clients can attempt to remove
// observers without a SingleThreadTaskRunner. If this were keyed by
// SingleThreadTaskRunner, that operation would be silently ignored, leaving
// garbage in the ObserverList.
- typedef std::map<PlatformThreadId, ObserverListContext*>
- ObserversListMap;
+ std::map<PlatformThreadId, std::unique_ptr<ObserverListContext>>
+ observer_lists_;
- mutable Lock list_lock_; // Protects the observer_lists_.
- ObserversListMap observer_lists_;
const NotificationType type_;
DISALLOW_COPY_AND_ASSIGN(ObserverListThreadSafe);
diff --git a/chromium/base/observer_list_unittest.cc b/chromium/base/observer_list_unittest.cc
index 097a2ed28b1..c5e556bd9da 100644
--- a/chromium/base/observer_list_unittest.cc
+++ b/chromium/base/observer_list_unittest.cc
@@ -22,13 +22,17 @@ class Foo {
public:
virtual void Observe(int x) = 0;
virtual ~Foo() {}
+ virtual int GetValue() const { return 0; }
};
class Adder : public Foo {
public:
explicit Adder(int scaler) : total(0), scaler_(scaler) {}
- void Observe(int x) override { total += x * scaler_; }
~Adder() override {}
+
+ void Observe(int x) override { total += x * scaler_; }
+ int GetValue() const override { return total; }
+
int total;
private:
@@ -37,16 +41,28 @@ class Adder : public Foo {
class Disrupter : public Foo {
public:
+ Disrupter(ObserverList<Foo>* list, Foo* doomed, bool remove_self)
+ : list_(list), doomed_(doomed), remove_self_(remove_self) {}
Disrupter(ObserverList<Foo>* list, Foo* doomed)
- : list_(list),
- doomed_(doomed) {
- }
+ : Disrupter(list, doomed, false) {}
+ Disrupter(ObserverList<Foo>* list, bool remove_self)
+ : Disrupter(list, nullptr, remove_self) {}
+
~Disrupter() override {}
- void Observe(int x) override { list_->RemoveObserver(doomed_); }
+
+ void Observe(int x) override {
+ if (remove_self_)
+ list_->RemoveObserver(this);
+ if (doomed_)
+ list_->RemoveObserver(doomed_);
+ }
+
+ void SetDoomed(Foo* doomed) { doomed_ = doomed; }
private:
ObserverList<Foo>* list_;
Foo* doomed_;
+ bool remove_self_;
};
class ThreadSafeDisrupter : public Foo {
@@ -67,21 +83,19 @@ template <typename ObserverListType>
class AddInObserve : public Foo {
public:
explicit AddInObserve(ObserverListType* observer_list)
- : added(false),
- observer_list(observer_list),
- adder(1) {
- }
+ : observer_list(observer_list), to_add_() {}
+
+ void SetToAdd(Foo* to_add) { to_add_ = to_add; }
void Observe(int x) override {
- if (!added) {
- added = true;
- observer_list->AddObserver(&adder);
+ if (to_add_) {
+ observer_list->AddObserver(to_add_);
+ to_add_ = nullptr;
}
}
- bool added;
ObserverListType* observer_list;
- Adder adder;
+ Foo* to_add_;
};
@@ -112,8 +126,6 @@ class AddRemoveThread : public PlatformThread::Delegate,
FROM_HERE,
base::Bind(&AddRemoveThread::AddTask, weak_factory_.GetWeakPtr()));
RunLoop().Run();
- //LOG(ERROR) << "Loop 0x" << std::hex << loop_ << " done. " <<
- // count_observes_ << ", " << count_addtask_;
delete loop_;
loop_ = reinterpret_cast<MessageLoop*>(0xdeadbeef);
delete this;
@@ -176,6 +188,8 @@ class AddRemoveThread : public PlatformThread::Delegate,
base::WeakPtrFactory<AddRemoveThread> weak_factory_;
};
+} // namespace
+
TEST(ObserverListTest, BasicTest) {
ObserverList<Foo> observer_list;
Adder a(1), b(-1), c(1), d(-1), e(-1);
@@ -187,7 +201,8 @@ TEST(ObserverListTest, BasicTest) {
EXPECT_TRUE(observer_list.HasObserver(&a));
EXPECT_FALSE(observer_list.HasObserver(&c));
- FOR_EACH_OBSERVER(Foo, observer_list, Observe(10));
+ for (auto& observer : observer_list)
+ observer.Observe(10);
observer_list.AddObserver(&evil);
observer_list.AddObserver(&c);
@@ -196,7 +211,8 @@ TEST(ObserverListTest, BasicTest) {
// Removing an observer not in the list should do nothing.
observer_list.RemoveObserver(&e);
- FOR_EACH_OBSERVER(Foo, observer_list, Observe(10));
+ for (auto& observer : observer_list)
+ observer.Observe(10);
EXPECT_EQ(20, a.total);
EXPECT_EQ(-20, b.total);
@@ -205,6 +221,52 @@ TEST(ObserverListTest, BasicTest) {
EXPECT_EQ(0, e.total);
}
+TEST(ObserverListTest, DisruptSelf) {
+ ObserverList<Foo> observer_list;
+ Adder a(1), b(-1), c(1), d(-1);
+ Disrupter evil(&observer_list, true);
+
+ observer_list.AddObserver(&a);
+ observer_list.AddObserver(&b);
+
+ for (auto& observer : observer_list)
+ observer.Observe(10);
+
+ observer_list.AddObserver(&evil);
+ observer_list.AddObserver(&c);
+ observer_list.AddObserver(&d);
+
+ for (auto& observer : observer_list)
+ observer.Observe(10);
+
+ EXPECT_EQ(20, a.total);
+ EXPECT_EQ(-20, b.total);
+ EXPECT_EQ(10, c.total);
+ EXPECT_EQ(-10, d.total);
+}
+
+TEST(ObserverListTest, DisruptBefore) {
+ ObserverList<Foo> observer_list;
+ Adder a(1), b(-1), c(1), d(-1);
+ Disrupter evil(&observer_list, &b);
+
+ observer_list.AddObserver(&a);
+ observer_list.AddObserver(&b);
+ observer_list.AddObserver(&evil);
+ observer_list.AddObserver(&c);
+ observer_list.AddObserver(&d);
+
+ for (auto& observer : observer_list)
+ observer.Observe(10);
+ for (auto& observer : observer_list)
+ observer.Observe(10);
+
+ EXPECT_EQ(20, a.total);
+ EXPECT_EQ(-10, b.total);
+ EXPECT_EQ(20, c.total);
+ EXPECT_EQ(-20, d.total);
+}
+
TEST(ObserverListThreadSafeTest, BasicTest) {
MessageLoop loop;
@@ -433,20 +495,24 @@ TEST(ObserverListTest, Existing) {
ObserverList<Foo> observer_list(ObserverList<Foo>::NOTIFY_EXISTING_ONLY);
Adder a(1);
AddInObserve<ObserverList<Foo> > b(&observer_list);
+ Adder c(1);
+ b.SetToAdd(&c);
observer_list.AddObserver(&a);
observer_list.AddObserver(&b);
- FOR_EACH_OBSERVER(Foo, observer_list, Observe(1));
+ for (auto& observer : observer_list)
+ observer.Observe(1);
- EXPECT_TRUE(b.added);
+ EXPECT_FALSE(b.to_add_);
// B's adder should not have been notified because it was added during
// notification.
- EXPECT_EQ(0, b.adder.total);
+ EXPECT_EQ(0, c.total);
// Notify again to make sure b's adder is notified.
- FOR_EACH_OBSERVER(Foo, observer_list, Observe(1));
- EXPECT_EQ(1, b.adder.total);
+ for (auto& observer : observer_list)
+ observer.Observe(1);
+ EXPECT_EQ(1, c.total);
}
// Same as above, but for ObserverListThreadSafe
@@ -456,6 +522,8 @@ TEST(ObserverListThreadSafeTest, Existing) {
new ObserverListThreadSafe<Foo>(ObserverList<Foo>::NOTIFY_EXISTING_ONLY));
Adder a(1);
AddInObserve<ObserverListThreadSafe<Foo> > b(observer_list.get());
+ Adder c(1);
+ b.SetToAdd(&c);
observer_list->AddObserver(&a);
observer_list->AddObserver(&b);
@@ -463,15 +531,15 @@ TEST(ObserverListThreadSafeTest, Existing) {
observer_list->Notify(FROM_HERE, &Foo::Observe, 1);
RunLoop().RunUntilIdle();
- EXPECT_TRUE(b.added);
+ EXPECT_FALSE(b.to_add_);
// B's adder should not have been notified because it was added during
// notification.
- EXPECT_EQ(0, b.adder.total);
+ EXPECT_EQ(0, c.total);
// Notify again to make sure b's adder is notified.
observer_list->Notify(FROM_HERE, &Foo::Observe, 1);
RunLoop().RunUntilIdle();
- EXPECT_EQ(1, b.adder.total);
+ EXPECT_EQ(1, c.total);
}
class AddInClearObserve : public Foo {
@@ -501,7 +569,8 @@ TEST(ObserverListTest, ClearNotifyAll) {
observer_list.AddObserver(&a);
- FOR_EACH_OBSERVER(Foo, observer_list, Observe(1));
+ for (auto& observer : observer_list)
+ observer.Observe(1);
EXPECT_TRUE(a.added());
EXPECT_EQ(1, a.adder().total)
<< "Adder should observe once and have sum of 1.";
@@ -513,7 +582,8 @@ TEST(ObserverListTest, ClearNotifyExistingOnly) {
observer_list.AddObserver(&a);
- FOR_EACH_OBSERVER(Foo, observer_list, Observe(1));
+ for (auto& observer : observer_list)
+ observer.Observe(1);
EXPECT_TRUE(a.added());
EXPECT_EQ(0, a.adder().total)
<< "Adder should not observe, so sum should still be 0.";
@@ -536,10 +606,330 @@ TEST(ObserverListTest, IteratorOutlivesList) {
ListDestructor a(observer_list);
observer_list->AddObserver(&a);
- FOR_EACH_OBSERVER(Foo, *observer_list, Observe(0));
+ for (auto& observer : *observer_list)
+ observer.Observe(0);
// If this test fails, there'll be Valgrind errors when this function goes out
// of scope.
}
-} // namespace
+TEST(ObserverListTest, BasicStdIterator) {
+ using FooList = ObserverList<Foo>;
+ FooList observer_list;
+
+ // An optimization: begin() and end() do not involve weak pointers on
+ // empty list.
+ EXPECT_FALSE(observer_list.begin().list_);
+ EXPECT_FALSE(observer_list.end().list_);
+
+ // Iterate over empty list: no effect, no crash.
+ for (auto& i : observer_list)
+ i.Observe(10);
+
+ Adder a(1), b(-1), c(1), d(-1);
+
+ observer_list.AddObserver(&a);
+ observer_list.AddObserver(&b);
+ observer_list.AddObserver(&c);
+ observer_list.AddObserver(&d);
+
+ for (FooList::iterator i = observer_list.begin(), e = observer_list.end();
+ i != e; ++i)
+ i->Observe(1);
+
+ EXPECT_EQ(1, a.total);
+ EXPECT_EQ(-1, b.total);
+ EXPECT_EQ(1, c.total);
+ EXPECT_EQ(-1, d.total);
+
+ // Check an iteration over a 'const view' for a given container.
+ const FooList& const_list = observer_list;
+ for (FooList::const_iterator i = const_list.begin(), e = const_list.end();
+ i != e; ++i) {
+ EXPECT_EQ(1, std::abs(i->GetValue()));
+ }
+
+ for (const auto& o : const_list)
+ EXPECT_EQ(1, std::abs(o.GetValue()));
+}
+
+TEST(ObserverListTest, StdIteratorRemoveItself) {
+ ObserverList<Foo> observer_list;
+ Adder a(1), b(-1), c(1), d(-1);
+ Disrupter disrupter(&observer_list, true);
+
+ observer_list.AddObserver(&a);
+ observer_list.AddObserver(&b);
+ observer_list.AddObserver(&disrupter);
+ observer_list.AddObserver(&c);
+ observer_list.AddObserver(&d);
+
+ for (auto& o : observer_list)
+ o.Observe(1);
+
+ for (auto& o : observer_list)
+ o.Observe(10);
+
+ EXPECT_EQ(11, a.total);
+ EXPECT_EQ(-11, b.total);
+ EXPECT_EQ(11, c.total);
+ EXPECT_EQ(-11, d.total);
+}
+
+TEST(ObserverListTest, StdIteratorRemoveBefore) {
+ ObserverList<Foo> observer_list;
+ Adder a(1), b(-1), c(1), d(-1);
+ Disrupter disrupter(&observer_list, &b);
+
+ observer_list.AddObserver(&a);
+ observer_list.AddObserver(&b);
+ observer_list.AddObserver(&disrupter);
+ observer_list.AddObserver(&c);
+ observer_list.AddObserver(&d);
+
+ for (auto& o : observer_list)
+ o.Observe(1);
+
+ for (auto& o : observer_list)
+ o.Observe(10);
+
+ EXPECT_EQ(11, a.total);
+ EXPECT_EQ(-1, b.total);
+ EXPECT_EQ(11, c.total);
+ EXPECT_EQ(-11, d.total);
+}
+
+TEST(ObserverListTest, StdIteratorRemoveAfter) {
+ ObserverList<Foo> observer_list;
+ Adder a(1), b(-1), c(1), d(-1);
+ Disrupter disrupter(&observer_list, &c);
+
+ observer_list.AddObserver(&a);
+ observer_list.AddObserver(&b);
+ observer_list.AddObserver(&disrupter);
+ observer_list.AddObserver(&c);
+ observer_list.AddObserver(&d);
+
+ for (auto& o : observer_list)
+ o.Observe(1);
+
+ for (auto& o : observer_list)
+ o.Observe(10);
+
+ EXPECT_EQ(11, a.total);
+ EXPECT_EQ(-11, b.total);
+ EXPECT_EQ(0, c.total);
+ EXPECT_EQ(-11, d.total);
+}
+
+TEST(ObserverListTest, StdIteratorRemoveAfterFront) {
+ ObserverList<Foo> observer_list;
+ Adder a(1), b(-1), c(1), d(-1);
+ Disrupter disrupter(&observer_list, &a);
+
+ observer_list.AddObserver(&a);
+ observer_list.AddObserver(&disrupter);
+ observer_list.AddObserver(&b);
+ observer_list.AddObserver(&c);
+ observer_list.AddObserver(&d);
+
+ for (auto& o : observer_list)
+ o.Observe(1);
+
+ for (auto& o : observer_list)
+ o.Observe(10);
+
+ EXPECT_EQ(1, a.total);
+ EXPECT_EQ(-11, b.total);
+ EXPECT_EQ(11, c.total);
+ EXPECT_EQ(-11, d.total);
+}
+
+TEST(ObserverListTest, StdIteratorRemoveBeforeBack) {
+ ObserverList<Foo> observer_list;
+ Adder a(1), b(-1), c(1), d(-1);
+ Disrupter disrupter(&observer_list, &d);
+
+ observer_list.AddObserver(&a);
+ observer_list.AddObserver(&b);
+ observer_list.AddObserver(&c);
+ observer_list.AddObserver(&disrupter);
+ observer_list.AddObserver(&d);
+
+ for (auto& o : observer_list)
+ o.Observe(1);
+
+ for (auto& o : observer_list)
+ o.Observe(10);
+
+ EXPECT_EQ(11, a.total);
+ EXPECT_EQ(-11, b.total);
+ EXPECT_EQ(11, c.total);
+ EXPECT_EQ(0, d.total);
+}
+
+TEST(ObserverListTest, StdIteratorRemoveFront) {
+ using FooList = ObserverList<Foo>;
+ FooList observer_list;
+ Adder a(1), b(-1), c(1), d(-1);
+ Disrupter disrupter(&observer_list, true);
+
+ observer_list.AddObserver(&disrupter);
+ observer_list.AddObserver(&a);
+ observer_list.AddObserver(&b);
+ observer_list.AddObserver(&c);
+ observer_list.AddObserver(&d);
+
+ bool test_disruptor = true;
+ for (FooList::iterator i = observer_list.begin(), e = observer_list.end();
+ i != e; ++i) {
+ i->Observe(1);
+ // Check that a second call to i->Observe() would crash here.
+ if (test_disruptor) {
+ EXPECT_FALSE(i.GetCurrent());
+ test_disruptor = false;
+ }
+ }
+
+ for (auto& o : observer_list)
+ o.Observe(10);
+
+ EXPECT_EQ(11, a.total);
+ EXPECT_EQ(-11, b.total);
+ EXPECT_EQ(11, c.total);
+ EXPECT_EQ(-11, d.total);
+}
+
+TEST(ObserverListTest, StdIteratorRemoveBack) {
+ ObserverList<Foo> observer_list;
+ Adder a(1), b(-1), c(1), d(-1);
+ Disrupter disrupter(&observer_list, true);
+
+ observer_list.AddObserver(&a);
+ observer_list.AddObserver(&b);
+ observer_list.AddObserver(&c);
+ observer_list.AddObserver(&d);
+ observer_list.AddObserver(&disrupter);
+
+ for (auto& o : observer_list)
+ o.Observe(1);
+
+ for (auto& o : observer_list)
+ o.Observe(10);
+
+ EXPECT_EQ(11, a.total);
+ EXPECT_EQ(-11, b.total);
+ EXPECT_EQ(11, c.total);
+ EXPECT_EQ(-11, d.total);
+}
+
+TEST(ObserverListTest, NestedLoop) {
+ ObserverList<Foo> observer_list;
+ Adder a(1), b(-1), c(1), d(-1);
+ Disrupter disrupter(&observer_list, true);
+
+ observer_list.AddObserver(&disrupter);
+ observer_list.AddObserver(&a);
+ observer_list.AddObserver(&b);
+ observer_list.AddObserver(&c);
+ observer_list.AddObserver(&d);
+
+ for (auto& o : observer_list) {
+ o.Observe(10);
+
+ for (auto& o : observer_list)
+ o.Observe(1);
+ }
+
+ EXPECT_EQ(15, a.total);
+ EXPECT_EQ(-15, b.total);
+ EXPECT_EQ(15, c.total);
+ EXPECT_EQ(-15, d.total);
+}
+
+TEST(ObserverListTest, NonCompactList) {
+ ObserverList<Foo> observer_list;
+ Adder a(1), b(-1);
+
+ Disrupter disrupter1(&observer_list, true);
+ Disrupter disrupter2(&observer_list, true);
+
+ // Disrupt itself and another one.
+ disrupter1.SetDoomed(&disrupter2);
+
+ observer_list.AddObserver(&disrupter1);
+ observer_list.AddObserver(&disrupter2);
+ observer_list.AddObserver(&a);
+ observer_list.AddObserver(&b);
+
+ for (auto& o : observer_list) {
+ // Get the { nullptr, nullptr, &a, &b } non-compact list
+ // on the first inner pass.
+ o.Observe(10);
+
+ for (auto& o : observer_list)
+ o.Observe(1);
+ }
+
+ EXPECT_EQ(13, a.total);
+ EXPECT_EQ(-13, b.total);
+}
+
+TEST(ObserverListTest, BecomesEmptyThanNonEmpty) {
+ ObserverList<Foo> observer_list;
+ Adder a(1), b(-1);
+
+ Disrupter disrupter1(&observer_list, true);
+ Disrupter disrupter2(&observer_list, true);
+
+ // Disrupt itself and another one.
+ disrupter1.SetDoomed(&disrupter2);
+
+ observer_list.AddObserver(&disrupter1);
+ observer_list.AddObserver(&disrupter2);
+
+ bool add_observers = true;
+ for (auto& o : observer_list) {
+ // Get the { nullptr, nullptr } empty list on the first inner pass.
+ o.Observe(10);
+
+ for (auto& o : observer_list)
+ o.Observe(1);
+
+ if (add_observers) {
+ observer_list.AddObserver(&a);
+ observer_list.AddObserver(&b);
+ add_observers = false;
+ }
+ }
+
+ EXPECT_EQ(12, a.total);
+ EXPECT_EQ(-12, b.total);
+}
+
+TEST(ObserverListTest, AddObserverInTheLastObserve) {
+ using FooList = ObserverList<Foo>;
+ FooList observer_list;
+
+ AddInObserve<FooList> a(&observer_list);
+ Adder b(-1);
+
+ a.SetToAdd(&b);
+ observer_list.AddObserver(&a);
+
+ auto it = observer_list.begin();
+ while (it != observer_list.end()) {
+ auto& observer = *it;
+ // Intentionally increment the iterator before calling Observe(). The
+ // ObserverList starts with only one observer, and it == observer_list.end()
+ // should be true after the next line.
+ ++it;
+ // However, the first Observe() call will add a second observer: at this
+ // point, it != observer_list.end() should be true, and Observe() should be
+ // called on the newly added observer on the next iteration of the loop.
+ observer.Observe(10);
+ }
+
+ EXPECT_EQ(-10, b.total);
+}
+
} // namespace base
diff --git a/chromium/base/optional.h b/chromium/base/optional.h
index 3adda97c021..cf65ad7dac1 100644
--- a/chromium/base/optional.h
+++ b/chromium/base/optional.h
@@ -34,7 +34,22 @@ namespace internal {
template <typename T, bool = base::is_trivially_destructible<T>::value>
struct OptionalStorage {
- OptionalStorage() {}
+ // Initialize |empty_| here instead of with a default member initializer
+ // to avoid errors in g++ 4.8.
+ constexpr OptionalStorage() : empty_('\0') {}
+
+ constexpr explicit OptionalStorage(const T& value)
+ : is_null_(false), value_(value) {}
+
+ // TODO(alshabalin): Can't use 'constexpr' with std::move until C++14.
+ explicit OptionalStorage(T&& value)
+ : is_null_(false), value_(std::move(value)) {}
+
+ // TODO(alshabalin): Can't use 'constexpr' with std::forward until C++14.
+ template <class... Args>
+ explicit OptionalStorage(base::in_place_t, Args&&... args)
+ : is_null_(false), value_(std::forward<Args>(args)...) {}
+
// When T is not trivially destructible we must call its
// destructor before deallocating its memory.
~OptionalStorage() {
@@ -45,16 +60,31 @@ struct OptionalStorage {
bool is_null_ = true;
union {
// |empty_| exists so that the union will always be initialized, even when
- // it doesn't contain a value. Not initializing it has been observed to
- // trigger comiler warnings.
- char empty_ = '\0';
+ // it doesn't contain a value. Union members must be initialized for the
+ // constructor to be 'constexpr'.
+ char empty_;
T value_;
};
};
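A trimmed standalone sketch of the storage trick above (illustrative struct name): a char member keeps the union initialized so the default constructor can be constexpr without constructing T.

  template <typename T>
  struct StorageSketch {
    constexpr StorageSketch() : empty_('\0') {}
    constexpr explicit StorageSketch(const T& value)
        : is_null_(false), value_(value) {}
    ~StorageSketch() {
      if (!is_null_)
        value_.~T();  // Destroy the active union member by hand.
    }
    bool is_null_ = true;
    union {
      char empty_;  // Initialized so the constexpr default ctor is legal.
      T value_;
    };
  };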
template <typename T>
struct OptionalStorage<T, true> {
- OptionalStorage() {}
+ // Initialize |empty_| here instead of with a default member initializer
+ // to avoid errors in g++ 4.8.
+ constexpr OptionalStorage() : empty_('\0') {}
+
+ constexpr explicit OptionalStorage(const T& value)
+ : is_null_(false), value_(value) {}
+
+ // TODO(alshabalin): Can't use 'constexpr' with std::move until C++14.
+ explicit OptionalStorage(T&& value)
+ : is_null_(false), value_(std::move(value)) {}
+
+ // TODO(alshabalin): Can't use 'constexpr' with std::forward until C++14.
+ template <class... Args>
+ explicit OptionalStorage(base::in_place_t, Args&&... args)
+ : is_null_(false), value_(std::forward<Args>(args)...) {}
+
// When T is trivially destructible (i.e. its destructor does nothing) there
// is no need to call it. Explicitly defaulting the destructor means it's not
// user-provided. Those two together make this destructor trivial.
@@ -63,9 +93,9 @@ struct OptionalStorage<T, true> {
bool is_null_ = true;
union {
// |empty_| exists so that the union will always be initialized, even when
- // it doesn't contain a value. Not initializing it has been observed to
- // trigger comiler warnings.
- char empty_ = '\0';
+ // it doesn't contain a value. Union members must be initialized for the
+ // constructor to be 'constexpr'.
+ char empty_;
T value_;
};
};
@@ -90,8 +120,9 @@ class Optional {
public:
using value_type = T;
- constexpr Optional() = default;
- Optional(base::nullopt_t) : Optional() {}
+ constexpr Optional() {}
+
+ constexpr Optional(base::nullopt_t) {}
Optional(const Optional& other) {
if (!other.storage_.is_null_)
@@ -103,14 +134,15 @@ class Optional {
Init(std::move(other.value()));
}
- Optional(const T& value) { Init(value); }
+ constexpr Optional(const T& value) : storage_(value) {}
- Optional(T&& value) { Init(std::move(value)); }
+ // TODO(alshabalin): Can't use 'constexpr' with std::move until C++14.
+ Optional(T&& value) : storage_(std::move(value)) {}
+ // TODO(alshabalin): Can't use 'constexpr' with std::forward until C++14.
template <class... Args>
- explicit Optional(base::in_place_t, Args&&... args) {
- emplace(std::forward<Args>(args)...);
- }
+ explicit Optional(base::in_place_t, Args&&... args)
+ : storage_(base::in_place, std::forward<Args>(args)...) {}
~Optional() = default;
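What the constexpr constructors buy, as a usage sketch (assumes base/optional.h as patched here):

  #include "base/optional.h"

  constexpr base::Optional<int> kEmpty;                   // default ctor
  constexpr base::Optional<int> kNothing(base::nullopt);  // nullopt ctor
  constexpr base::Optional<int> kAnswer(42);              // value ctor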
diff --git a/chromium/base/optional_unittest.cc b/chromium/base/optional_unittest.cc
index 565b6cd6c74..83025e8bdad 100644
--- a/chromium/base/optional_unittest.cc
+++ b/chromium/base/optional_unittest.cc
@@ -98,7 +98,7 @@ static_assert(
TEST(OptionalTest, DefaultConstructor) {
{
- Optional<float> o;
+ constexpr Optional<float> o;
EXPECT_FALSE(o);
}
@@ -144,21 +144,28 @@ TEST(OptionalTest, CopyConstructor) {
TEST(OptionalTest, ValueConstructor) {
{
- Optional<float> o(0.1f);
+ constexpr float value = 0.1f;
+ constexpr Optional<float> o(value);
+
EXPECT_TRUE(o);
- EXPECT_EQ(o.value(), 0.1f);
+ EXPECT_EQ(value, o.value());
}
{
- Optional<std::string> o("foo");
+ std::string value("foo");
+ Optional<std::string> o(value);
+
EXPECT_TRUE(o);
- EXPECT_EQ(o.value(), "foo");
+ EXPECT_EQ(value, o.value());
}
{
- Optional<TestObject> o(TestObject(3, 0.1));
- EXPECT_TRUE(!!o);
- EXPECT_TRUE(o.value() == TestObject(3, 0.1));
+ TestObject value(3, 0.1);
+ Optional<TestObject> o(value);
+
+ EXPECT_TRUE(o);
+ EXPECT_EQ(TestObject::State::COPY_CONSTRUCTED, o->state());
+ EXPECT_EQ(value, o.value());
}
}
@@ -198,35 +205,28 @@ TEST(OptionalTest, MoveConstructor) {
TEST(OptionalTest, MoveValueConstructor) {
{
- Optional<float> first(0.1f);
- Optional<float> second(std::move(first.value()));
+ float value = 0.1f;
+ Optional<float> o(std::move(value));
- EXPECT_TRUE(second);
- EXPECT_EQ(second.value(), 0.1f);
-
- EXPECT_TRUE(first);
+ EXPECT_TRUE(o);
+ EXPECT_EQ(0.1f, o.value());
}
{
- Optional<std::string> first("foo");
- Optional<std::string> second(std::move(first.value()));
+ std::string value("foo");
+ Optional<std::string> o(std::move(value));
- EXPECT_TRUE(second);
- EXPECT_EQ("foo", second.value());
-
- EXPECT_TRUE(first);
+ EXPECT_TRUE(o);
+ EXPECT_EQ("foo", o.value());
}
{
- Optional<TestObject> first(TestObject(3, 0.1));
- Optional<TestObject> second(std::move(first.value()));
-
- EXPECT_TRUE(!!second);
- EXPECT_EQ(TestObject::State::MOVE_CONSTRUCTED, second->state());
- EXPECT_TRUE(TestObject(3, 0.1) == second.value());
+ TestObject value(3, 0.1);
+ Optional<TestObject> o(std::move(value));
- EXPECT_TRUE(!!first);
- EXPECT_EQ(TestObject::State::MOVED_FROM, first->state());
+ EXPECT_TRUE(o);
+ EXPECT_EQ(TestObject::State::MOVE_CONSTRUCTED, o->state());
+ EXPECT_EQ(TestObject(3, 0.1), o.value());
}
}
@@ -251,7 +251,7 @@ TEST(OptionalTest, ConstructorForwardArguments) {
}
TEST(OptionalTest, NulloptConstructor) {
- Optional<int> a = base::nullopt;
+ constexpr Optional<int> a(base::nullopt);
EXPECT_FALSE(a);
}
diff --git a/chromium/base/pending_task.cc b/chromium/base/pending_task.cc
index 73834bd4607..cca9ebfbff3 100644
--- a/chromium/base/pending_task.cc
+++ b/chromium/base/pending_task.cc
@@ -9,17 +9,16 @@
namespace base {
PendingTask::PendingTask(const tracked_objects::Location& posted_from,
- base::Closure task)
+ OnceClosure task)
: base::TrackingInfo(posted_from, TimeTicks()),
task(std::move(task)),
posted_from(posted_from),
sequence_num(0),
nestable(true),
- is_high_res(false) {
-}
+ is_high_res(false) {}
PendingTask::PendingTask(const tracked_objects::Location& posted_from,
- base::Closure task,
+ OnceClosure task,
TimeTicks delayed_run_time,
bool nestable)
: base::TrackingInfo(posted_from, delayed_run_time),
@@ -27,8 +26,7 @@ PendingTask::PendingTask(const tracked_objects::Location& posted_from,
posted_from(posted_from),
sequence_num(0),
nestable(nestable),
- is_high_res(false) {
-}
+ is_high_res(false) {}
PendingTask::PendingTask(PendingTask&& other) = default;
diff --git a/chromium/base/pending_task.h b/chromium/base/pending_task.h
index 5761653397e..a55fa518ea0 100644
--- a/chromium/base/pending_task.h
+++ b/chromium/base/pending_task.h
@@ -18,10 +18,9 @@ namespace base {
// Contains data about a pending task. Stored in TaskQueue and DelayedTaskQueue
// for use by classes that queue and execute tasks.
struct BASE_EXPORT PendingTask : public TrackingInfo {
+ PendingTask(const tracked_objects::Location& posted_from, OnceClosure task);
PendingTask(const tracked_objects::Location& posted_from,
- Closure task);
- PendingTask(const tracked_objects::Location& posted_from,
- Closure task,
+ OnceClosure task,
TimeTicks delayed_run_time,
bool nestable);
PendingTask(PendingTask&& other);
@@ -33,7 +32,7 @@ struct BASE_EXPORT PendingTask : public TrackingInfo {
bool operator<(const PendingTask& other) const;
// The task to run.
- Closure task;
+ OnceClosure task;
// The site this PendingTask was posted from.
tracked_objects::Location posted_from;
diff --git a/chromium/base/process/launch.h b/chromium/base/process/launch.h
index 2b2040c7d42..be8f6e73b9f 100644
--- a/chromium/base/process/launch.h
+++ b/chromium/base/process/launch.h
@@ -275,6 +275,12 @@ BASE_EXPORT bool GetAppOutputWithExitCode(const CommandLine& cl,
BASE_EXPORT void RaiseProcessToHighPriority();
#if defined(OS_MACOSX)
+// An implementation of LaunchProcess() that uses posix_spawn() instead of
+// fork()+exec(). This does not support the |pre_exec_delegate| and
+// |current_directory| options.
+Process LaunchProcessPosixSpawn(const std::vector<std::string>& argv,
+ const LaunchOptions& options);
+
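A minimal call-site sketch for the new entry point (hypothetical function; LaunchOptions fields as used later in this patch):

  #include "base/process/launch.h"

  base::Process LaunchEcho() {
    base::LaunchOptions options;
    options.wait = true;  // Block until the child exits.
    return base::LaunchProcessPosixSpawn({"/bin/echo", "hello"}, options);
  }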
// Restore the default exception handler, setting it to Apple Crash Reporter
// (ReportCrash). When forking and execing a new process, the child will
// inherit the parent's exception ports, which may be set to the Breakpad
diff --git a/chromium/base/process/launch_mac.cc b/chromium/base/process/launch_mac.cc
index 5895eae4351..3732bc1ecc5 100644
--- a/chromium/base/process/launch_mac.cc
+++ b/chromium/base/process/launch_mac.cc
@@ -4,13 +4,75 @@
#include "base/process/launch.h"
+#include <crt_externs.h>
#include <mach/mach.h>
-#include <servers/bootstrap.h>
+#include <spawn.h>
+#include <string.h>
+#include <sys/wait.h>
#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/threading/thread_restrictions.h"
namespace base {
+namespace {
+
+// DPSXCHECK is a Debug Posix Spawn Check macro. The posix_spawn* family of
+// functions returns an errno value, as opposed to setting errno directly. This
+// macro emulates a DPCHECK().
+#define DPSXCHECK(expr) \
+ do { \
+ int rv = (expr); \
+ DCHECK_EQ(rv, 0) << #expr << ": -" << rv << " " << strerror(rv); \
+ } while (0)
+
+class PosixSpawnAttr {
+ public:
+ PosixSpawnAttr() { DPSXCHECK(posix_spawnattr_init(&attr_)); }
+
+ ~PosixSpawnAttr() { DPSXCHECK(posix_spawnattr_destroy(&attr_)); }
+
+ posix_spawnattr_t* get() { return &attr_; }
+
+ private:
+ posix_spawnattr_t attr_;
+};
+
+class PosixSpawnFileActions {
+ public:
+ PosixSpawnFileActions() {
+ DPSXCHECK(posix_spawn_file_actions_init(&file_actions_));
+ }
+
+ ~PosixSpawnFileActions() {
+ DPSXCHECK(posix_spawn_file_actions_destroy(&file_actions_));
+ }
+
+ void Open(int filedes, const char* path, int mode) {
+ DPSXCHECK(posix_spawn_file_actions_addopen(&file_actions_, filedes, path,
+ mode, 0));
+ }
+
+ void Dup2(int filedes, int newfiledes) {
+ DPSXCHECK(
+ posix_spawn_file_actions_adddup2(&file_actions_, filedes, newfiledes));
+ }
+
+ void Inherit(int filedes) {
+ DPSXCHECK(posix_spawn_file_actions_addinherit_np(&file_actions_, filedes));
+ }
+
+ const posix_spawn_file_actions_t* get() const { return &file_actions_; }
+
+ private:
+ posix_spawn_file_actions_t file_actions_;
+
+ DISALLOW_COPY_AND_ASSIGN(PosixSpawnFileActions);
+};
+
+} // namespace
+
void RestoreDefaultExceptionHandler() {
// This function is tailored to remove the Breakpad exception handler.
// exception_mask matches s_exception_mask in
@@ -28,4 +90,93 @@ void RestoreDefaultExceptionHandler() {
EXCEPTION_DEFAULT, THREAD_STATE_NONE);
}
+Process LaunchProcessPosixSpawn(const std::vector<std::string>& argv,
+ const LaunchOptions& options) {
+ DCHECK(!options.pre_exec_delegate)
+ << "LaunchProcessPosixSpawn does not support PreExecDelegate";
+ DCHECK(options.current_directory.empty())
+ << "LaunchProcessPosixSpawn does not support current_directory";
+
+ PosixSpawnAttr attr;
+
+ short flags = POSIX_SPAWN_CLOEXEC_DEFAULT;
+ if (options.new_process_group) {
+ flags |= POSIX_SPAWN_SETPGROUP;
+ DPSXCHECK(posix_spawnattr_setpgroup(attr.get(), 0));
+ }
+ DPSXCHECK(posix_spawnattr_setflags(attr.get(), flags));
+
+ PosixSpawnFileActions file_actions;
+
+ // Process file descriptors for the child. By default, LaunchProcess will
+ // open stdin to /dev/null and inherit stdout and stderr.
+ bool inherit_stdout = true, inherit_stderr = true;
+ bool null_stdin = true;
+ if (options.fds_to_remap) {
+ for (const auto& dup2_pair : *options.fds_to_remap) {
+ if (dup2_pair.second == STDIN_FILENO) {
+ null_stdin = false;
+ } else if (dup2_pair.second == STDOUT_FILENO) {
+ inherit_stdout = false;
+ } else if (dup2_pair.second == STDERR_FILENO) {
+ inherit_stderr = false;
+ }
+
+ if (dup2_pair.first == dup2_pair.second) {
+ file_actions.Inherit(dup2_pair.second);
+ } else {
+ file_actions.Dup2(dup2_pair.first, dup2_pair.second);
+ }
+ }
+ }
+
+ if (null_stdin) {
+ file_actions.Open(STDIN_FILENO, "/dev/null", O_RDONLY);
+ }
+ if (inherit_stdout) {
+ file_actions.Inherit(STDOUT_FILENO);
+ }
+ if (inherit_stderr) {
+ file_actions.Inherit(STDERR_FILENO);
+ }
+
+ std::unique_ptr<char* []> argv_cstr(new char*[argv.size() + 1]);
+ for (size_t i = 0; i < argv.size(); i++) {
+ argv_cstr[i] = const_cast<char*>(argv[i].c_str());
+ }
+ argv_cstr[argv.size()] = nullptr;
+
+ std::unique_ptr<char* []> owned_environ;
+ char** new_environ = options.clear_environ ? nullptr : *_NSGetEnviron();
+ if (!options.environ.empty()) {
+ owned_environ = AlterEnvironment(new_environ, options.environ);
+ new_environ = owned_environ.get();
+ }
+
+ const char* executable_path = !options.real_path.empty()
+ ? options.real_path.value().c_str()
+ : argv_cstr[0];
+
+ // Use posix_spawnp as some callers expect to have PATH consulted.
+ pid_t pid;
+ int rv = posix_spawnp(&pid, executable_path, file_actions.get(), attr.get(),
+ &argv_cstr[0], new_environ);
+
+ if (rv != 0) {
+ DLOG(ERROR) << "posix_spawnp(" << executable_path << "): -" << rv << " "
+ << strerror(rv);
+ return Process();
+ }
+
+ if (options.wait) {
+ // While this isn't strictly disk IO, waiting for another process to
+ // finish is the sort of thing ThreadRestrictions is trying to prevent.
+ base::ThreadRestrictions::AssertIOAllowed();
+ pid_t ret = HANDLE_EINTR(waitpid(pid, nullptr, 0));
+ DPCHECK(ret > 0);
+ }
+
+ return Process(pid);
+}
+
} // namespace base
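A minimal usage sketch for the new macOS launch path (the argv and option values are illustrative; the LaunchOptions fields are as declared in launch.h):

    #include "base/process/launch.h"

    std::vector<std::string> argv = {"/bin/echo", "hello"};
    base::LaunchOptions options;
    options.wait = true;  // Block until the child exits.
    base::Process child = base::LaunchProcessPosixSpawn(argv, options);
    if (!child.IsValid()) {
      // posix_spawnp() failed; the error was already logged via DLOG above.
    }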
diff --git a/chromium/base/process/launch_posix.cc b/chromium/base/process/launch_posix.cc
index e4560436251..44eafcfb05a 100644
--- a/chromium/base/process/launch_posix.cc
+++ b/chromium/base/process/launch_posix.cc
@@ -60,6 +60,8 @@
#if defined(OS_MACOSX)
#include <crt_externs.h>
#include <sys/event.h>
+
+#include "base/feature_list.h"
#else
extern char** environ;
#endif
@@ -70,6 +72,11 @@ namespace base {
namespace {
+#if defined(OS_MACOSX)
+const Feature kMacLaunchProcessPosixSpawn{"MacLaunchProcessPosixSpawn",
+ FEATURE_ENABLED_BY_DEFAULT};
+#endif
+
// Get the process's "environment" (i.e. the thing that setenv/getenv
// work with).
char** GetEnvironment() {
@@ -291,6 +298,15 @@ Process LaunchProcess(const CommandLine& cmdline,
Process LaunchProcess(const std::vector<std::string>& argv,
const LaunchOptions& options) {
+#if defined(OS_MACOSX)
+ if (FeatureList::IsEnabled(kMacLaunchProcessPosixSpawn)) {
+ // TODO(rsesek): Do this unconditionally. There is one user for each of
+ // these two options. https://crbug.com/179923.
+ if (!options.pre_exec_delegate && options.current_directory.empty())
+ return LaunchProcessPosixSpawn(argv, options);
+ }
+#endif
+
size_t fd_shuffle_size = 0;
if (options.fds_to_remap) {
fd_shuffle_size = options.fds_to_remap->size();
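Because the dispatch is gated on a base::Feature, tests can force either path. A sketch, assuming base::test::ScopedFeatureList exists in this revision; since kMacLaunchProcessPosixSpawn is file-local, it is referenced by name:

    #include "base/test/scoped_feature_list.h"

    base::test::ScopedFeatureList feature_list;
    // Fall back to the fork()+exec() path for the duration of the test.
    feature_list.InitFromCommandLine("", "MacLaunchProcessPosixSpawn");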
diff --git a/chromium/base/process/memory_mac.mm b/chromium/base/process/memory_mac.mm
index bac75aeab15..4c1b12043e6 100644
--- a/chromium/base/process/memory_mac.mm
+++ b/chromium/base/process/memory_mac.mm
@@ -5,6 +5,7 @@
#include "base/process/memory.h"
#include <CoreFoundation/CoreFoundation.h>
+#import <Foundation/Foundation.h>
#include <errno.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>
diff --git a/chromium/base/process/port_provider_mac.cc b/chromium/base/process/port_provider_mac.cc
index ac13949ac83..23d214c3f31 100644
--- a/chromium/base/process/port_provider_mac.cc
+++ b/chromium/base/process/port_provider_mac.cc
@@ -21,7 +21,8 @@ void PortProvider::RemoveObserver(Observer* observer) {
void PortProvider::NotifyObservers(ProcessHandle process) {
base::AutoLock l(lock_);
- FOR_EACH_OBSERVER(Observer, observer_list_, OnReceivedTaskPort(process));
+ for (auto& observer : observer_list_)
+ observer.OnReceivedTaskPort(process);
}
} // namespace base
diff --git a/chromium/base/process/process.h b/chromium/base/process/process.h
index 70c82601932..5538475863b 100644
--- a/chromium/base/process/process.h
+++ b/chromium/base/process/process.h
@@ -15,8 +15,17 @@
#include "base/win/scoped_handle.h"
#endif
+#if defined(OS_MACOSX)
+#include "base/feature_list.h"
+#include "base/process/port_provider_mac.h"
+#endif
+
namespace base {
+#if defined(OS_MACOSX)
+extern const Feature kMacAllowBackgroundingProcesses;
+#endif
+
// Provides a move-only encapsulation of a process.
//
// This object is not tied to the lifetime of the underlying process: the
@@ -106,6 +115,28 @@ class BASE_EXPORT Process {
// is not required.
bool WaitForExitWithTimeout(TimeDelta timeout, int* exit_code);
+#if defined(OS_MACOSX)
+ // The Mac needs a Mach port in order to manipulate a process's priority,
+ // and there's no good way to get that from base given the pid. These Mac
+ // variants of the IsProcessBackgrounded and SetProcessBackgrounded API take
+ // a port provider for this reason. See crbug.com/460102
+ //
+ // A process is backgrounded when its task priority is
+ // |TASK_BACKGROUND_APPLICATION|.
+ //
+ // Returns true if the port_provider can locate a task port for the process
+ // and it is backgrounded. If port_provider is null, returns false.
+ bool IsProcessBackgrounded(PortProvider* port_provider) const;
+
+ // Set the process as backgrounded. If value is
+ // true, the priority of the associated task will be set to
+ // TASK_BACKGROUND_APPLICATION. If value is false, the
+ // priority of the process will be set to TASK_FOREGROUND_APPLICATION.
+ //
+ // Returns true if the priority was changed, false otherwise. If
+ // |port_provider| is null, this is a no-op and it returns false.
+ bool SetProcessBackgrounded(PortProvider* port_provider, bool value);
+#else
// A process is backgrounded when its priority is lower than normal.
// Return true if this process is backgrounded, false otherwise.
bool IsProcessBackgrounded() const;
@@ -115,7 +146,7 @@ class BASE_EXPORT Process {
// will be made "normal" - equivalent to default process priority.
// Returns true if the priority was changed, false otherwise.
bool SetProcessBackgrounded(bool value);
-
+#endif // defined(OS_MACOSX)
// Returns an integer representing the priority of a process. The meaning
// of this value is OS dependent.
int GetPriority() const;
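A minimal sketch of the Mac-only variants, assuming the caller holds a PortProvider (in the browser that role is played by MachBroker, which lives outside base; the helper below is hypothetical):

    // Returns true if |process| ends up in TASK_BACKGROUND_APPLICATION.
    bool MoveToBackground(base::Process& process,
                          base::PortProvider* port_provider) {
      if (!process.SetProcessBackgrounded(port_provider, true))
        return false;  // Null provider, no task port, or cannot background.
      return process.IsProcessBackgrounded(port_provider);
    }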
diff --git a/chromium/base/process/process_mac.cc b/chromium/base/process/process_mac.cc
new file mode 100644
index 00000000000..f83fbb9991e
--- /dev/null
+++ b/chromium/base/process/process_mac.cc
@@ -0,0 +1,90 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/process/process.h"
+
+#include <mach/mach.h>
+
+#include "base/feature_list.h"
+#include "base/mac/mach_logging.h"
+
+namespace base {
+
+// Enables backgrounding hidden renderers on Mac.
+const Feature kMacAllowBackgroundingProcesses{"MacAllowBackgroundingProcesses",
+ FEATURE_DISABLED_BY_DEFAULT};
+
+bool Process::CanBackgroundProcesses() {
+ return FeatureList::IsEnabled(kMacAllowBackgroundingProcesses);
+}
+
+bool Process::IsProcessBackgrounded(PortProvider* port_provider) const {
+ DCHECK(IsValid());
+ if (port_provider == nullptr || !CanBackgroundProcesses())
+ return false;
+
+ mach_port_t task_port = port_provider->TaskForPid(Pid());
+ if (task_port == TASK_NULL)
+ return false;
+
+ task_category_policy_data_t category_policy;
+ mach_msg_type_number_t task_info_count = TASK_CATEGORY_POLICY_COUNT;
+ boolean_t get_default = FALSE;
+
+ kern_return_t result =
+ task_policy_get(task_port, TASK_CATEGORY_POLICY,
+ reinterpret_cast<task_policy_t>(&category_policy),
+ &task_info_count, &get_default);
+ MACH_LOG_IF(ERROR, result != KERN_SUCCESS, result)
+ << "task_policy_get TASK_CATEGORY_POLICY";
+
+ if (result == KERN_SUCCESS && get_default == FALSE) {
+ return category_policy.role == TASK_BACKGROUND_APPLICATION;
+ }
+ return false;
+}
+
+bool Process::SetProcessBackgrounded(PortProvider* port_provider,
+ bool background) {
+ DCHECK(IsValid());
+ if (port_provider == nullptr || !CanBackgroundProcesses())
+ return false;
+
+ mach_port_t task_port = port_provider->TaskForPid(Pid());
+ if (task_port == TASK_NULL)
+ return false;
+
+ if (IsProcessBackgrounded(port_provider) == background)
+ return true;
+
+ task_category_policy category_policy;
+ category_policy.role =
+ background ? TASK_BACKGROUND_APPLICATION : TASK_FOREGROUND_APPLICATION;
+ kern_return_t result =
+ task_policy_set(task_port, TASK_CATEGORY_POLICY,
+ reinterpret_cast<task_policy_t>(&category_policy),
+ TASK_CATEGORY_POLICY_COUNT);
+
+ if (result != KERN_SUCCESS) {
+ MACH_LOG(ERROR, result) << "task_policy_set TASK_CATEGORY_POLICY";
+ return false;
+ }
+
+ // Latency QoS regulates timer throttling/accuracy. Select default tier
+ // on foreground because precise timer firing isn't needed.
+ struct task_qos_policy qos_policy = {
+ background ? LATENCY_QOS_TIER_5 : LATENCY_QOS_TIER_UNSPECIFIED,
+ background ? THROUGHPUT_QOS_TIER_5 : THROUGHPUT_QOS_TIER_UNSPECIFIED};
+ result = task_policy_set(task_port, TASK_OVERRIDE_QOS_POLICY,
+ reinterpret_cast<task_policy_t>(&qos_policy),
+ TASK_QOS_POLICY_COUNT);
+ if (result != KERN_SUCCESS) {
+ MACH_LOG(ERROR, result) << "task_policy_set TASK_OVERRIDE_QOS_POLICY";
+ return false;
+ }
+
+ return true;
+}
+
+} // namespace base
diff --git a/chromium/base/process/process_posix.cc b/chromium/base/process/process_posix.cc
index a9d745eeead..20fab921416 100644
--- a/chromium/base/process/process_posix.cc
+++ b/chromium/base/process/process_posix.cc
@@ -257,12 +257,12 @@ Process Process::DeprecatedGetProcessFromHandle(ProcessHandle handle) {
return Process(handle);
}
-#if !defined(OS_LINUX)
+#if !defined(OS_LINUX) && !defined(OS_MACOSX)
// static
bool Process::CanBackgroundProcesses() {
return false;
}
-#endif // !defined(OS_LINUX)
+#endif // !defined(OS_LINUX) && !defined(OS_MACOSX)
bool Process::IsValid() const {
return process_ != kNullProcessHandle;
@@ -361,7 +361,7 @@ bool Process::WaitForExitWithTimeout(TimeDelta timeout, int* exit_code) {
return WaitForExitWithTimeoutImpl(Handle(), exit_code, timeout);
}
-#if !defined(OS_LINUX)
+#if !defined(OS_LINUX) && !defined(OS_MACOSX)
bool Process::IsProcessBackgrounded() const {
// See SetProcessBackgrounded().
DCHECK(IsValid());
@@ -369,13 +369,13 @@ bool Process::IsProcessBackgrounded() const {
}
bool Process::SetProcessBackgrounded(bool value) {
- // Not implemented for POSIX systems other than Linux. With POSIX, if we were
- // to lower the process priority we wouldn't be able to raise it back to its
- // initial priority.
+ // Not implemented for POSIX systems other than Linux and Mac. With POSIX, if
+ // we were to lower the process priority we wouldn't be able to raise it back
+ // to its initial priority.
NOTIMPLEMENTED();
return false;
}
-#endif // !defined(OS_LINUX)
+#endif // !defined(OS_LINUX) && !defined(OS_MACOSX)
int Process::GetPriority() const {
DCHECK(IsValid());
diff --git a/chromium/base/process/process_unittest.cc b/chromium/base/process/process_unittest.cc
index 853f1fe8fff..90bb95a0fe9 100644
--- a/chromium/base/process/process_unittest.cc
+++ b/chromium/base/process/process_unittest.cc
@@ -22,6 +22,16 @@ const int kExpectedStillRunningExitCode = 0x102;
const int kExpectedStillRunningExitCode = 0;
#endif
+#if defined(OS_MACOSX)
+// Fake port provider that returns the calling process's
+// task port, ignoring its argument.
+class FakePortProvider : public base::PortProvider {
+ mach_port_t TaskForPid(base::ProcessHandle process) const override {
+ return mach_task_self();
+ }
+};
+#endif
+
} // namespace
namespace base {
@@ -171,6 +181,8 @@ TEST_F(ProcessTest, WaitForExitWithTimeout) {
// Note: a platform may not be willing or able to lower the priority of
// a process. The calls to SetProcessBackgrounded should be no-ops then.
TEST_F(ProcessTest, SetProcessBackgrounded) {
+ if (!Process::CanBackgroundProcesses())
+ return;
Process process(SpawnChild("SimpleChildProcess"));
int old_priority = process.GetPriority();
#if defined(OS_WIN)
@@ -178,11 +190,22 @@ TEST_F(ProcessTest, SetProcessBackgrounded) {
EXPECT_TRUE(process.IsProcessBackgrounded());
EXPECT_TRUE(process.SetProcessBackgrounded(false));
EXPECT_FALSE(process.IsProcessBackgrounded());
+#elif defined(OS_MACOSX)
+ // On the Mac, backgrounding a process requires a port to that process.
+ // In the browser it's available through the MachBroker class, which is not
+ // part of base. Additionally, there is an indefinite amount of time between
+ // spawning a process and receiving its port. Because this test just checks
+ // the ability to background/foreground a process, we can use the current
+ // process's port instead.
+ FakePortProvider provider;
+ EXPECT_TRUE(process.SetProcessBackgrounded(&provider, true));
+ EXPECT_TRUE(process.IsProcessBackgrounded(&provider));
+ EXPECT_TRUE(process.SetProcessBackgrounded(&provider, false));
+ EXPECT_FALSE(process.IsProcessBackgrounded(&provider));
+
#else
- if (process.CanBackgroundProcesses()) {
- process.SetProcessBackgrounded(true);
- process.SetProcessBackgrounded(false);
- }
+ process.SetProcessBackgrounded(true);
+ process.SetProcessBackgrounded(false);
#endif
int new_priority = process.GetPriority();
EXPECT_EQ(old_priority, new_priority);
@@ -191,6 +214,8 @@ TEST_F(ProcessTest, SetProcessBackgrounded) {
// Same as SetProcessBackgrounded but to this very process. It uses
// a different code path at least for Windows.
TEST_F(ProcessTest, SetProcessBackgroundedSelf) {
+ if (!Process::CanBackgroundProcesses())
+ return;
Process process = Process::Current();
int old_priority = process.GetPriority();
#if defined(OS_WIN)
@@ -198,6 +223,12 @@ TEST_F(ProcessTest, SetProcessBackgroundedSelf) {
EXPECT_TRUE(process.IsProcessBackgrounded());
EXPECT_TRUE(process.SetProcessBackgrounded(false));
EXPECT_FALSE(process.IsProcessBackgrounded());
+#elif defined(OS_MACOSX)
+ FakePortProvider provider;
+ EXPECT_TRUE(process.SetProcessBackgrounded(&provider, true));
+ EXPECT_TRUE(process.IsProcessBackgrounded(&provider));
+ EXPECT_TRUE(process.SetProcessBackgrounded(&provider, false));
+ EXPECT_FALSE(process.IsProcessBackgrounded(&provider));
#else
process.SetProcessBackgrounded(true);
process.SetProcessBackgrounded(false);
diff --git a/chromium/base/profiler/stack_sampling_profiler.cc b/chromium/base/profiler/stack_sampling_profiler.cc
index 10639759f6f..e25440f80c5 100644
--- a/chromium/base/profiler/stack_sampling_profiler.cc
+++ b/chromium/base/profiler/stack_sampling_profiler.cc
@@ -23,7 +23,7 @@ namespace base {
namespace {
// Used to ensure only one profiler is running at a time.
-LazyInstance<Lock> concurrent_profiling_lock = LAZY_INSTANCE_INITIALIZER;
+LazyInstance<Lock>::Leaky concurrent_profiling_lock = LAZY_INSTANCE_INITIALIZER;
// AsyncRunner ----------------------------------------------------------------
@@ -40,12 +40,13 @@ class AsyncRunner {
private:
AsyncRunner();
- // Runs the callback and deletes the AsyncRunner instance.
+ // Runs the callback and deletes the AsyncRunner instance. |profiles| is not
+ // const& because it must be passed with std::move.
static void RunCallbackAndDeleteInstance(
std::unique_ptr<AsyncRunner> object_to_be_deleted,
const StackSamplingProfiler::CompletedCallback& callback,
scoped_refptr<SingleThreadTaskRunner> task_runner,
- const StackSamplingProfiler::CallStackProfiles& profiles);
+ StackSamplingProfiler::CallStackProfiles profiles);
std::unique_ptr<StackSamplingProfiler> profiler_;
@@ -75,8 +76,8 @@ void AsyncRunner::RunCallbackAndDeleteInstance(
std::unique_ptr<AsyncRunner> object_to_be_deleted,
const StackSamplingProfiler::CompletedCallback& callback,
scoped_refptr<SingleThreadTaskRunner> task_runner,
- const StackSamplingProfiler::CallStackProfiles& profiles) {
- callback.Run(profiles);
+ StackSamplingProfiler::CallStackProfiles profiles) {
+ callback.Run(std::move(profiles));
// Delete the instance on the original calling thread.
task_runner->DeleteSoon(FROM_HERE, object_to_be_deleted.release());
}
@@ -110,10 +111,22 @@ StackSamplingProfiler::Frame::Frame()
StackSamplingProfiler::CallStackProfile::CallStackProfile() {}
StackSamplingProfiler::CallStackProfile::CallStackProfile(
- const CallStackProfile& other) = default;
+ CallStackProfile&& other) = default;
StackSamplingProfiler::CallStackProfile::~CallStackProfile() {}
+StackSamplingProfiler::CallStackProfile&
+StackSamplingProfiler::CallStackProfile::operator=(CallStackProfile&& other) =
+ default;
+
+StackSamplingProfiler::CallStackProfile
+StackSamplingProfiler::CallStackProfile::CopyForTesting() const {
+ return CallStackProfile(*this);
+}
+
+StackSamplingProfiler::CallStackProfile::CallStackProfile(
+ const CallStackProfile& other) = default;
+
// StackSamplingProfiler::SamplingThread --------------------------------------
StackSamplingProfiler::SamplingThread::SamplingThread(
@@ -139,7 +152,7 @@ void StackSamplingProfiler::SamplingThread::ThreadMain() {
CallStackProfiles profiles;
CollectProfiles(&profiles);
concurrent_profiling_lock.Get().Release();
- completed_callback_.Run(profiles);
+ completed_callback_.Run(std::move(profiles));
}
// Depending on how long the sampling takes and the length of the sampling
@@ -203,7 +216,7 @@ void StackSamplingProfiler::SamplingThread::CollectProfiles(
bool was_stopped = false;
CollectProfile(&profile, &previous_elapsed_profile_time, &was_stopped);
if (!profile.samples.empty())
- profiles->push_back(profile);
+ profiles->push_back(std::move(profile));
if (was_stopped)
return;
diff --git a/chromium/base/profiler/stack_sampling_profiler.h b/chromium/base/profiler/stack_sampling_profiler.h
index bf5a4f33721..cf1daf7e16d 100644
--- a/chromium/base/profiler/stack_sampling_profiler.h
+++ b/chromium/base/profiler/stack_sampling_profiler.h
@@ -113,9 +113,13 @@ class BASE_EXPORT StackSamplingProfiler {
// CallStackProfile represents a set of samples.
struct BASE_EXPORT CallStackProfile {
CallStackProfile();
- CallStackProfile(const CallStackProfile& other);
+ CallStackProfile(CallStackProfile&& other);
~CallStackProfile();
+ CallStackProfile& operator=(CallStackProfile&& other);
+
+ CallStackProfile CopyForTesting() const;
+
std::vector<Module> modules;
std::vector<Sample> samples;
@@ -124,6 +128,13 @@ class BASE_EXPORT StackSamplingProfiler {
// Time between samples.
TimeDelta sampling_period;
+
+ private:
+ // Copying is possible but expensive so disallow it except for internal use
+ // (i.e. CopyForTesting); use std::move instead.
+ CallStackProfile(const CallStackProfile& other);
+
+ DISALLOW_ASSIGN(CallStackProfile);
};
using CallStackProfiles = std::vector<CallStackProfile>;
@@ -151,7 +162,8 @@ class BASE_EXPORT StackSamplingProfiler {
TimeDelta sampling_interval;
};
- // The callback type used to collect completed profiles.
+ // The callback type used to collect completed profiles. The passed |profiles|
+ // are move-only.
//
// IMPORTANT NOTE: the callback is invoked on a thread the profiler
// constructs, rather than on the thread used to construct the profiler and
@@ -159,7 +171,7 @@ class BASE_EXPORT StackSamplingProfiler {
// threads with message loops that create StackSamplingProfilers, posting a
// task to the message loop with a copy of the profiles is the recommended
// thread-safe callback implementation.
- using CompletedCallback = Callback<void(const CallStackProfiles&)>;
+ using CompletedCallback = Callback<void(CallStackProfiles)>;
// Creates a profiler that sends completed profiles to |callback|. The second
// constructor is for test purposes.
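With the new signature, a completion callback receives the profiles by value and takes ownership with std::move. A sketch, where ConsumeProfiles is a hypothetical downstream consumer:

    void ConsumeProfiles(
        base::StackSamplingProfiler::CallStackProfiles profiles);

    void OnProfilesCollected(
        base::StackSamplingProfiler::CallStackProfiles profiles) {
      // CallStackProfile's copy constructor is private; move, don't copy.
      ConsumeProfiles(std::move(profiles));
    }

The unit-test changes below follow the same pattern.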
diff --git a/chromium/base/profiler/stack_sampling_profiler_unittest.cc b/chromium/base/profiler/stack_sampling_profiler_unittest.cc
index 3a59e6d2c30..4fc70b8e050 100644
--- a/chromium/base/profiler/stack_sampling_profiler_unittest.cc
+++ b/chromium/base/profiler/stack_sampling_profiler_unittest.cc
@@ -6,6 +6,7 @@
#include <stdint.h>
#include <cstdlib>
+#include <utility>
#include "base/bind.h"
#include "base/compiler_specific.h"
@@ -309,8 +310,8 @@ void SynchronousUnloadNativeLibrary(NativeLibrary library) {
// Called on the profiler thread when complete, to collect profiles.
void SaveProfiles(CallStackProfiles* profiles,
- const CallStackProfiles& pending_profiles) {
- *profiles = pending_profiles;
+ CallStackProfiles pending_profiles) {
+ *profiles = std::move(pending_profiles);
}
// Called on the profiler thread when complete. Collects profiles produced by
@@ -318,8 +319,8 @@ void SaveProfiles(CallStackProfiles* profiles,
// the profiler is done.
void SaveProfilesAndSignalEvent(CallStackProfiles* profiles,
WaitableEvent* event,
- const CallStackProfiles& pending_profiles) {
- *profiles = pending_profiles;
+ CallStackProfiles pending_profiles) {
+ *profiles = std::move(pending_profiles);
event->Signal();
}
diff --git a/chromium/base/stl_util.h b/chromium/base/stl_util.h
index 4159d8524a3..3f7555dde54 100644
--- a/chromium/base/stl_util.h
+++ b/chromium/base/stl_util.h
@@ -29,24 +29,6 @@ void STLClearObject(T* obj) {
obj->reserve(0);
}
-// For a range within a container of pointers, calls delete (non-array version)
-// on these pointers.
-// NOTE: for these three functions, we could just implement a DeleteObject
-// functor and then call for_each() on the range and functor, but this
-// requires us to pull in all of algorithm.h, which seems expensive.
-// For hash_[multi]set, it is important that this deletes behind the iterator
-// because the hash_set may call the hash function on the iterator when it is
- advanced, which could result in the hash function trying to dereference a
-// stale pointer.
-template <class ForwardIterator>
-void STLDeleteContainerPointers(ForwardIterator begin, ForwardIterator end) {
- while (begin != end) {
- ForwardIterator temp = begin;
- ++begin;
- delete *temp;
- }
-}
-
// Counts the number of instances of val in a container.
template <typename Container, typename T>
typename std::iterator_traits<
@@ -85,7 +67,13 @@ template <class T>
void STLDeleteElements(T* container) {
if (!container)
return;
- STLDeleteContainerPointers(container->begin(), container->end());
+
+ for (auto it = container->begin(); it != container->end();) {
+ auto temp = it;
+ ++it;
+ delete *temp;
+ }
+
container->clear();
}
@@ -97,8 +85,7 @@ void STLDeleteValues(T* container) {
if (!container)
return;
- auto it = container->begin();
- while (it != container->end()) {
+ for (auto it = container->begin(); it != container->end();) {
auto temp = it;
++it;
delete temp->second;
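Caller-visible behavior is unchanged: deletion still happens behind the advancing iterator, so hash containers that invoke the hash function when an iterator is advanced remain safe. A short usage sketch (namespace qualification as declared in stl_util.h):

    std::vector<int*> values;
    values.push_back(new int(1));
    values.push_back(new int(2));
    STLDeleteElements(&values);  // Deletes both ints, then clears the vector.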
diff --git a/chromium/base/strings/string_number_conversions_unittest.cc b/chromium/base/strings/string_number_conversions_unittest.cc
index 6b2bd97da9a..68e86dbdf57 100644
--- a/chromium/base/strings/string_number_conversions_unittest.cc
+++ b/chromium/base/strings/string_number_conversions_unittest.cc
@@ -745,6 +745,8 @@ TEST(StringNumberConversionsTest, StringToDouble) {
{"9e307", 9e307, true},
{"1.7976e308", 1.7976e308, true},
{"1.7977e308", HUGE_VAL, false},
+ {"1.797693134862315807e+308", HUGE_VAL, true},
+ {"1.797693134862315808e+308", HUGE_VAL, false},
{"9e308", HUGE_VAL, false},
{"9e309", HUGE_VAL, false},
{"9e999", HUGE_VAL, false},
@@ -754,6 +756,8 @@ TEST(StringNumberConversionsTest, StringToDouble) {
{"-9e307", -9e307, true},
{"-1.7976e308", -1.7976e308, true},
{"-1.7977e308", -HUGE_VAL, false},
+ {"-1.797693134862315807e+308", -HUGE_VAL, true},
+ {"-1.797693134862315808e+308", -HUGE_VAL, false},
{"-9e308", -HUGE_VAL, false},
{"-9e309", -HUGE_VAL, false},
{"-9e999", -HUGE_VAL, false},
@@ -769,6 +773,7 @@ TEST(StringNumberConversionsTest, StringToDouble) {
{"-1E-7", -0.0000001, true},
{"01e02", 100, true},
{"2.3e15", 2.3e15, true},
+ {"100e-309", 100e-309, true},
// Test some invalid cases.
{"\t\n\v\f\r -123.45e2", -12345.0, false},
diff --git a/chromium/base/synchronization/lock_impl_win.cc b/chromium/base/synchronization/lock_impl_win.cc
index ef0326753d0..31f95f4e394 100644
--- a/chromium/base/synchronization/lock_impl_win.cc
+++ b/chromium/base/synchronization/lock_impl_win.cc
@@ -18,8 +18,7 @@ bool LockImpl::Try() {
}
void LockImpl::Lock() {
- // Commented out pending https://crbug.com/652432
- // base::debug::ScopedLockAcquireActivity lock_activity(this);
+ base::debug::ScopedLockAcquireActivity lock_activity(this);
::AcquireSRWLockExclusive(&native_handle_);
}
diff --git a/chromium/base/synchronization/waitable_event_watcher.h b/chromium/base/synchronization/waitable_event_watcher.h
index d4096d121ac..44ef5047edf 100644
--- a/chromium/base/synchronization/waitable_event_watcher.h
+++ b/chromium/base/synchronization/waitable_event_watcher.h
@@ -21,7 +21,6 @@ namespace base {
class Flag;
class AsyncWaiter;
-class AsyncCallbackTask;
class WaitableEvent;
// This class provides a way to wait on a WaitableEvent asynchronously.
diff --git a/chromium/base/syslog_logging.cc b/chromium/base/syslog_logging.cc
index 1d3daf9e830..1cd545910a9 100644
--- a/chromium/base/syslog_logging.cc
+++ b/chromium/base/syslog_logging.cc
@@ -6,6 +6,8 @@
#include "base/syslog_logging.h"
#if defined(OS_WIN)
+#include "base/win/eventlog_messages.h"
+
#include <windows.h>
#elif defined(OS_LINUX)
#include <syslog.h>
@@ -33,7 +35,6 @@ EventLogMessage::~EventLogMessage() {
}
std::string message(log_message_.str());
- LPCSTR strings[1] = {message.data()};
WORD log_type = EVENTLOG_ERROR_TYPE;
switch (log_message_.severity()) {
case LOG_INFO:
@@ -51,10 +52,9 @@ EventLogMessage::~EventLogMessage() {
log_type = EVENTLOG_ERROR_TYPE;
break;
}
- // TODO(pastarmovj): Register Chrome's event log resource types to make the
- // entries nicer. 1337 is just a made up event id type.
- if (!ReportEventA(event_log_handle, log_type, 0, 1337, NULL, 1, 0,
- strings, NULL)) {
+ LPCSTR strings[1] = {message.data()};
+ if (!ReportEventA(event_log_handle, log_type, BROWSER_CATEGORY,
+ MSG_LOG_MESSAGE, NULL, 1, 0, strings, NULL)) {
stream() << " !!NOT ADDED TO EVENTLOG!!";
}
DeregisterEventSource(event_log_handle);
diff --git a/chromium/base/task_scheduler/delayed_task_manager.cc b/chromium/base/task_scheduler/delayed_task_manager.cc
index d648b9d4628..1cc928b0f0d 100644
--- a/chromium/base/task_scheduler/delayed_task_manager.cc
+++ b/chromium/base/task_scheduler/delayed_task_manager.cc
@@ -6,49 +6,18 @@
#include <utility>
+#include "base/bind.h"
#include "base/logging.h"
+#include "base/task_runner.h"
#include "base/task_scheduler/scheduler_worker_pool.h"
namespace base {
namespace internal {
-struct DelayedTaskManager::DelayedTask {
- DelayedTask(std::unique_ptr<Task> task,
- scoped_refptr<Sequence> sequence,
- SchedulerWorker* worker,
- SchedulerWorkerPool* worker_pool,
- uint64_t index)
- : task(std::move(task)),
- sequence(std::move(sequence)),
- worker(worker),
- worker_pool(worker_pool),
- index(index) {}
-
- DelayedTask(DelayedTask&& other) = default;
-
- ~DelayedTask() = default;
-
- DelayedTask& operator=(DelayedTask&& other) = default;
-
- // |task| will be posted to |worker_pool| with |sequence| and |worker|
- // when it becomes ripe for execution.
- std::unique_ptr<Task> task;
- scoped_refptr<Sequence> sequence;
- SchedulerWorker* worker;
- SchedulerWorkerPool* worker_pool;
-
- // Ensures that tasks that have the same |delayed_run_time| are sorted
- // according to the order in which they were added to the DelayedTaskManager.
- uint64_t index;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(DelayedTask);
-};
-
DelayedTaskManager::DelayedTaskManager(
- const Closure& on_delayed_run_time_updated)
- : on_delayed_run_time_updated_(on_delayed_run_time_updated) {
- DCHECK(!on_delayed_run_time_updated_.is_null());
+ scoped_refptr<TaskRunner> service_thread_task_runner)
+ : service_thread_task_runner_(std::move(service_thread_task_runner)) {
+ DCHECK(service_thread_task_runner_);
}
DelayedTaskManager::~DelayedTaskManager() = default;
@@ -61,92 +30,16 @@ void DelayedTaskManager::AddDelayedTask(std::unique_ptr<Task> task,
DCHECK(sequence);
DCHECK(worker_pool);
- const TimeTicks new_task_delayed_run_time = task->delayed_run_time;
- TimeTicks current_delayed_run_time;
-
- {
- AutoSchedulerLock auto_lock(lock_);
-
- if (!delayed_tasks_.empty())
- current_delayed_run_time = delayed_tasks_.top().task->delayed_run_time;
-
- delayed_tasks_.emplace(std::move(task), std::move(sequence), worker,
- worker_pool, ++delayed_task_index_);
- }
-
- if (current_delayed_run_time.is_null() ||
- new_task_delayed_run_time < current_delayed_run_time) {
- on_delayed_run_time_updated_.Run();
- }
-}
-
-void DelayedTaskManager::PostReadyTasks() {
- const TimeTicks now = Now();
-
- // Move delayed tasks that are ready for execution into |ready_tasks|. Don't
- post them right away to avoid imposing an unnecessary lock dependency on
- // PostTaskNowHelper.
- std::vector<DelayedTask> ready_tasks;
-
- {
- AutoSchedulerLock auto_lock(lock_);
- while (!delayed_tasks_.empty() &&
- delayed_tasks_.top().task->delayed_run_time <= now) {
- // The const_cast for std::move is okay since we're immediately popping
- // the task from |delayed_tasks_|. See DelayedTaskComparator::operator()
- // for minor debug-check implications.
- ready_tasks.push_back(
- std::move(const_cast<DelayedTask&>(delayed_tasks_.top())));
- delayed_tasks_.pop();
- }
- }
-
- // Post delayed tasks that are ready for execution.
- for (auto& delayed_task : ready_tasks) {
- delayed_task.worker_pool->PostTaskWithSequenceNow(
- std::move(delayed_task.task), std::move(delayed_task.sequence),
- delayed_task.worker);
- }
-}
-
-TimeTicks DelayedTaskManager::GetDelayedRunTime() const {
- AutoSchedulerLock auto_lock(lock_);
-
- if (delayed_tasks_.empty())
- return TimeTicks();
-
- return delayed_tasks_.top().task->delayed_run_time;
-}
-
-// In std::priority_queue, the largest element is on top. Therefore, this
-// comparator returns true if the delayed run time of |right| is earlier than
-// the delayed run time of |left|.
-bool DelayedTaskManager::DelayedTaskComparator::operator()(
- const DelayedTask& left,
- const DelayedTask& right) const {
-#ifndef NDEBUG
- // Due to STL consistency checks in Windows and const_cast'ing right before
- // popping the DelayedTask, a null task can be passed to this comparator in
- // Debug builds. To satisfy these consistency checks, this comparator
- // considers null tasks to be larger than anything.
- DCHECK(left.task || right.task);
- if (!left.task)
- return false;
- if (!right.task)
- return true;
-#else
- DCHECK(left.task);
- DCHECK(right.task);
-#endif // NDEBUG
- if (left.task->delayed_run_time > right.task->delayed_run_time)
- return true;
- if (left.task->delayed_run_time < right.task->delayed_run_time)
- return false;
- return left.index > right.index;
-}
+ const TimeDelta delay = task->delay;
+ DCHECK(!delay.is_zero());
-TimeTicks DelayedTaskManager::Now() const {
- return TimeTicks::Now();
+ // TODO(fdoray): Use |task->delayed_run_time| on the service thread
+ // MessageLoop rather than recomputing it from |delay|.
+ service_thread_task_runner_->PostDelayedTask(
+ FROM_HERE, Bind(&SchedulerWorkerPool::PostTaskWithSequenceNow,
+ Unretained(worker_pool), Passed(std::move(task)),
+ std::move(sequence), Unretained(worker)),
+ delay);
}
} // namespace internal
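The forwarding above binds a move-only |task| into Bind(); Passed() is what makes that legal. A minimal sketch of the idiom with a simpler payload (Consume is hypothetical):

    void Consume(std::unique_ptr<int> value);  // Hypothetical consumer.

    std::unique_ptr<int> value(new int(42));
    base::Closure cb =
        base::Bind(&Consume, base::Passed(std::move(value)));
    cb.Run();  // Run at most once: Passed() moved ownership into the bind state.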
diff --git a/chromium/base/task_scheduler/delayed_task_manager.h b/chromium/base/task_scheduler/delayed_task_manager.h
index d773fe5da94..3bfb355ef6d 100644
--- a/chromium/base/task_scheduler/delayed_task_manager.h
+++ b/chromium/base/task_scheduler/delayed_task_manager.h
@@ -5,40 +5,37 @@
#ifndef BASE_TASK_SCHEDULER_DELAYED_TASK_MANAGER_H_
#define BASE_TASK_SCHEDULER_DELAYED_TASK_MANAGER_H_
-#include <stdint.h>
-
#include <memory>
-#include <queue>
-#include <vector>
#include "base/base_export.h"
-#include "base/callback.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
-#include "base/task_scheduler/scheduler_lock.h"
-#include "base/task_scheduler/sequence.h"
-#include "base/task_scheduler/task.h"
#include "base/time/time.h"
namespace base {
+
+class TaskRunner;
+
namespace internal {
class SchedulerWorker;
class SchedulerWorkerPool;
+class Sequence;
+struct Task;
-// A DelayedTaskManager holds delayed Tasks until they become ripe for
-// execution. This class is thread-safe.
+// A DelayedTaskManager forwards Tasks to a SchedulerWorkerPool when they become
+// ripe for execution. This class is thread-safe.
class BASE_EXPORT DelayedTaskManager {
public:
- // |on_delayed_run_time_updated| is invoked when the delayed run time is
- // updated as a result of adding a delayed task to the manager.
- explicit DelayedTaskManager(const Closure& on_delayed_run_time_updated);
+ // |service_thread_task_runner| posts tasks to the TaskScheduler service
+ // thread.
+ explicit DelayedTaskManager(
+ scoped_refptr<TaskRunner> service_thread_task_runner);
~DelayedTaskManager();
- // Adds |task| to a queue of delayed tasks. The task will be posted to
- // |worker_pool| with |sequence| and |worker| the first time that
- // PostReadyTasks() is called while Now() is passed |task->delayed_run_time|.
- // |worker| is a SchedulerWorker owned by |worker_pool| or nullptr.
+ // Posts |task|. The task will be forwarded to |worker_pool| with |sequence|
+ // and |worker| when it becomes ripe for execution. |worker| is a
+ // SchedulerWorker owned by |worker_pool| or nullptr.
//
// TODO(robliao): Find a concrete way to manage the memory of |worker| and
// |worker_pool|. These objects are never deleted in production, but it is
@@ -48,36 +45,8 @@ class BASE_EXPORT DelayedTaskManager {
SchedulerWorker* worker,
SchedulerWorkerPool* worker_pool);
- // Posts delayed tasks that are ripe for execution.
- void PostReadyTasks();
-
- // Returns the next time at which a delayed task will become ripe for
- // execution, or a null TimeTicks if there are no pending delayed tasks.
- TimeTicks GetDelayedRunTime() const;
-
- // Returns the current time. Can be overridden for tests.
- virtual TimeTicks Now() const;
-
private:
- struct DelayedTask;
- struct DelayedTaskComparator {
- bool operator()(const DelayedTask& left, const DelayedTask& right) const;
- };
-
- const Closure on_delayed_run_time_updated_;
-
- // Synchronizes access to all members below.
- mutable SchedulerLock lock_;
-
- // Priority queue of delayed tasks. The delayed task with the smallest
- // |task->delayed_run_time| is in front of the priority queue.
- using DelayedTaskQueue = std::priority_queue<DelayedTask,
- std::vector<DelayedTask>,
- DelayedTaskComparator>;
- DelayedTaskQueue delayed_tasks_;
-
- // The index to assign to the next delayed task added to the manager.
- uint64_t delayed_task_index_ = 0;
+ const scoped_refptr<TaskRunner> service_thread_task_runner_;
DISALLOW_COPY_AND_ASSIGN(DelayedTaskManager);
};
diff --git a/chromium/base/task_scheduler/delayed_task_manager_unittest.cc b/chromium/base/task_scheduler/delayed_task_manager_unittest.cc
index c1c85ef5e8b..f964cda5a67 100644
--- a/chromium/base/task_scheduler/delayed_task_manager_unittest.cc
+++ b/chromium/base/task_scheduler/delayed_task_manager_unittest.cc
@@ -8,13 +8,14 @@
#include <utility>
#include "base/bind.h"
-#include "base/bind_helpers.h"
-#include "base/logging.h"
#include "base/memory/ref_counted.h"
-#include "base/task_scheduler/scheduler_lock.h"
+#include "base/sequenced_task_runner.h"
+#include "base/single_thread_task_runner.h"
+#include "base/task_runner.h"
#include "base/task_scheduler/scheduler_worker_pool.h"
#include "base/task_scheduler/sequence.h"
#include "base/task_scheduler/task.h"
+#include "base/test/test_mock_time_task_runner.h"
#include "base/time/time.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -23,45 +24,38 @@ namespace base {
namespace internal {
namespace {
-class TestDelayedTaskManager : public DelayedTaskManager {
- public:
- TestDelayedTaskManager()
- : DelayedTaskManager(
- Bind(&TestDelayedTaskManager::OnDelayedRunTimeUpdated,
- Unretained(this))) {}
-
- void SetCurrentTime(TimeTicks now) { now_ = now; }
-
- // DelayedTaskManager:
- TimeTicks Now() const override { return now_; }
-
- MOCK_METHOD0(OnDelayedRunTimeUpdated, void());
-
- private:
- TimeTicks now_ = TimeTicks::Now();
-
- DISALLOW_COPY_AND_ASSIGN(TestDelayedTaskManager);
-};
+constexpr TimeDelta kLongDelay = TimeDelta::FromHours(1);
class MockSchedulerWorkerPool : public SchedulerWorkerPool {
public:
// SchedulerWorkerPool:
scoped_refptr<TaskRunner> CreateTaskRunnerWithTraits(
- const TaskTraits& traits,
- ExecutionMode execution_mode) override {
- NOTREACHED();
+ const TaskTraits& traits) override {
+ ADD_FAILURE() << "Call to unimplemented method.";
+ return nullptr;
+ }
+
+ scoped_refptr<SequencedTaskRunner> CreateSequencedTaskRunnerWithTraits(
+ const TaskTraits& traits) override {
+ ADD_FAILURE() << "Call to unimplemented method.";
+ return nullptr;
+ }
+
+ scoped_refptr<SingleThreadTaskRunner> CreateSingleThreadTaskRunnerWithTraits(
+ const TaskTraits& traits) override {
+ ADD_FAILURE() << "Call to unimplemented method.";
return nullptr;
}
void ReEnqueueSequence(scoped_refptr<Sequence> sequence,
const SequenceSortKey& sequence_sort_key) override {
- NOTREACHED();
+ ADD_FAILURE() << "Call to unimplemented method.";
}
bool PostTaskWithSequence(std::unique_ptr<Task> task,
scoped_refptr<Sequence> sequence,
SchedulerWorker* worker) override {
- NOTREACHED();
+ ADD_FAILURE() << "Call to unimplemented method.";
return true;
}
@@ -79,166 +73,93 @@ class MockSchedulerWorkerPool : public SchedulerWorkerPool {
} // namespace
-// Verify that GetDelayedRunTime() returns a null TimeTicks when there are
-// no pending delayed tasks.
-TEST(TaskSchedulerDelayedTaskManagerTest,
- GetDelayedRunTimeNoPendingDelayedTasks) {
- TestDelayedTaskManager manager;
- EXPECT_EQ(TimeTicks(), manager.GetDelayedRunTime());
-}
-
-// Verify that a delayed task isn't posted before it is ripe for execution.
-TEST(TaskSchedulerDelayedTaskManagerTest, PostReadyTaskBeforeDelayedRunTime) {
- testing::StrictMock<TestDelayedTaskManager> manager;
-
- std::unique_ptr<Task> task(
- new Task(FROM_HERE, Bind(&DoNothing), TaskTraits(), TimeDelta()));
- task->delayed_run_time = manager.Now() + TimeDelta::FromSeconds(1);
- const Task* task_raw = task.get();
- scoped_refptr<Sequence> sequence(new Sequence);
- testing::StrictMock<MockSchedulerWorkerPool> worker_pool;
-
- // Add |task| to the DelayedTaskManager.
- EXPECT_CALL(manager, OnDelayedRunTimeUpdated());
- manager.AddDelayedTask(std::move(task), sequence, nullptr, &worker_pool);
- testing::Mock::VerifyAndClear(&manager);
- EXPECT_EQ(task_raw->delayed_run_time, manager.GetDelayedRunTime());
-
- // Ask the DelayedTaskManager to post tasks that are ripe for execution. Don't
- // expect any call to the mock method of |worker_pool|.
- manager.PostReadyTasks();
-
- // The delayed run time shouldn't have changed.
- EXPECT_EQ(task_raw->delayed_run_time, manager.GetDelayedRunTime());
-}
-
-// Verify that a delayed task is posted when PostReadyTasks() is called with the
-// current time equal to the task's delayed run time.
-TEST(TaskSchedulerDelayedTaskManagerTest, PostReadyTasksAtDelayedRunTime) {
- testing::StrictMock<TestDelayedTaskManager> manager;
+// Verify that a delayed task isn't forwarded to its SchedulerWorkerPool before
+// it is ripe for execution.
+TEST(TaskSchedulerDelayedTaskManagerTest, DelayedTaskDoesNotRunTooEarly) {
+ scoped_refptr<TestMockTimeTaskRunner> service_thread_task_runner(
+ new TestMockTimeTaskRunner);
+ DelayedTaskManager manager(service_thread_task_runner);
std::unique_ptr<Task> task(
- new Task(FROM_HERE, Bind(&DoNothing), TaskTraits(), TimeDelta()));
- task->delayed_run_time = manager.Now() + TimeDelta::FromSeconds(1);
- const Task* task_raw = task.get();
+ new Task(FROM_HERE, Bind(&DoNothing), TaskTraits(), kLongDelay));
scoped_refptr<Sequence> sequence(new Sequence);
testing::StrictMock<MockSchedulerWorkerPool> worker_pool;
- // Add |task| to the DelayedTaskManager.
- EXPECT_CALL(manager, OnDelayedRunTimeUpdated());
+ // Send |task| to the DelayedTaskManager.
manager.AddDelayedTask(std::move(task), sequence, nullptr, &worker_pool);
- testing::Mock::VerifyAndClear(&manager);
- EXPECT_EQ(task_raw->delayed_run_time, manager.GetDelayedRunTime());
- // Fast-forward time to |task_raw|'s delayed run time.
- manager.SetCurrentTime(task_raw->delayed_run_time);
-
- // Ask the DelayedTaskManager to post tasks that are ripe for execution.
- EXPECT_CALL(worker_pool,
- PostTaskWithSequenceNowMock(task_raw, sequence.get(), nullptr));
- manager.PostReadyTasks();
- testing::Mock::VerifyAndClear(&manager);
- EXPECT_EQ(TimeTicks(), manager.GetDelayedRunTime());
+ // Run tasks that are ripe for execution. Don't expect any call to the mock
+ // method of |worker_pool|.
+ service_thread_task_runner->RunUntilIdle();
}
-// Verify that a delayed task is posted when PostReadyTasks() is called with the
-// current time greater than the task's delayed run time.
-TEST(TaskSchedulerDelayedTaskManagerTest, PostReadyTasksAfterDelayedRunTime) {
- testing::StrictMock<TestDelayedTaskManager> manager;
+// Verify that a delayed task is forwarded to its SchedulerWorkerPool when it is
+// ripe for execution.
+TEST(TaskSchedulerDelayedTaskManagerTest, DelayedTaskRunsAfterDelay) {
+ scoped_refptr<TestMockTimeTaskRunner> service_thread_task_runner(
+ new TestMockTimeTaskRunner);
+ DelayedTaskManager manager(service_thread_task_runner);
std::unique_ptr<Task> task(
- new Task(FROM_HERE, Bind(&DoNothing), TaskTraits(), TimeDelta()));
- task->delayed_run_time = manager.Now() + TimeDelta::FromSeconds(1);
+ new Task(FROM_HERE, Bind(&DoNothing), TaskTraits(), kLongDelay));
const Task* task_raw = task.get();
scoped_refptr<Sequence> sequence(new Sequence);
testing::StrictMock<MockSchedulerWorkerPool> worker_pool;
- // Add |task| to the DelayedTaskManager.
- EXPECT_CALL(manager, OnDelayedRunTimeUpdated());
+ // Send |task| to the DelayedTaskManager.
manager.AddDelayedTask(std::move(task), sequence, nullptr, &worker_pool);
- testing::Mock::VerifyAndClear(&manager);
- EXPECT_EQ(task_raw->delayed_run_time, manager.GetDelayedRunTime());
-
- // Fast-forward time to |task_raw|'s delayed run time.
- manager.SetCurrentTime(task_raw->delayed_run_time +
- TimeDelta::FromSeconds(10));
- // Ask the DelayedTaskManager to post tasks that are ripe for execution.
+ // Fast-forward time. Expect a call to the mock method of |worker_pool|.
EXPECT_CALL(worker_pool,
PostTaskWithSequenceNowMock(task_raw, sequence.get(), nullptr));
- manager.PostReadyTasks();
- testing::Mock::VerifyAndClear(&manager);
- EXPECT_EQ(TimeTicks(), manager.GetDelayedRunTime());
+ service_thread_task_runner->FastForwardBy(kLongDelay);
}
-// Verify that when multiple tasks are added to a DelayedTaskManager, they are
-// posted when they become ripe for execution.
-TEST(TaskSchedulerDelayedTaskManagerTest, AddAndPostReadyTasks) {
- testing::StrictMock<TestDelayedTaskManager> manager;
+// Verify that multiple delayed tasks are forwarded to their SchedulerWorkerPool
+// when they are ripe for execution.
+TEST(TaskSchedulerDelayedTaskManagerTest, DelayedTasksRunAfterDelay) {
+ scoped_refptr<TestMockTimeTaskRunner> service_thread_task_runner(
+ new TestMockTimeTaskRunner);
+ DelayedTaskManager manager(service_thread_task_runner);
scoped_refptr<Sequence> sequence(new Sequence);
testing::StrictMock<MockSchedulerWorkerPool> worker_pool;
- std::unique_ptr<Task> task_a(
- new Task(FROM_HERE, Bind(&DoNothing), TaskTraits(), TimeDelta()));
- task_a->delayed_run_time = manager.Now() + TimeDelta::FromSeconds(2);
+ std::unique_ptr<Task> task_a(new Task(FROM_HERE, Bind(&DoNothing),
+ TaskTraits(), TimeDelta::FromHours(1)));
const Task* task_a_raw = task_a.get();
- std::unique_ptr<Task> task_b(
- new Task(FROM_HERE, Bind(&DoNothing), TaskTraits(), TimeDelta()));
- task_b->delayed_run_time = manager.Now() + TimeDelta::FromSeconds(2);
+ std::unique_ptr<Task> task_b(new Task(FROM_HERE, Bind(&DoNothing),
+ TaskTraits(), TimeDelta::FromHours(2)));
const Task* task_b_raw = task_b.get();
- std::unique_ptr<Task> task_c(
- new Task(FROM_HERE, Bind(&DoNothing), TaskTraits(), TimeDelta()));
- task_c->delayed_run_time = manager.Now() + TimeDelta::FromSeconds(1);
+ std::unique_ptr<Task> task_c(new Task(FROM_HERE, Bind(&DoNothing),
+ TaskTraits(), TimeDelta::FromHours(1)));
const Task* task_c_raw = task_c.get();
- // Add |task_a| to the DelayedTaskManager. The delayed run time should be
- // updated to |task_a|'s delayed run time.
- EXPECT_CALL(manager, OnDelayedRunTimeUpdated());
+ // Send tasks to the DelayedTaskManager.
manager.AddDelayedTask(std::move(task_a), sequence, nullptr, &worker_pool);
- testing::Mock::VerifyAndClear(&manager);
- EXPECT_EQ(task_a_raw->delayed_run_time, manager.GetDelayedRunTime());
-
- // Add |task_b| to the DelayedTaskManager. The delayed run time shouldn't
- // change.
manager.AddDelayedTask(std::move(task_b), sequence, nullptr, &worker_pool);
- testing::Mock::VerifyAndClear(&manager);
- EXPECT_EQ(task_a_raw->delayed_run_time, manager.GetDelayedRunTime());
-
- // Add |task_c| to the DelayedTaskManager. The delayed run time should be
- // updated to |task_c|'s delayed run time.
- EXPECT_CALL(manager, OnDelayedRunTimeUpdated());
manager.AddDelayedTask(std::move(task_c), sequence, nullptr, &worker_pool);
- testing::Mock::VerifyAndClear(&manager);
- EXPECT_EQ(task_c_raw->delayed_run_time, manager.GetDelayedRunTime());
- // Fast-forward time to |task_c_raw|'s delayed run time.
- manager.SetCurrentTime(task_c_raw->delayed_run_time);
+ // Run tasks that are ripe for execution on the service thread. Don't expect
+ // any call to the mock method of |worker_pool|.
+ service_thread_task_runner->RunUntilIdle();
- // Ask the DelayedTaskManager to post tasks that are ripe for execution.
- // |task_c_raw| should be posted and the delayed run time should become
- // |task_a_raw|'s delayed run time.
+ // Fast-forward time. Expect |task_a_raw| and |task_c_raw| to be forwarded to
+ // the worker pool.
+ EXPECT_CALL(worker_pool,
+ PostTaskWithSequenceNowMock(task_a_raw, sequence.get(), nullptr));
EXPECT_CALL(worker_pool,
PostTaskWithSequenceNowMock(task_c_raw, sequence.get(), nullptr));
- manager.PostReadyTasks();
+ service_thread_task_runner->FastForwardBy(TimeDelta::FromHours(1));
testing::Mock::VerifyAndClear(&worker_pool);
- EXPECT_EQ(task_a_raw->delayed_run_time, manager.GetDelayedRunTime());
-
- // Fast-forward time to |task_a_raw|'s delayed run time.
- manager.SetCurrentTime(task_a_raw->delayed_run_time);
- // Ask the DelayedTaskManager to post tasks that are ripe for execution.
- // |task_a_raw| and |task_b_raw| should be posted and the delayed run time
- // should become a null TimeTicks.
- EXPECT_CALL(worker_pool,
- PostTaskWithSequenceNowMock(task_a_raw, sequence.get(), nullptr));
+ // Fast-forward time. Expect |task_b_raw| to be forwarded to the worker pool.
EXPECT_CALL(worker_pool,
PostTaskWithSequenceNowMock(task_b_raw, sequence.get(), nullptr));
- manager.PostReadyTasks();
+ service_thread_task_runner->FastForwardBy(TimeDelta::FromHours(1));
testing::Mock::VerifyAndClear(&worker_pool);
- EXPECT_EQ(TimeTicks(), manager.GetDelayedRunTime());
}
} // namespace internal
diff --git a/chromium/base/task_scheduler/initialization_util.cc b/chromium/base/task_scheduler/initialization_util.cc
new file mode 100644
index 00000000000..7accd19c6f9
--- /dev/null
+++ b/chromium/base/task_scheduler/initialization_util.cc
@@ -0,0 +1,22 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/initialization_util.h"
+
+#include <algorithm>
+
+#include "base/sys_info.h"
+
+namespace base {
+
+int RecommendedMaxNumberOfThreadsInPool(int min,
+ int max,
+ double cores_multiplier,
+ int offset) {
+ const int num_of_cores = SysInfo::NumberOfProcessors();
+  const int threads = std::ceil(num_of_cores * cores_multiplier) + offset;
+ return std::min(max, std::max(min, threads));
+}
+
+} // namespace base
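A worked example of the clamp, assuming a 16-core machine: with min=8, max=32, cores_multiplier=0.5 and offset=0, threads = ceil(16 * 0.5) + 0 = 8, so std::min(32, std::max(8, 8)) yields a pool size of 8.

    // Illustrative values only: half the cores, clamped to [8, 32].
    const int max_threads =
        base::RecommendedMaxNumberOfThreadsInPool(8, 32, 0.5, 0);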
diff --git a/chromium/base/task_scheduler/initialization_util.h b/chromium/base/task_scheduler/initialization_util.h
new file mode 100644
index 00000000000..c3bd9e7c4a3
--- /dev/null
+++ b/chromium/base/task_scheduler/initialization_util.h
@@ -0,0 +1,21 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_INITIALIZATION_UTIL_H_
+#define BASE_TASK_SCHEDULER_INITIALIZATION_UTIL_H_
+
+#include "base/base_export.h"
+
+namespace base {
+
+// Computes a value that may be used as the maximum number of threads in a
+// TaskScheduler pool. Developers may use other methods to choose this maximum.
+BASE_EXPORT int RecommendedMaxNumberOfThreadsInPool(int min,
+ int max,
+ double cores_multiplier,
+ int offset);
+
+} // namespace base
+
+#endif // BASE_TASK_SCHEDULER_INITIALIZATION_UTIL_H_
diff --git a/chromium/base/task_scheduler/post_task.cc b/chromium/base/task_scheduler/post_task.cc
index f415cd3800b..737a219c260 100644
--- a/chromium/base/task_scheduler/post_task.cc
+++ b/chromium/base/task_scheduler/post_task.cc
@@ -13,7 +13,7 @@ namespace {
class PostTaskAndReplyTaskRunner : public internal::PostTaskAndReplyImpl {
public:
- explicit PostTaskAndReplyTaskRunner(TaskTraits traits)
+ explicit PostTaskAndReplyTaskRunner(const TaskTraits& traits)
: traits_(traits) {}
private:
@@ -40,23 +40,32 @@ void PostTaskAndReply(const tracked_objects::Location& from_here,
}
void PostTaskWithTraits(const tracked_objects::Location& from_here,
- TaskTraits traits,
+ const TaskTraits& traits,
const Closure& task) {
TaskScheduler::GetInstance()->PostTaskWithTraits(from_here, traits, task);
}
void PostTaskWithTraitsAndReply(const tracked_objects::Location& from_here,
- TaskTraits traits,
+ const TaskTraits& traits,
const Closure& task,
const Closure& reply) {
PostTaskAndReplyTaskRunner(traits).PostTaskAndReply(from_here, task, reply);
}
-scoped_refptr<TaskRunner> CreateTaskRunnerWithTraits(
- TaskTraits traits,
- ExecutionMode execution_mode) {
- return TaskScheduler::GetInstance()->CreateTaskRunnerWithTraits(
- traits, execution_mode);
+scoped_refptr<TaskRunner> CreateTaskRunnerWithTraits(const TaskTraits& traits) {
+ return TaskScheduler::GetInstance()->CreateTaskRunnerWithTraits(traits);
+}
+
+scoped_refptr<SequencedTaskRunner> CreateSequencedTaskRunnerWithTraits(
+ const TaskTraits& traits) {
+ return TaskScheduler::GetInstance()->CreateSequencedTaskRunnerWithTraits(
+ traits);
+}
+
+scoped_refptr<SingleThreadTaskRunner> CreateSingleThreadTaskRunnerWithTraits(
+ const TaskTraits& traits) {
+ return TaskScheduler::GetInstance()->CreateSingleThreadTaskRunnerWithTraits(
+ traits);
}
} // namespace base
diff --git a/chromium/base/task_scheduler/post_task.h b/chromium/base/task_scheduler/post_task.h
index a7a2114efb7..346a2321476 100644
--- a/chromium/base/task_scheduler/post_task.h
+++ b/chromium/base/task_scheduler/post_task.h
@@ -9,6 +9,8 @@
#include "base/callback_forward.h"
#include "base/location.h"
#include "base/memory/ref_counted.h"
+#include "base/sequenced_task_runner.h"
+#include "base/single_thread_task_runner.h"
#include "base/task_runner.h"
#include "base/task_scheduler/task_traits.h"
@@ -32,18 +34,17 @@ namespace base {
// Bind(...));
//
// To post tasks that must run in sequence:
-// scoped_refptr<TaskRunner> task_runner = CreateTaskRunnerWithTraits(
-// TaskTraits(), ExecutionMode::SEQUENCED);
+// scoped_refptr<SequencedTaskRunner> task_runner =
+// CreateSequencedTaskRunnerWithTraits(TaskTraits());
// task_runner.PostTask(FROM_HERE, Bind(...));
// task_runner.PostTask(FROM_HERE, Bind(...));
//
// To post file I/O tasks that must run in sequence and can be skipped on
// shutdown:
-// scoped_refptr<TaskRunner> task_runner =
-// CreateTaskRunnerWithTraits(
+// scoped_refptr<SequencedTaskRunner> task_runner =
+// CreateSequencedTaskRunnerWithTraits(
// TaskTraits().WithFileIO().WithShutdownBehavior(
-// TaskShutdownBehavior::SKIP_ON_SHUTDOWN),
-// ExecutionMode::SEQUENCED);
+// TaskShutdownBehavior::SKIP_ON_SHUTDOWN));
// task_runner.PostTask(FROM_HERE, Bind(...));
// task_runner.PostTask(FROM_HERE, Bind(...));
//
@@ -72,7 +73,7 @@ BASE_EXPORT void PostTaskAndReply(const tracked_objects::Location& from_here,
// Posts |task| with specific |traits| to the TaskScheduler.
BASE_EXPORT void PostTaskWithTraits(const tracked_objects::Location& from_here,
- TaskTraits traits,
+ const TaskTraits& traits,
const Closure& task);
// Posts |task| with specific |traits| to the TaskScheduler and posts |reply| on
@@ -81,15 +82,32 @@ BASE_EXPORT void PostTaskWithTraits(const tracked_objects::Location& from_here,
// SequencedTaskRunnerHandle::IsSet().
BASE_EXPORT void PostTaskWithTraitsAndReply(
const tracked_objects::Location& from_here,
- TaskTraits traits,
+ const TaskTraits& traits,
const Closure& task,
const Closure& reply);
-// Returns a TaskRunner whose PostTask invocations will result in scheduling
-// tasks using |traits| which will be executed according to |execution_mode|.
+// Returns a TaskRunner whose PostTask invocations result in scheduling tasks
+// using |traits|. Tasks may run in any order and in parallel.
BASE_EXPORT scoped_refptr<TaskRunner> CreateTaskRunnerWithTraits(
- TaskTraits traits,
- ExecutionMode execution_mode);
+ const TaskTraits& traits);
+
+// Returns a SequencedTaskRunner whose PostTask invocations result in scheduling
+// tasks using |traits|. Tasks run one at a time in posting order.
+BASE_EXPORT scoped_refptr<SequencedTaskRunner>
+CreateSequencedTaskRunnerWithTraits(const TaskTraits& traits);
+
+// Returns a SingleThreadTaskRunner whose PostTask invocations result in
+// scheduling tasks using |traits|. Tasks run on a single thread in posting
+// order.
+//
+// If all you need is to make sure that tasks don't run concurrently (e.g.
+// because they access a data structure which is not thread-safe), use
+// CreateSequencedTaskRunnerWithTraits(). Only use this if you rely on a thread-
+// affine API (it might be safer to assume thread-affinity when dealing with
+// under-documented third-party APIs, e.g. other OS') or share data across tasks
+// using thread-local storage.
+BASE_EXPORT scoped_refptr<SingleThreadTaskRunner>
+CreateSingleThreadTaskRunnerWithTraits(const TaskTraits& traits);
} // namespace base
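A minimal sketch of the reply pattern declared above; both bound functions are illustrative, and the posting thread must satisfy SequencedTaskRunnerHandle::IsSet():

    void DoBlockingWork();  // Illustrative; runs on the scheduler.
    void OnWorkDone();      // Illustrative; runs on the posting sequence.

    base::PostTaskWithTraitsAndReply(
        FROM_HERE,
        base::TaskTraits().WithFileIO(),
        base::Bind(&DoBlockingWork),
        base::Bind(&OnWorkDone));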
diff --git a/chromium/base/task_scheduler/scheduler_service_thread.cc b/chromium/base/task_scheduler/scheduler_service_thread.cc
deleted file mode 100644
index 9f6936ba47a..00000000000
--- a/chromium/base/task_scheduler/scheduler_service_thread.cc
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/task_scheduler/scheduler_service_thread.h"
-
-#include <utility>
-
-#include "base/logging.h"
-#include "base/memory/ptr_util.h"
-#include "base/synchronization/waitable_event.h"
-#include "base/task_scheduler/delayed_task_manager.h"
-#include "base/task_scheduler/scheduler_worker.h"
-#include "base/task_scheduler/sequence.h"
-#include "base/threading/thread_checker.h"
-#include "base/time/time.h"
-
-namespace base {
-namespace internal {
-namespace {
-
-class ServiceThreadDelegate : public SchedulerWorker::Delegate {
- public:
- ServiceThreadDelegate(DelayedTaskManager* delayed_task_manager)
- : delayed_task_manager_(delayed_task_manager) {}
-
- // SchedulerWorker::Delegate:
- void OnMainEntry(SchedulerWorker* worker,
- const TimeDelta& detach_duration) override {
- DCHECK(detach_duration.is_max());
- }
-
- scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) override {
- delayed_task_manager_->PostReadyTasks();
- return nullptr;
- }
-
- void DidRunTask(const Task* task, const TimeDelta& task_latency) override {
- NOTREACHED()
- << "GetWork() never returns a sequence so no task should ever run.";
- }
-
- void ReEnqueueSequence(scoped_refptr<Sequence> sequence) override {
- NOTREACHED() <<
- "GetWork() never returns a sequence so there's nothing to reenqueue.";
- }
-
- TimeDelta GetSleepTimeout() override {
- const TimeTicks next_time = delayed_task_manager_->GetDelayedRunTime();
- if (next_time.is_null())
- return TimeDelta::Max();
-
-    // For delayed tasks with delays that are very close to each other, it is
-    // possible for the current time to advance beyond the next
-    // GetDelayedRunTime(). Return a minimum of TimeDelta() if that happens.
- TimeDelta sleep_time = next_time - delayed_task_manager_->Now();
- const TimeDelta zero_delta;
- return sleep_time < zero_delta ? zero_delta : sleep_time;
- }
-
- bool CanDetach(SchedulerWorker* worker) override {
- return false;
- }
-
- private:
- DelayedTaskManager* const delayed_task_manager_;
-
- DISALLOW_COPY_AND_ASSIGN(ServiceThreadDelegate);
-};
-
-} // namespace
-
-SchedulerServiceThread::~SchedulerServiceThread() = default;
-
-// static
-std::unique_ptr<SchedulerServiceThread> SchedulerServiceThread::Create(
- TaskTracker* task_tracker, DelayedTaskManager* delayed_task_manager) {
- std::unique_ptr<SchedulerWorker> worker = SchedulerWorker::Create(
- ThreadPriority::NORMAL,
- MakeUnique<ServiceThreadDelegate>(delayed_task_manager), task_tracker,
- SchedulerWorker::InitialState::ALIVE);
- if (!worker)
- return nullptr;
-
- return WrapUnique(new SchedulerServiceThread(std::move(worker)));
-}
-
-void SchedulerServiceThread::WakeUp() {
- worker_->WakeUp();
-}
-
-void SchedulerServiceThread::JoinForTesting() {
- worker_->JoinForTesting();
-}
-
-SchedulerServiceThread::SchedulerServiceThread(
- std::unique_ptr<SchedulerWorker> worker) : worker_(std::move(worker)) {}
-
-} // namespace internal
-} // namespace base
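The sleep-timeout computation deleted above is worth restating on its own: the service thread sleeps until the next delayed run time, clamped so that a run time already in the past yields a zero (immediate) timeout. A free-standing sketch with hypothetical names:

#include "base/time/time.h"

base::TimeDelta ComputeSleepTime(base::TimeTicks next_run_time,
                                 base::TimeTicks now) {
  // No pending delayed task: sleep until explicitly woken.
  if (next_run_time.is_null())
    return base::TimeDelta::Max();
  // Delays that are very close together can leave |next_run_time| in the
  // past by the time it is read; never return a negative timeout.
  const base::TimeDelta sleep_time = next_run_time - now;
  return sleep_time < base::TimeDelta() ? base::TimeDelta() : sleep_time;
}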
diff --git a/chromium/base/task_scheduler/scheduler_service_thread.h b/chromium/base/task_scheduler/scheduler_service_thread.h
deleted file mode 100644
index e6c9fd0cb97..00000000000
--- a/chromium/base/task_scheduler/scheduler_service_thread.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TASK_SCHEDULER_SERVICE_THREAD_H_
-#define BASE_TASK_SCHEDULER_SERVICE_THREAD_H_
-
-#include <memory>
-
-#include "base/base_export.h"
-#include "base/macros.h"
-
-namespace base {
-namespace internal {
-
-class DelayedTaskManager;
-class SchedulerWorker;
-class TaskTracker;
-
-// A thread dedicated to performing Task Scheduler related work.
-class BASE_EXPORT SchedulerServiceThread {
- public:
- ~SchedulerServiceThread();
-
-  // Creates a SchedulerServiceThread. |task_tracker| and |delayed_task_manager|
-  // are passed through to the underlying SchedulerWorker. Returns nullptr on
-  // failure.
- static std::unique_ptr<SchedulerServiceThread> Create(
- TaskTracker* task_tracker, DelayedTaskManager* delayed_task_manager);
-
-  // Wakes the SchedulerServiceThread if it wasn't already awake. This also has
-  // the effect of updating the amount of time the thread sleeps for delayed
-  // tasks.
- void WakeUp();
-
- // Joins this SchedulerServiceThread. This can only be called once.
- void JoinForTesting();
-
- private:
- SchedulerServiceThread(std::unique_ptr<SchedulerWorker> worker);
-
- const std::unique_ptr<SchedulerWorker> worker_;
-
- DISALLOW_COPY_AND_ASSIGN(SchedulerServiceThread);
-};
-
-} // namespace internal
-} // namespace base
-
-#endif // BASE_TASK_SCHEDULER_SERVICE_THREAD_H_
diff --git a/chromium/base/task_scheduler/scheduler_service_thread_unittest.cc b/chromium/base/task_scheduler/scheduler_service_thread_unittest.cc
deleted file mode 100644
index 65690b00472..00000000000
--- a/chromium/base/task_scheduler/scheduler_service_thread_unittest.cc
+++ /dev/null
@@ -1,188 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/task_scheduler/scheduler_service_thread.h"
-
-#include <memory>
-
-#include "base/bind.h"
-#include "base/bind_helpers.h"
-#include "base/location.h"
-#include "base/macros.h"
-#include "base/memory/ptr_util.h"
-#include "base/memory/ref_counted.h"
-#include "base/synchronization/waitable_event.h"
-#include "base/task_scheduler/delayed_task_manager.h"
-#include "base/task_scheduler/scheduler_worker_pool_impl.h"
-#include "base/task_scheduler/scheduler_worker_pool_params.h"
-#include "base/task_scheduler/sequence.h"
-#include "base/task_scheduler/task.h"
-#include "base/task_scheduler/task_tracker.h"
-#include "base/task_scheduler/task_traits.h"
-#include "base/time/time.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace base {
-namespace internal {
-namespace {
-
-// The goal of the tests here is to verify the behavior of the Service Thread.
-// Some tests may fit better as DelayedTaskManager unit tests depending on the
-// nature of the test.
-//
-// Timed waits are inherent in the service thread because one of its main
-// purposes is to tell the delayed task manager when to post ready tasks.
-// This also makes writing tests tricky since the goal isn't to test whether
-// WaitableEvent works but rather whether the correct callbacks occur at the
-// right time.
-//
-// As a result, there are a few assumptions made in these tests:
-// 1) Tests execute with balanced context switching. This means that there
-//    isn't an adversary that context-switches the test main thread for an
-//    extended period of time when the test main thread isn't waiting.
-// 2) Time proceeds normally. Since timed waits determine how long the service
-//    thread will wait, and timed waits are currently not mockable, time needs
-//    to proceed in a forward fashion. If time is frozen (e.g. TimeTicks::Now()
-//    doesn't advance), some tests below may fail.
-// 3) Short waits sufficiently cover longer waits. Having tests run quickly is
-//    desirable. Since the tests can't change the behavior of timed waiting,
-//    the delay durations should be reasonably short, on the order of hundreds
-//    of milliseconds.
-class TaskSchedulerServiceThreadTest : public testing::Test {
- protected:
- TaskSchedulerServiceThreadTest() : delayed_task_manager_(Bind(&DoNothing)) {}
-
- void SetUp() override {
- scheduler_worker_pool_ = SchedulerWorkerPoolImpl::Create(
- SchedulerWorkerPoolParams("TestWorkerPoolForSchedulerServiceThread",
- ThreadPriority::BACKGROUND,
- SchedulerWorkerPoolParams::IORestriction::
- DISALLOWED,
- 1u,
- TimeDelta::Max()),
- Bind(&ReEnqueueSequenceCallback), &task_tracker_,
- &delayed_task_manager_);
- ASSERT_TRUE(scheduler_worker_pool_);
- service_thread_ = SchedulerServiceThread::Create(
- &task_tracker_, &delayed_task_manager_);
- ASSERT_TRUE(service_thread_);
- }
-
- void TearDown() override {
- scheduler_worker_pool_->JoinForTesting();
- service_thread_->JoinForTesting();
- }
-
- SchedulerServiceThread* service_thread() {
- return service_thread_.get();
- }
-
- DelayedTaskManager& delayed_task_manager() {
- return delayed_task_manager_;
- }
-
- SchedulerWorkerPoolImpl* worker_pool() {
- return scheduler_worker_pool_.get();
- }
-
- private:
- static void ReEnqueueSequenceCallback(scoped_refptr<Sequence> sequence) {
- ADD_FAILURE() << "This test only expects one task per sequence.";
- }
-
- DelayedTaskManager delayed_task_manager_;
- TaskTracker task_tracker_;
- std::unique_ptr<SchedulerWorkerPoolImpl> scheduler_worker_pool_;
- std::unique_ptr<SchedulerServiceThread> service_thread_;
-
- DISALLOW_COPY_AND_ASSIGN(TaskSchedulerServiceThreadTest);
-};
-
-} // namespace
-
-// Tests that the service thread can handle a single delayed task.
-TEST_F(TaskSchedulerServiceThreadTest, RunSingleDelayedTask) {
- WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
- delayed_task_manager().AddDelayedTask(
- WrapUnique(new Task(FROM_HERE,
- Bind(&WaitableEvent::Signal, Unretained(&event)),
- TaskTraits(), TimeDelta::FromMilliseconds(100))),
- make_scoped_refptr(new Sequence), nullptr, worker_pool());
-  // Waking the service thread shouldn't cause the task to be executed because
-  // its delay hasn't expired (racy in theory, see test-fixture meta-comment).
- service_thread()->WakeUp();
-  // Yield to increase the likelihood of catching a bug where these tasks would
-  // be released before their delay has passed.
- PlatformThread::YieldCurrentThread();
- EXPECT_FALSE(event.IsSignaled());
- // When the delay expires, the delayed task is posted, signaling |event|.
- event.Wait();
-}
-
-// Tests that the service thread can handle more than one delayed task with
-// different delays.
-TEST_F(TaskSchedulerServiceThreadTest, RunMultipleDelayedTasks) {
- const TimeTicks test_begin_time = TimeTicks::Now();
- const TimeDelta delay1 = TimeDelta::FromMilliseconds(100);
- const TimeDelta delay2 = TimeDelta::FromMilliseconds(200);
-
- WaitableEvent event1(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
- delayed_task_manager().AddDelayedTask(
- WrapUnique(new Task(FROM_HERE,
- Bind(&WaitableEvent::Signal, Unretained(&event1)),
- TaskTraits(), delay1)),
- make_scoped_refptr(new Sequence), nullptr, worker_pool());
-
- WaitableEvent event2(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
- delayed_task_manager().AddDelayedTask(
- WrapUnique(new Task(FROM_HERE,
- Bind(&WaitableEvent::Signal, Unretained(&event2)),
- TaskTraits(), delay2)),
- make_scoped_refptr(new Sequence), nullptr, worker_pool());
-
-  // Adding the tasks shouldn't have caused them to be executed.
- EXPECT_FALSE(event1.IsSignaled());
- EXPECT_FALSE(event2.IsSignaled());
-
-  // Waking the service thread shouldn't cause the tasks to be executed because
-  // their delays haven't expired (note: this is racy if a delay somehow
-  // expires before this runs, but 100ms is a long time in a unittest...). It
-  // should instead cause the service thread to schedule itself for wakeup when
-  // |delay1| expires.
- service_thread()->WakeUp();
-  // Yield to increase the likelihood of catching a bug where these tasks would
-  // be released before their delay has passed.
- PlatformThread::YieldCurrentThread();
- EXPECT_FALSE(event1.IsSignaled());
- EXPECT_FALSE(event2.IsSignaled());
-
- // Confirm the above assumption about the evolution of time in the test.
- EXPECT_LT(TimeTicks::Now() - test_begin_time, delay1);
-
-  // Wait until |delay1| expires and the service thread wakes up to schedule
-  // the first task, signaling |event1|.
- event1.Wait();
-
- // Only the first task should have been released.
- EXPECT_TRUE(event1.IsSignaled());
- EXPECT_FALSE(event2.IsSignaled());
-
- // At least |delay1| should have passed for |event1| to fire.
- EXPECT_GE(TimeTicks::Now() - test_begin_time, delay1);
-
-  // And, assuming a sane test timeline, |delay2| shouldn't have expired yet.
- EXPECT_LT(TimeTicks::Now() - test_begin_time, delay2);
-
- // Now wait for the second task to be fired.
- event2.Wait();
-
-  // It should only have fired after |delay2| expired.
- EXPECT_GE(TimeTicks::Now() - test_begin_time, delay2);
-}
-
-} // namespace internal
-} // namespace base
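The deleted tests above follow one timing pattern throughout: bound the observed wall-clock delay from below by the requested delay, and avoid tight upper bounds because bots can stall. A sketch of that pattern as a hypothetical gtest helper:

#include "base/synchronization/waitable_event.h"
#include "base/time/time.h"
#include "testing/gtest/include/gtest/gtest.h"

void ExpectSignaledNoEarlierThan(base::WaitableEvent* event,
                                 base::TimeTicks post_time,
                                 base::TimeDelta requested_delay) {
  event->Wait();  // Blocks until the delayed task signals |event|.
  // Lower bound only: a strict upper bound would be flaky under load.
  EXPECT_GE(base::TimeTicks::Now() - post_time, requested_delay);
}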
diff --git a/chromium/base/task_scheduler/scheduler_worker.cc b/chromium/base/task_scheduler/scheduler_worker.cc
index 2bed780826e..5853bf6131e 100644
--- a/chromium/base/task_scheduler/scheduler_worker.cc
+++ b/chromium/base/task_scheduler/scheduler_worker.cc
@@ -14,6 +14,8 @@
#if defined(OS_MACOSX)
#include "base/mac/scoped_nsautorelease_pool.h"
+#elif defined(OS_WIN)
+#include "base/win/scoped_com_initializer.h"
#endif
namespace base {
@@ -36,16 +38,22 @@ class SchedulerWorker::Thread : public PlatformThread::Delegate {
// Set if this thread was detached.
std::unique_ptr<Thread> detached_thread;
- outer_->delegate_->OnMainEntry(
- outer_, outer_->last_detach_time_.is_null()
- ? TimeDelta::Max()
- : TimeTicks::Now() - outer_->last_detach_time_);
+ outer_->delegate_->OnMainEntry(outer_);
// A SchedulerWorker starts out waiting for work.
WaitForWork();
+#if defined(OS_WIN)
+ // This is required as SequencedWorkerPool previously blindly CoInitialized
+ // all of its threads.
+ // TODO: Get rid of this broad COM scope and force tasks that care about a
+ // CoInitialized environment to request one (via an upcoming execution
+ // mode).
+ win::ScopedCOMInitializer com_initializer;
+#endif
+
while (!outer_->task_tracker_->IsShutdownComplete() &&
- !outer_->ShouldExitForTesting()) {
+ !outer_->should_exit_for_testing_.IsSet()) {
DCHECK(outer_);
#if defined(OS_MACOSX)
@@ -60,9 +68,9 @@ class SchedulerWorker::Thread : public PlatformThread::Delegate {
if (outer_->delegate_->CanDetach(outer_)) {
detached_thread = outer_->Detach();
if (detached_thread) {
+ outer_ = nullptr;
DCHECK_EQ(detached_thread.get(), this);
PlatformThread::Detach(thread_handle_);
- outer_ = nullptr;
break;
}
}
@@ -70,12 +78,13 @@ class SchedulerWorker::Thread : public PlatformThread::Delegate {
continue;
}
- const Task* task = sequence->PeekTask();
- const TimeTicks start_time = TimeTicks::Now();
- if (outer_->task_tracker_->RunTask(task, sequence->token()))
- outer_->delegate_->DidRunTask(task, start_time - task->sequenced_time);
+ std::unique_ptr<Task> task = sequence->TakeTask();
+ const TaskPriority task_priority = task->traits.priority();
+ const TimeDelta task_latency = TimeTicks::Now() - task->sequenced_time;
+ if (outer_->task_tracker_->RunTask(std::move(task), sequence->token()))
+ outer_->delegate_->DidRunTaskWithPriority(task_priority, task_latency);
- const bool sequence_became_empty = sequence->PopTask();
+ const bool sequence_became_empty = sequence->Pop();
// If |sequence| isn't empty immediately after the pop, re-enqueue it to
// maintain the invariant that a non-empty Sequence is always referenced
@@ -216,10 +225,9 @@ void SchedulerWorker::WakeUp() {
}
void SchedulerWorker::JoinForTesting() {
- {
- AutoSchedulerLock auto_lock(should_exit_for_testing_lock_);
- should_exit_for_testing_ = true;
- }
+ DCHECK(!should_exit_for_testing_.IsSet());
+ should_exit_for_testing_.Set();
+
WakeUp();
// Normally holding a lock and joining is dangerous. However, since this is
@@ -249,14 +257,18 @@ SchedulerWorker::SchedulerWorker(ThreadPriority priority_hint,
}
std::unique_ptr<SchedulerWorker::Thread> SchedulerWorker::Detach() {
- DCHECK(!ShouldExitForTesting()) << "Worker was already joined";
+ DCHECK(!should_exit_for_testing_.IsSet()) << "Worker was already joined";
AutoSchedulerLock auto_lock(thread_lock_);
// If a wakeup is pending, then a WakeUp() came in while we were deciding to
// detach. This means we can't go away anymore since we would break the
// guarantee that we call GetWork() after a successful wakeup.
if (thread_->IsWakeUpPending())
return nullptr;
- last_detach_time_ = TimeTicks::Now();
+
+ // Call OnDetach() within the scope of |thread_lock_| to prevent the delegate
+ // from being used concurrently from an old and a new thread.
+ delegate_->OnDetach();
+
return std::move(thread_);
}
@@ -269,10 +281,5 @@ void SchedulerWorker::CreateThreadAssertSynchronized() {
CreateThread();
}
-bool SchedulerWorker::ShouldExitForTesting() const {
- AutoSchedulerLock auto_lock(should_exit_for_testing_lock_);
- return should_exit_for_testing_;
-}
-
} // namespace internal
} // namespace base
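The Windows hunk above scopes COM initialization to the worker's main loop with an RAII guard. A sketch of that pattern in isolation (ThreadMainBody() is a hypothetical frame, not the actual SchedulerWorker code):

#if defined(OS_WIN)
#include "base/win/scoped_com_initializer.h"

void ThreadMainBody() {
  // CoInitialize runs in the constructor; the thread stays CoInitialized
  // for as long as |com_initializer| is in scope.
  base::win::ScopedCOMInitializer com_initializer;
  // ... run tasks that may expect a CoInitialized environment ...
}  // CoUninitialize runs when |com_initializer| is destroyed.
#endif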
diff --git a/chromium/base/task_scheduler/scheduler_worker.h b/chromium/base/task_scheduler/scheduler_worker.h
index eee2f4a56c7..a9b891ad772 100644
--- a/chromium/base/task_scheduler/scheduler_worker.h
+++ b/chromium/base/task_scheduler/scheduler_worker.h
@@ -10,6 +10,7 @@
#include "base/base_export.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
+#include "base/synchronization/atomic_flag.h"
#include "base/synchronization/waitable_event.h"
#include "base/task_scheduler/scheduler_lock.h"
#include "base/task_scheduler/sequence.h"
@@ -37,7 +38,7 @@ class TaskTracker;
class BASE_EXPORT SchedulerWorker {
public:
// Delegate interface for SchedulerWorker. The methods are always called from
- // a thread managed by the SchedulerWorker instance.
+ // the thread managed by the SchedulerWorker instance.
class Delegate {
public:
virtual ~Delegate() = default;
@@ -46,17 +47,17 @@ class BASE_EXPORT SchedulerWorker {
-  // If a thread is recreated after detachment, |detach_duration| is the time
-  // elapsed since detachment. Otherwise, if this is the first thread created
-  // for |worker|, |detach_duration| is TimeDelta::Max().
-  virtual void OnMainEntry(SchedulerWorker* worker,
-                           const TimeDelta& detach_duration) = 0;
+  // Called by a thread managed by |worker| when it enters its main function.
+  virtual void OnMainEntry(SchedulerWorker* worker) = 0;
// Called by a thread managed by |worker| to get a Sequence from which to
// run a Task.
virtual scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) = 0;
- // Called by the SchedulerWorker after it ran |task|. |task_latency| is the
- // time elapsed between when the task was posted and when it started to run.
- virtual void DidRunTask(const Task* task,
- const TimeDelta& task_latency) = 0;
+ // Called by the SchedulerWorker after it ran a task with |task_priority|.
+ // |task_latency| is the time elapsed between when the task was posted and
+ // when it started to run.
+ virtual void DidRunTaskWithPriority(TaskPriority task_priority,
+ const TimeDelta& task_latency) = 0;
// Called when |sequence| isn't empty after the SchedulerWorker pops a Task
// from it. |sequence| is the last Sequence returned by GetWork().
@@ -81,6 +82,11 @@ class BASE_EXPORT SchedulerWorker {
// This MUST return false if SchedulerWorker::JoinForTesting() is in
// progress.
virtual bool CanDetach(SchedulerWorker* worker) = 0;
+
+ // Called by a thread before it detaches. This method is not allowed to
+ // acquire a SchedulerLock because it is called within the scope of another
+ // SchedulerLock.
+ virtual void OnDetach() = 0;
};
enum class InitialState { ALIVE, DETACHED };
@@ -135,27 +141,18 @@ class BASE_EXPORT SchedulerWorker {
void CreateThreadAssertSynchronized();
- bool ShouldExitForTesting() const;
-
// Synchronizes access to |thread_|.
mutable SchedulerLock thread_lock_;
// The underlying thread for this SchedulerWorker.
std::unique_ptr<Thread> thread_;
- // Time of the last successful Detach(). Is only accessed from the thread
- // managed by this SchedulerWorker.
- TimeTicks last_detach_time_;
-
const ThreadPriority priority_hint_;
const std::unique_ptr<Delegate> delegate_;
TaskTracker* const task_tracker_;
- // Synchronizes access to |should_exit_for_testing_|.
- mutable SchedulerLock should_exit_for_testing_lock_;
-
- // True once JoinForTesting() has been called.
- bool should_exit_for_testing_ = false;
+ // Set once JoinForTesting() has been called.
+ AtomicFlag should_exit_for_testing_;
DISALLOW_COPY_AND_ASSIGN(SchedulerWorker);
};
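The header change above swaps a SchedulerLock-protected bool for base::AtomicFlag, which models a one-way unset-to-set transition without locking. A sketch of the idiom with a hypothetical class:

#include "base/logging.h"
#include "base/synchronization/atomic_flag.h"

class ExampleWorker {
 public:
  void JoinForTesting() {
    DCHECK(!should_exit_for_testing_.IsSet());  // Can only be joined once.
    should_exit_for_testing_.Set();  // Lock-free, visible to the worker.
    // ... wake the worker thread and join it ...
  }

  // Polled by the worker thread in its main loop.
  bool ShouldExit() const { return should_exit_for_testing_.IsSet(); }

 private:
  base::AtomicFlag should_exit_for_testing_;
};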
diff --git a/chromium/base/task_scheduler/scheduler_worker_pool.h b/chromium/base/task_scheduler/scheduler_worker_pool.h
index 43dce606c62..c742ac3c454 100644
--- a/chromium/base/task_scheduler/scheduler_worker_pool.h
+++ b/chromium/base/task_scheduler/scheduler_worker_pool.h
@@ -9,6 +9,8 @@
#include "base/base_export.h"
#include "base/memory/ref_counted.h"
+#include "base/sequenced_task_runner.h"
+#include "base/single_thread_task_runner.h"
#include "base/task_runner.h"
#include "base/task_scheduler/sequence.h"
#include "base/task_scheduler/task.h"
@@ -25,11 +27,23 @@ class BASE_EXPORT SchedulerWorkerPool {
public:
virtual ~SchedulerWorkerPool() = default;
- // Returns a TaskRunner whose PostTask invocations will result in scheduling
- // Tasks with |traits| and |execution_mode| in this SchedulerWorkerPool.
+ // Returns a TaskRunner whose PostTask invocations result in scheduling tasks
+ // in this SchedulerWorkerPool using |traits|. Tasks may run in any order and
+ // in parallel.
virtual scoped_refptr<TaskRunner> CreateTaskRunnerWithTraits(
- const TaskTraits& traits,
- ExecutionMode execution_mode) = 0;
+ const TaskTraits& traits) = 0;
+
+ // Returns a SequencedTaskRunner whose PostTask invocations result in
+ // scheduling tasks in this SchedulerWorkerPool using |traits|. Tasks run one
+ // at a time in posting order.
+ virtual scoped_refptr<SequencedTaskRunner>
+ CreateSequencedTaskRunnerWithTraits(const TaskTraits& traits) = 0;
+
+ // Returns a SingleThreadTaskRunner whose PostTask invocations result in
+ // scheduling tasks in this SchedulerWorkerPool using |traits|. Tasks run on a
+ // single thread in posting order.
+ virtual scoped_refptr<SingleThreadTaskRunner>
+ CreateSingleThreadTaskRunnerWithTraits(const TaskTraits& traits) = 0;
// Inserts |sequence| with |sequence_sort_key| into a queue of Sequences that
// can be processed by any worker owned by this SchedulerWorkerPool. Must only
diff --git a/chromium/base/task_scheduler/scheduler_worker_pool_impl.cc b/chromium/base/task_scheduler/scheduler_worker_pool_impl.cc
index ed8e84a336d..f2d69344f4c 100644
--- a/chromium/base/task_scheduler/scheduler_worker_pool_impl.cc
+++ b/chromium/base/task_scheduler/scheduler_worker_pool_impl.cc
@@ -19,6 +19,7 @@
#include "base/sequenced_task_runner.h"
#include "base/single_thread_task_runner.h"
#include "base/strings/stringprintf.h"
+#include "base/task_runner.h"
#include "base/task_scheduler/delayed_task_manager.h"
#include "base/task_scheduler/task_tracker.h"
#include "base/threading/platform_thread.h"
@@ -34,14 +35,12 @@ namespace {
constexpr char kPoolNameSuffix[] = "Pool";
constexpr char kDetachDurationHistogramPrefix[] =
"TaskScheduler.DetachDuration.";
+constexpr char kNumTasksBeforeDetachHistogramPrefix[] =
+ "TaskScheduler.NumTasksBeforeDetach.";
constexpr char kNumTasksBetweenWaitsHistogramPrefix[] =
"TaskScheduler.NumTasksBetweenWaits.";
constexpr char kTaskLatencyHistogramPrefix[] = "TaskScheduler.TaskLatency.";
-// SchedulerWorker that owns the current thread, if any.
-LazyInstance<ThreadLocalPointer<const SchedulerWorker>>::Leaky
- tls_current_worker = LAZY_INSTANCE_INITIALIZER;
-
// SchedulerWorkerPool that owns the current thread, if any.
LazyInstance<ThreadLocalPointer<const SchedulerWorkerPool>>::Leaky
tls_current_worker_pool = LAZY_INSTANCE_INITIALIZER;
@@ -198,7 +197,10 @@ class SchedulerWorkerPoolImpl::SchedulerSingleThreadTaskRunner :
}
bool RunsTasksOnCurrentThread() const override {
- return tls_current_worker.Get().Get() == worker_;
+ // Even though this is a SingleThreadTaskRunner, test the actual sequence
+ // instead of the assigned worker so that another task randomly assigned
+ // to the same worker doesn't return true by happenstance.
+ return sequence_->token() == SequenceToken::GetForCurrentThread();
}
private:
@@ -235,13 +237,14 @@ class SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl
}
// SchedulerWorker::Delegate:
- void OnMainEntry(SchedulerWorker* worker,
- const TimeDelta& detach_duration) override;
+ void OnMainEntry(SchedulerWorker* worker) override;
scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) override;
- void DidRunTask(const Task* task, const TimeDelta& task_latency) override;
+ void DidRunTaskWithPriority(TaskPriority task_priority,
+ const TimeDelta& task_latency) override;
void ReEnqueueSequence(scoped_refptr<Sequence> sequence) override;
TimeDelta GetSleepTimeout() override;
bool CanDetach(SchedulerWorker* worker) override;
+ void OnDetach() override;
void RegisterSingleThreadTaskRunner() {
// No barrier as barriers only affect sequential consistency which is
@@ -265,6 +268,9 @@ class SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl
// |single_threaded_priority_queue_|.
bool last_sequence_is_single_threaded_ = false;
+ // Time of the last detach.
+ TimeTicks last_detach_time_;
+
// Time when GetWork() first returned nullptr.
TimeTicks idle_start_time_;
@@ -279,6 +285,10 @@ class SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl
// TaskScheduler.NumTasksBetweenWaits histogram was recorded.
size_t num_tasks_since_last_wait_ = 0;
+ // Number of tasks executed since the last time the
+ // TaskScheduler.NumTasksBeforeDetach histogram was recorded.
+ size_t num_tasks_since_last_detach_ = 0;
+
subtle::Atomic32 num_single_threaded_runners_ = 0;
const int index_;
@@ -303,61 +313,38 @@ std::unique_ptr<SchedulerWorkerPoolImpl> SchedulerWorkerPoolImpl::Create(
params.io_restriction(),
params.suggested_reclaim_time(),
task_tracker, delayed_task_manager));
- if (worker_pool->Initialize(params.priority_hint(), params.max_threads(),
- re_enqueue_sequence_callback)) {
+ if (worker_pool->Initialize(
+ params.priority_hint(), params.standby_thread_policy(),
+ params.max_threads(), re_enqueue_sequence_callback)) {
return worker_pool;
}
return nullptr;
}
-void SchedulerWorkerPoolImpl::WaitForAllWorkersIdleForTesting() {
- AutoSchedulerLock auto_lock(idle_workers_stack_lock_);
- while (idle_workers_stack_.Size() < workers_.size())
- idle_workers_stack_cv_for_testing_->Wait();
-}
-
-void SchedulerWorkerPoolImpl::JoinForTesting() {
- DCHECK(!CanWorkerDetachForTesting() || suggested_reclaim_time_.is_max()) <<
- "Workers can detach during join.";
- for (const auto& worker : workers_)
- worker->JoinForTesting();
-
- DCHECK(!join_for_testing_returned_.IsSignaled());
- join_for_testing_returned_.Signal();
+scoped_refptr<TaskRunner> SchedulerWorkerPoolImpl::CreateTaskRunnerWithTraits(
+ const TaskTraits& traits) {
+ return make_scoped_refptr(new SchedulerParallelTaskRunner(traits, this));
}
-void SchedulerWorkerPoolImpl::DisallowWorkerDetachmentForTesting() {
- worker_detachment_disallowed_.Set();
+scoped_refptr<SequencedTaskRunner>
+SchedulerWorkerPoolImpl::CreateSequencedTaskRunnerWithTraits(
+ const TaskTraits& traits) {
+ return make_scoped_refptr(new SchedulerSequencedTaskRunner(traits, this));
}
-scoped_refptr<TaskRunner> SchedulerWorkerPoolImpl::CreateTaskRunnerWithTraits(
- const TaskTraits& traits,
- ExecutionMode execution_mode) {
- switch (execution_mode) {
- case ExecutionMode::PARALLEL:
- return make_scoped_refptr(new SchedulerParallelTaskRunner(traits, this));
-
- case ExecutionMode::SEQUENCED:
- return make_scoped_refptr(new SchedulerSequencedTaskRunner(traits, this));
-
- case ExecutionMode::SINGLE_THREADED: {
- // TODO(fdoray): Find a way to take load into account when assigning a
- // SchedulerWorker to a SingleThreadTaskRunner. Also, this code
- // assumes that all SchedulerWorkers are alive. Eventually, we might
- // decide to tear down threads that haven't run tasks for a long time.
- size_t worker_index;
- {
- AutoSchedulerLock auto_lock(next_worker_index_lock_);
- worker_index = next_worker_index_;
- next_worker_index_ = (next_worker_index_ + 1) % workers_.size();
- }
- return make_scoped_refptr(new SchedulerSingleThreadTaskRunner(
- traits, this, workers_[worker_index].get()));
- }
+scoped_refptr<SingleThreadTaskRunner>
+SchedulerWorkerPoolImpl::CreateSingleThreadTaskRunnerWithTraits(
+ const TaskTraits& traits) {
+ // TODO(fdoray): Find a way to take load into account when assigning a
+ // SchedulerWorker to a SingleThreadTaskRunner.
+ size_t worker_index;
+ {
+ AutoSchedulerLock auto_lock(next_worker_index_lock_);
+ worker_index = next_worker_index_;
+ next_worker_index_ = (next_worker_index_ + 1) % workers_.size();
}
-
- NOTREACHED();
- return nullptr;
+ return make_scoped_refptr(new SchedulerSingleThreadTaskRunner(
+ traits, this, workers_[worker_index].get()));
}
void SchedulerWorkerPoolImpl::ReEnqueueSequence(
@@ -409,7 +396,7 @@ void SchedulerWorkerPoolImpl::PostTaskWithSequenceNow(
// Confirm that |task| is ready to run (its delayed run time is either null or
// in the past).
- DCHECK_LE(task->delayed_run_time, delayed_task_manager_->Now());
+ DCHECK_LE(task->delayed_run_time, TimeTicks::Now());
// Because |worker| belongs to this worker pool, we know that the type
// of its delegate is SchedulerWorkerDelegateImpl.
@@ -434,12 +421,47 @@ void SchedulerWorkerPoolImpl::PostTaskWithSequenceNow(
// Wake up a worker to process |sequence|.
if (worker)
- worker->WakeUp();
+ WakeUpWorker(worker);
else
WakeUpOneWorker();
}
}
+void SchedulerWorkerPoolImpl::GetHistograms(
+ std::vector<const HistogramBase*>* histograms) const {
+ histograms->push_back(detach_duration_histogram_);
+ histograms->push_back(num_tasks_between_waits_histogram_);
+}
+
+void SchedulerWorkerPoolImpl::WaitForAllWorkersIdleForTesting() {
+ AutoSchedulerLock auto_lock(idle_workers_stack_lock_);
+ while (idle_workers_stack_.Size() < workers_.size())
+ idle_workers_stack_cv_for_testing_->Wait();
+}
+
+void SchedulerWorkerPoolImpl::JoinForTesting() {
+ DCHECK(!CanWorkerDetachForTesting() || suggested_reclaim_time_.is_max())
+ << "Workers can detach during join.";
+ for (const auto& worker : workers_)
+ worker->JoinForTesting();
+
+ DCHECK(!join_for_testing_returned_.IsSignaled());
+ join_for_testing_returned_.Signal();
+}
+
+void SchedulerWorkerPoolImpl::DisallowWorkerDetachmentForTesting() {
+ worker_detachment_disallowed_.Set();
+}
+
+size_t SchedulerWorkerPoolImpl::NumberOfAliveWorkersForTesting() {
+ size_t num_alive_workers = 0;
+ for (const auto& worker : workers_) {
+ if (worker->ThreadAliveForTesting())
+ ++num_alive_workers;
+ }
+ return num_alive_workers;
+}
+
SchedulerWorkerPoolImpl::SchedulerSingleThreadTaskRunner::
SchedulerSingleThreadTaskRunner(const TaskTraits& traits,
SchedulerWorkerPool* worker_pool,
@@ -474,8 +496,7 @@ SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
~SchedulerWorkerDelegateImpl() = default;
void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::OnMainEntry(
- SchedulerWorker* worker,
- const TimeDelta& detach_duration) {
+ SchedulerWorker* worker) {
#if DCHECK_IS_ON()
// Wait for |outer_->workers_created_| to avoid traversing
// |outer_->workers_| while it is being filled by Initialize().
@@ -485,17 +506,15 @@ void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::OnMainEntry(
DCHECK_EQ(num_tasks_since_last_wait_, 0U);
- if (!detach_duration.is_max()) {
- outer_->detach_duration_histogram_->AddTime(detach_duration);
- did_detach_since_last_get_work_ = true;
+ if (!last_detach_time_.is_null()) {
+ outer_->detach_duration_histogram_->AddTime(TimeTicks::Now() -
+ last_detach_time_);
}
PlatformThread::SetName(
StringPrintf("TaskScheduler%sWorker%d", outer_->name_.c_str(), index_));
- DCHECK(!tls_current_worker.Get().Get());
DCHECK(!tls_current_worker_pool.Get().Get());
- tls_current_worker.Get().Set(worker);
tls_current_worker_pool.Get().Set(outer_);
// New threads haven't run GetWork() yet, so reset the |idle_start_time_|.
@@ -514,16 +533,15 @@ SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::GetWork(
// Record the TaskScheduler.NumTasksBetweenWaits histogram if the
// SchedulerWorker waited on its WaitableEvent since the last GetWork().
//
- // Note: When GetWork() returns nullptr for the first time after returning a
- // Sequence, SchedulerWorker waits on its WaitableEvent. When the wait stops
- // (either because WakeUp() was called or because the sleep timeout expired),
- // GetWork() is called and the histogram is recorded. If GetWork() returns
- // nullptr again, the SchedulerWorker may detach.
- // |did_detach_since_last_get_work_| is set to true from OnMainEntry() if the
- // SchedulerWorker detaches and wakes up again. The next call to GetWork()
- // won't record the histogram (which is correct since the SchedulerWorker
- // didn't wait on its WaitableEvent since the last time the histogram was
- // recorded).
+ // Note: When GetWork() starts returning nullptr, the SchedulerWorker waits on
+ // its WaitableEvent. When it wakes up (either because WakeUp() was called or
+ // because the sleep timeout expired), it calls GetWork() again. The code
+ // below records the histogram and, if GetWork() returns nullptr again, the
+ // SchedulerWorker may detach. If that happens,
+ // |did_detach_since_last_get_work_| is set to true and the next call to
+ // GetWork() won't record the histogram (which is correct since the
+ // SchedulerWorker didn't wait on its WaitableEvent since the last time the
+ // histogram was recorded).
if (last_get_work_returned_nullptr_ && !did_detach_since_last_get_work_) {
outer_->num_tasks_between_waits_histogram_->Add(num_tasks_since_last_wait_);
num_tasks_since_last_wait_ = 0;
@@ -588,12 +606,13 @@ SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::GetWork(
return sequence;
}
-void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::DidRunTask(
- const Task* task,
- const TimeDelta& task_latency) {
+void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
+ DidRunTaskWithPriority(TaskPriority task_priority,
+ const TimeDelta& task_latency) {
++num_tasks_since_last_wait_;
+ ++num_tasks_since_last_detach_;
- const int priority_index = static_cast<int>(task->traits.priority());
+ const int priority_index = static_cast<int>(task_priority);
// As explained in the header file, histograms are allocated on demand. It
// doesn't matter if an element of |task_latency_histograms_| is set multiple
@@ -603,7 +622,7 @@ void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::DidRunTask(
subtle::Acquire_Load(&outer_->task_latency_histograms_[priority_index]));
if (!task_latency_histogram) {
task_latency_histogram =
- GetTaskLatencyHistogram(outer_->name_, task->traits.priority());
+ GetTaskLatencyHistogram(outer_->name_, task_priority);
subtle::Release_Store(
&outer_->task_latency_histograms_[priority_index],
reinterpret_cast<subtle::AtomicWord>(task_latency_histogram));
@@ -647,6 +666,14 @@ bool SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::CanDetach(
return can_detach;
}
+void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::OnDetach() {
+ DCHECK(!did_detach_since_last_get_work_);
+ outer_->num_tasks_before_detach_histogram_->Add(num_tasks_since_last_detach_);
+ num_tasks_since_last_detach_ = 0;
+ did_detach_since_last_get_work_ = true;
+ last_detach_time_ = TimeTicks::Now();
+}
+
SchedulerWorkerPoolImpl::SchedulerWorkerPoolImpl(
StringPiece name,
SchedulerWorkerPoolParams::IORestriction io_restriction,
@@ -672,6 +699,15 @@ SchedulerWorkerPoolImpl::SchedulerWorkerPoolImpl(
TimeDelta::FromHours(1),
50,
HistogramBase::kUmaTargetedHistogramFlag)),
+ // Mimics the UMA_HISTOGRAM_COUNTS_1000 macro. When a worker runs more
+ // than 1000 tasks before detaching, there is no need to know the exact
+ // number of tasks that ran.
+ num_tasks_before_detach_histogram_(Histogram::FactoryGet(
+ kNumTasksBeforeDetachHistogramPrefix + name_ + kPoolNameSuffix,
+ 1,
+ 1000,
+ 50,
+ HistogramBase::kUmaTargetedHistogramFlag)),
// Mimics the UMA_HISTOGRAM_COUNTS_100 macro. A SchedulerWorker is
// expected to run between zero and a few tens of tasks between waits.
// When it runs more than 100 tasks, there is no need to know the exact
@@ -690,23 +726,34 @@ SchedulerWorkerPoolImpl::SchedulerWorkerPoolImpl(
bool SchedulerWorkerPoolImpl::Initialize(
ThreadPriority priority_hint,
+ SchedulerWorkerPoolParams::StandbyThreadPolicy standby_thread_policy,
size_t max_threads,
const ReEnqueueSequenceCallback& re_enqueue_sequence_callback) {
AutoSchedulerLock auto_lock(idle_workers_stack_lock_);
DCHECK(workers_.empty());
-
- for (size_t i = 0; i < max_threads; ++i) {
+ workers_.resize(max_threads);
+
+ // Create workers and push them to the idle stack in reverse order of index.
+ // This ensures that they are woken up in order of index and that the ALIVE
+ // worker is on top of the stack.
+ for (int index = max_threads - 1; index >= 0; --index) {
+ const bool is_standby_lazy =
+ standby_thread_policy ==
+ SchedulerWorkerPoolParams::StandbyThreadPolicy::LAZY;
+ const SchedulerWorker::InitialState initial_state =
+ (index == 0 && !is_standby_lazy)
+ ? SchedulerWorker::InitialState::ALIVE
+ : SchedulerWorker::InitialState::DETACHED;
std::unique_ptr<SchedulerWorker> worker = SchedulerWorker::Create(
- priority_hint, MakeUnique<SchedulerWorkerDelegateImpl>(
- this, re_enqueue_sequence_callback,
- &shared_priority_queue_, static_cast<int>(i)),
- task_tracker_, i == 0 ? SchedulerWorker::InitialState::ALIVE
- : SchedulerWorker::InitialState::DETACHED);
+ priority_hint,
+ MakeUnique<SchedulerWorkerDelegateImpl>(
+ this, re_enqueue_sequence_callback, &shared_priority_queue_, index),
+ task_tracker_, initial_state);
if (!worker)
break;
idle_workers_stack_.Push(worker.get());
- workers_.push_back(std::move(worker));
+ workers_[index] = std::move(worker);
}
#if DCHECK_IS_ON()
@@ -716,6 +763,14 @@ bool SchedulerWorkerPoolImpl::Initialize(
return !workers_.empty();
}
+void SchedulerWorkerPoolImpl::WakeUpWorker(SchedulerWorker* worker) {
+ DCHECK(worker);
+ RemoveFromIdleWorkersStack(worker);
+ worker->WakeUp();
+  // TODO(robliao): Honor StandbyThreadPolicy::ONE here and consider adding
+  // hysteresis to the CanDetach check. See https://crbug.com/666041.
+}
+
void SchedulerWorkerPoolImpl::WakeUpOneWorker() {
SchedulerWorker* worker;
{
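The RunsTasksOnCurrentThread() change above compares sequence tokens instead of worker identity, so an unrelated task that happens to land on the same worker doesn't pass the check. A free-standing restatement of the comparison (the include path is an assumption; the diff itself only shows the call):

#include "base/sequence_token.h"

bool RunsTasksOnCurrentSequence(const base::SequenceToken& runner_token) {
  // True only when the calling task belongs to the runner's sequence,
  // regardless of which worker thread is executing it.
  return runner_token == base::SequenceToken::GetForCurrentThread();
}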
diff --git a/chromium/base/task_scheduler/scheduler_worker_pool_impl.h b/chromium/base/task_scheduler/scheduler_worker_pool_impl.h
index a621f797a53..8014030b541 100644
--- a/chromium/base/task_scheduler/scheduler_worker_pool_impl.h
+++ b/chromium/base/task_scheduler/scheduler_worker_pool_impl.h
@@ -20,7 +20,6 @@
#include "base/strings/string_piece.h"
#include "base/synchronization/atomic_flag.h"
#include "base/synchronization/condition_variable.h"
-#include "base/task_runner.h"
#include "base/task_scheduler/priority_queue.h"
#include "base/task_scheduler/scheduler_lock.h"
#include "base/task_scheduler/scheduler_worker.h"
@@ -66,6 +65,32 @@ class BASE_EXPORT SchedulerWorkerPoolImpl : public SchedulerWorkerPool {
TaskTracker* task_tracker,
DelayedTaskManager* delayed_task_manager);
+ // SchedulerWorkerPool:
+ scoped_refptr<TaskRunner> CreateTaskRunnerWithTraits(
+ const TaskTraits& traits) override;
+ scoped_refptr<SequencedTaskRunner> CreateSequencedTaskRunnerWithTraits(
+ const TaskTraits& traits) override;
+ scoped_refptr<SingleThreadTaskRunner> CreateSingleThreadTaskRunnerWithTraits(
+ const TaskTraits& traits) override;
+ void ReEnqueueSequence(scoped_refptr<Sequence> sequence,
+ const SequenceSortKey& sequence_sort_key) override;
+ bool PostTaskWithSequence(std::unique_ptr<Task> task,
+ scoped_refptr<Sequence> sequence,
+ SchedulerWorker* worker) override;
+ void PostTaskWithSequenceNow(std::unique_ptr<Task> task,
+ scoped_refptr<Sequence> sequence,
+ SchedulerWorker* worker) override;
+
+ const HistogramBase* num_tasks_before_detach_histogram() const {
+ return num_tasks_before_detach_histogram_;
+ }
+
+ const HistogramBase* num_tasks_between_waits_histogram() const {
+ return num_tasks_between_waits_histogram_;
+ }
+
+ void GetHistograms(std::vector<const HistogramBase*>* histograms) const;
+
// Waits until all workers are idle.
void WaitForAllWorkersIdleForTesting();
@@ -81,22 +106,9 @@ class BASE_EXPORT SchedulerWorkerPoolImpl : public SchedulerWorkerPool {
// reclaimed).
void DisallowWorkerDetachmentForTesting();
- // SchedulerWorkerPool:
- scoped_refptr<TaskRunner> CreateTaskRunnerWithTraits(
- const TaskTraits& traits,
- ExecutionMode execution_mode) override;
- void ReEnqueueSequence(scoped_refptr<Sequence> sequence,
- const SequenceSortKey& sequence_sort_key) override;
- bool PostTaskWithSequence(std::unique_ptr<Task> task,
- scoped_refptr<Sequence> sequence,
- SchedulerWorker* worker) override;
- void PostTaskWithSequenceNow(std::unique_ptr<Task> task,
- scoped_refptr<Sequence> sequence,
- SchedulerWorker* worker) override;
-
- const HistogramBase* num_tasks_between_waits_histogram_for_testing() const {
- return num_tasks_between_waits_histogram_;
- }
+ // Returns the number of workers alive in this worker pool. The value may
+ // change if workers are woken up or detached during this call.
+ size_t NumberOfAliveWorkersForTesting();
private:
class SchedulerSingleThreadTaskRunner;
@@ -111,9 +123,13 @@ class BASE_EXPORT SchedulerWorkerPoolImpl : public SchedulerWorkerPool {
bool Initialize(
ThreadPriority priority_hint,
+ SchedulerWorkerPoolParams::StandbyThreadPolicy standby_thread_policy,
size_t max_threads,
const ReEnqueueSequenceCallback& re_enqueue_sequence_callback);
+ // Wakes up |worker|.
+ void WakeUpWorker(SchedulerWorker* worker);
+
// Wakes up the last worker from this worker pool to go idle, if any.
void WakeUpOneWorker();
@@ -159,7 +175,11 @@ class BASE_EXPORT SchedulerWorkerPoolImpl : public SchedulerWorkerPool {
// details in GetWork()).
mutable SchedulerLock idle_workers_stack_lock_;
- // Stack of idle workers.
+ // Stack of idle workers. Initially, all workers are on this stack. A worker
+ // is removed from the stack before its WakeUp() function is called and when
+ // it receives work from GetWork() (a worker calls GetWork() when its sleep
+ // timeout expires, even if its WakeUp() method hasn't been called). A worker
+ // is pushed on this stack when it receives nullptr from GetWork().
SchedulerWorkerStack idle_workers_stack_;
// Signaled when all workers become idle.
@@ -181,6 +201,10 @@ class BASE_EXPORT SchedulerWorkerPoolImpl : public SchedulerWorkerPool {
// leaked.
HistogramBase* const detach_duration_histogram_;
+ // TaskScheduler.NumTasksBeforeDetach.[worker pool name] histogram.
+ // Intentionally leaked.
+ HistogramBase* const num_tasks_before_detach_histogram_;
+
// TaskScheduler.NumTasksBetweenWaits.[worker pool name] histogram.
// Intentionally leaked.
HistogramBase* const num_tasks_between_waits_histogram_;
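The histogram members declared above are created once via Histogram::FactoryGet and intentionally leaked, mirroring what the UMA_HISTOGRAM_COUNTS_1000 macro does. A sketch of the registration with a hypothetical helper name (parameters copied from the constructor shown earlier):

#include <string>

#include "base/metrics/histogram.h"

base::HistogramBase* GetNumTasksBeforeDetachHistogram(
    const std::string& pool_name) {
  // min = 1, max = 1000, 50 buckets, like UMA_HISTOGRAM_COUNTS_1000.
  // FactoryGet returns a process-lifetime histogram; calling it again with
  // the same name returns the same instance.
  return base::Histogram::FactoryGet(
      "TaskScheduler.NumTasksBeforeDetach." + pool_name + "Pool", 1, 1000, 50,
      base::HistogramBase::kUmaTargetedHistogramFlag);
}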
diff --git a/chromium/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc b/chromium/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc
index 0f8475a6905..9983cad5c16 100644
--- a/chromium/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc
+++ b/chromium/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc
@@ -30,9 +30,13 @@
#include "base/task_scheduler/sequence_sort_key.h"
#include "base/task_scheduler/task_tracker.h"
#include "base/task_scheduler/test_task_factory.h"
+#include "base/task_scheduler/test_utils.h"
#include "base/test/gtest_util.h"
+#include "base/test/test_simple_task_runner.h"
+#include "base/test/test_timeouts.h"
#include "base/threading/platform_thread.h"
#include "base/threading/simple_thread.h"
+#include "base/threading/thread.h"
#include "base/threading/thread_checker_impl.h"
#include "base/threading/thread_local_storage.h"
#include "base/threading/thread_restrictions.h"
@@ -55,52 +59,46 @@ constexpr TimeDelta kExtraTimeToWaitForDetach =
TimeDelta::FromSeconds(1);
using IORestriction = SchedulerWorkerPoolParams::IORestriction;
-
-class TestDelayedTaskManager : public DelayedTaskManager {
- public:
- TestDelayedTaskManager() : DelayedTaskManager(Bind(&DoNothing)) {}
-
- void SetCurrentTime(TimeTicks now) { now_ = now; }
-
- // DelayedTaskManager:
- TimeTicks Now() const override { return now_; }
-
- private:
- TimeTicks now_ = TimeTicks::Now();
-
- DISALLOW_COPY_AND_ASSIGN(TestDelayedTaskManager);
-};
+using StandbyThreadPolicy = SchedulerWorkerPoolParams::StandbyThreadPolicy;
class TaskSchedulerWorkerPoolImplTest
- : public testing::TestWithParam<ExecutionMode> {
+ : public testing::TestWithParam<test::ExecutionMode> {
protected:
- TaskSchedulerWorkerPoolImplTest() = default;
+ TaskSchedulerWorkerPoolImplTest()
+ : service_thread_("TaskSchedulerServiceThread") {}
void SetUp() override {
InitializeWorkerPool(TimeDelta::Max(), kNumWorkersInWorkerPool);
}
void TearDown() override {
+ service_thread_.Stop();
worker_pool_->WaitForAllWorkersIdleForTesting();
worker_pool_->JoinForTesting();
}
void InitializeWorkerPool(const TimeDelta& suggested_reclaim_time,
size_t num_workers) {
+ ASSERT_FALSE(worker_pool_);
+ ASSERT_FALSE(delayed_task_manager_);
+ service_thread_.Start();
+ delayed_task_manager_ =
+ base::MakeUnique<DelayedTaskManager>(service_thread_.task_runner());
worker_pool_ = SchedulerWorkerPoolImpl::Create(
- SchedulerWorkerPoolParams("TestWorkerPool", ThreadPriority::NORMAL,
- IORestriction::ALLOWED, num_workers,
- suggested_reclaim_time),
+ SchedulerWorkerPoolParams(
+ "TestWorkerPool", ThreadPriority::NORMAL, IORestriction::ALLOWED,
+ StandbyThreadPolicy::LAZY, num_workers, suggested_reclaim_time),
Bind(&TaskSchedulerWorkerPoolImplTest::ReEnqueueSequenceCallback,
Unretained(this)),
- &task_tracker_, &delayed_task_manager_);
+ &task_tracker_, delayed_task_manager_.get());
ASSERT_TRUE(worker_pool_);
}
std::unique_ptr<SchedulerWorkerPoolImpl> worker_pool_;
TaskTracker task_tracker_;
- TestDelayedTaskManager delayed_task_manager_;
+ Thread service_thread_;
+ std::unique_ptr<DelayedTaskManager> delayed_task_manager_;
private:
void ReEnqueueSequenceCallback(scoped_refptr<Sequence> sequence) {
@@ -114,6 +112,21 @@ class TaskSchedulerWorkerPoolImplTest
DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerPoolImplTest);
};
+scoped_refptr<TaskRunner> CreateTaskRunnerWithExecutionMode(
+ SchedulerWorkerPoolImpl* worker_pool,
+ test::ExecutionMode execution_mode) {
+ switch (execution_mode) {
+ case test::ExecutionMode::PARALLEL:
+ return worker_pool->CreateTaskRunnerWithTraits(TaskTraits());
+ case test::ExecutionMode::SEQUENCED:
+ return worker_pool->CreateSequencedTaskRunnerWithTraits(TaskTraits());
+ case test::ExecutionMode::SINGLE_THREADED:
+ return worker_pool->CreateSingleThreadTaskRunnerWithTraits(TaskTraits());
+ }
+ ADD_FAILURE() << "Unknown ExecutionMode";
+ return nullptr;
+}
+
using PostNestedTask = test::TestTaskFactory::PostNestedTask;
class ThreadPostingTasks : public SimpleThread {
@@ -129,15 +142,14 @@ class ThreadPostingTasks : public SimpleThread {
// |worker_pool| are idle before posting a new task. If |post_nested_task| is
// YES, each task posted by this thread posts another task when it runs.
ThreadPostingTasks(SchedulerWorkerPoolImpl* worker_pool,
- ExecutionMode execution_mode,
+ test::ExecutionMode execution_mode,
WaitBeforePostTask wait_before_post_task,
PostNestedTask post_nested_task)
: SimpleThread("ThreadPostingTasks"),
worker_pool_(worker_pool),
wait_before_post_task_(wait_before_post_task),
post_nested_task_(post_nested_task),
- factory_(worker_pool_->CreateTaskRunnerWithTraits(TaskTraits(),
- execution_mode),
+ factory_(CreateTaskRunnerWithExecutionMode(worker_pool, execution_mode),
execution_mode) {
DCHECK(worker_pool_);
}
@@ -249,7 +261,7 @@ TEST_P(TaskSchedulerWorkerPoolImplTest, PostTasksWithOneAvailableWorker) {
std::vector<std::unique_ptr<test::TestTaskFactory>> blocked_task_factories;
for (size_t i = 0; i < (kNumWorkersInWorkerPool - 1); ++i) {
blocked_task_factories.push_back(MakeUnique<test::TestTaskFactory>(
- worker_pool_->CreateTaskRunnerWithTraits(TaskTraits(), GetParam()),
+ CreateTaskRunnerWithExecutionMode(worker_pool_.get(), GetParam()),
GetParam()));
EXPECT_TRUE(blocked_task_factories.back()->PostTask(
PostNestedTask::NO, Bind(&WaitableEvent::Wait, Unretained(&event))));
@@ -259,7 +271,7 @@ TEST_P(TaskSchedulerWorkerPoolImplTest, PostTasksWithOneAvailableWorker) {
// Post |kNumTasksPostedPerThread| tasks that should all run despite the fact
// that only one worker in |worker_pool_| isn't busy.
test::TestTaskFactory short_task_factory(
- worker_pool_->CreateTaskRunnerWithTraits(TaskTraits(), GetParam()),
+ CreateTaskRunnerWithExecutionMode(worker_pool_.get(), GetParam()),
GetParam());
for (size_t i = 0; i < kNumTasksPostedPerThread; ++i)
EXPECT_TRUE(short_task_factory.PostTask(PostNestedTask::NO, Closure()));
@@ -283,7 +295,7 @@ TEST_P(TaskSchedulerWorkerPoolImplTest, Saturate) {
std::vector<std::unique_ptr<test::TestTaskFactory>> factories;
for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) {
factories.push_back(MakeUnique<test::TestTaskFactory>(
- worker_pool_->CreateTaskRunnerWithTraits(TaskTraits(), GetParam()),
+ CreateTaskRunnerWithExecutionMode(worker_pool_.get(), GetParam()),
GetParam()));
EXPECT_TRUE(factories.back()->PostTask(
PostNestedTask::NO, Bind(&WaitableEvent::Wait, Unretained(&event))));
@@ -301,47 +313,44 @@ TEST_P(TaskSchedulerWorkerPoolImplTest, Saturate) {
// Verify that a Task can't be posted after shutdown.
TEST_P(TaskSchedulerWorkerPoolImplTest, PostTaskAfterShutdown) {
auto task_runner =
- worker_pool_->CreateTaskRunnerWithTraits(TaskTraits(), GetParam());
+ CreateTaskRunnerWithExecutionMode(worker_pool_.get(), GetParam());
task_tracker_.Shutdown();
EXPECT_FALSE(task_runner->PostTask(FROM_HERE, Bind(&ShouldNotRunCallback)));
}
-// Verify that a Task posted with a delay is added to the DelayedTaskManager and
-// doesn't run before its delay expires.
+// Verify that a Task runs shortly after its delay expires.
TEST_P(TaskSchedulerWorkerPoolImplTest, PostDelayedTask) {
- EXPECT_TRUE(delayed_task_manager_.GetDelayedRunTime().is_null());
+ TimeTicks start_time = TimeTicks::Now();
- // Post a delayed task.
+ // Post a task with a short delay.
WaitableEvent task_ran(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
- EXPECT_TRUE(worker_pool_->CreateTaskRunnerWithTraits(TaskTraits(), GetParam())
+ EXPECT_TRUE(CreateTaskRunnerWithExecutionMode(worker_pool_.get(), GetParam())
->PostDelayedTask(FROM_HERE, Bind(&WaitableEvent::Signal,
Unretained(&task_ran)),
- TimeDelta::FromSeconds(10)));
-
- // The task should have been added to the DelayedTaskManager.
- EXPECT_FALSE(delayed_task_manager_.GetDelayedRunTime().is_null());
+ TestTimeouts::tiny_timeout()));
- // The task shouldn't run.
- EXPECT_FALSE(task_ran.IsSignaled());
-
- // Fast-forward time and post tasks that are ripe for execution.
- delayed_task_manager_.SetCurrentTime(
- delayed_task_manager_.GetDelayedRunTime());
- delayed_task_manager_.PostReadyTasks();
-
- // The task should run.
+ // Wait until the task runs.
task_ran.Wait();
+
+ // Expect the task to run after its delay expires, but not more than 250 ms
+ // after that.
+ const TimeDelta actual_delay = TimeTicks::Now() - start_time;
+ EXPECT_GE(actual_delay, TestTimeouts::tiny_timeout());
+ EXPECT_LT(actual_delay,
+ TimeDelta::FromMilliseconds(250) + TestTimeouts::tiny_timeout());
}
// Verify that the RunsTasksOnCurrentThread() method of a SEQUENCED TaskRunner
-// returns false when called from a task that isn't part of the sequence.
+// returns false when called from a task that isn't part of the sequence. Note:
+// Tests that use TestTaskFactory already verify that RunsTasksOnCurrentThread()
+// returns true when appropriate, so this test complements them to get full
+// coverage of that method.
TEST_P(TaskSchedulerWorkerPoolImplTest, SequencedRunsTasksOnCurrentThread) {
scoped_refptr<TaskRunner> task_runner(
- worker_pool_->CreateTaskRunnerWithTraits(TaskTraits(), GetParam()));
- scoped_refptr<TaskRunner> sequenced_task_runner(
- worker_pool_->CreateTaskRunnerWithTraits(TaskTraits(),
- ExecutionMode::SEQUENCED));
+ CreateTaskRunnerWithExecutionMode(worker_pool_.get(), GetParam()));
+ scoped_refptr<SequencedTaskRunner> sequenced_task_runner(
+ worker_pool_->CreateSequencedTaskRunnerWithTraits(TaskTraits()));
WaitableEvent task_ran(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
@@ -351,8 +360,6 @@ TEST_P(TaskSchedulerWorkerPoolImplTest, SequencedRunsTasksOnCurrentThread) {
[](scoped_refptr<TaskRunner> sequenced_task_runner,
WaitableEvent* task_ran) {
EXPECT_FALSE(sequenced_task_runner->RunsTasksOnCurrentThread());
- // Tests that use TestTaskFactory already verify that
- // RunsTasksOnCurrentThread() returns true when appropriate.
task_ran->Signal();
},
sequenced_task_runner, Unretained(&task_ran)));
@@ -361,13 +368,73 @@ TEST_P(TaskSchedulerWorkerPoolImplTest, SequencedRunsTasksOnCurrentThread) {
INSTANTIATE_TEST_CASE_P(Parallel,
TaskSchedulerWorkerPoolImplTest,
- ::testing::Values(ExecutionMode::PARALLEL));
+ ::testing::Values(test::ExecutionMode::PARALLEL));
INSTANTIATE_TEST_CASE_P(Sequenced,
TaskSchedulerWorkerPoolImplTest,
- ::testing::Values(ExecutionMode::SEQUENCED));
-INSTANTIATE_TEST_CASE_P(SingleThreaded,
- TaskSchedulerWorkerPoolImplTest,
- ::testing::Values(ExecutionMode::SINGLE_THREADED));
+ ::testing::Values(test::ExecutionMode::SEQUENCED));
+INSTANTIATE_TEST_CASE_P(
+ SingleThreaded,
+ TaskSchedulerWorkerPoolImplTest,
+ ::testing::Values(test::ExecutionMode::SINGLE_THREADED));
+
+namespace {
+
+// Same as TaskSchedulerWorkerPoolImplTest but its SchedulerWorkerPoolImpl
+// instance uses |max_threads == 1|.
+class TaskSchedulerWorkerPoolImplSingleWorkerTest
+ : public TaskSchedulerWorkerPoolImplTest {
+ public:
+ TaskSchedulerWorkerPoolImplSingleWorkerTest() = default;
+
+ protected:
+ void SetUp() override {
+ InitializeWorkerPool(TimeDelta::Max(), 1);
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerPoolImplSingleWorkerTest);
+};
+
+} // namespace
+
+// Verify that the RunsTasksOnCurrentThread() method of a
+// SchedulerSingleThreadTaskRunner returns false when called from a task that
+// isn't part of its sequence even though it's running on that
+// SchedulerSingleThreadTaskRunner's assigned worker. Note: Tests that use
+// TestTaskFactory already verify that RunsTasksOnCurrentThread() returns true
+// when appropriate, so this test complements them to get full coverage of
+// that method.
+TEST_P(TaskSchedulerWorkerPoolImplSingleWorkerTest,
+ SingleThreadRunsTasksOnCurrentThread) {
+ scoped_refptr<TaskRunner> task_runner(
+ CreateTaskRunnerWithExecutionMode(worker_pool_.get(), GetParam()));
+ scoped_refptr<SingleThreadTaskRunner> single_thread_task_runner(
+ worker_pool_->CreateSingleThreadTaskRunnerWithTraits(TaskTraits()));
+
+ WaitableEvent task_ran(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ task_runner->PostTask(
+ FROM_HERE,
+ Bind(
+ [](scoped_refptr<TaskRunner> single_thread_task_runner,
+ WaitableEvent* task_ran) {
+ EXPECT_FALSE(single_thread_task_runner->RunsTasksOnCurrentThread());
+ task_ran->Signal();
+ },
+ single_thread_task_runner, Unretained(&task_ran)));
+ task_ran.Wait();
+}
+
+INSTANTIATE_TEST_CASE_P(Parallel,
+ TaskSchedulerWorkerPoolImplSingleWorkerTest,
+ ::testing::Values(test::ExecutionMode::PARALLEL));
+INSTANTIATE_TEST_CASE_P(Sequenced,
+ TaskSchedulerWorkerPoolImplSingleWorkerTest,
+ ::testing::Values(test::ExecutionMode::SEQUENCED));
+INSTANTIATE_TEST_CASE_P(
+ SingleThreaded,
+ TaskSchedulerWorkerPoolImplSingleWorkerTest,
+ ::testing::Values(test::ExecutionMode::SINGLE_THREADED));
namespace {
@@ -403,19 +470,20 @@ class TaskSchedulerWorkerPoolImplIORestrictionTest
TEST_P(TaskSchedulerWorkerPoolImplIORestrictionTest, IORestriction) {
TaskTracker task_tracker;
- DelayedTaskManager delayed_task_manager(Bind(&DoNothing));
+ DelayedTaskManager delayed_task_manager(
+ make_scoped_refptr(new TestSimpleTaskRunner));
auto worker_pool = SchedulerWorkerPoolImpl::Create(
- SchedulerWorkerPoolParams("TestWorkerPoolWithParam",
- ThreadPriority::NORMAL, GetParam(), 1U,
- TimeDelta::Max()),
+ SchedulerWorkerPoolParams(
+ "TestWorkerPoolWithParam", ThreadPriority::NORMAL, GetParam(),
+ StandbyThreadPolicy::LAZY, 1U, TimeDelta::Max()),
Bind(&NotReachedReEnqueueSequenceCallback), &task_tracker,
&delayed_task_manager);
ASSERT_TRUE(worker_pool);
WaitableEvent task_ran(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
- worker_pool->CreateTaskRunnerWithTraits(TaskTraits(), ExecutionMode::PARALLEL)
+ worker_pool->CreateTaskRunnerWithTraits(TaskTraits())
->PostTask(FROM_HERE, Bind(&ExpectIORestriction, GetParam(), &task_ran));
task_ran.Wait();
@@ -460,10 +528,9 @@ class TaskSchedulerWorkerPoolSingleThreadedTest
// Verify that thread resources for a single thread remain.
TEST_F(TaskSchedulerWorkerPoolSingleThreadedTest, SingleThreadTask) {
auto single_thread_task_runner =
- worker_pool_->CreateTaskRunnerWithTraits(
- TaskTraits().
- WithShutdownBehavior(TaskShutdownBehavior::BLOCK_SHUTDOWN),
- ExecutionMode::SINGLE_THREADED);
+ worker_pool_->CreateSingleThreadTaskRunnerWithTraits(
+ TaskTraits().WithShutdownBehavior(
+ TaskShutdownBehavior::BLOCK_SHUTDOWN));
single_thread_task_runner->PostTask(
FROM_HERE,
Bind(&TaskSchedulerWorkerPoolSingleThreadedTest::InitializeThreadChecker,
@@ -535,10 +602,9 @@ TEST_F(TaskSchedulerWorkerPoolCheckTlsReuse, CheckDetachedThreads) {
// Saturate the threads and mark each thread with a magic TLS value.
std::vector<std::unique_ptr<test::TestTaskFactory>> factories;
for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) {
- factories.push_back(WrapUnique(new test::TestTaskFactory(
- worker_pool_->CreateTaskRunnerWithTraits(
- TaskTraits(), ExecutionMode::PARALLEL),
- ExecutionMode::PARALLEL)));
+ factories.push_back(MakeUnique<test::TestTaskFactory>(
+ worker_pool_->CreateTaskRunnerWithTraits(TaskTraits()),
+ test::ExecutionMode::PARALLEL));
ASSERT_TRUE(factories.back()->PostTask(
PostNestedTask::NO,
Bind(&TaskSchedulerWorkerPoolCheckTlsReuse::SetTlsValueAndWait,
@@ -591,15 +657,14 @@ class TaskSchedulerWorkerPoolHistogramTest
TaskSchedulerWorkerPoolHistogramTest() = default;
protected:
+ // Override SetUp() to allow every test case to initialize a worker pool with
+ // its own arguments.
void SetUp() override {}
- void TearDown() override { worker_pool_->JoinForTesting(); }
-
private:
std::unique_ptr<StatisticsRecorder> statistics_recorder_ =
StatisticsRecorder::CreateTemporaryForTesting();
- private:
DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerPoolHistogramTest);
};
@@ -609,8 +674,8 @@ TEST_F(TaskSchedulerWorkerPoolHistogramTest, NumTasksBetweenWaits) {
WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
InitializeWorkerPool(TimeDelta::Max(), kNumWorkersInWorkerPool);
- auto task_runner = worker_pool_->CreateTaskRunnerWithTraits(
- TaskTraits(), ExecutionMode::SEQUENCED);
+ auto task_runner =
+ worker_pool_->CreateSequencedTaskRunnerWithTraits(TaskTraits());
// Post a task.
task_runner->PostTask(FROM_HERE,
@@ -633,15 +698,10 @@ TEST_F(TaskSchedulerWorkerPoolHistogramTest, NumTasksBetweenWaits) {
worker_pool_->WaitForAllWorkersIdleForTesting();
// Verify that counts were recorded to the histogram as expected.
- EXPECT_EQ(0, worker_pool_->num_tasks_between_waits_histogram_for_testing()
- ->SnapshotSamples()
- ->GetCount(0));
- EXPECT_EQ(1, worker_pool_->num_tasks_between_waits_histogram_for_testing()
- ->SnapshotSamples()
- ->GetCount(3));
- EXPECT_EQ(0, worker_pool_->num_tasks_between_waits_histogram_for_testing()
- ->SnapshotSamples()
- ->GetCount(10));
+ const auto* histogram = worker_pool_->num_tasks_between_waits_histogram();
+ EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(0));
+ EXPECT_EQ(1, histogram->SnapshotSamples()->GetCount(3));
+ EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(10));
}
namespace {
@@ -658,8 +718,7 @@ TEST_F(TaskSchedulerWorkerPoolHistogramTest, NumTasksBetweenWaitsWithDetach) {
WaitableEvent tasks_can_exit_event(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
InitializeWorkerPool(kReclaimTimeForDetachTests, kNumWorkersInWorkerPool);
- auto task_runner = worker_pool_->CreateTaskRunnerWithTraits(
- TaskTraits(), ExecutionMode::PARALLEL);
+ auto task_runner = worker_pool_->CreateTaskRunnerWithTraits(TaskTraits());
// Post tasks to saturate the pool.
std::vector<std::unique_ptr<WaitableEvent>> task_started_events;
@@ -697,31 +756,106 @@ TEST_F(TaskSchedulerWorkerPoolHistogramTest, NumTasksBetweenWaitsWithDetach) {
for (const auto& task_started_event : task_started_events)
task_started_event->Wait();
+ const auto* histogram = worker_pool_->num_tasks_between_waits_histogram();
+
// Verify that counts were recorded to the histogram as expected.
// - The "0" bucket has a count of at least 1 because the SchedulerWorker on
// top of the idle stack isn't allowed to detach when its sleep timeout
// expires. Instead, it waits on its WaitableEvent again without running a
// task. The count may be higher than 1 because of spurious wake ups before
// the sleep timeout expires.
- EXPECT_GE(worker_pool_->num_tasks_between_waits_histogram_for_testing()
- ->SnapshotSamples()
- ->GetCount(0),
- 1);
+ EXPECT_GE(histogram->SnapshotSamples()->GetCount(0), 1);
// - The "1" bucket has a count of |kNumWorkersInWorkerPool| because each
// SchedulerWorker ran a task before waiting on its WaitableEvent at the
// beginning of the test.
EXPECT_EQ(static_cast<int>(kNumWorkersInWorkerPool),
- worker_pool_->num_tasks_between_waits_histogram_for_testing()
- ->SnapshotSamples()
- ->GetCount(1));
- EXPECT_EQ(0, worker_pool_->num_tasks_between_waits_histogram_for_testing()
- ->SnapshotSamples()
- ->GetCount(10));
+ histogram->SnapshotSamples()->GetCount(1));
+ EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(10));
tasks_can_exit_event.Signal();
worker_pool_->WaitForAllWorkersIdleForTesting();
worker_pool_->DisallowWorkerDetachmentForTesting();
}
+namespace {
+
+void CaptureThreadId(PlatformThreadId* thread_id) {
+ ASSERT_TRUE(thread_id);
+ *thread_id = PlatformThread::CurrentId();
+}
+
+void VerifyThreadIdIsNot(PlatformThreadId thread_id) {
+ EXPECT_NE(thread_id, PlatformThread::CurrentId());
+}
+
+} // namespace
+
+TEST_F(TaskSchedulerWorkerPoolHistogramTest, NumTasksBeforeDetach) {
+ InitializeWorkerPool(kReclaimTimeForDetachTests, kNumWorkersInWorkerPool);
+
+ // This test assumes that the TaskRunners aren't assigned to the same worker.
+ auto task_runner =
+ worker_pool_->CreateSingleThreadTaskRunnerWithTraits(TaskTraits());
+ auto other_task_runner =
+ worker_pool_->CreateSingleThreadTaskRunnerWithTraits(TaskTraits());
+
+ // Post 3 tasks and wait until they run.
+ PlatformThreadId thread_id;
+ task_runner->PostTask(FROM_HERE,
+ Bind(&CaptureThreadId, Unretained(&thread_id)));
+ task_runner->PostTask(FROM_HERE, Bind(&DoNothing));
+ task_runner->PostTask(FROM_HERE, Bind(&DoNothing));
+ worker_pool_->WaitForAllWorkersIdleForTesting();
+
+ // To allow the SchedulerWorker associated with |task_runner| to detach:
+ // - Make sure it isn't on top of the idle stack by waking up another
+ // SchedulerWorker and waiting until it goes back to sleep.
+ // - Release |task_runner|.
+ other_task_runner->PostTask(FROM_HERE, Bind(&VerifyThreadIdIsNot, thread_id));
+ worker_pool_->WaitForAllWorkersIdleForTesting();
+ task_runner = nullptr;
+
+ // Allow the SchedulerWorker that was associated with |task_runner| to detach.
+ PlatformThread::Sleep(kReclaimTimeForDetachTests + kExtraTimeToWaitForDetach);
+ worker_pool_->DisallowWorkerDetachmentForTesting();
+
+ // Verify that counts were recorded to the histogram as expected.
+ const auto* histogram = worker_pool_->num_tasks_before_detach_histogram();
+ EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(0));
+ EXPECT_EQ(1, histogram->SnapshotSamples()->GetCount(3));
+ EXPECT_EQ(0, histogram->SnapshotSamples()->GetCount(10));
+}
+
+TEST(TaskSchedulerWorkerPoolStandbyPolicyTest, InitLazy) {
+ TaskTracker task_tracker;
+ DelayedTaskManager delayed_task_manager(
+ make_scoped_refptr(new TestSimpleTaskRunner));
+ auto worker_pool = SchedulerWorkerPoolImpl::Create(
+ SchedulerWorkerPoolParams("LazyPolicyWorkerPool", ThreadPriority::NORMAL,
+ IORestriction::DISALLOWED,
+ StandbyThreadPolicy::LAZY, 8U,
+ TimeDelta::Max()),
+ Bind(&NotReachedReEnqueueSequenceCallback), &task_tracker,
+ &delayed_task_manager);
+ ASSERT_TRUE(worker_pool);
+ EXPECT_EQ(0U, worker_pool->NumberOfAliveWorkersForTesting());
+ worker_pool->JoinForTesting();
+}
+
+TEST(TaskSchedulerWorkerPoolStandbyPolicyTest, InitOne) {
+ TaskTracker task_tracker;
+ DelayedTaskManager delayed_task_manager(
+ make_scoped_refptr(new TestSimpleTaskRunner));
+ auto worker_pool = SchedulerWorkerPoolImpl::Create(
+ SchedulerWorkerPoolParams("LazyPolicyWorkerPool", ThreadPriority::NORMAL,
+ IORestriction::DISALLOWED,
+ StandbyThreadPolicy::ONE, 8U, TimeDelta::Max()),
+ Bind(&NotReachedReEnqueueSequenceCallback), &task_tracker,
+ &delayed_task_manager);
+ ASSERT_TRUE(worker_pool);
+ EXPECT_EQ(1U, worker_pool->NumberOfAliveWorkersForTesting());
+ worker_pool->JoinForTesting();
+}
+
} // namespace internal
} // namespace base
diff --git a/chromium/base/task_scheduler/scheduler_worker_pool_params.cc b/chromium/base/task_scheduler/scheduler_worker_pool_params.cc
index d820460e91d..dbc02f58261 100644
--- a/chromium/base/task_scheduler/scheduler_worker_pool_params.cc
+++ b/chromium/base/task_scheduler/scheduler_worker_pool_params.cc
@@ -12,11 +12,13 @@ SchedulerWorkerPoolParams::SchedulerWorkerPoolParams(
const std::string& name,
ThreadPriority priority_hint,
IORestriction io_restriction,
+ StandbyThreadPolicy standby_thread_policy,
int max_threads,
const TimeDelta& suggested_reclaim_time)
: name_(name),
priority_hint_(priority_hint),
io_restriction_(io_restriction),
+ standby_thread_policy_(standby_thread_policy),
max_threads_(max_threads),
suggested_reclaim_time_(suggested_reclaim_time) {}
diff --git a/chromium/base/task_scheduler/scheduler_worker_pool_params.h b/chromium/base/task_scheduler/scheduler_worker_pool_params.h
index bba78559909..c33392afd06 100644
--- a/chromium/base/task_scheduler/scheduler_worker_pool_params.h
+++ b/chromium/base/task_scheduler/scheduler_worker_pool_params.h
@@ -21,6 +21,14 @@ class BASE_EXPORT SchedulerWorkerPoolParams final {
DISALLOWED,
};
+ enum class StandbyThreadPolicy {
+ // Create threads on demand as work arrives; they are reclaimed as
+ // necessary.
+ LAZY,
+ // When possible, keep one idle thread alive on standby; it is reclaimed as
+ // necessary.
+ ONE,
+ };
+
// Construct a scheduler worker pool parameter object. |name| will be used to
// label the pool's threads ("TaskScheduler" + |name| + index) and histograms
// ("TaskScheduler." + histogram name + "." + |name| + extra suffixes). The
@@ -33,6 +41,7 @@ class BASE_EXPORT SchedulerWorkerPoolParams final {
SchedulerWorkerPoolParams(const std::string& name,
ThreadPriority priority_hint,
IORestriction io_restriction,
+ StandbyThreadPolicy standby_thread_policy,
int max_threads,
const TimeDelta& suggested_reclaim_time);
SchedulerWorkerPoolParams(SchedulerWorkerPoolParams&& other);
@@ -41,6 +50,9 @@ class BASE_EXPORT SchedulerWorkerPoolParams final {
const std::string& name() const { return name_; }
ThreadPriority priority_hint() const { return priority_hint_; }
IORestriction io_restriction() const { return io_restriction_; }
+ StandbyThreadPolicy standby_thread_policy() const {
+ return standby_thread_policy_;
+ }
size_t max_threads() const { return max_threads_; }
const TimeDelta& suggested_reclaim_time() const {
return suggested_reclaim_time_;
@@ -50,6 +62,7 @@ class BASE_EXPORT SchedulerWorkerPoolParams final {
std::string name_;
ThreadPriority priority_hint_;
IORestriction io_restriction_;
+ StandbyThreadPolicy standby_thread_policy_;
size_t max_threads_;
TimeDelta suggested_reclaim_time_;
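
For reference, a minimal sketch of constructing pool parameters with the new StandbyThreadPolicy argument; the pool names and sizes are illustrative, not taken from this patch:

    using IORestriction = SchedulerWorkerPoolParams::IORestriction;
    using StandbyThreadPolicy = SchedulerWorkerPoolParams::StandbyThreadPolicy;

    // LAZY: no worker thread exists until work arrives, as exercised by the
    // InitLazy test above.
    SchedulerWorkerPoolParams lazy_params(
        "ExampleBackgroundPool", ThreadPriority::BACKGROUND,
        IORestriction::DISALLOWED, StandbyThreadPolicy::LAZY,
        4 /* max_threads */, TimeDelta::Max() /* suggested_reclaim_time */);

    // ONE: one worker is kept alive on standby, as exercised by the InitOne
    // test above.
    SchedulerWorkerPoolParams one_params(
        "ExampleForegroundPool", ThreadPriority::NORMAL,
        IORestriction::ALLOWED, StandbyThreadPolicy::ONE, 4, TimeDelta::Max());
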
diff --git a/chromium/base/task_scheduler/scheduler_worker_stack_unittest.cc b/chromium/base/task_scheduler/scheduler_worker_stack_unittest.cc
index c56bc8a063c..5359dcd0ebc 100644
--- a/chromium/base/task_scheduler/scheduler_worker_stack_unittest.cc
+++ b/chromium/base/task_scheduler/scheduler_worker_stack_unittest.cc
@@ -22,13 +22,13 @@ namespace {
class MockSchedulerWorkerDelegate : public SchedulerWorker::Delegate {
public:
- void OnMainEntry(SchedulerWorker* worker,
- const TimeDelta& detach_duration) override {}
+ void OnMainEntry(SchedulerWorker* worker) override {}
scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) override {
return nullptr;
}
- void DidRunTask(const Task* task, const TimeDelta& task_latency) override {
- ADD_FAILURE() << "Unexpected call to DidRunTask()";
+ void DidRunTaskWithPriority(TaskPriority task_priority,
+ const TimeDelta& task_latency) override {
+ ADD_FAILURE() << "Unexpected call to DidRunTaskWithPriority()";
}
void ReEnqueueSequence(scoped_refptr<Sequence> sequence) override {
ADD_FAILURE() << "Unexpected call to ReEnqueueSequence()";
@@ -39,6 +39,7 @@ class MockSchedulerWorkerDelegate : public SchedulerWorker::Delegate {
bool CanDetach(SchedulerWorker* worker) override {
return false;
}
+ void OnDetach() override { ADD_FAILURE() << "Unexpected call to OnDetach()"; }
};
class TaskSchedulerWorkerStackTest : public testing::Test {
diff --git a/chromium/base/task_scheduler/scheduler_worker_unittest.cc b/chromium/base/task_scheduler/scheduler_worker_unittest.cc
index 47a35cbecac..22847580e32 100644
--- a/chromium/base/task_scheduler/scheduler_worker_unittest.cc
+++ b/chromium/base/task_scheduler/scheduler_worker_unittest.cc
@@ -41,19 +41,20 @@ class SchedulerWorkerDefaultDelegate : public SchedulerWorker::Delegate {
SchedulerWorkerDefaultDelegate() = default;
// SchedulerWorker::Delegate:
- void OnMainEntry(SchedulerWorker* worker,
- const TimeDelta& detach_duration) override {}
+ void OnMainEntry(SchedulerWorker* worker) override {}
scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) override {
return nullptr;
}
- void DidRunTask(const Task* task, const TimeDelta& task_latency) override {
- ADD_FAILURE() << "Unexpected call to DidRunTask()";
+ void DidRunTaskWithPriority(TaskPriority task_priority,
+ const TimeDelta& task_latency) override {
+ ADD_FAILURE() << "Unexpected call to DidRunTaskWithPriority()";
}
void ReEnqueueSequence(scoped_refptr<Sequence> sequence) override {
ADD_FAILURE() << "Unexpected call to ReEnqueueSequence()";
}
TimeDelta GetSleepTimeout() override { return TimeDelta::Max(); }
bool CanDetach(SchedulerWorker* worker) override { return false; }
+ void OnDetach() override { ADD_FAILURE() << "Unexpected call to OnDetach()"; }
private:
DISALLOW_COPY_AND_ASSIGN(SchedulerWorkerDefaultDelegate);
@@ -126,15 +127,14 @@ class TaskSchedulerWorkerTest : public testing::TestWithParam<size_t> {
: outer_(outer) {}
~TestSchedulerWorkerDelegate() override {
- EXPECT_FALSE(IsCallToDidRunTaskExpected());
+ EXPECT_FALSE(IsCallToDidRunTaskWithPriorityExpected());
}
// SchedulerWorker::Delegate:
- void OnMainEntry(SchedulerWorker* worker,
- const TimeDelta& detach_duration) override {
+ void OnMainEntry(SchedulerWorker* worker) override {
outer_->worker_set_.Wait();
EXPECT_EQ(outer_->worker_.get(), worker);
- EXPECT_FALSE(IsCallToDidRunTaskExpected());
+ EXPECT_FALSE(IsCallToDidRunTaskWithPriorityExpected());
// Without synchronization, OnMainEntry() could be called twice without
// generating an error.
@@ -144,7 +144,7 @@ class TaskSchedulerWorkerTest : public testing::TestWithParam<size_t> {
}
scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) override {
- EXPECT_FALSE(IsCallToDidRunTaskExpected());
+ EXPECT_FALSE(IsCallToDidRunTaskWithPriorityExpected());
EXPECT_EQ(outer_->worker_.get(), worker);
{
@@ -174,7 +174,7 @@ class TaskSchedulerWorkerTest : public testing::TestWithParam<size_t> {
sequence->PushTask(std::move(task));
}
- ExpectCallToDidRunTask(sequence->PeekTask());
+ ExpectCallToDidRunTaskWithPriority(sequence->PeekTaskTraits().priority());
{
// Add the Sequence to the vector of created Sequences.
@@ -185,11 +185,13 @@ class TaskSchedulerWorkerTest : public testing::TestWithParam<size_t> {
return sequence;
}
- void DidRunTask(const Task* task, const TimeDelta& task_latency) override {
- AutoSchedulerLock auto_lock(expect_did_run_task_lock_);
- EXPECT_EQ(expect_did_run_task_, task);
- expect_did_run_task_ = nullptr;
+ void DidRunTaskWithPriority(TaskPriority task_priority,
+ const TimeDelta& task_latency) override {
+ AutoSchedulerLock auto_lock(expect_did_run_task_with_priority_lock_);
+ EXPECT_TRUE(expect_did_run_task_with_priority_);
+ EXPECT_EQ(expected_task_priority_, task_priority);
EXPECT_FALSE(task_latency.is_max());
+ expect_did_run_task_with_priority_ = false;
}
// This override verifies that |sequence| contains the expected number of
@@ -197,15 +199,14 @@ class TaskSchedulerWorkerTest : public testing::TestWithParam<size_t> {
// EnqueueSequence implementation, it doesn't reinsert |sequence| into a
// queue for further execution.
void ReEnqueueSequence(scoped_refptr<Sequence> sequence) override {
- EXPECT_FALSE(IsCallToDidRunTaskExpected());
+ EXPECT_FALSE(IsCallToDidRunTaskWithPriorityExpected());
EXPECT_GT(outer_->TasksPerSequence(), 1U);
// Verify that |sequence| contains TasksPerSequence() - 1 Tasks.
for (size_t i = 0; i < outer_->TasksPerSequence() - 1; ++i) {
- EXPECT_TRUE(sequence->PeekTask());
- sequence->PopTask();
+ EXPECT_TRUE(sequence->TakeTask());
+ EXPECT_EQ(i == outer_->TasksPerSequence() - 2, sequence->Pop());
}
- EXPECT_FALSE(sequence->PeekTask());
// Add |sequence| to |re_enqueued_sequences_|.
AutoSchedulerLock auto_lock(outer_->lock_);
@@ -215,27 +216,31 @@ class TaskSchedulerWorkerTest : public testing::TestWithParam<size_t> {
}
private:
- // Expect a call to DidRunTask() with |task| as argument before the next
- // call to any other method of this delegate.
- void ExpectCallToDidRunTask(const Task* task) {
- AutoSchedulerLock auto_lock(expect_did_run_task_lock_);
- expect_did_run_task_ = task;
+ // Expect a call to DidRunTaskWithPriority() with |task_priority| as
+ // argument before the next call to any other method of this delegate.
+ void ExpectCallToDidRunTaskWithPriority(TaskPriority task_priority) {
+ AutoSchedulerLock auto_lock(expect_did_run_task_with_priority_lock_);
+ expect_did_run_task_with_priority_ = true;
+ expected_task_priority_ = task_priority;
}
- bool IsCallToDidRunTaskExpected() const {
- AutoSchedulerLock auto_lock(expect_did_run_task_lock_);
- return expect_did_run_task_ != nullptr;
+ bool IsCallToDidRunTaskWithPriorityExpected() const {
+ AutoSchedulerLock auto_lock(expect_did_run_task_with_priority_lock_);
+ return expect_did_run_task_with_priority_;
}
TaskSchedulerWorkerTest* outer_;
- // Synchronizes access to |expect_did_run_task_|.
- mutable SchedulerLock expect_did_run_task_lock_;
+ // Synchronizes access to |expect_did_run_task_with_priority_| and
+ // |expected_task_priority_|.
+ mutable SchedulerLock expect_did_run_task_with_priority_lock_;
- // Expected task for the next call to DidRunTask(). DidRunTask() should not
- // be called when this is nullptr. No method other than DidRunTask() should
- // be called on this delegate when this is not nullptr.
- const Task* expect_did_run_task_ = nullptr;
+ // Whether the next method called on this delegate should be
+ // DidRunTaskWithPriority().
+ bool expect_did_run_task_with_priority_ = false;
+
+ // Expected priority for the next call to DidRunTaskWithPriority().
+ TaskPriority expected_task_priority_ = TaskPriority::BACKGROUND;
};
void RunTaskCallback() {
@@ -357,15 +362,16 @@ class ControllableDetachDelegate : public SchedulerWorkerDefaultDelegate {
work_processed_(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED),
detach_requested_(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED) {
+ WaitableEvent::InitialState::NOT_SIGNALED),
+ detached_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED) {
EXPECT_TRUE(task_tracker_);
}
~ControllableDetachDelegate() override = default;
// SchedulerWorker::Delegate:
- MOCK_METHOD2(OnMainEntry,
- void(SchedulerWorker* worker, const TimeDelta& detach_duration));
+ MOCK_METHOD1(OnMainEntry, void(SchedulerWorker* worker));
scoped_refptr<Sequence> GetWork(SchedulerWorker* worker)
override {
@@ -384,13 +390,20 @@ class ControllableDetachDelegate : public SchedulerWorkerDefaultDelegate {
return sequence;
}
- void DidRunTask(const Task* task, const TimeDelta& task_latency) override {}
+ void DidRunTaskWithPriority(TaskPriority task_priority,
+ const TimeDelta& task_latency) override {}
bool CanDetach(SchedulerWorker* worker) override {
detach_requested_.Signal();
return can_detach_;
}
+ void OnDetach() override {
+ EXPECT_TRUE(can_detach_);
+ EXPECT_TRUE(detach_requested_.IsSignaled());
+ detached_.Signal();
+ }
+
void WaitForWorkToRun() {
work_processed_.Wait();
}
@@ -399,6 +412,8 @@ class ControllableDetachDelegate : public SchedulerWorkerDefaultDelegate {
detach_requested_.Wait();
}
+ void WaitForDetach() { detached_.Wait(); }
+
void ResetState() {
work_requested_ = false;
work_processed_.Reset();
@@ -413,6 +428,7 @@ class ControllableDetachDelegate : public SchedulerWorkerDefaultDelegate {
bool can_detach_ = false;
WaitableEvent work_processed_;
WaitableEvent detach_requested_;
+ WaitableEvent detached_;
DISALLOW_COPY_AND_ASSIGN(ControllableDetachDelegate);
};
@@ -425,7 +441,7 @@ TEST(TaskSchedulerWorkerTest, WorkerDetaches) {
ControllableDetachDelegate* delegate =
new StrictMock<ControllableDetachDelegate>(&task_tracker);
delegate->set_can_detach(true);
- EXPECT_CALL(*delegate, OnMainEntry(_, TimeDelta::Max()));
+ EXPECT_CALL(*delegate, OnMainEntry(_));
std::unique_ptr<SchedulerWorker> worker =
SchedulerWorker::Create(
ThreadPriority::NORMAL, WrapUnique(delegate), &task_tracker,
@@ -434,8 +450,7 @@ TEST(TaskSchedulerWorkerTest, WorkerDetaches) {
delegate->WaitForWorkToRun();
Mock::VerifyAndClear(delegate);
delegate->WaitForDetachRequest();
- // Sleep to give a chance for the detach to happen. A yield is too short.
- PlatformThread::Sleep(TimeDelta::FromMilliseconds(50));
+ delegate->WaitForDetach();
ASSERT_FALSE(worker->ThreadAliveForTesting());
}
@@ -445,7 +460,7 @@ TEST(TaskSchedulerWorkerTest, WorkerDetachesAndWakes) {
ControllableDetachDelegate* delegate =
new StrictMock<ControllableDetachDelegate>(&task_tracker);
delegate->set_can_detach(true);
- EXPECT_CALL(*delegate, OnMainEntry(_, TimeDelta::Max()));
+ EXPECT_CALL(*delegate, OnMainEntry(_));
std::unique_ptr<SchedulerWorker> worker =
SchedulerWorker::Create(
ThreadPriority::NORMAL, WrapUnique(delegate), &task_tracker,
@@ -454,20 +469,19 @@ TEST(TaskSchedulerWorkerTest, WorkerDetachesAndWakes) {
delegate->WaitForWorkToRun();
Mock::VerifyAndClear(delegate);
delegate->WaitForDetachRequest();
- // Sleep to give a chance for the detach to happen. A yield is too short.
- PlatformThread::Sleep(TimeDelta::FromMilliseconds(50));
+ delegate->WaitForDetach();
ASSERT_FALSE(worker->ThreadAliveForTesting());
delegate->ResetState();
delegate->set_can_detach(false);
- // When SchedulerWorker recreates its thread, expect OnMainEntry() to be
- // called with a detach duration which is not TimeDelta::Max().
- EXPECT_CALL(*delegate, OnMainEntry(worker.get(), Ne(TimeDelta::Max())));
+ // Expect OnMainEntry() to be called when SchedulerWorker recreates its
+ // thread.
+ EXPECT_CALL(*delegate, OnMainEntry(worker.get()));
worker->WakeUp();
delegate->WaitForWorkToRun();
Mock::VerifyAndClear(delegate);
delegate->WaitForDetachRequest();
- PlatformThread::Sleep(TimeDelta::FromMilliseconds(50));
+ delegate->WaitForDetach();
ASSERT_TRUE(worker->ThreadAliveForTesting());
worker->JoinForTesting();
}
@@ -482,7 +496,7 @@ TEST(TaskSchedulerWorkerTest, CreateDetached) {
ThreadPriority::NORMAL, WrapUnique(delegate), &task_tracker,
SchedulerWorker::InitialState::DETACHED);
ASSERT_FALSE(worker->ThreadAliveForTesting());
- EXPECT_CALL(*delegate, OnMainEntry(worker.get(), TimeDelta::Max()));
+ EXPECT_CALL(*delegate, OnMainEntry(worker.get()));
worker->WakeUp();
delegate->WaitForWorkToRun();
Mock::VerifyAndClear(delegate);
@@ -510,10 +524,7 @@ class ExpectThreadPriorityDelegate : public SchedulerWorkerDefaultDelegate {
}
// SchedulerWorker::Delegate:
- void OnMainEntry(SchedulerWorker* worker,
- const TimeDelta& detach_duration) override {
- VerifyThreadPriority();
- }
+ void OnMainEntry(SchedulerWorker* worker) override { VerifyThreadPriority(); }
scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) override {
VerifyThreadPriority();
priority_verified_in_get_work_event_.Signal();
diff --git a/chromium/base/task_scheduler/sequence.cc b/chromium/base/task_scheduler/sequence.cc
index 86e99f0a4c3..601b5402d06 100644
--- a/chromium/base/task_scheduler/sequence.cc
+++ b/chromium/base/task_scheduler/sequence.cc
@@ -26,37 +26,32 @@ bool Sequence::PushTask(std::unique_ptr<Task> task) {
return queue_.size() == 1;
}
-const Task* Sequence::PeekTask() const {
+std::unique_ptr<Task> Sequence::TakeTask() {
AutoSchedulerLock auto_lock(lock_);
+ DCHECK(!queue_.empty());
+ DCHECK(queue_.front());
- if (queue_.empty())
- return nullptr;
+ const int priority_index =
+ static_cast<int>(queue_.front()->traits.priority());
+ DCHECK_GT(num_tasks_per_priority_[priority_index], 0U);
+ --num_tasks_per_priority_[priority_index];
- return queue_.front().get();
+ return std::move(queue_.front());
}
-bool Sequence::PopTask() {
- // Delete the popped task outside the scope of |lock_|. This prevents a double
- // acquisition of |lock_| if the task's destructor tries to post a task to
- // this Sequence and reduces contention.
- std::unique_ptr<Task> delete_outside_lock_scope;
- bool sequence_empty_after_pop = false;
-
- {
- AutoSchedulerLock auto_lock(lock_);
- DCHECK(!queue_.empty());
-
- const int priority_index =
- static_cast<int>(queue_.front()->traits.priority());
- DCHECK_GT(num_tasks_per_priority_[priority_index], 0U);
- --num_tasks_per_priority_[priority_index];
-
- delete_outside_lock_scope = std::move(queue_.front());
- queue_.pop();
- sequence_empty_after_pop = queue_.empty();
- }
+TaskTraits Sequence::PeekTaskTraits() const {
+ AutoSchedulerLock auto_lock(lock_);
+ DCHECK(!queue_.empty());
+ DCHECK(queue_.front());
+ return queue_.front()->traits;
+}
- return sequence_empty_after_pop;
+bool Sequence::Pop() {
+ AutoSchedulerLock auto_lock(lock_);
+ DCHECK(!queue_.empty());
+ DCHECK(!queue_.front());
+ queue_.pop();
+ return queue_.empty();
}
SequenceSortKey Sequence::GetSortKey() const {
diff --git a/chromium/base/task_scheduler/sequence.h b/chromium/base/task_scheduler/sequence.h
index 8717336080e..408d99f9c64 100644
--- a/chromium/base/task_scheduler/sequence.h
+++ b/chromium/base/task_scheduler/sequence.h
@@ -22,7 +22,10 @@
namespace base {
namespace internal {
-// A sequence holds tasks that must be executed in posting order.
+// A Sequence holds slots, each containing at most one Task. Tasks must be
+// executed in posting order.
+//
+// In comments below, an "empty Sequence" is a Sequence with no slots.
//
// Note: there is a known refcounted-ownership cycle in the Scheduler
// architecture: Sequence -> Task -> TaskRunner -> Sequence -> ...
@@ -41,20 +44,27 @@ class BASE_EXPORT Sequence : public RefCountedThreadSafe<Sequence> {
public:
Sequence();
- // Adds |task| at the end of the sequence's queue. Returns true if the
- // sequence was empty before this operation.
+ // Adds |task| in a new slot at the end of the Sequence. Returns true if the
+ // Sequence was empty before this operation.
bool PushTask(std::unique_ptr<Task> task);
- // Returns the task in front of the sequence's queue, if any.
- const Task* PeekTask() const;
+ // Transfers ownership of the Task in the front slot of the Sequence to the
+ // caller. The front slot then holds nullptr until Pop() is called. Cannot be
+ // called on an empty Sequence or on a Sequence whose front slot is already
+ // nullptr.
+ std::unique_ptr<Task> TakeTask();
+
+ // Returns the TaskTraits of the Task in front of the Sequence. Cannot be
+ // called on an empty Sequence or on a Sequence whose front slot is empty.
+ TaskTraits PeekTaskTraits() const;
- // Removes the task in front of the sequence's queue. Returns true if the
- // sequence is empty after this operation. Cannot be called on an empty
- // sequence.
- bool PopTask();
+ // Removes the front slot of the Sequence. The front slot must have been
+ // emptied by TakeTask() before this is called. Cannot be called on an empty
+ // Sequence. Returns true if the Sequence is empty after this operation.
+ bool Pop();
- // Returns a SequenceSortKey representing the priority of the sequence. Cannot
- // be called on an empty sequence.
+ // Returns a SequenceSortKey representing the priority of the Sequence. Cannot
+ // be called on an empty Sequence.
SequenceSortKey GetSortKey() const;
// Returns a token that uniquely identifies this Sequence.
@@ -72,7 +82,7 @@ class BASE_EXPORT Sequence : public RefCountedThreadSafe<Sequence> {
// Queue of tasks to execute.
std::queue<std::unique_ptr<Task>> queue_;
- // Number of tasks contained in the sequence for each priority.
+ // Number of tasks contained in the Sequence for each priority.
size_t num_tasks_per_priority_[static_cast<int>(TaskPriority::HIGHEST) + 1] =
{};
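
A minimal sketch of the two-phase consumption contract described above, in the order a worker drives it; RunFrontTask is a hypothetical helper (in the scheduler itself this logic is split between SchedulerWorker and TaskTracker):

    #include <memory>

    #include "base/task_scheduler/sequence.h"
    #include "base/task_scheduler/task.h"

    namespace base {
    namespace internal {

    // Runs the Task in the front slot of |sequence|. Returns true if
    // |sequence| is empty afterwards and must not be re-enqueued.
    bool RunFrontTask(scoped_refptr<Sequence> sequence) {
      // Phase 1: take ownership of the front Task. The emptied slot stays in
      // place, so GetSortKey() stays meaningful while the task runs.
      std::unique_ptr<Task> task = sequence->TakeTask();
      task->task.Run();
      // Phase 2: remove the emptied slot. Calling Pop() before TakeTask(), or
      // TakeTask() twice in a row, trips the DCHECKs added in sequence.cc.
      return sequence->Pop();
    }

    }  // namespace internal
    }  // namespace base
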
diff --git a/chromium/base/task_scheduler/sequence_unittest.cc b/chromium/base/task_scheduler/sequence_unittest.cc
index 41a9794ac61..ba020cb31c0 100644
--- a/chromium/base/task_scheduler/sequence_unittest.cc
+++ b/chromium/base/task_scheduler/sequence_unittest.cc
@@ -9,6 +9,7 @@
#include "base/bind.h"
#include "base/macros.h"
#include "base/memory/ptr_util.h"
+#include "base/test/gtest_util.h"
#include "base/time/time.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -17,30 +18,6 @@ namespace internal {
namespace {
-// A class that pushes a Task to a Sequence in its destructor.
-class PushTaskInDestructor {
- public:
- explicit PushTaskInDestructor(scoped_refptr<Sequence> sequence)
- : sequence_(std::move(sequence)) {}
- PushTaskInDestructor(PushTaskInDestructor&&) = default;
- PushTaskInDestructor& operator=(PushTaskInDestructor&&) = default;
-
- ~PushTaskInDestructor() {
- // |sequence_| may be nullptr in a temporary instance of this class.
- if (sequence_) {
- EXPECT_FALSE(sequence_->PeekTask());
- sequence_->PushTask(WrapUnique(
- new Task(FROM_HERE, Closure(), TaskTraits(), TimeDelta())));
- }
- }
-
- private:
- scoped_refptr<Sequence> sequence_;
-
- DISALLOW_COPY_AND_ASSIGN(PushTaskInDestructor);
-};
-
-void DoNothing(const PushTaskInDestructor&) {}
class TaskSchedulerSequenceTest : public testing::Test {
public:
@@ -99,54 +76,54 @@ class TaskSchedulerSequenceTest : public testing::Test {
} // namespace
-TEST_F(TaskSchedulerSequenceTest, PushPopPeek) {
+TEST_F(TaskSchedulerSequenceTest, PushTakeRemove) {
scoped_refptr<Sequence> sequence(new Sequence);
// Push task A in the sequence. Its sequenced time should be updated and it
// should be in front of the sequence.
EXPECT_TRUE(sequence->PushTask(std::move(task_a_owned_)));
EXPECT_FALSE(task_a_->sequenced_time.is_null());
- EXPECT_EQ(task_a_, sequence->PeekTask());
+ EXPECT_EQ(task_a_->traits.priority(), sequence->PeekTaskTraits().priority());
// Push task B, C and D in the sequence. Their sequenced time should be
// updated and task A should always remain in front of the sequence.
EXPECT_FALSE(sequence->PushTask(std::move(task_b_owned_)));
EXPECT_FALSE(task_b_->sequenced_time.is_null());
- EXPECT_EQ(task_a_, sequence->PeekTask());
+ EXPECT_EQ(task_a_->traits.priority(), sequence->PeekTaskTraits().priority());
EXPECT_FALSE(sequence->PushTask(std::move(task_c_owned_)));
EXPECT_FALSE(task_c_->sequenced_time.is_null());
- EXPECT_EQ(task_a_, sequence->PeekTask());
+ EXPECT_EQ(task_a_->traits.priority(), sequence->PeekTaskTraits().priority());
EXPECT_FALSE(sequence->PushTask(std::move(task_d_owned_)));
EXPECT_FALSE(task_d_->sequenced_time.is_null());
- EXPECT_EQ(task_a_, sequence->PeekTask());
+ EXPECT_EQ(task_a_->traits.priority(), sequence->PeekTaskTraits().priority());
- // Pop task A. Task B should now be in front.
- EXPECT_FALSE(sequence->PopTask());
- EXPECT_EQ(task_b_, sequence->PeekTask());
+ // Get the task in front of the sequence. It should be task A.
+ EXPECT_EQ(task_a_, sequence->TakeTask().get());
- // Pop task B. Task C should now be in front.
- EXPECT_FALSE(sequence->PopTask());
- EXPECT_EQ(task_c_, sequence->PeekTask());
+ // Remove the empty slot. Task B should now be in front.
+ EXPECT_FALSE(sequence->Pop());
+ EXPECT_EQ(task_b_, sequence->TakeTask().get());
- // Pop task C. Task D should now be in front.
- EXPECT_FALSE(sequence->PopTask());
- EXPECT_EQ(task_d_, sequence->PeekTask());
+ // Remove the empty slot. Task C should now be in front.
+ EXPECT_FALSE(sequence->Pop());
+ EXPECT_EQ(task_c_, sequence->TakeTask().get());
- // Push task E in the sequence. Its sequenced time should be updated and
- // task D should remain in front.
+ // Remove the empty slot. Task D should now be in front.
+ EXPECT_FALSE(sequence->Pop());
+ EXPECT_EQ(task_d_, sequence->TakeTask().get());
+
+ // Push task E in the sequence. Its sequenced time should be updated.
EXPECT_FALSE(sequence->PushTask(std::move(task_e_owned_)));
EXPECT_FALSE(task_e_->sequenced_time.is_null());
- EXPECT_EQ(task_d_, sequence->PeekTask());
- // Pop task D. Task E should now be in front.
- EXPECT_FALSE(sequence->PopTask());
- EXPECT_EQ(task_e_, sequence->PeekTask());
+ // Remove the empty slot. Task E should now be in front.
+ EXPECT_FALSE(sequence->Pop());
+ EXPECT_EQ(task_e_, sequence->TakeTask().get());
- // Pop task E. The sequence should now be empty.
- EXPECT_TRUE(sequence->PopTask());
- EXPECT_EQ(nullptr, sequence->PeekTask());
+ // Remove the empty slot. The sequence should now be empty.
+ EXPECT_TRUE(sequence->Pop());
}
TEST_F(TaskSchedulerSequenceTest, GetSortKey) {
@@ -181,21 +158,24 @@ TEST_F(TaskSchedulerSequenceTest, GetSortKey) {
// Pop task A. The highest priority is still USER_BLOCKING. The task in front
// of the sequence is now task B.
- sequence->PopTask();
+ sequence->TakeTask();
+ sequence->Pop();
EXPECT_EQ(
SequenceSortKey(TaskPriority::USER_BLOCKING, task_b_->sequenced_time),
sequence->GetSortKey());
// Pop task B. The highest priority is still USER_BLOCKING. The task in front
// of the sequence is now task C.
- sequence->PopTask();
+ sequence->TakeTask();
+ sequence->Pop();
EXPECT_EQ(
SequenceSortKey(TaskPriority::USER_BLOCKING, task_c_->sequenced_time),
sequence->GetSortKey());
// Pop task C. The highest priority is still USER_BLOCKING. The task in front
// of the sequence is now task D.
- sequence->PopTask();
+ sequence->TakeTask();
+ sequence->Pop();
EXPECT_EQ(
SequenceSortKey(TaskPriority::USER_BLOCKING, task_d_->sequenced_time),
sequence->GetSortKey());
@@ -209,25 +189,37 @@ TEST_F(TaskSchedulerSequenceTest, GetSortKey) {
// Pop task D. The highest priority is now from task E (BACKGROUND). The
// task in front of the sequence is now task E.
- sequence->PopTask();
+ sequence->TakeTask();
+ sequence->Pop();
EXPECT_EQ(SequenceSortKey(TaskPriority::BACKGROUND, task_e_->sequenced_time),
sequence->GetSortKey());
}
-TEST_F(TaskSchedulerSequenceTest, CanPushTaskInTaskDestructor) {
+// Verify that a DCHECK fires if Pop() is called on a sequence whose front slot
+// isn't empty.
+TEST_F(TaskSchedulerSequenceTest, PopNonEmptyFrontSlot) {
+ scoped_refptr<Sequence> sequence(new Sequence);
+ sequence->PushTask(
+ MakeUnique<Task>(FROM_HERE, Bind(&DoNothing), TaskTraits(), TimeDelta()));
+
+ EXPECT_DCHECK_DEATH({ sequence->Pop(); });
+}
+
+// Verify that a DCHECK fires if TakeTask() is called on a sequence whose front
+// slot is empty.
+TEST_F(TaskSchedulerSequenceTest, TakeEmptyFrontSlot) {
+ scoped_refptr<Sequence> sequence(new Sequence);
+ sequence->PushTask(
+ MakeUnique<Task>(FROM_HERE, Bind(&DoNothing), TaskTraits(), TimeDelta()));
+
+ EXPECT_TRUE(sequence->TakeTask());
+ EXPECT_DCHECK_DEATH({ sequence->TakeTask(); });
+}
+
+// Verify that a DCHECK fires if TakeTask() is called on an empty sequence.
+TEST_F(TaskSchedulerSequenceTest, TakeEmptySequence) {
scoped_refptr<Sequence> sequence(new Sequence);
- sequence->PushTask(MakeUnique<Task>(
- FROM_HERE, Bind(&DoNothing, PushTaskInDestructor(sequence)), TaskTraits(),
- TimeDelta()));
-
- // PushTask() is invoked on |sequence| when the popped Task is destroyed. If
- // PopTask() destroys the Task outside the scope of its lock as expected, no
- // deadlock will occur when PushTask() tries to acquire the Sequence's lock.
- sequence->PopTask();
-
- // Verify that |sequence| contains exactly one Task.
- EXPECT_TRUE(sequence->PeekTask());
- EXPECT_TRUE(sequence->PopTask());
+ EXPECT_DCHECK_DEATH({ sequence->TakeTask(); });
}
} // namespace internal
diff --git a/chromium/base/task_scheduler/task.cc b/chromium/base/task_scheduler/task.cc
index 5a17b7f2093..7314099c43a 100644
--- a/chromium/base/task_scheduler/task.cc
+++ b/chromium/base/task_scheduler/task.cc
@@ -22,7 +22,8 @@ Task::Task(const tracked_objects::Location& posted_from,
TaskShutdownBehavior::BLOCK_SHUTDOWN
? TaskTraits(traits).WithShutdownBehavior(
TaskShutdownBehavior::SKIP_ON_SHUTDOWN)
- : traits) {}
+ : traits),
+ delay(delay) {}
Task::~Task() = default;
diff --git a/chromium/base/task_scheduler/task.h b/chromium/base/task_scheduler/task.h
index b2fa7661b56..c014671ce07 100644
--- a/chromium/base/task_scheduler/task.h
+++ b/chromium/base/task_scheduler/task.h
@@ -36,6 +36,9 @@ struct BASE_EXPORT Task : public PendingTask {
// The TaskTraits of this task.
const TaskTraits traits;
+ // The delay that must expire before the task runs.
+ const TimeDelta delay;
+
// The time at which the task was inserted in its sequence. For an undelayed
// task, this happens at post time. For a delayed task, this happens some
// time after the task's delay has expired. If the task hasn't been inserted
diff --git a/chromium/base/task_scheduler/task_scheduler.h b/chromium/base/task_scheduler/task_scheduler.h
index 91a608ec746..385edd77a8b 100644
--- a/chromium/base/task_scheduler/task_scheduler.h
+++ b/chromium/base/task_scheduler/task_scheduler.h
@@ -11,6 +11,8 @@
#include "base/base_export.h"
#include "base/callback_forward.h"
#include "base/memory/ref_counted.h"
+#include "base/sequenced_task_runner.h"
+#include "base/single_thread_task_runner.h"
#include "base/task_runner.h"
#include "base/task_scheduler/task_traits.h"
@@ -20,6 +22,7 @@ class Location;
namespace base {
+class HistogramBase;
class SchedulerWorkerPoolParams;
// Interface for a task scheduler and static methods to manage the instance used
@@ -42,11 +45,24 @@ class BASE_EXPORT TaskScheduler {
const TaskTraits& traits,
const Closure& task) = 0;
- // Returns a TaskRunner whose PostTask invocations will result in scheduling
- // Tasks with |traits| which will be executed according to |execution_mode|.
+ // Returns a TaskRunner whose PostTask invocations result in scheduling tasks
+ // using |traits|. Tasks may run in any order and in parallel.
virtual scoped_refptr<TaskRunner> CreateTaskRunnerWithTraits(
- const TaskTraits& traits,
- ExecutionMode execution_mode) = 0;
+ const TaskTraits& traits) = 0;
+
+ // Returns a SequencedTaskRunner whose PostTask invocations result in
+ // scheduling tasks using |traits|. Tasks run one at a time in posting order.
+ virtual scoped_refptr<SequencedTaskRunner>
+ CreateSequencedTaskRunnerWithTraits(const TaskTraits& traits) = 0;
+
+ // Returns a SingleThreadTaskRunner whose PostTask invocations result in
+ // scheduling tasks using |traits|. Tasks run on a single thread in posting
+ // order.
+ virtual scoped_refptr<SingleThreadTaskRunner>
+ CreateSingleThreadTaskRunnerWithTraits(const TaskTraits& traits) = 0;
+
+ // Returns a vector of all histograms available in this task scheduler.
+ virtual std::vector<const HistogramBase*> GetHistograms() const = 0;
// Synchronously shuts down the scheduler. Once this is called, only tasks
// posted with the BLOCK_SHUTDOWN behavior will be run. When this returns:
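
A sketch of how a caller chooses among the three factory methods based on the ordering guarantee it needs; the helper and its call sites are hypothetical:

    #include "base/memory/ref_counted.h"
    #include "base/task_scheduler/task_scheduler.h"
    #include "base/task_scheduler/task_traits.h"

    namespace base {

    void CreateExampleTaskRunners(TaskScheduler* scheduler) {
      // Tasks may run in any order, possibly in parallel.
      scoped_refptr<TaskRunner> parallel =
          scheduler->CreateTaskRunnerWithTraits(TaskTraits());

      // Tasks run one at a time in posting order, on no particular thread.
      scoped_refptr<SequencedTaskRunner> sequenced =
          scheduler->CreateSequencedTaskRunnerWithTraits(TaskTraits());

      // Tasks run in posting order on a single dedicated thread, e.g. when
      // thread-local state must persist between tasks.
      scoped_refptr<SingleThreadTaskRunner> single_threaded =
          scheduler->CreateSingleThreadTaskRunnerWithTraits(TaskTraits());
    }

    }  // namespace base
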
diff --git a/chromium/base/task_scheduler/task_scheduler_impl.cc b/chromium/base/task_scheduler/task_scheduler_impl.cc
index 70a71baf7ce..708685d68d6 100644
--- a/chromium/base/task_scheduler/task_scheduler_impl.cc
+++ b/chromium/base/task_scheduler/task_scheduler_impl.cc
@@ -9,11 +9,17 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/memory/ptr_util.h"
-#include "base/task_scheduler/scheduler_service_thread.h"
+#include "base/task_scheduler/delayed_task_manager.h"
#include "base/task_scheduler/scheduler_worker_pool_params.h"
#include "base/task_scheduler/sequence_sort_key.h"
#include "base/task_scheduler/task.h"
+#include "base/task_scheduler/task_tracker.h"
#include "base/time/time.h"
+#include "build/build_config.h"
+
+#if defined(OS_POSIX) && !defined(OS_NACL_SFI)
+#include "base/task_scheduler/task_tracker_posix.h"
+#endif
namespace base {
namespace internal {
@@ -46,19 +52,41 @@ void TaskSchedulerImpl::PostTaskWithTraits(
}
scoped_refptr<TaskRunner> TaskSchedulerImpl::CreateTaskRunnerWithTraits(
- const TaskTraits& traits,
- ExecutionMode execution_mode) {
- return GetWorkerPoolForTraits(traits)->CreateTaskRunnerWithTraits(
- traits, execution_mode);
+ const TaskTraits& traits) {
+ return GetWorkerPoolForTraits(traits)->CreateTaskRunnerWithTraits(traits);
+}
+
+scoped_refptr<SequencedTaskRunner>
+TaskSchedulerImpl::CreateSequencedTaskRunnerWithTraits(
+ const TaskTraits& traits) {
+ return GetWorkerPoolForTraits(traits)->CreateSequencedTaskRunnerWithTraits(
+ traits);
+}
+
+scoped_refptr<SingleThreadTaskRunner>
+TaskSchedulerImpl::CreateSingleThreadTaskRunnerWithTraits(
+ const TaskTraits& traits) {
+ return GetWorkerPoolForTraits(traits)->CreateSingleThreadTaskRunnerWithTraits(
+ traits);
+}
+
+std::vector<const HistogramBase*> TaskSchedulerImpl::GetHistograms() const {
+ std::vector<const HistogramBase*> histograms;
+ for (const auto& worker_pool : worker_pools_)
+ worker_pool->GetHistograms(&histograms);
+
+ return histograms;
}
void TaskSchedulerImpl::Shutdown() {
// TODO(fdoray): Increase the priority of BACKGROUND tasks blocking shutdown.
- task_tracker_.Shutdown();
+ DCHECK(task_tracker_);
+ task_tracker_->Shutdown();
}
void TaskSchedulerImpl::FlushForTesting() {
- task_tracker_.Flush();
+ DCHECK(task_tracker_);
+ task_tracker_->Flush();
}
void TaskSchedulerImpl::JoinForTesting() {
@@ -67,7 +95,7 @@ void TaskSchedulerImpl::JoinForTesting() {
#endif
for (const auto& worker_pool : worker_pools_)
worker_pool->JoinForTesting();
- service_thread_->JoinForTesting();
+ service_thread_.Stop();
#if DCHECK_IS_ON()
join_for_testing_returned_.Set();
#endif
@@ -75,11 +103,9 @@ void TaskSchedulerImpl::JoinForTesting() {
TaskSchedulerImpl::TaskSchedulerImpl(const WorkerPoolIndexForTraitsCallback&
worker_pool_index_for_traits_callback)
- : delayed_task_manager_(
- Bind(&TaskSchedulerImpl::OnDelayedRunTimeUpdated, Unretained(this))),
+ : service_thread_("TaskSchedulerServiceThread"),
worker_pool_index_for_traits_callback_(
- worker_pool_index_for_traits_callback)
-{
+ worker_pool_index_for_traits_callback) {
DCHECK(!worker_pool_index_for_traits_callback_.is_null());
}
@@ -87,23 +113,50 @@ void TaskSchedulerImpl::Initialize(
const std::vector<SchedulerWorkerPoolParams>& worker_pool_params_vector) {
DCHECK(!worker_pool_params_vector.empty());
+ // Start the service thread. On platforms that support it (POSIX except NaCL
+ // SFI), the service thread runs a MessageLoopForIO which is used to support
+ // FileDescriptorWatcher in the scope in which tasks run.
+ constexpr MessageLoop::Type kServiceThreadMessageLoopType =
+#if defined(OS_POSIX) && !defined(OS_NACL_SFI)
+ MessageLoop::TYPE_IO;
+#else
+ MessageLoop::TYPE_DEFAULT;
+#endif
+ constexpr size_t kDefaultStackSize = 0;
+ CHECK(service_thread_.StartWithOptions(
+ Thread::Options(kServiceThreadMessageLoopType, kDefaultStackSize)));
+
+ // Instantiate TaskTracker. Needs to happen after starting the service thread
+ // to get its message_loop().
+ task_tracker_ =
+#if defined(OS_POSIX) && !defined(OS_NACL_SFI)
+ base::MakeUnique<TaskTrackerPosix>(
+ static_cast<MessageLoopForIO*>(service_thread_.message_loop()));
+#else
+ base::MakeUnique<TaskTracker>();
+#endif
+
+ // Instantiate DelayedTaskManager. Needs to happen after starting the service
+ // thread to get its task_runner().
+ delayed_task_manager_ =
+ base::MakeUnique<DelayedTaskManager>(service_thread_.task_runner());
+
+ // Callback invoked by workers to re-enqueue a sequence in the appropriate
+ // PriorityQueue.
const SchedulerWorkerPoolImpl::ReEnqueueSequenceCallback
re_enqueue_sequence_callback =
Bind(&TaskSchedulerImpl::ReEnqueueSequenceCallback, Unretained(this));
+ // Start worker pools.
for (const auto& worker_pool_params : worker_pool_params_vector) {
// Passing pointers to objects owned by |this| to
// SchedulerWorkerPoolImpl::Create() is safe because a TaskSchedulerImpl
// can't be deleted before all its worker pools have been joined.
worker_pools_.push_back(SchedulerWorkerPoolImpl::Create(
- worker_pool_params, re_enqueue_sequence_callback, &task_tracker_,
- &delayed_task_manager_));
+ worker_pool_params, re_enqueue_sequence_callback, task_tracker_.get(),
+ delayed_task_manager_.get()));
CHECK(worker_pools_.back());
}
-
- service_thread_ = SchedulerServiceThread::Create(&task_tracker_,
- &delayed_task_manager_);
- CHECK(service_thread_);
}
SchedulerWorkerPool* TaskSchedulerImpl::GetWorkerPoolForTraits(
@@ -118,20 +171,16 @@ void TaskSchedulerImpl::ReEnqueueSequenceCallback(
DCHECK(sequence);
const SequenceSortKey sort_key = sequence->GetSortKey();
- TaskTraits traits(sequence->PeekTask()->traits);
- // Update the priority of |traits| so that the next task in |sequence| runs
- // with the highest priority in |sequence| as opposed to the next task's
- // specific priority.
- traits.WithPriority(sort_key.priority());
+ // The next task in |sequence| should run in a worker pool suited to its
+ // traits, except for the priority, which is adjusted to the highest priority
+ // in |sequence|.
+ const TaskTraits traits =
+ sequence->PeekTaskTraits().WithPriority(sort_key.priority());
GetWorkerPoolForTraits(traits)->ReEnqueueSequence(std::move(sequence),
sort_key);
}
-void TaskSchedulerImpl::OnDelayedRunTimeUpdated() {
- service_thread_->WakeUp();
-}
-
} // namespace internal
} // namespace base
diff --git a/chromium/base/task_scheduler/task_scheduler_impl.h b/chromium/base/task_scheduler/task_scheduler_impl.h
index f49cccc80e4..1483c5d646c 100644
--- a/chromium/base/task_scheduler/task_scheduler_impl.h
+++ b/chromium/base/task_scheduler/task_scheduler_impl.h
@@ -8,7 +8,6 @@
#include <stddef.h>
#include <memory>
-#include <string>
#include <vector>
#include "base/base_export.h"
@@ -17,22 +16,21 @@
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/synchronization/atomic_flag.h"
-#include "base/task_runner.h"
-#include "base/task_scheduler/delayed_task_manager.h"
#include "base/task_scheduler/scheduler_worker_pool_impl.h"
#include "base/task_scheduler/sequence.h"
#include "base/task_scheduler/task_scheduler.h"
-#include "base/task_scheduler/task_tracker.h"
#include "base/task_scheduler/task_traits.h"
#include "base/threading/thread.h"
namespace base {
+class HistogramBase;
class SchedulerWorkerPoolParams;
namespace internal {
-class SchedulerServiceThread;
+class DelayedTaskManager;
+class TaskTracker;
// Default TaskScheduler implementation. This class is thread-safe.
class BASE_EXPORT TaskSchedulerImpl : public TaskScheduler {
@@ -56,8 +54,12 @@ class BASE_EXPORT TaskSchedulerImpl : public TaskScheduler {
const TaskTraits& traits,
const Closure& task) override;
scoped_refptr<TaskRunner> CreateTaskRunnerWithTraits(
- const TaskTraits& traits,
- ExecutionMode execution_mode) override;
+ const TaskTraits& traits) override;
+ scoped_refptr<SequencedTaskRunner> CreateSequencedTaskRunnerWithTraits(
+ const TaskTraits& traits) override;
+ scoped_refptr<SingleThreadTaskRunner> CreateSingleThreadTaskRunnerWithTraits(
+ const TaskTraits& traits) override;
+ std::vector<const HistogramBase*> GetHistograms() const override;
void Shutdown() override;
void FlushForTesting() override;
@@ -79,15 +81,11 @@ class BASE_EXPORT TaskSchedulerImpl : public TaskScheduler {
// worker pops a Task from it.
void ReEnqueueSequenceCallback(scoped_refptr<Sequence> sequence);
- // Callback invoked when the delayed run time is changed from the
- // DelayedTaskManager.
- void OnDelayedRunTimeUpdated();
-
- TaskTracker task_tracker_;
- DelayedTaskManager delayed_task_manager_;
+ Thread service_thread_;
+ std::unique_ptr<TaskTracker> task_tracker_;
+ std::unique_ptr<DelayedTaskManager> delayed_task_manager_;
const WorkerPoolIndexForTraitsCallback worker_pool_index_for_traits_callback_;
std::vector<std::unique_ptr<SchedulerWorkerPoolImpl>> worker_pools_;
- std::unique_ptr<SchedulerServiceThread> service_thread_;
#if DCHECK_IS_ON()
// Set once JoinForTesting() has returned.
diff --git a/chromium/base/task_scheduler/task_scheduler_impl_unittest.cc b/chromium/base/task_scheduler/task_scheduler_impl_unittest.cc
index 21a18b0dba8..43f8d745bd2 100644
--- a/chromium/base/task_scheduler/task_scheduler_impl_unittest.cc
+++ b/chromium/base/task_scheduler/task_scheduler_impl_unittest.cc
@@ -34,11 +34,11 @@ namespace {
struct TraitsExecutionModePair {
TraitsExecutionModePair(const TaskTraits& traits,
- ExecutionMode execution_mode)
+ test::ExecutionMode execution_mode)
: traits(traits), execution_mode(execution_mode) {}
TaskTraits traits;
- ExecutionMode execution_mode;
+ test::ExecutionMode execution_mode;
};
#if DCHECK_IS_ON()
@@ -88,16 +88,34 @@ void VerifyTaskEnvironementAndSignalEvent(const TaskTraits& traits,
event->Signal();
}
+scoped_refptr<TaskRunner> CreateTaskRunnerWithTraitsAndExecutionMode(
+ TaskScheduler* scheduler,
+ const TaskTraits& traits,
+ test::ExecutionMode execution_mode) {
+ switch (execution_mode) {
+ case test::ExecutionMode::PARALLEL:
+ return scheduler->CreateTaskRunnerWithTraits(traits);
+ case test::ExecutionMode::SEQUENCED:
+ return scheduler->CreateSequencedTaskRunnerWithTraits(traits);
+ case test::ExecutionMode::SINGLE_THREADED:
+ return scheduler->CreateSingleThreadTaskRunnerWithTraits(traits);
+ }
+ ADD_FAILURE() << "Unknown ExecutionMode";
+ return nullptr;
+}
+
class ThreadPostingTasks : public SimpleThread {
public:
// Creates a thread that posts Tasks to |scheduler| with |traits| and
// |execution_mode|.
ThreadPostingTasks(TaskSchedulerImpl* scheduler,
const TaskTraits& traits,
- ExecutionMode execution_mode)
+ test::ExecutionMode execution_mode)
: SimpleThread("ThreadPostingTasks"),
traits_(traits),
- factory_(scheduler->CreateTaskRunnerWithTraits(traits, execution_mode),
+ factory_(CreateTaskRunnerWithTraitsAndExecutionMode(scheduler,
+ traits,
+ execution_mode),
execution_mode) {}
void WaitForAllTasksToRun() { factory_.WaitForAllTasksToRun(); }
@@ -124,11 +142,11 @@ class ThreadPostingTasks : public SimpleThread {
std::vector<TraitsExecutionModePair> GetTraitsExecutionModePairs() {
std::vector<TraitsExecutionModePair> params;
- const ExecutionMode execution_modes[] = {ExecutionMode::PARALLEL,
- ExecutionMode::SEQUENCED,
- ExecutionMode::SINGLE_THREADED};
+ const test::ExecutionMode execution_modes[] = {
+ test::ExecutionMode::PARALLEL, test::ExecutionMode::SEQUENCED,
+ test::ExecutionMode::SINGLE_THREADED};
- for (ExecutionMode execution_mode : execution_modes) {
+ for (test::ExecutionMode execution_mode : execution_modes) {
for (size_t priority_index = static_cast<size_t>(TaskPriority::LOWEST);
priority_index <= static_cast<size_t>(TaskPriority::HIGHEST);
++priority_index) {
@@ -167,24 +185,29 @@ class TaskSchedulerImplTest
void SetUp() override {
using IORestriction = SchedulerWorkerPoolParams::IORestriction;
+ using StandbyThreadPolicy = SchedulerWorkerPoolParams::StandbyThreadPolicy;
std::vector<SchedulerWorkerPoolParams> params_vector;
ASSERT_EQ(BACKGROUND_WORKER_POOL, params_vector.size());
params_vector.emplace_back("Background", ThreadPriority::BACKGROUND,
- IORestriction::DISALLOWED, 1U, TimeDelta::Max());
+ IORestriction::DISALLOWED,
+ StandbyThreadPolicy::LAZY, 1U, TimeDelta::Max());
ASSERT_EQ(BACKGROUND_FILE_IO_WORKER_POOL, params_vector.size());
params_vector.emplace_back("BackgroundFileIO", ThreadPriority::BACKGROUND,
- IORestriction::ALLOWED, 3U, TimeDelta::Max());
+ IORestriction::ALLOWED,
+ StandbyThreadPolicy::LAZY, 3U, TimeDelta::Max());
ASSERT_EQ(FOREGROUND_WORKER_POOL, params_vector.size());
params_vector.emplace_back("Foreground", ThreadPriority::NORMAL,
- IORestriction::DISALLOWED, 4U, TimeDelta::Max());
+ IORestriction::DISALLOWED,
+ StandbyThreadPolicy::LAZY, 4U, TimeDelta::Max());
ASSERT_EQ(FOREGROUND_FILE_IO_WORKER_POOL, params_vector.size());
- params_vector.emplace_back("ForegroundFileIO", ThreadPriority::NORMAL,
- IORestriction::ALLOWED, 12U, TimeDelta::Max());
+ params_vector.emplace_back(
+ "ForegroundFileIO", ThreadPriority::NORMAL, IORestriction::ALLOWED,
+ StandbyThreadPolicy::LAZY, 12U, TimeDelta::Max());
scheduler_ = TaskSchedulerImpl::Create(params_vector,
Bind(&GetThreadPoolIndexForTraits));
@@ -219,8 +242,8 @@ TEST_P(TaskSchedulerImplTest, PostTaskWithTraits) {
// and respect the characteristics of their ExecutionMode.
TEST_P(TaskSchedulerImplTest, PostTasksViaTaskRunner) {
test::TestTaskFactory factory(
- scheduler_->CreateTaskRunnerWithTraits(GetParam().traits,
- GetParam().execution_mode),
+ CreateTaskRunnerWithTraitsAndExecutionMode(
+ scheduler_.get(), GetParam().traits, GetParam().execution_mode),
GetParam().execution_mode);
EXPECT_FALSE(factory.task_runner()->RunsTasksOnCurrentThread());
diff --git a/chromium/base/task_scheduler/task_tracker.cc b/chromium/base/task_scheduler/task_tracker.cc
index eeb8e39a415..d06a84dc1f4 100644
--- a/chromium/base/task_scheduler/task_tracker.cc
+++ b/chromium/base/task_scheduler/task_tracker.cc
@@ -25,11 +25,15 @@ namespace internal {
namespace {
+constexpr char kParallelExecutionMode[] = "parallel";
+constexpr char kSequencedExecutionMode[] = "sequenced";
+constexpr char kSingleThreadExecutionMode[] = "single thread";
+
// An immutable copy of a scheduler task's info required by tracing.
class TaskTracingInfo : public trace_event::ConvertableToTraceFormat {
public:
TaskTracingInfo(const TaskTraits& task_traits,
- ExecutionMode execution_mode,
+ const char* execution_mode,
const SequenceToken& sequence_token)
: task_traits_(task_traits),
execution_mode_(execution_mode),
@@ -40,7 +44,7 @@ class TaskTracingInfo : public trace_event::ConvertableToTraceFormat {
private:
const TaskTraits task_traits_;
- const ExecutionMode execution_mode_;
+ const char* const execution_mode_;
const SequenceToken sequence_token_;
DISALLOW_COPY_AND_ASSIGN(TaskTracingInfo);
@@ -51,9 +55,8 @@ void TaskTracingInfo::AppendAsTraceFormat(std::string* out) const {
dict.SetString("task_priority",
base::TaskPriorityToString(task_traits_.priority()));
- dict.SetString("execution_mode",
- base::ExecutionModeToString(execution_mode_));
- if (execution_mode_ != ExecutionMode::PARALLEL)
+ dict.SetString("execution_mode", execution_mode_);
+ if (execution_mode_ != kParallelExecutionMode)
dict.SetInteger("sequence_token", sequence_token_.ToInternalValue());
std::string tmp;
@@ -205,7 +208,7 @@ bool TaskTracker::WillPostTask(const Task* task) {
return true;
}
-bool TaskTracker::RunTask(const Task* task,
+bool TaskTracker::RunTask(std::unique_ptr<Task> task,
const SequenceToken& sequence_token) {
DCHECK(task);
DCHECK(sequence_token.IsValid());
@@ -213,6 +216,7 @@ bool TaskTracker::RunTask(const Task* task,
const TaskShutdownBehavior shutdown_behavior =
task->traits.shutdown_behavior();
const bool can_run_task = BeforeRunTask(shutdown_behavior);
+ const bool is_delayed = !task->delayed_run_time.is_null();
if (can_run_task) {
// All tasks run through here and the scheduler itself doesn't use
@@ -242,11 +246,11 @@ bool TaskTracker::RunTask(const Task* task,
TRACE_TASK_EXECUTION(kRunFunctionName, *task);
- const ExecutionMode execution_mode =
+ const char* const execution_mode =
task->single_thread_task_runner_ref
- ? ExecutionMode::SINGLE_THREADED
- : (task->sequenced_task_runner_ref ? ExecutionMode::SEQUENCED
- : ExecutionMode::PARALLEL);
+ ? kSingleThreadExecutionMode
+ : (task->sequenced_task_runner_ref ? kSequencedExecutionMode
+ : kParallelExecutionMode);
// TODO(gab): In a better world this would be tacked on as an extra arg
// to the trace event generated above. This is not possible however until
// http://crbug.com/652692 is resolved.
@@ -254,14 +258,13 @@ bool TaskTracker::RunTask(const Task* task,
MakeUnique<TaskTracingInfo>(task->traits, execution_mode,
sequence_token));
- debug::TaskAnnotator task_annotator;
- task_annotator.RunTask(kQueueFunctionName, *task);
+ PerformRunTask(std::move(task));
}
AfterRunTask(shutdown_behavior);
}
- if (task->delayed_run_time.is_null())
+ if (!is_delayed)
DecrementNumPendingUndelayedTasks();
return can_run_task;
@@ -280,6 +283,10 @@ void TaskTracker::SetHasShutdownStartedForTesting() {
state_->StartShutdown();
}
+void TaskTracker::PerformRunTask(std::unique_ptr<Task> task) {
+ debug::TaskAnnotator().RunTask(kQueueFunctionName, task.get());
+}
+
void TaskTracker::PerformShutdown() {
{
AutoSchedulerLock auto_lock(shutdown_lock_);
diff --git a/chromium/base/task_scheduler/task_tracker.h b/chromium/base/task_scheduler/task_tracker.h
index 93769f1751b..a5caf213985 100644
--- a/chromium/base/task_scheduler/task_tracker.h
+++ b/chromium/base/task_scheduler/task_tracker.h
@@ -57,7 +57,7 @@ class BASE_EXPORT TaskTracker {
// |sequence_token| is the token identifying the sequence from which |task|
// was extracted. Returns true if |task| ran. WillPostTask() must have allowed
// |task| to be posted before this is called.
- bool RunTask(const Task* task, const SequenceToken& sequence_token);
+ bool RunTask(std::unique_ptr<Task> task, const SequenceToken& sequence_token);
// Returns true once shutdown has started (Shutdown() has been called but
// might not have returned). Note: sequential consistency with the thread
@@ -73,6 +73,11 @@ class BASE_EXPORT TaskTracker {
// cannot be called after this.
void SetHasShutdownStartedForTesting();
+ protected:
+ // Runs |task|. An override is expected to call its parent's implementation
+ // but is free to perform extra work before and after doing so.
+ virtual void PerformRunTask(std::unique_ptr<Task> task);
+
private:
class State;
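
The protected PerformRunTask() hook makes TaskTracker a template method: a subclass installs per-task scope state, then delegates to the parent to actually run the task. TaskTrackerPosix below is the in-tree override; a hypothetical instrumenting variant would take the same shape:

    #include <memory>
    #include <utility>

    #include "base/task_scheduler/task.h"
    #include "base/task_scheduler/task_tracker.h"
    #include "base/time/time.h"

    namespace base {
    namespace internal {

    // Hypothetical subclass that measures how long each task body runs.
    // Not synchronized; for illustration only.
    class TimingTaskTracker : public TaskTracker {
     protected:
      void PerformRunTask(std::unique_ptr<Task> task) override {
        const TimeTicks start = TimeTicks::Now();
        // Delegate to the parent, which runs |task| via debug::TaskAnnotator.
        TaskTracker::PerformRunTask(std::move(task));
        last_task_run_time_ = TimeTicks::Now() - start;
      }

     private:
      TimeDelta last_task_run_time_;
    };

    }  // namespace internal
    }  // namespace base
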
diff --git a/chromium/base/task_scheduler/task_tracker_posix.cc b/chromium/base/task_scheduler/task_tracker_posix.cc
new file mode 100644
index 00000000000..5c54f371949
--- /dev/null
+++ b/chromium/base/task_scheduler/task_tracker_posix.cc
@@ -0,0 +1,30 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/task_tracker_posix.h"
+
+#include <utility>
+
+#include "base/files/file_descriptor_watcher_posix.h"
+#include "base/logging.h"
+
+namespace base {
+namespace internal {
+
+TaskTrackerPosix::TaskTrackerPosix(
+ MessageLoopForIO* watch_file_descriptor_message_loop)
+ : watch_file_descriptor_message_loop_(watch_file_descriptor_message_loop) {
+ DCHECK(watch_file_descriptor_message_loop_);
+}
+
+TaskTrackerPosix::~TaskTrackerPosix() = default;
+
+void TaskTrackerPosix::PerformRunTask(std::unique_ptr<Task> task) {
+ FileDescriptorWatcher file_descriptor_watcher(
+ watch_file_descriptor_message_loop_);
+ TaskTracker::PerformRunTask(std::move(task));
+}
+
+} // namespace internal
+} // namespace base
diff --git a/chromium/base/task_scheduler/task_tracker_posix.h b/chromium/base/task_scheduler/task_tracker_posix.h
new file mode 100644
index 00000000000..f957e4c0daf
--- /dev/null
+++ b/chromium/base/task_scheduler/task_tracker_posix.h
@@ -0,0 +1,43 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_TASK_TRACKER_POSIX_H_
+#define BASE_TASK_SCHEDULER_TASK_TRACKER_POSIX_H_
+
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/task_scheduler/task_tracker.h"
+
+namespace base {
+
+class MessageLoopForIO;
+
+namespace internal {
+
+struct Task;
+
+// A TaskTracker that instantiates a FileDescriptorWatcher in the scope in which
+// a task runs. Used on all POSIX platforms except NaCl SFI.
+class BASE_EXPORT TaskTrackerPosix : public TaskTracker {
+ public:
+ // |watch_file_descriptor_message_loop| is used to set up a
+ // FileDescriptorWatcher in the scope in which a Task runs.
+ TaskTrackerPosix(MessageLoopForIO* watch_file_descriptor_message_loop);
+ ~TaskTrackerPosix();
+
+ private:
+ // TaskTracker:
+ void PerformRunTask(std::unique_ptr<Task> task) override;
+
+ MessageLoopForIO* const watch_file_descriptor_message_loop_;
+
+ DISALLOW_COPY_AND_ASSIGN(TaskTrackerPosix);
+};
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_TASK_SCHEDULER_TASK_TRACKER_POSIX_H_
diff --git a/chromium/base/task_scheduler/task_tracker_posix_unittest.cc b/chromium/base/task_scheduler/task_tracker_posix_unittest.cc
new file mode 100644
index 00000000000..b654a6d5438
--- /dev/null
+++ b/chromium/base/task_scheduler/task_tracker_posix_unittest.cc
@@ -0,0 +1,65 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/task_tracker_posix.h"
+
+#include <unistd.h>
+
+#include <utility>
+
+#include "base/bind.h"
+#include "base/files/file_descriptor_watcher_posix.h"
+#include "base/memory/ptr_util.h"
+#include "base/message_loop/message_loop.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/run_loop.h"
+#include "base/sequence_token.h"
+#include "base/task_scheduler/task.h"
+#include "base/task_scheduler/task_traits.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace internal {
+
+// Verify that TaskTrackerPosix runs a Task it receives.
+TEST(TaskSchedulerTaskTrackerPosixTest, RunTask) {
+ MessageLoopForIO message_loop;
+ bool did_run = false;
+ auto task = MakeUnique<Task>(
+ FROM_HERE,
+ Bind([](bool* did_run) { *did_run = true; }, Unretained(&did_run)),
+ TaskTraits(), TimeDelta());
+ TaskTrackerPosix tracker(&message_loop);
+
+ EXPECT_TRUE(tracker.WillPostTask(task.get()));
+ EXPECT_TRUE(tracker.RunTask(std::move(task), SequenceToken::Create()));
+ EXPECT_TRUE(did_run);
+}
+
+// Verify that FileDescriptorWatcher::WatchReadable() can be called from a task
+// running in TaskTrackerPosix without a crash.
+TEST(TaskSchedulerTaskTrackerPosixTest, FileDescriptorWatcher) {
+ MessageLoopForIO message_loop;
+ int fds[2];
+ ASSERT_EQ(0, pipe(fds));
+ auto task = MakeUnique<Task>(
+ FROM_HERE, Bind(IgnoreResult(&FileDescriptorWatcher::WatchReadable),
+ fds[0], Bind(&DoNothing)),
+ TaskTraits(), TimeDelta());
+ TaskTrackerPosix tracker(&message_loop);
+
+ EXPECT_TRUE(tracker.WillPostTask(task.get()));
+ EXPECT_TRUE(tracker.RunTask(std::move(task), SequenceToken::Create()));
+
+ // Run the MessageLoop to allow the read watch to be registered and
+ // unregistered. This prevents a memory leak.
+ RunLoop().RunUntilIdle();
+
+ EXPECT_EQ(0, IGNORE_EINTR(close(fds[0])));
+ EXPECT_EQ(0, IGNORE_EINTR(close(fds[1])));
+}
+
+} // namespace internal
+} // namespace base
diff --git a/chromium/base/task_scheduler/task_tracker_unittest.cc b/chromium/base/task_scheduler/task_tracker_unittest.cc
index 57dca1f3909..3a1ff789442 100644
--- a/chromium/base/task_scheduler/task_tracker_unittest.cc
+++ b/chromium/base/task_scheduler/task_tracker_unittest.cc
@@ -7,6 +7,7 @@
#include <stdint.h>
#include <memory>
+#include <utility>
#include <vector>
#include "base/bind.h"
@@ -77,7 +78,26 @@ class ThreadPostingAndRunningTask : public SimpleThread {
tracker_(tracker),
task_(task),
action_(action),
- expect_post_succeeds_(expect_post_succeeds) {}
+ expect_post_succeeds_(expect_post_succeeds) {
+ EXPECT_TRUE(task_);
+
+ // Ownership of the Task is required to run it.
+ EXPECT_NE(Action::RUN, action_);
+ EXPECT_NE(Action::WILL_POST_AND_RUN, action_);
+ }
+
+ ThreadPostingAndRunningTask(TaskTracker* tracker,
+ std::unique_ptr<Task> task,
+ Action action,
+ bool expect_post_succeeds)
+ : SimpleThread("ThreadPostingAndRunningTask"),
+ tracker_(tracker),
+ task_(task.get()),
+ owned_task_(std::move(task)),
+ action_(action),
+ expect_post_succeeds_(expect_post_succeeds) {
+ EXPECT_TRUE(task_);
+ }
private:
void Run() override {
@@ -88,12 +108,14 @@ class ThreadPostingAndRunningTask : public SimpleThread {
}
if (post_succeeded &&
(action_ == Action::RUN || action_ == Action::WILL_POST_AND_RUN)) {
- tracker_->RunTask(task_, SequenceToken::Create());
+ EXPECT_TRUE(owned_task_);
+ tracker_->RunTask(std::move(owned_task_), SequenceToken::Create());
}
}
TaskTracker* const tracker_;
Task* const task_;
+ std::unique_ptr<Task> owned_task_;
const Action action_;
const bool expect_post_succeeds_;
@@ -229,7 +251,7 @@ TEST_P(TaskSchedulerTaskTrackerTest, WillPostAndRunBeforeShutdown) {
// Run the task.
EXPECT_EQ(0U, NumTasksExecuted());
- EXPECT_TRUE(tracker_.RunTask(task.get(), SequenceToken::Create()));
+ EXPECT_TRUE(tracker_.RunTask(std::move(task), SequenceToken::Create()));
EXPECT_EQ(1U, NumTasksExecuted());
// Shutdown() shouldn't block.
@@ -240,16 +262,17 @@ TEST_P(TaskSchedulerTaskTrackerTest, WillPostAndRunLongTaskBeforeShutdown) {
// Create a task that will block until |event| is signaled.
WaitableEvent event(WaitableEvent::ResetPolicy::AUTOMATIC,
WaitableEvent::InitialState::NOT_SIGNALED);
- Task blocked_task(FROM_HERE, Bind(&WaitableEvent::Wait, Unretained(&event)),
- TaskTraits().WithShutdownBehavior(GetParam()), TimeDelta());
+ auto blocked_task = base::MakeUnique<Task>(
+ FROM_HERE, Bind(&WaitableEvent::Wait, Unretained(&event)),
+ TaskTraits().WithShutdownBehavior(GetParam()), TimeDelta());
// Inform |task_tracker_| that |blocked_task| will be posted.
- EXPECT_TRUE(tracker_.WillPostTask(&blocked_task));
+ EXPECT_TRUE(tracker_.WillPostTask(blocked_task.get()));
// Run the task asynchronously.
ThreadPostingAndRunningTask thread_running_task(
- &tracker_, &blocked_task, ThreadPostingAndRunningTask::Action::RUN,
- false);
+ &tracker_, std::move(blocked_task),
+ ThreadPostingAndRunningTask::Action::RUN, false);
thread_running_task.Start();
// Initiate shutdown while the task is running.
@@ -291,13 +314,14 @@ TEST_P(TaskSchedulerTaskTrackerTest, WillPostBeforeShutdownRunDuringShutdown) {
// should be discarded.
EXPECT_EQ(0U, NumTasksExecuted());
const bool should_run = GetParam() == TaskShutdownBehavior::BLOCK_SHUTDOWN;
- EXPECT_EQ(should_run, tracker_.RunTask(task.get(), SequenceToken::Create()));
+ EXPECT_EQ(should_run,
+ tracker_.RunTask(std::move(task), SequenceToken::Create()));
EXPECT_EQ(should_run ? 1U : 0U, NumTasksExecuted());
VERIFY_ASYNC_SHUTDOWN_IN_PROGRESS();
// Unblock shutdown by running the remaining BLOCK_SHUTDOWN task.
- EXPECT_TRUE(
- tracker_.RunTask(block_shutdown_task.get(), SequenceToken::Create()));
+ EXPECT_TRUE(tracker_.RunTask(std::move(block_shutdown_task),
+ SequenceToken::Create()));
EXPECT_EQ(should_run ? 2U : 1U, NumTasksExecuted());
WAIT_FOR_ASYNC_SHUTDOWN_COMPLETED();
}
@@ -315,7 +339,7 @@ TEST_P(TaskSchedulerTaskTrackerTest, WillPostBeforeShutdownRunAfterShutdown) {
VERIFY_ASYNC_SHUTDOWN_IN_PROGRESS();
// Run the task to unblock shutdown.
- EXPECT_TRUE(tracker_.RunTask(task.get(), SequenceToken::Create()));
+ EXPECT_TRUE(tracker_.RunTask(std::move(task), SequenceToken::Create()));
EXPECT_EQ(1U, NumTasksExecuted());
WAIT_FOR_ASYNC_SHUTDOWN_COMPLETED();
@@ -326,7 +350,7 @@ TEST_P(TaskSchedulerTaskTrackerTest, WillPostBeforeShutdownRunAfterShutdown) {
WAIT_FOR_ASYNC_SHUTDOWN_COMPLETED();
// The task shouldn't be allowed to run after shutdown.
- EXPECT_FALSE(tracker_.RunTask(task.get(), SequenceToken::Create()));
+ EXPECT_FALSE(tracker_.RunTask(std::move(task), SequenceToken::Create()));
EXPECT_EQ(0U, NumTasksExecuted());
}
}
@@ -349,7 +373,7 @@ TEST_P(TaskSchedulerTaskTrackerTest, WillPostAndRunDuringShutdown) {
// Run the BLOCK_SHUTDOWN task.
EXPECT_EQ(0U, NumTasksExecuted());
- EXPECT_TRUE(tracker_.RunTask(task.get(), SequenceToken::Create()));
+ EXPECT_TRUE(tracker_.RunTask(std::move(task), SequenceToken::Create()));
EXPECT_EQ(1U, NumTasksExecuted());
} else {
// It shouldn't be allowed to post a non-BLOCK_SHUTDOWN task.
@@ -361,8 +385,8 @@ TEST_P(TaskSchedulerTaskTrackerTest, WillPostAndRunDuringShutdown) {
// Unblock shutdown by running |block_shutdown_task|.
VERIFY_ASYNC_SHUTDOWN_IN_PROGRESS();
- EXPECT_TRUE(
- tracker_.RunTask(block_shutdown_task.get(), SequenceToken::Create()));
+ EXPECT_TRUE(tracker_.RunTask(std::move(block_shutdown_task),
+ SequenceToken::Create()));
EXPECT_EQ(GetParam() == TaskShutdownBehavior::BLOCK_SHUTDOWN ? 2U : 1U,
NumTasksExecuted());
WAIT_FOR_ASYNC_SHUTDOWN_COMPLETED();
@@ -400,10 +424,10 @@ TEST_P(TaskSchedulerTaskTrackerTest, SingletonAllowed) {
// Running the task should fail iff the task isn't allowed to use singletons.
if (can_use_singletons) {
- EXPECT_TRUE(tracker.RunTask(task.get(), SequenceToken::Create()));
+ EXPECT_TRUE(tracker.RunTask(std::move(task), SequenceToken::Create()));
} else {
EXPECT_DCHECK_DEATH(
- { tracker.RunTask(task.get(), SequenceToken::Create()); });
+ { tracker.RunTask(std::move(task), SequenceToken::Create()); });
}
}
@@ -418,7 +442,8 @@ static void RunTaskRunnerHandleVerificationTask(
EXPECT_FALSE(ThreadTaskRunnerHandle::IsSet());
EXPECT_FALSE(SequencedTaskRunnerHandle::IsSet());
- EXPECT_TRUE(tracker->RunTask(verify_task.get(), SequenceToken::Create()));
+ EXPECT_TRUE(
+ tracker->RunTask(std::move(verify_task), SequenceToken::Create()));
// TaskRunnerHandle state is reset outside of task's scope.
EXPECT_FALSE(ThreadTaskRunnerHandle::IsSet());
@@ -498,10 +523,10 @@ TEST_P(TaskSchedulerTaskTrackerTest, FlushPendingDelayedTask) {
}
TEST_P(TaskSchedulerTaskTrackerTest, FlushPendingUndelayedTask) {
- const Task undelayed_task(FROM_HERE, Bind(&DoNothing),
- TaskTraits().WithShutdownBehavior(GetParam()),
- TimeDelta());
- tracker_.WillPostTask(&undelayed_task);
+ auto undelayed_task = base::MakeUnique<Task>(
+ FROM_HERE, Bind(&DoNothing),
+ TaskTraits().WithShutdownBehavior(GetParam()), TimeDelta());
+ tracker_.WillPostTask(undelayed_task.get());
// Flush() shouldn't return before the undelayed task runs.
CallFlushAsync();
@@ -509,15 +534,15 @@ TEST_P(TaskSchedulerTaskTrackerTest, FlushPendingUndelayedTask) {
VERIFY_ASYNC_FLUSH_IN_PROGRESS();
// Flush() should return after the undelayed task runs.
- tracker_.RunTask(&undelayed_task, SequenceToken::Create());
+ tracker_.RunTask(std::move(undelayed_task), SequenceToken::Create());
WAIT_FOR_ASYNC_FLUSH_RETURNED();
}
TEST_P(TaskSchedulerTaskTrackerTest, PostTaskDuringFlush) {
- const Task undelayed_task(FROM_HERE, Bind(&DoNothing),
- TaskTraits().WithShutdownBehavior(GetParam()),
- TimeDelta());
- tracker_.WillPostTask(&undelayed_task);
+ auto undelayed_task = base::MakeUnique<Task>(
+ FROM_HERE, Bind(&DoNothing),
+ TaskTraits().WithShutdownBehavior(GetParam()), TimeDelta());
+ tracker_.WillPostTask(undelayed_task.get());
// Flush() shouldn't return before the undelayed task runs.
CallFlushAsync();
@@ -525,33 +550,33 @@ TEST_P(TaskSchedulerTaskTrackerTest, PostTaskDuringFlush) {
VERIFY_ASYNC_FLUSH_IN_PROGRESS();
// Simulate posting another undelayed task.
- const Task other_undelayed_task(FROM_HERE, Bind(&DoNothing),
- TaskTraits().WithShutdownBehavior(GetParam()),
- TimeDelta());
- tracker_.WillPostTask(&other_undelayed_task);
+ auto other_undelayed_task = base::MakeUnique<Task>(
+ FROM_HERE, Bind(&DoNothing),
+ TaskTraits().WithShutdownBehavior(GetParam()), TimeDelta());
+ tracker_.WillPostTask(other_undelayed_task.get());
// Run the first undelayed task.
- tracker_.RunTask(&undelayed_task, SequenceToken::Create());
+ tracker_.RunTask(std::move(undelayed_task), SequenceToken::Create());
// Flush() shouldn't return before the second undelayed task runs.
PlatformThread::Sleep(TestTimeouts::tiny_timeout());
VERIFY_ASYNC_FLUSH_IN_PROGRESS();
// Flush() should return after the second undelayed task runs.
- tracker_.RunTask(&other_undelayed_task, SequenceToken::Create());
+ tracker_.RunTask(std::move(other_undelayed_task), SequenceToken::Create());
WAIT_FOR_ASYNC_FLUSH_RETURNED();
}
TEST_P(TaskSchedulerTaskTrackerTest, RunDelayedTaskDuringFlush) {
// Simulate posting a delayed and an undelayed task.
- const Task delayed_task(FROM_HERE, Bind(&DoNothing),
- TaskTraits().WithShutdownBehavior(GetParam()),
- TimeDelta::FromDays(1));
- tracker_.WillPostTask(&delayed_task);
- const Task undelayed_task(FROM_HERE, Bind(&DoNothing),
- TaskTraits().WithShutdownBehavior(GetParam()),
- TimeDelta());
- tracker_.WillPostTask(&undelayed_task);
+ auto delayed_task = base::MakeUnique<Task>(
+ FROM_HERE, Bind(&DoNothing),
+ TaskTraits().WithShutdownBehavior(GetParam()), TimeDelta::FromDays(1));
+ tracker_.WillPostTask(delayed_task.get());
+ auto undelayed_task = base::MakeUnique<Task>(
+ FROM_HERE, Bind(&DoNothing),
+ TaskTraits().WithShutdownBehavior(GetParam()), TimeDelta());
+ tracker_.WillPostTask(undelayed_task.get());
// Flush() shouldn't return before the undelayed task runs.
CallFlushAsync();
@@ -559,7 +584,7 @@ TEST_P(TaskSchedulerTaskTrackerTest, RunDelayedTaskDuringFlush) {
VERIFY_ASYNC_FLUSH_IN_PROGRESS();
// Run the delayed task.
- tracker_.RunTask(&delayed_task, SequenceToken::Create());
+ tracker_.RunTask(std::move(delayed_task), SequenceToken::Create());
// Flush() shouldn't return since there is still a pending undelayed
// task.
@@ -567,7 +592,7 @@ TEST_P(TaskSchedulerTaskTrackerTest, RunDelayedTaskDuringFlush) {
VERIFY_ASYNC_FLUSH_IN_PROGRESS();
// Run the undelayed task.
- tracker_.RunTask(&undelayed_task, SequenceToken::Create());
+ tracker_.RunTask(std::move(undelayed_task), SequenceToken::Create());
// Flush() should now return.
WAIT_FOR_ASYNC_FLUSH_RETURNED();
@@ -578,10 +603,10 @@ TEST_P(TaskSchedulerTaskTrackerTest, FlushAfterShutdown) {
return;
// Simulate posting a task.
- const Task undelayed_task(FROM_HERE, Bind(&DoNothing),
- TaskTraits().WithShutdownBehavior(GetParam()),
- TimeDelta());
- tracker_.WillPostTask(&undelayed_task);
+ auto undelayed_task = base::MakeUnique<Task>(
+ FROM_HERE, Bind(&DoNothing),
+ TaskTraits().WithShutdownBehavior(GetParam()), TimeDelta());
+ tracker_.WillPostTask(undelayed_task.get());
// Shutdown() should return immediately since there are no pending
// BLOCK_SHUTDOWN tasks.
@@ -597,10 +622,10 @@ TEST_P(TaskSchedulerTaskTrackerTest, ShutdownDuringFlush) {
return;
// Simulate posting a task.
- const Task undelayed_task(FROM_HERE, Bind(&DoNothing),
- TaskTraits().WithShutdownBehavior(GetParam()),
- TimeDelta());
- tracker_.WillPostTask(&undelayed_task);
+ auto undelayed_task = base::MakeUnique<Task>(
+ FROM_HERE, Bind(&DoNothing),
+ TaskTraits().WithShutdownBehavior(GetParam()), TimeDelta());
+ tracker_.WillPostTask(undelayed_task.get());
// Flush() shouldn't return before the undelayed task runs or
// shutdown completes.
@@ -641,36 +666,33 @@ void ExpectSequenceToken(SequenceToken sequence_token) {
// when a Task runs.
TEST_F(TaskSchedulerTaskTrackerTest, CurrentSequenceToken) {
const SequenceToken sequence_token(SequenceToken::Create());
- Task task(FROM_HERE, Bind(&ExpectSequenceToken, sequence_token), TaskTraits(),
- TimeDelta());
- tracker_.WillPostTask(&task);
+ auto task = base::MakeUnique<Task>(FROM_HERE,
+ Bind(&ExpectSequenceToken, sequence_token),
+ TaskTraits(), TimeDelta());
+ tracker_.WillPostTask(task.get());
EXPECT_FALSE(SequenceToken::GetForCurrentThread().IsValid());
- EXPECT_TRUE(tracker_.RunTask(&task, sequence_token));
+ EXPECT_TRUE(tracker_.RunTask(std::move(task), sequence_token));
EXPECT_FALSE(SequenceToken::GetForCurrentThread().IsValid());
}
TEST_F(TaskSchedulerTaskTrackerTest, LoadWillPostAndRunBeforeShutdown) {
// Post and run tasks asynchronously.
- std::vector<std::unique_ptr<Task>> tasks;
std::vector<std::unique_ptr<ThreadPostingAndRunningTask>> threads;
for (size_t i = 0; i < kLoadTestNumIterations; ++i) {
- tasks.push_back(CreateTask(TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN));
threads.push_back(MakeUnique<ThreadPostingAndRunningTask>(
- &tracker_, tasks.back().get(),
+ &tracker_, CreateTask(TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN),
ThreadPostingAndRunningTask::Action::WILL_POST_AND_RUN, true));
threads.back()->Start();
- tasks.push_back(CreateTask(TaskShutdownBehavior::SKIP_ON_SHUTDOWN));
threads.push_back(MakeUnique<ThreadPostingAndRunningTask>(
- &tracker_, tasks.back().get(),
+ &tracker_, CreateTask(TaskShutdownBehavior::SKIP_ON_SHUTDOWN),
ThreadPostingAndRunningTask::Action::WILL_POST_AND_RUN, true));
threads.back()->Start();
- tasks.push_back(CreateTask(TaskShutdownBehavior::BLOCK_SHUTDOWN));
threads.push_back(MakeUnique<ThreadPostingAndRunningTask>(
- &tracker_, tasks.back().get(),
+ &tracker_, CreateTask(TaskShutdownBehavior::BLOCK_SHUTDOWN),
ThreadPostingAndRunningTask::Action::WILL_POST_AND_RUN, true));
threads.back()->Start();
}
@@ -720,9 +742,9 @@ TEST_F(TaskSchedulerTaskTrackerTest,
// Run tasks asynchronously.
std::vector<std::unique_ptr<ThreadPostingAndRunningTask>> run_threads;
- for (const auto& task : tasks) {
+ for (auto& task : tasks) {
run_threads.push_back(MakeUnique<ThreadPostingAndRunningTask>(
- &tracker_, task.get(), ThreadPostingAndRunningTask::Action::RUN,
+ &tracker_, std::move(task), ThreadPostingAndRunningTask::Action::RUN,
false));
run_threads.back()->Start();
}
@@ -747,25 +769,21 @@ TEST_F(TaskSchedulerTaskTrackerTest, LoadWillPostAndRunDuringShutdown) {
CallShutdownAsync();
// Post and run tasks asynchronously.
- std::vector<std::unique_ptr<Task>> tasks;
std::vector<std::unique_ptr<ThreadPostingAndRunningTask>> threads;
for (size_t i = 0; i < kLoadTestNumIterations; ++i) {
- tasks.push_back(CreateTask(TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN));
threads.push_back(MakeUnique<ThreadPostingAndRunningTask>(
- &tracker_, tasks.back().get(),
+ &tracker_, CreateTask(TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN),
ThreadPostingAndRunningTask::Action::WILL_POST_AND_RUN, false));
threads.back()->Start();
- tasks.push_back(CreateTask(TaskShutdownBehavior::SKIP_ON_SHUTDOWN));
threads.push_back(MakeUnique<ThreadPostingAndRunningTask>(
- &tracker_, tasks.back().get(),
+ &tracker_, CreateTask(TaskShutdownBehavior::SKIP_ON_SHUTDOWN),
ThreadPostingAndRunningTask::Action::WILL_POST_AND_RUN, false));
threads.back()->Start();
- tasks.push_back(CreateTask(TaskShutdownBehavior::BLOCK_SHUTDOWN));
threads.push_back(MakeUnique<ThreadPostingAndRunningTask>(
- &tracker_, tasks.back().get(),
+ &tracker_, CreateTask(TaskShutdownBehavior::BLOCK_SHUTDOWN),
ThreadPostingAndRunningTask::Action::WILL_POST_AND_RUN, true));
threads.back()->Start();
}
@@ -780,8 +798,8 @@ TEST_F(TaskSchedulerTaskTrackerTest, LoadWillPostAndRunDuringShutdown) {
VERIFY_ASYNC_SHUTDOWN_IN_PROGRESS();
// Unblock shutdown by running |block_shutdown_task|.
- EXPECT_TRUE(
- tracker_.RunTask(block_shutdown_task.get(), SequenceToken::Create()));
+ EXPECT_TRUE(tracker_.RunTask(std::move(block_shutdown_task),
+ SequenceToken::Create()));
EXPECT_EQ(kLoadTestNumIterations + 1, NumTasksExecuted());
WAIT_FOR_ASYNC_SHUTDOWN_COMPLETED();
}
diff --git a/chromium/base/task_scheduler/task_traits.cc b/chromium/base/task_scheduler/task_traits.cc
index 5bd868a330e..10cbe68efcf 100644
--- a/chromium/base/task_scheduler/task_traits.cc
+++ b/chromium/base/task_scheduler/task_traits.cc
@@ -65,19 +65,6 @@ const char* TaskShutdownBehaviorToString(
return "";
}
-const char* ExecutionModeToString(ExecutionMode execution_mode) {
- switch (execution_mode) {
- case ExecutionMode::PARALLEL:
- return "PARALLEL";
- case ExecutionMode::SEQUENCED:
- return "SEQUENCED";
- case ExecutionMode::SINGLE_THREADED:
- return "SINGLE_THREADED";
- }
- NOTREACHED();
- return "";
-}
-
std::ostream& operator<<(std::ostream& os, const TaskPriority& task_priority) {
os << TaskPriorityToString(task_priority);
return os;
@@ -89,10 +76,4 @@ std::ostream& operator<<(std::ostream& os,
return os;
}
-std::ostream& operator<<(std::ostream& os,
- const ExecutionMode& execution_mode) {
- os << ExecutionModeToString(execution_mode);
- return os;
-}
-
} // namespace base
diff --git a/chromium/base/task_scheduler/task_traits.h b/chromium/base/task_scheduler/task_traits.h
index 9fee22427be..93b6d71e9c7 100644
--- a/chromium/base/task_scheduler/task_traits.h
+++ b/chromium/base/task_scheduler/task_traits.h
@@ -113,25 +113,11 @@ class BASE_EXPORT TaskTraits {
TaskShutdownBehavior shutdown_behavior_;
};
-// Describes how tasks are executed by a task runner.
-enum class ExecutionMode {
- // Can execute multiple tasks at a time in any order.
- PARALLEL,
-
- // Executes one task at a time in posting order. The sequence’s priority is
- // equivalent to the highest priority pending task in the sequence.
- SEQUENCED,
-
- // Executes one task at a time on a single thread in posting order.
- SINGLE_THREADED,
-};
-
// Returns string literals for the enums defined in this file. These methods
// should only be used for tracing and debugging.
BASE_EXPORT const char* TaskPriorityToString(TaskPriority task_priority);
BASE_EXPORT const char* TaskShutdownBehaviorToString(
TaskShutdownBehavior shutdown_behavior);
-BASE_EXPORT const char* ExecutionModeToString(ExecutionMode task_priority);
// Stream operators so that the enums defined in this file can be used in
// DCHECK and EXPECT statements.
@@ -140,8 +126,6 @@ BASE_EXPORT std::ostream& operator<<(std::ostream& os,
BASE_EXPORT std::ostream& operator<<(
std::ostream& os,
const TaskShutdownBehavior& shutdown_behavior);
-BASE_EXPORT std::ostream& operator<<(std::ostream& os,
- const ExecutionMode& execution_mode);
} // namespace base
diff --git a/chromium/base/task_scheduler/test_task_factory.h b/chromium/base/task_scheduler/test_task_factory.h
index 9ed15a1bac2..5bdd561e197 100644
--- a/chromium/base/task_scheduler/test_task_factory.h
+++ b/chromium/base/task_scheduler/test_task_factory.h
@@ -16,12 +16,10 @@
#include "base/synchronization/lock.h"
#include "base/task_runner.h"
#include "base/task_scheduler/task_traits.h"
+#include "base/task_scheduler/test_utils.h"
#include "base/threading/thread_checker_impl.h"
namespace base {
-
-class WaitableEvent;
-
namespace internal {
namespace test {
diff --git a/chromium/base/task_scheduler/test_utils.h b/chromium/base/task_scheduler/test_utils.h
new file mode 100644
index 00000000000..dbd1227f52b
--- /dev/null
+++ b/chromium/base/task_scheduler/test_utils.h
@@ -0,0 +1,20 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_TEST_UTILS_H_
+#define BASE_TASK_SCHEDULER_TEST_UTILS_H_
+
+namespace base {
+namespace internal {
+namespace test {
+
+// An enumeration of possible task scheduler TaskRunner types. Used to
+// parametrize relevant task_scheduler tests.
+enum class ExecutionMode { PARALLEL, SEQUENCED, SINGLE_THREADED };
+
+} // namespace test
+} // namespace internal
+} // namespace base
+
+#endif // BASE_TASK_SCHEDULER_TEST_UTILS_H_
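Note: the test-only ExecutionMode enum above exists to parametrize gtest cases. A sketch of the intended pattern; the fixture name and test body are mine, but the shape matches how gtest value-parametrized tests of this era are written:

    #include "base/task_scheduler/test_utils.h"
    #include "testing/gtest/include/gtest/gtest.h"

    namespace {

    using base::internal::test::ExecutionMode;

    // Hypothetical parametrized fixture; GetParam() would select which
    // TaskRunner type the fixture creates.
    class HypotheticalTaskRunnerTest
        : public testing::TestWithParam<ExecutionMode> {};

    TEST_P(HypotheticalTaskRunnerTest, CreatesRunner) {
      const ExecutionMode mode = GetParam();
      (void)mode;
    }

    INSTANTIATE_TEST_CASE_P(All,
                            HypotheticalTaskRunnerTest,
                            testing::Values(ExecutionMode::PARALLEL,
                                            ExecutionMode::SEQUENCED,
                                            ExecutionMode::SINGLE_THREADED));

    }  // namespace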
diff --git a/chromium/base/test/BUILD.gn b/chromium/base/test/BUILD.gn
index 788494ff6d8..89b7028c117 100644
--- a/chromium/base/test/BUILD.gn
+++ b/chromium/base/test/BUILD.gn
@@ -75,6 +75,8 @@ static_library("test_support") {
"scoped_feature_list.h",
"scoped_locale.cc",
"scoped_locale.h",
+ "scoped_mock_time_message_loop_task_runner.cc",
+ "scoped_mock_time_message_loop_task_runner.h",
"scoped_path_override.cc",
"scoped_path_override.h",
"sequenced_task_runner_test_template.cc",
diff --git a/chromium/base/third_party/dmg_fp/dtoa.cc b/chromium/base/third_party/dmg_fp/dtoa.cc
index be560bc24d6..19dbdebd63c 100644
--- a/chromium/base/third_party/dmg_fp/dtoa.cc
+++ b/chromium/base/third_party/dmg_fp/dtoa.cc
@@ -2675,9 +2675,9 @@ strtod
L = c - '0';
s1 = s;
while((c = *++s) >= '0' && c <= '9') {
- L = 10*L + c - '0';
- if (L > DBL_MAX_10_EXP)
- break;
+ if (L < (INT_MAX - 10) / 10) {
+ L = 10*L + (c - '0');
+ }
}
if (s - s1 > 8 || L > 19999)
/* Avoid confusion from exponents
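Note: the dtoa.cc change above replaces the old DBL_MAX_10_EXP early-break with a guard that stops accumulating once another digit could overflow a signed int, while still consuming the remaining digits so the existing "s - s1 > 8 || L > 19999" check can reject the exponent. A standalone sketch of the same saturating pattern, with names of my own:

    #include <limits.h>

    // Accumulate a decimal exponent; stop growing |L| once 10*L + 9 could
    // overflow, but keep scanning so the caller can still see how many
    // digits there were.
    static int parse_exponent(const char** sp) {
      const char* s = *sp;
      int L = 0;
      while (*s >= '0' && *s <= '9') {
        if (L < (INT_MAX - 10) / 10)
          L = 10 * L + (*s - '0');
        ++s;
      }
      *sp = s;
      return L;  // absurd values (e.g. L > 19999) are rejected by the caller
    }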
diff --git a/chromium/base/third_party/dmg_fp/exp_length.patch b/chromium/base/third_party/dmg_fp/exp_length.patch
index 65d033a403e..278ec17b289 100644
--- a/chromium/base/third_party/dmg_fp/exp_length.patch
+++ b/chromium/base/third_party/dmg_fp/exp_length.patch
@@ -1,16 +1,17 @@
diff --git a/base/third_party/dmg_fp/dtoa.cc b/base/third_party/dmg_fp/dtoa.cc
-index 502c16c..f3d793e 100644
+index c0a51c2..ab4e056 100644
--- a/base/third_party/dmg_fp/dtoa.cc
+++ b/base/third_party/dmg_fp/dtoa.cc
-@@ -2597,8 +2597,11 @@ strtod
+@@ -2674,8 +2674,11 @@ strtod
if (c > '0' && c <= '9') {
L = c - '0';
s1 = s;
- while((c = *++s) >= '0' && c <= '9')
+- L = 10*L + c - '0';
+ while((c = *++s) >= '0' && c <= '9') {
- L = 10*L + c - '0';
-+ if (L > DBL_MAX_10_EXP)
-+ break;
++ if (L < (INT_MAX - 10) / 10) {
++ L = 10*L + (c - '0');
++ }
+ }
if (s - s1 > 8 || L > 19999)
/* Avoid confusion from exponents
diff --git a/chromium/base/third_party/libevent/http-internal.h b/chromium/base/third_party/libevent/http-internal.h
index 9cd03cdd2bc..1c4c3db0538 100644
--- a/chromium/base/third_party/libevent/http-internal.h
+++ b/chromium/base/third_party/libevent/http-internal.h
@@ -31,7 +31,6 @@ enum evhttp_connection_error {
};
struct evbuffer;
-struct addrinfo;
struct evhttp_request;
/* A stupid connection object - maybe make this a bufferevent later */
diff --git a/chromium/base/threading/platform_thread_linux.cc b/chromium/base/threading/platform_thread_linux.cc
index 299ae2b9277..92fbda5ee1d 100644
--- a/chromium/base/threading/platform_thread_linux.cc
+++ b/chromium/base/threading/platform_thread_linux.cc
@@ -29,36 +29,57 @@
namespace base {
namespace {
#if !defined(OS_NACL)
-const FilePath::CharType kCpusetDirectory[] =
- FILE_PATH_LITERAL("/sys/fs/cgroup/cpuset/chrome");
+const FilePath::CharType kCgroupDirectory[] =
+ FILE_PATH_LITERAL("/sys/fs/cgroup");
-FilePath ThreadPriorityToCpusetDirectory(ThreadPriority priority) {
- FilePath cpuset_filepath(kCpusetDirectory);
+FilePath ThreadPriorityToCgroupDirectory(const FilePath& cgroup_filepath,
+ ThreadPriority priority) {
switch (priority) {
case ThreadPriority::NORMAL:
- return cpuset_filepath;
+ return cgroup_filepath;
case ThreadPriority::BACKGROUND:
- return cpuset_filepath.Append(FILE_PATH_LITERAL("non-urgent"));
+ return cgroup_filepath.Append(FILE_PATH_LITERAL("non-urgent"));
case ThreadPriority::DISPLAY:
case ThreadPriority::REALTIME_AUDIO:
- return cpuset_filepath.Append(FILE_PATH_LITERAL("urgent"));
+ return cgroup_filepath.Append(FILE_PATH_LITERAL("urgent"));
}
NOTREACHED();
return FilePath();
}
-void SetThreadCpuset(PlatformThreadId thread_id,
- const FilePath& cpuset_directory) {
- // Silently ignore request if cpuset directory doesn't exist.
- if (!DirectoryExists(cpuset_directory))
- return;
- FilePath tasks_filepath = cpuset_directory.Append(FILE_PATH_LITERAL("tasks"));
+void SetThreadCgroup(PlatformThreadId thread_id,
+ const FilePath& cgroup_directory) {
+ FilePath tasks_filepath = cgroup_directory.Append(FILE_PATH_LITERAL("tasks"));
std::string tid = IntToString(thread_id);
int bytes_written = WriteFile(tasks_filepath, tid.c_str(), tid.size());
if (bytes_written != static_cast<int>(tid.size())) {
DVLOG(1) << "Failed to add " << tid << " to " << tasks_filepath.value();
}
}
+
+void SetThreadCgroupForThreadPriority(PlatformThreadId thread_id,
+ const FilePath& cgroup_filepath,
+ ThreadPriority priority) {
+ // Append the "chrome" path component.
+ FilePath cgroup_directory = ThreadPriorityToCgroupDirectory(
+ cgroup_filepath.Append(FILE_PATH_LITERAL("chrome")), priority);
+
+ // Silently ignore request if cgroup directory doesn't exist.
+ if (!DirectoryExists(cgroup_directory))
+ return;
+
+ SetThreadCgroup(thread_id, cgroup_directory);
+}
+
+void SetThreadCgroupsForThreadPriority(PlatformThreadId thread_id,
+ ThreadPriority priority) {
+ FilePath cgroup_filepath(kCgroupDirectory);
+ SetThreadCgroupForThreadPriority(
+ thread_id, cgroup_filepath.Append(FILE_PATH_LITERAL("cpuset")), priority);
+ SetThreadCgroupForThreadPriority(
+ thread_id, cgroup_filepath.Append(FILE_PATH_LITERAL("schedtune")),
+ priority);
+}
#endif
} // namespace
@@ -79,8 +100,7 @@ const ThreadPriorityToNiceValuePair kThreadPriorityToNiceValueMap[4] = {
bool SetCurrentThreadPriorityForPlatform(ThreadPriority priority) {
#if !defined(OS_NACL)
- FilePath cpuset_directory = ThreadPriorityToCpusetDirectory(priority);
- SetThreadCpuset(PlatformThread::CurrentId(), cpuset_directory);
+ SetThreadCgroupsForThreadPriority(PlatformThread::CurrentId(), priority);
return priority == ThreadPriority::REALTIME_AUDIO &&
pthread_setschedparam(pthread_self(), SCHED_RR, &kRealTimePrio) == 0;
#else
@@ -139,8 +159,7 @@ void PlatformThread::SetThreadPriority(PlatformThreadId thread_id,
// priority.
CHECK_NE(thread_id, getpid());
- FilePath cpuset_directory = ThreadPriorityToCpusetDirectory(priority);
- SetThreadCpuset(thread_id, cpuset_directory);
+ SetThreadCgroupsForThreadPriority(thread_id, priority);
const int nice_setting = internal::ThreadPriorityToNiceValue(priority);
if (setpriority(PRIO_PROCESS, thread_id, nice_setting)) {
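Note: moving a thread into a cgroup is just a write of its TID to the group's "tasks" file, which is what SetThreadCgroup() above does via base::WriteFile. An illustrative standard-library-only sketch; the helper name is mine:

    #include <fstream>
    #include <string>

    // Appends |tid| to |cgroup_dir|/tasks; returns false if the cgroup is
    // absent, mirroring the "silently ignore" behavior above.
    bool AddThreadToCgroup(const std::string& cgroup_dir, int tid) {
      std::ofstream tasks(cgroup_dir + "/tasks", std::ios::app);
      if (!tasks.is_open())
        return false;
      tasks << tid << '\n';
      return tasks.good();
    }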
diff --git a/chromium/base/threading/sequenced_worker_pool.cc b/chromium/base/threading/sequenced_worker_pool.cc
index d856c3a70b7..360fb4a537f 100644
--- a/chromium/base/threading/sequenced_worker_pool.cc
+++ b/chromium/base/threading/sequenced_worker_pool.cc
@@ -30,12 +30,11 @@
#include "base/task_scheduler/post_task.h"
#include "base/task_scheduler/task_scheduler.h"
#include "base/threading/platform_thread.h"
+#include "base/threading/sequenced_task_runner_handle.h"
#include "base/threading/simple_thread.h"
#include "base/threading/thread_local.h"
#include "base/threading/thread_restrictions.h"
-#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
-#include "base/trace_event/heap_profiler.h"
#include "base/trace_event/trace_event.h"
#include "base/tracked_objects.h"
#include "base/tracking_info.h"
@@ -849,9 +848,9 @@ SequencedWorkerPool::Inner::GetTaskSchedulerTaskRunner(
// same shutdown behavior.
if (!task_runner) {
- ExecutionMode execution_mode =
- sequence_token_id ? ExecutionMode::SEQUENCED : ExecutionMode::PARALLEL;
- task_runner = CreateTaskRunnerWithTraits(traits, execution_mode);
+ task_runner = sequence_token_id
+ ? CreateSequencedTaskRunnerWithTraits(traits)
+ : CreateTaskRunnerWithTraits(traits);
}
return task_runner;
@@ -863,8 +862,7 @@ bool SequencedWorkerPool::Inner::RunsTasksOnCurrentThread() const {
AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) {
if (!runs_tasks_on_verifier_) {
runs_tasks_on_verifier_ = CreateTaskRunnerWithTraits(
- TaskTraits().WithFileIO().WithPriority(task_priority_),
- ExecutionMode::PARALLEL);
+ TaskTraits().WithFileIO().WithPriority(task_priority_));
}
return runs_tasks_on_verifier_->RunsTasksOnCurrentThread();
} else {
@@ -992,14 +990,11 @@ void SequencedWorkerPool::Inner::ThreadLoop(Worker* this_worker) {
GetWorkStatus status =
GetWork(&task, &wait_time, &delete_these_outside_lock);
if (status == GET_WORK_FOUND) {
- TRACE_EVENT_WITH_FLOW2(TRACE_DISABLED_BY_DEFAULT("toplevel.flow"),
- "SequencedWorkerPool::Inner::ThreadLoop",
+ TRACE_TASK_EXECUTION("SequencedWorkerPool::Inner::ThreadLoop", task);
+ TRACE_EVENT_WITH_FLOW0(TRACE_DISABLED_BY_DEFAULT("toplevel.flow"),
+ "SequencedWorkerPool::Inner::PostTask",
TRACE_ID_MANGLE(GetTaskTraceID(task, static_cast<void*>(this))),
- TRACE_EVENT_FLAG_FLOW_IN,
- "src_file", task.posted_from.file_name(),
- "src_func", task.posted_from.function_name());
- TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION task_event(
- task.posted_from.file_name());
+ TRACE_EVENT_FLAG_FLOW_IN);
int new_thread_id = WillRunWorkerTask(task);
{
AutoUnlock unlock(lock_);
@@ -1467,7 +1462,7 @@ void SequencedWorkerPool::ResetRedirectToTaskSchedulerForProcessForTesting() {
SequencedWorkerPool::SequencedWorkerPool(size_t max_threads,
const std::string& thread_name_prefix,
base::TaskPriority task_priority)
- : constructor_task_runner_(ThreadTaskRunnerHandle::Get()),
+ : constructor_task_runner_(SequencedTaskRunnerHandle::Get()),
inner_(new Inner(this,
max_threads,
thread_name_prefix,
@@ -1478,7 +1473,7 @@ SequencedWorkerPool::SequencedWorkerPool(size_t max_threads,
const std::string& thread_name_prefix,
base::TaskPriority task_priority,
TestingObserver* observer)
- : constructor_task_runner_(ThreadTaskRunnerHandle::Get()),
+ : constructor_task_runner_(SequencedTaskRunnerHandle::Get()),
inner_(new Inner(this,
max_threads,
thread_name_prefix,
@@ -1614,7 +1609,7 @@ void SequencedWorkerPool::SignalHasWorkForTesting() {
}
void SequencedWorkerPool::Shutdown(int max_new_blocking_tasks_after_shutdown) {
- DCHECK(constructor_task_runner_->BelongsToCurrentThread());
+ DCHECK(constructor_task_runner_->RunsTasksOnCurrentThread());
inner_->Shutdown(max_new_blocking_tasks_after_shutdown);
}
diff --git a/chromium/base/threading/sequenced_worker_pool.h b/chromium/base/threading/sequenced_worker_pool.h
index f06a9e523cc..252d511d3f2 100644
--- a/chromium/base/threading/sequenced_worker_pool.h
+++ b/chromium/base/threading/sequenced_worker_pool.h
@@ -13,9 +13,9 @@
#include "base/base_export.h"
#include "base/callback_forward.h"
+#include "base/compiler_specific.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
-#include "base/single_thread_task_runner.h"
#include "base/task_runner.h"
#include "base/task_scheduler/task_traits.h"
@@ -25,12 +25,10 @@ class Location;
namespace base {
-class SingleThreadTaskRunner;
+class SequencedTaskRunner;
template <class T> class DeleteHelper;
-class SequencedTaskRunner;
-
// A worker thread pool that enforces ordering between sets of tasks. It also
// allows you to specify what should happen to your tasks on shutdown.
//
@@ -232,7 +230,7 @@ class BASE_EXPORT SequencedWorkerPool : public TaskRunner {
// delay are posted with SKIP_ON_SHUTDOWN behavior and tasks with zero delay
// are posted with BLOCK_SHUTDOWN behavior.
scoped_refptr<SequencedTaskRunner> GetSequencedTaskRunner(
- SequenceToken token);
+ SequenceToken token) WARN_UNUSED_RESULT;
// Returns a SequencedTaskRunner wrapper which posts to this
// SequencedWorkerPool using the given sequence token. Tasks with nonzero
@@ -240,14 +238,14 @@ class BASE_EXPORT SequencedWorkerPool : public TaskRunner {
// are posted with the given shutdown behavior.
scoped_refptr<SequencedTaskRunner> GetSequencedTaskRunnerWithShutdownBehavior(
SequenceToken token,
- WorkerShutdown shutdown_behavior);
+ WorkerShutdown shutdown_behavior) WARN_UNUSED_RESULT;
// Returns a TaskRunner wrapper which posts to this SequencedWorkerPool using
// the given shutdown behavior. Tasks with nonzero delay are posted with
// SKIP_ON_SHUTDOWN behavior and tasks with zero delay are posted with the
// given shutdown behavior.
scoped_refptr<TaskRunner> GetTaskRunnerWithShutdownBehavior(
- WorkerShutdown shutdown_behavior);
+ WorkerShutdown shutdown_behavior) WARN_UNUSED_RESULT;
// Posts the given task for execution in the worker pool. Tasks posted with
// this function will execute in an unspecified order on a background thread.
@@ -398,7 +396,7 @@ class BASE_EXPORT SequencedWorkerPool : public TaskRunner {
// sequence_token.
bool IsRunningSequenceOnCurrentThread(SequenceToken sequence_token) const;
- const scoped_refptr<SingleThreadTaskRunner> constructor_task_runner_;
+ const scoped_refptr<SequencedTaskRunner> constructor_task_runner_;
// Avoid pulling in too many headers by putting (almost) everything
// into |inner_|.
diff --git a/chromium/base/threading/sequenced_worker_pool_unittest.cc b/chromium/base/threading/sequenced_worker_pool_unittest.cc
index 5782af7e767..d2007f8be52 100644
--- a/chromium/base/threading/sequenced_worker_pool_unittest.cc
+++ b/chromium/base/threading/sequenced_worker_pool_unittest.cc
@@ -248,8 +248,9 @@ class SequencedWorkerPoolTest
std::vector<SchedulerWorkerPoolParams> worker_pool_params;
worker_pool_params.emplace_back(
"SchedulerWorkerPoolName", ThreadPriority::NORMAL,
- SchedulerWorkerPoolParams::IORestriction::ALLOWED, kNumWorkerThreads,
- TimeDelta::Max());
+ SchedulerWorkerPoolParams::IORestriction::ALLOWED,
+ SchedulerWorkerPoolParams::StandbyThreadPolicy::LAZY,
+ kNumWorkerThreads, TimeDelta::Max());
TaskScheduler::CreateAndSetDefaultTaskScheduler(
std::move(worker_pool_params),
base::Bind([](const TaskTraits&) -> size_t { return 0U; }));
diff --git a/chromium/base/threading/thread.h b/chromium/base/threading/thread.h
index b8136db8829..f103e3d2288 100644
--- a/chromium/base/threading/thread.h
+++ b/chromium/base/threading/thread.h
@@ -244,6 +244,10 @@ class BASE_EXPORT Thread : PlatformThread::Delegate {
// Bind this Thread to an existing MessageLoop instead of starting a new one.
void SetMessageLoop(MessageLoop* message_loop);
+ bool using_external_message_loop() const {
+ return using_external_message_loop_;
+ }
+
private:
#if defined(OS_WIN)
enum ComStatus {
@@ -293,7 +297,9 @@ class BASE_EXPORT Thread : PlatformThread::Delegate {
// True only if |message_loop_| was externally provided by |SetMessageLoop()|
// in which case this Thread has no underlying |thread_| and should merely
- // drop |message_loop_| on Stop().
+ // drop |message_loop_| on Stop(). In that event, this remains true after
+ // Stop() has been invoked so that subclasses can use this state to build
+ // their own cleanup logic as required.
bool using_external_message_loop_ = false;
// Stores Options::timer_slack_ until the message loop has been bound to
diff --git a/chromium/base/threading/thread_local.h b/chromium/base/threading/thread_local.h
index f40420cd2f8..cad9add3a9c 100644
--- a/chromium/base/threading/thread_local.h
+++ b/chromium/base/threading/thread_local.h
@@ -2,35 +2,34 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// WARNING: Thread local storage is a bit tricky to get right. Please make
-// sure that this is really the proper solution for what you're trying to
-// achieve. Don't prematurely optimize, most likely you can just use a Lock.
+// WARNING: Thread local storage is a bit tricky to get right. Please make sure
+// that this is really the proper solution for what you're trying to achieve.
+// Don't prematurely optimize, most likely you can just use a Lock.
//
-// These classes implement a wrapper around the platform's TLS storage
-// mechanism. On construction, they will allocate a TLS slot, and free the
-// TLS slot on destruction. No memory management (creation or destruction) is
-// handled. This means for uses of ThreadLocalPointer, you must correctly
-// manage the memory yourself, these classes will not destroy the pointer for
-// you. There are no at-thread-exit actions taken by these classes.
+// These classes implement a wrapper around ThreadLocalStorage::Slot. On
+// construction, they will allocate a TLS slot, and free the TLS slot on
+// destruction. No memory management (creation or destruction) is handled. This
+// means for uses of ThreadLocalPointer, you must correctly manage the memory
+// yourself, these classes will not destroy the pointer for you. There are no
+// at-thread-exit actions taken by these classes.
//
-// ThreadLocalPointer<Type> wraps a Type*. It performs no creation or
-// destruction, so memory management must be handled elsewhere. The first call
-// to Get() on a thread will return NULL. You can update the pointer with a
-// call to Set().
+// ThreadLocalPointer<Type> wraps a Type*. It performs no creation or
+// destruction, so memory management must be handled elsewhere. The first call
+// to Get() on a thread will return NULL. You can update the pointer with a call
+// to Set().
//
-// ThreadLocalBoolean wraps a bool. It will default to false if it has never
+// ThreadLocalBoolean wraps a bool. It will default to false if it has never
// been set otherwise with Set().
//
-// Thread Safety: An instance of ThreadLocalStorage is completely thread safe
-// once it has been created. If you want to dynamically create an instance,
-// you must of course properly deal with safety and race conditions. This
-// means a function-level static initializer is generally inappropiate.
+// Thread Safety: An instance of ThreadLocalStorage is completely thread safe
+// once it has been created. If you want to dynamically create an instance, you
+// must of course properly deal with safety and race conditions. This means a
+// function-level static initializer is generally inappropriate.
//
-// In Android, the system TLS is limited, the implementation is backed with
-// ThreadLocalStorage.
+// In Android, the system TLS is limited.
//
// Example usage:
-// // My class is logically attached to a single thread. We cache a pointer
+// // My class is logically attached to a single thread. We cache a pointer
// // on the thread it was created on, so we can implement current().
// MyClass::MyClass() {
// DCHECK(Singleton<ThreadLocalPointer<MyClass> >::get()->Get() == NULL);
@@ -51,76 +50,42 @@
#ifndef BASE_THREADING_THREAD_LOCAL_H_
#define BASE_THREADING_THREAD_LOCAL_H_
-#include "base/base_export.h"
#include "base/macros.h"
#include "base/threading/thread_local_storage.h"
-#include "build/build_config.h"
-
-#if defined(OS_POSIX)
-#include <pthread.h>
-#endif
namespace base {
-namespace internal {
-
-// Helper functions that abstract the cross-platform APIs. Do not use directly.
-struct BASE_EXPORT ThreadLocalPlatform {
-#if defined(OS_WIN)
- typedef unsigned long SlotType;
-#elif defined(OS_ANDROID)
- typedef ThreadLocalStorage::StaticSlot SlotType;
-#elif defined(OS_POSIX)
- typedef pthread_key_t SlotType;
-#endif
-
- static void AllocateSlot(SlotType* slot);
- static void FreeSlot(SlotType slot);
- static void* GetValueFromSlot(SlotType slot);
- static void SetValueInSlot(SlotType slot, void* value);
-};
-
-} // namespace internal
template <typename Type>
class ThreadLocalPointer {
public:
- ThreadLocalPointer() : slot_() {
- internal::ThreadLocalPlatform::AllocateSlot(&slot_);
- }
-
- ~ThreadLocalPointer() {
- internal::ThreadLocalPlatform::FreeSlot(slot_);
- }
+ ThreadLocalPointer() = default;
+ ~ThreadLocalPointer() = default;
Type* Get() {
- return static_cast<Type*>(
- internal::ThreadLocalPlatform::GetValueFromSlot(slot_));
+ return static_cast<Type*>(slot_.Get());
}
void Set(Type* ptr) {
- internal::ThreadLocalPlatform::SetValueInSlot(
- slot_, const_cast<void*>(static_cast<const void*>(ptr)));
+ slot_.Set(const_cast<void*>(static_cast<const void*>(ptr)));
}
private:
- typedef internal::ThreadLocalPlatform::SlotType SlotType;
-
- SlotType slot_;
+ ThreadLocalStorage::Slot slot_;
DISALLOW_COPY_AND_ASSIGN(ThreadLocalPointer<Type>);
};
class ThreadLocalBoolean {
public:
- ThreadLocalBoolean() {}
- ~ThreadLocalBoolean() {}
+ ThreadLocalBoolean() = default;
+ ~ThreadLocalBoolean() = default;
bool Get() {
- return tlp_.Get() != NULL;
+ return tlp_.Get() != nullptr;
}
void Set(bool val) {
- tlp_.Set(val ? this : NULL);
+ tlp_.Set(val ? this : nullptr);
}
private:
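Note: with the platform-specific backends gone, ThreadLocalPointer is a thin wrapper over ThreadLocalStorage::Slot. A short usage sketch of the contract the header comment describes (the caller manages the pointee; the first Get() on a thread returns null); the counter helper is hypothetical:

    #include "base/threading/thread_local.h"

    namespace {

    // Hypothetical per-thread counter. The TLS object and per-thread ints are
    // leaked intentionally: the header warns there are no at-thread-exit
    // actions and no memory management is provided.
    base::ThreadLocalPointer<int>& Counter() {
      static base::ThreadLocalPointer<int>* tlp =
          new base::ThreadLocalPointer<int>;
      return *tlp;
    }

    }  // namespace

    void IncrementPerThreadCounter() {
      if (!Counter().Get())
        Counter().Set(new int(0));  // first access on this thread is null
      ++*Counter().Get();           // each thread sees its own counter
    }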
diff --git a/chromium/base/threading/thread_local_android.cc b/chromium/base/threading/thread_local_android.cc
deleted file mode 100644
index 813dd78b5ec..00000000000
--- a/chromium/base/threading/thread_local_android.cc
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/threading/thread_local.h"
-
-namespace base {
-namespace internal {
-
-// static
-void ThreadLocalPlatform::AllocateSlot(SlotType* slot) {
- slot->Initialize(nullptr);
-}
-
-// static
-void ThreadLocalPlatform::FreeSlot(SlotType slot) {
- slot.Free();
-}
-
-// static
-void* ThreadLocalPlatform::GetValueFromSlot(SlotType slot) {
- return slot.Get();
-}
-
-// static
-void ThreadLocalPlatform::SetValueInSlot(SlotType slot, void* value) {
- slot.Set(value);
-}
-
-} // namespace internal
-} // namespace base
diff --git a/chromium/base/threading/thread_local_posix.cc b/chromium/base/threading/thread_local_posix.cc
deleted file mode 100644
index 8bc46ad1902..00000000000
--- a/chromium/base/threading/thread_local_posix.cc
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/threading/thread_local.h"
-
-#include <pthread.h>
-
-#include "base/logging.h"
-#include "build/build_config.h"
-
-#if !defined(OS_ANDROID)
-
-namespace base {
-namespace internal {
-
-// static
-void ThreadLocalPlatform::AllocateSlot(SlotType* slot) {
- int error = pthread_key_create(slot, NULL);
- CHECK_EQ(error, 0);
-}
-
-// static
-void ThreadLocalPlatform::FreeSlot(SlotType slot) {
- int error = pthread_key_delete(slot);
- DCHECK_EQ(0, error);
-}
-
-// static
-void* ThreadLocalPlatform::GetValueFromSlot(SlotType slot) {
- return pthread_getspecific(slot);
-}
-
-// static
-void ThreadLocalPlatform::SetValueInSlot(SlotType slot, void* value) {
- int error = pthread_setspecific(slot, value);
- DCHECK_EQ(error, 0);
-}
-
-} // namespace internal
-} // namespace base
-
-#endif // !defined(OS_ANDROID)
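Note: the following diff to thread_local_storage.cc pairs every per-thread slot entry with a version number from a global metadata table, so freeing a slot invalidates all threads' stale values in constant time. A minimal sketch of the version check, under illustrative names rather than the Chromium ones:

    #include <stdint.h>

    struct TlsMetadataSketch {  // global, one per slot; version bumps on free
      uint32_t version;
    };
    struct TlsEntrySketch {     // per-thread; version stamped on each write
      void* data;
      uint32_t version;
    };

    // A mismatched version means the slot was freed (and possibly reissued)
    // after this thread last wrote it, so the read degrades to null without
    // ever touching the other threads' arrays.
    void* ReadSlot(const TlsEntrySketch& entry, const TlsMetadataSketch& meta) {
      return entry.version == meta.version ? entry.data : nullptr;
    }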
diff --git a/chromium/base/threading/thread_local_storage.cc b/chromium/base/threading/thread_local_storage.cc
index 0ef31f74050..15a1d5e2dbb 100644
--- a/chromium/base/threading/thread_local_storage.cc
+++ b/chromium/base/threading/thread_local_storage.cc
@@ -5,11 +5,59 @@
#include "base/threading/thread_local_storage.h"
#include "base/atomicops.h"
+#include "base/lazy_instance.h"
#include "base/logging.h"
+#include "base/synchronization/lock.h"
#include "build/build_config.h"
using base::internal::PlatformThreadLocalStorage;
+// Chrome Thread Local Storage (TLS)
+//
+// This TLS system allows Chrome to use a single OS level TLS slot process-wide,
+// and allows us to control the slot limits instead of being at the mercy of the
+// platform. To do this, Chrome TLS replicates an array commonly found in the OS
+// thread metadata.
+//
+// Overview:
+//
+// OS TLS Slots Per-Thread Per-Process Global
+// ...
+// [] Chrome TLS Array Chrome TLS Metadata
+// [] ----------> [][][][][ ][][][][] [][][][][ ][][][][]
+// [] | |
+// ... V V
+// Metadata Version Slot Information
+// Your Data!
+//
+// Using a single OS TLS slot, Chrome TLS allocates an array on demand for the
+// lifetime of each thread that requests Chrome TLS data. Each per-thread TLS
+// array matches the length of the per-process global metadata array.
+//
+// A per-process global TLS metadata array tracks information about each item in
+// the per-thread array:
+// * Status: Tracks if the slot is allocated or free to assign.
+// * Destructor: An optional destructor to call on thread destruction for that
+// specific slot.
+// * Version: Tracks the current version of the TLS slot. Each TLS slot
+// allocation is associated with a unique version number.
+//
+// Most OS TLS APIs guarantee that a newly allocated TLS slot is initialized to
+// 0 for all threads. The Chrome TLS system provides this guarantee by tracking
+// the version for each TLS slot here on each per-thread Chrome TLS array
+// entry. Threads that access a slot with a mismatched version will receive 0
+// as their value. The metadata version is incremented when the client frees a
+// slot. The per-thread metadata version is updated when a client writes to the
+// slot. This scheme allows for constant time invalidation and avoids the need
+// to iterate through each Chrome TLS array to mark the slot as zero.
+//
+// Just like an OS TLS API, clients of the Chrome TLS are responsible for
+// managing any necessary lifetime of the data in their slots. The only
+// convenience provided is automatic destruction when a thread ends. If a client
+// frees a slot, that client is responsible for destroying the data in the slot.
+
namespace {
// In order to make TLS destructors work, we need to keep around a function
// pointer to the destructor for each slot. We keep this array of pointers in a
@@ -18,37 +66,39 @@ namespace {
// hold a pointer to a per-thread array (table) of slots that we allocate to
// Chromium consumers.
-// g_native_tls_key is the one native TLS that we use. It stores our table.
+// g_native_tls_key is the one native TLS that we use. It stores our table.
base::subtle::Atomic32 g_native_tls_key =
PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES;
-// g_last_used_tls_key is the high-water-mark of allocated thread local storage.
-// Each allocation is an index into our g_tls_destructors[]. Each such index is
-// assigned to the instance variable slot_ in a ThreadLocalStorage::Slot
-// instance. We reserve the value slot_ == 0 to indicate that the corresponding
-// instance of ThreadLocalStorage::Slot has been freed (i.e., destructor called,
-// etc.). This reserved use of 0 is then stated as the initial value of
-// g_last_used_tls_key, so that the first issued index will be 1.
-base::subtle::Atomic32 g_last_used_tls_key = 0;
+// The maximum number of slots in our thread local storage stack.
+constexpr int kThreadLocalStorageSize = 256;
+constexpr int kInvalidSlotValue = -1;
+
+enum TlsStatus {
+ FREE,
+ IN_USE,
+};
+
+struct TlsMetadata {
+ TlsStatus status;
+ base::ThreadLocalStorage::TLSDestructorFunc destructor;
+ uint32_t version;
+};
-// The maximum number of 'slots' in our thread local storage stack.
-const int kThreadLocalStorageSize = 256;
+struct TlsVectorEntry {
+ void* data;
+ uint32_t version;
+};
+
+// This LazyInstance isn't needed until after we've constructed the per-thread
+// TLS vector, so it's safe to use.
+base::LazyInstance<base::Lock>::Leaky g_tls_metadata_lock;
+TlsMetadata g_tls_metadata[kThreadLocalStorageSize];
+size_t g_last_assigned_slot = 0;
// The maximum number of times to try to clear slots by calling destructors.
// Use pthread naming convention for clarity.
-const int kMaxDestructorIterations = kThreadLocalStorageSize;
-
-// An array of destructor function pointers for the slots. If a slot has a
-// destructor, it will be stored in its corresponding entry in this array.
-// The elements are volatile to ensure that when the compiler reads the value
-// to potentially call the destructor, it does so once, and that value is tested
-// for null-ness and then used. Yes, that would be a weird de-optimization,
-// but I can imagine some register machines where it was just as easy to
-// re-fetch an array element, and I want to be sure a call to free the key
-// (i.e., null out the destructor entry) that happens on a separate thread can't
-// hurt the racy calls to the destructors on another thread.
-volatile base::ThreadLocalStorage::TLSDestructorFunc
- g_tls_destructors[kThreadLocalStorageSize];
+constexpr int kMaxDestructorIterations = kThreadLocalStorageSize;
// This function is called to initialize our entire Chromium TLS system.
// It may be called very early, and we need to complete most all of the setup
@@ -56,7 +106,7 @@ volatile base::ThreadLocalStorage::TLSDestructorFunc
// recursively depend on this initialization.
// As a result, we use Atomics, and avoid anything (like a singleton) that might
// require memory allocations.
-void** ConstructTlsVector() {
+TlsVectorEntry* ConstructTlsVector() {
PlatformThreadLocalStorage::TLSKey key =
base::subtle::NoBarrier_Load(&g_native_tls_key);
if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES) {
@@ -73,8 +123,8 @@ void** ConstructTlsVector() {
key != PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES);
PlatformThreadLocalStorage::FreeTLS(tmp);
}
- // Atomically test-and-set the tls_key. If the key is
- // TLS_KEY_OUT_OF_INDEXES, go ahead and set it. Otherwise, do nothing, as
+ // Atomically test-and-set the tls_key. If the key is
+ // TLS_KEY_OUT_OF_INDEXES, go ahead and set it. Otherwise, do nothing, as
// another thread already did our dirty work.
if (PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES !=
static_cast<PlatformThreadLocalStorage::TLSKey>(
@@ -90,39 +140,38 @@ void** ConstructTlsVector() {
}
CHECK(!PlatformThreadLocalStorage::GetTLSValue(key));
- // Some allocators, such as TCMalloc, make use of thread local storage.
- // As a result, any attempt to call new (or malloc) will lazily cause such a
- // system to initialize, which will include registering for a TLS key. If we
- // are not careful here, then that request to create a key will call new back,
- // and we'll have an infinite loop. We avoid that as follows:
- // Use a stack allocated vector, so that we don't have dependence on our
- // allocator until our service is in place. (i.e., don't even call new until
- // after we're setup)
- void* stack_allocated_tls_data[kThreadLocalStorageSize];
+ // Some allocators, such as TCMalloc, make use of thread local storage. As a
+ // result, any attempt to call new (or malloc) will lazily cause such a system
+ // to initialize, which will include registering for a TLS key. If we are not
+ // careful here, then that request to create a key will call new back, and
+ // we'll have an infinite loop. We avoid that as follows: Use a stack
+ // allocated vector, so that we don't have dependence on our allocator until
+ // our service is in place. (i.e., don't even call new until after we're
+ // set up.)
+ TlsVectorEntry stack_allocated_tls_data[kThreadLocalStorageSize];
memset(stack_allocated_tls_data, 0, sizeof(stack_allocated_tls_data));
// Ensure that any re-entrant calls change the temp version.
PlatformThreadLocalStorage::SetTLSValue(key, stack_allocated_tls_data);
// Allocate an array to store our data.
- void** tls_data = new void*[kThreadLocalStorageSize];
+ TlsVectorEntry* tls_data = new TlsVectorEntry[kThreadLocalStorageSize];
memcpy(tls_data, stack_allocated_tls_data, sizeof(stack_allocated_tls_data));
PlatformThreadLocalStorage::SetTLSValue(key, tls_data);
return tls_data;
}
-void OnThreadExitInternal(void* value) {
- DCHECK(value);
- void** tls_data = static_cast<void**>(value);
- // Some allocators, such as TCMalloc, use TLS. As a result, when a thread
+void OnThreadExitInternal(TlsVectorEntry* tls_data) {
+ DCHECK(tls_data);
+ // Some allocators, such as TCMalloc, use TLS. As a result, when a thread
// terminates, one of the destructor calls we make may be to shut down an
- // allocator. We have to be careful that after we've shutdown all of the
- // known destructors (perchance including an allocator), that we don't call
- // the allocator and cause it to resurrect itself (with no possibly destructor
- // call to follow). We handle this problem as follows:
- // Switch to using a stack allocated vector, so that we don't have dependence
- // on our allocator after we have called all g_tls_destructors. (i.e., don't
- // even call delete[] after we're done with destructors.)
- void* stack_allocated_tls_data[kThreadLocalStorageSize];
+ // allocator. We have to be careful that after we've shutdown all of the known
+ // destructors (perchance including an allocator), that we don't call the
+ // allocator and cause it to resurrect itself (with no possible destructor
+ // call to follow). We handle this problem as follows: Switch to using a stack
+ // allocated vector, so that we don't have dependence on our allocator after
+ // we have called all g_tls_metadata destructors. (i.e., don't even call
+ // delete[] after we're done with destructors.)
+ TlsVectorEntry stack_allocated_tls_data[kThreadLocalStorageSize];
memcpy(stack_allocated_tls_data, tls_data, sizeof(stack_allocated_tls_data));
// Ensure that any re-entrant calls change the temp version.
PlatformThreadLocalStorage::TLSKey key =
@@ -130,32 +179,38 @@ void OnThreadExitInternal(void* value) {
PlatformThreadLocalStorage::SetTLSValue(key, stack_allocated_tls_data);
delete[] tls_data; // Our last dependence on an allocator.
+ // Snapshot the TLS Metadata so we don't have to lock on every access.
+ TlsMetadata tls_metadata[kThreadLocalStorageSize];
+ {
+ base::AutoLock auto_lock(g_tls_metadata_lock.Get());
+ memcpy(tls_metadata, g_tls_metadata, sizeof(g_tls_metadata));
+ }
+
int remaining_attempts = kMaxDestructorIterations;
bool need_to_scan_destructors = true;
while (need_to_scan_destructors) {
need_to_scan_destructors = false;
// Try to destroy the first-created-slot (which is slot 1) in our last
- // destructor call. That user was able to function, and define a slot with
+ // destructor call. That user was able to function, and define a slot with
// no other services running, so perhaps it is a basic service (like an
- // allocator) and should also be destroyed last. If we get the order wrong,
- // then we'll itterate several more times, so it is really not that
- // critical (but it might help).
- base::subtle::Atomic32 last_used_tls_key =
- base::subtle::NoBarrier_Load(&g_last_used_tls_key);
- for (int slot = last_used_tls_key; slot > 0; --slot) {
- void* tls_value = stack_allocated_tls_data[slot];
- if (tls_value == NULL)
+ // allocator) and should also be destroyed last. If we get the order wrong,
+ // then we'll iterate several more times, so it is really not that critical
+ // (but it might help).
+ for (int slot = 0; slot < kThreadLocalStorageSize; ++slot) {
+ void* tls_value = stack_allocated_tls_data[slot].data;
+ if (!tls_value || tls_metadata[slot].status == TlsStatus::FREE ||
+ stack_allocated_tls_data[slot].version != tls_metadata[slot].version)
continue;
base::ThreadLocalStorage::TLSDestructorFunc destructor =
- g_tls_destructors[slot];
- if (destructor == NULL)
+ tls_metadata[slot].destructor;
+ if (!destructor)
continue;
- stack_allocated_tls_data[slot] = NULL; // pre-clear the slot.
+ stack_allocated_tls_data[slot].data = nullptr; // pre-clear the slot.
destructor(tls_value);
- // Any destructor might have called a different service, which then set
- // a different slot to a non-NULL value. Hence we need to check
- // the whole vector again. This is a pthread standard.
+ // Any destructor might have called a different service, which then set a
+ // different slot to a non-null value. Hence we need to check the whole
+ // vector again. This is a pthread standard.
need_to_scan_destructors = true;
}
if (--remaining_attempts <= 0) {
@@ -165,7 +220,7 @@ void OnThreadExitInternal(void* value) {
}
// Remove our stack allocated vector.
- PlatformThreadLocalStorage::SetTLSValue(key, NULL);
+ PlatformThreadLocalStorage::SetTLSValue(key, nullptr);
}
} // namespace
@@ -184,11 +239,11 @@ void PlatformThreadLocalStorage::OnThreadExit() {
// Maybe we have never initialized TLS for this thread.
if (!tls_data)
return;
- OnThreadExitInternal(tls_data);
+ OnThreadExitInternal(static_cast<TlsVectorEntry*>(tls_data));
}
#elif defined(OS_POSIX)
void PlatformThreadLocalStorage::OnThreadExit(void* value) {
- OnThreadExitInternal(value);
+ OnThreadExitInternal(static_cast<TlsVectorEntry*>(value));
}
#endif // defined(OS_WIN)
@@ -198,49 +253,77 @@ void ThreadLocalStorage::StaticSlot::Initialize(TLSDestructorFunc destructor) {
PlatformThreadLocalStorage::TLSKey key =
base::subtle::NoBarrier_Load(&g_native_tls_key);
if (key == PlatformThreadLocalStorage::TLS_KEY_OUT_OF_INDEXES ||
- !PlatformThreadLocalStorage::GetTLSValue(key))
+ !PlatformThreadLocalStorage::GetTLSValue(key)) {
ConstructTlsVector();
+ }
// Grab a new slot.
- slot_ = base::subtle::NoBarrier_AtomicIncrement(&g_last_used_tls_key, 1);
- DCHECK_GT(slot_, 0);
+ slot_ = kInvalidSlotValue;
+ version_ = 0;
+ {
+ base::AutoLock auto_lock(g_tls_metadata_lock.Get());
+ for (int i = 0; i < kThreadLocalStorageSize; ++i) {
+ // Tracking the last assigned slot is an attempt to find the next
+ // available slot within one iteration. Under normal usage, slots remain
+ // in use for the lifetime of the process (otherwise before we reclaimed
+ // slots, we would have run out of slots). This makes it highly likely the
+ // next slot is going to be a free slot.
+ size_t slot_candidate =
+ (g_last_assigned_slot + 1 + i) % kThreadLocalStorageSize;
+ if (g_tls_metadata[slot_candidate].status == TlsStatus::FREE) {
+ g_tls_metadata[slot_candidate].status = TlsStatus::IN_USE;
+ g_tls_metadata[slot_candidate].destructor = destructor;
+ g_last_assigned_slot = slot_candidate;
+ slot_ = slot_candidate;
+ version_ = g_tls_metadata[slot_candidate].version;
+ break;
+ }
+ }
+ }
+ CHECK_NE(slot_, kInvalidSlotValue);
CHECK_LT(slot_, kThreadLocalStorageSize);
// Set up our destructor.
- g_tls_destructors[slot_] = destructor;
base::subtle::Release_Store(&initialized_, 1);
}
void ThreadLocalStorage::StaticSlot::Free() {
- // At this time, we don't reclaim old indices for TLS slots.
- // So all we need to do is wipe the destructor.
- DCHECK_GT(slot_, 0);
+ DCHECK_NE(slot_, kInvalidSlotValue);
DCHECK_LT(slot_, kThreadLocalStorageSize);
- g_tls_destructors[slot_] = NULL;
- slot_ = 0;
+ {
+ base::AutoLock auto_lock(g_tls_metadata_lock.Get());
+ g_tls_metadata[slot_].status = TlsStatus::FREE;
+ g_tls_metadata[slot_].destructor = nullptr;
+ ++(g_tls_metadata[slot_].version);
+ }
+ slot_ = kInvalidSlotValue;
base::subtle::Release_Store(&initialized_, 0);
}
void* ThreadLocalStorage::StaticSlot::Get() const {
- void** tls_data = static_cast<void**>(
+ TlsVectorEntry* tls_data = static_cast<TlsVectorEntry*>(
PlatformThreadLocalStorage::GetTLSValue(
base::subtle::NoBarrier_Load(&g_native_tls_key)));
if (!tls_data)
tls_data = ConstructTlsVector();
- DCHECK_GT(slot_, 0);
+ DCHECK_NE(slot_, kInvalidSlotValue);
DCHECK_LT(slot_, kThreadLocalStorageSize);
- return tls_data[slot_];
+ // A version mismatch means this slot was previously freed.
+ if (tls_data[slot_].version != version_)
+ return nullptr;
+ return tls_data[slot_].data;
}
void ThreadLocalStorage::StaticSlot::Set(void* value) {
- void** tls_data = static_cast<void**>(
+ TlsVectorEntry* tls_data = static_cast<TlsVectorEntry*>(
PlatformThreadLocalStorage::GetTLSValue(
base::subtle::NoBarrier_Load(&g_native_tls_key)));
if (!tls_data)
tls_data = ConstructTlsVector();
- DCHECK_GT(slot_, 0);
+ DCHECK_NE(slot_, kInvalidSlotValue);
DCHECK_LT(slot_, kThreadLocalStorageSize);
- tls_data[slot_] = value;
+ tls_data[slot_].data = value;
+ tls_data[slot_].version = version_;
}
ThreadLocalStorage::Slot::Slot(TLSDestructorFunc destructor) {
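The slot-versioning idea in the change above can be shown in isolation. A minimal sketch (hypothetical handle type, not the Chromium classes) of why the version check turns a read of a reclaimed slot into a null read:

#include <cstdint>

struct TlsVectorEntry {
  void* data;
  uint32_t version;
};

struct SlotHandle {
  int index;         // Slot index handed out at allocation time.
  uint32_t version;  // Metadata version captured at allocation time.
};

// Returns the stored pointer, or nullptr if the slot was freed and
// reassigned since |handle| was created (the versions no longer match).
void* GetSlotValue(const TlsVectorEntry* tls, SlotHandle handle) {
  if (tls[handle.index].version != handle.version)
    return nullptr;
  return tls[handle.index].data;
}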
diff --git a/chromium/base/threading/thread_local_storage.h b/chromium/base/threading/thread_local_storage.h
index bc956a73bc0..fd2a789d015 100644
--- a/chromium/base/threading/thread_local_storage.h
+++ b/chromium/base/threading/thread_local_storage.h
@@ -5,6 +5,8 @@
#ifndef BASE_THREADING_THREAD_LOCAL_STORAGE_H_
#define BASE_THREADING_THREAD_LOCAL_STORAGE_H_
+#include <stdint.h>
+
#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/macros.h"
@@ -20,9 +22,12 @@ namespace base {
namespace internal {
-// WARNING: You should *NOT* be using this class directly.
-// PlatformThreadLocalStorage is low-level abstraction to the OS's TLS
-// interface, you should instead be using ThreadLocalStorage::StaticSlot/Slot.
+// WARNING: You should *NOT* use this class directly.
+// PlatformThreadLocalStorage is a low-level abstraction of the OS's TLS
+// interface. Instead, you should use one of the following:
+// * ThreadLocalBoolean (from thread_local.h) for booleans.
+// * ThreadLocalPointer (from thread_local.h) for pointers.
+// * ThreadLocalStorage::StaticSlot/Slot for more direct control of the slot.
class BASE_EXPORT PlatformThreadLocalStorage {
public:
@@ -123,6 +128,7 @@ class BASE_EXPORT ThreadLocalStorage {
// The internals of this struct should be considered private.
base::subtle::Atomic32 initialized_;
int slot_;
+ uint32_t version_;
};
// A convenience wrapper around StaticSlot with a constructor. Can be used
diff --git a/chromium/base/threading/thread_local_storage_unittest.cc b/chromium/base/threading/thread_local_storage_unittest.cc
index 322524b10e1..335252b18ed 100644
--- a/chromium/base/threading/thread_local_storage_unittest.cc
+++ b/chromium/base/threading/thread_local_storage_unittest.cc
@@ -127,4 +127,14 @@ TEST(ThreadLocalStorageTest, MAYBE_TLSDestructors) {
tls_slot.Free(); // Stop doing callbacks to cleanup threads.
}
+TEST(ThreadLocalStorageTest, TLSReclaim) {
+ // Creates and destroys many TLS slots and ensures they are all
+ // zero-initialized.
+ for (int i = 0; i < 1000; ++i) {
+ ThreadLocalStorage::Slot slot(nullptr);
+ EXPECT_EQ(nullptr, slot.Get());
+ slot.Set(reinterpret_cast<void*>(0xBAADF00D));
+ EXPECT_EQ(reinterpret_cast<void*>(0xBAADF00D), slot.Get());
+ }
+}
+
} // namespace base
diff --git a/chromium/base/threading/thread_local_win.cc b/chromium/base/threading/thread_local_win.cc
deleted file mode 100644
index 1c74e421387..00000000000
--- a/chromium/base/threading/thread_local_win.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright (c) 2010 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/threading/thread_local.h"
-
-#include <windows.h>
-
-#include "base/logging.h"
-
-namespace base {
-namespace internal {
-
-// static
-void ThreadLocalPlatform::AllocateSlot(SlotType* slot) {
- *slot = TlsAlloc();
- CHECK_NE(*slot, TLS_OUT_OF_INDEXES);
-}
-
-// static
-void ThreadLocalPlatform::FreeSlot(SlotType slot) {
- if (!TlsFree(slot)) {
- NOTREACHED() << "Failed to deallocate tls slot with TlsFree().";
- }
-}
-
-// static
-void* ThreadLocalPlatform::GetValueFromSlot(SlotType slot) {
- return TlsGetValue(slot);
-}
-
-// static
-void ThreadLocalPlatform::SetValueInSlot(SlotType slot, void* value) {
- if (!TlsSetValue(slot, value)) {
- LOG(FATAL) << "Failed to TlsSetValue().";
- }
-}
-
-} // namespace internal
-} // namespace base
diff --git a/chromium/base/threading/thread_unittest.cc b/chromium/base/threading/thread_unittest.cc
index 92ccc87d2b5..afb8b4a0830 100644
--- a/chromium/base/threading/thread_unittest.cc
+++ b/chromium/base/threading/thread_unittest.cc
@@ -486,6 +486,12 @@ class ExternalMessageLoopThread : public Thread {
void InstallMessageLoop() { SetMessageLoop(&external_message_loop_); }
+ void VerifyUsingExternalMessageLoop(
+ bool expected_using_external_message_loop) {
+ EXPECT_EQ(expected_using_external_message_loop,
+ using_external_message_loop());
+ }
+
private:
base::MessageLoop external_message_loop_;
@@ -498,10 +504,12 @@ TEST_F(ThreadTest, ExternalMessageLoop) {
ExternalMessageLoopThread a;
EXPECT_FALSE(a.message_loop());
EXPECT_FALSE(a.IsRunning());
+ a.VerifyUsingExternalMessageLoop(false);
a.InstallMessageLoop();
EXPECT_TRUE(a.message_loop());
EXPECT_TRUE(a.IsRunning());
+ a.VerifyUsingExternalMessageLoop(true);
bool ran = false;
a.task_runner()->PostTask(
@@ -512,6 +520,7 @@ TEST_F(ThreadTest, ExternalMessageLoop) {
a.Stop();
EXPECT_FALSE(a.message_loop());
EXPECT_FALSE(a.IsRunning());
+ a.VerifyUsingExternalMessageLoop(true);
// Confirm that running any remaining tasks posted from Stop() goes smoothly
// (e.g. https://codereview.chromium.org/2135413003/#ps300001 crashed if
diff --git a/chromium/base/threading/worker_pool.h b/chromium/base/threading/worker_pool.h
index a52a41428b3..1f1b8186e05 100644
--- a/chromium/base/threading/worker_pool.h
+++ b/chromium/base/threading/worker_pool.h
@@ -9,8 +9,6 @@
#include "base/callback_forward.h"
#include "base/memory/ref_counted.h"
-class Task;
-
namespace tracked_objects {
class Location;
} // namespace tracked_objects
diff --git a/chromium/base/threading/worker_pool_posix.cc b/chromium/base/threading/worker_pool_posix.cc
index aee5caccfbc..7dd452b854f 100644
--- a/chromium/base/threading/worker_pool_posix.cc
+++ b/chromium/base/threading/worker_pool_posix.cc
@@ -90,7 +90,7 @@ void WorkerThread::ThreadMain() {
tracked_objects::TaskStopwatch stopwatch;
stopwatch.Start();
- pending_task.task.Run();
+ std::move(pending_task.task).Run();
stopwatch.Stop();
tracked_objects::ThreadData::TallyRunOnWorkerThreadIfTracking(
diff --git a/chromium/base/threading/worker_pool_posix.h b/chromium/base/threading/worker_pool_posix.h
index 628e2b6420d..0598d706a37 100644
--- a/chromium/base/threading/worker_pool_posix.h
+++ b/chromium/base/threading/worker_pool_posix.h
@@ -38,8 +38,6 @@
#include "base/threading/platform_thread.h"
#include "base/tracked_objects.h"
-class Task;
-
namespace base {
class BASE_EXPORT PosixDynamicThreadPool
diff --git a/chromium/base/threading/worker_pool_win.cc b/chromium/base/threading/worker_pool_win.cc
index 575caaa6ba8..d638c0f1e42 100644
--- a/chromium/base/threading/worker_pool_win.cc
+++ b/chromium/base/threading/worker_pool_win.cc
@@ -27,7 +27,7 @@ DWORD CALLBACK WorkItemCallback(void* param) {
tracked_objects::TaskStopwatch stopwatch;
stopwatch.Start();
- pending_task->task.Run();
+ std::move(pending_task->task).Run();
stopwatch.Stop();
g_worker_pool_running_on_this_thread.Get().Set(false);
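The std::move(...).Run() form adopted in the two worker pools above matches a run-once callback whose Run() is rvalue-ref-qualified, so bound state can be consumed rather than copied. A minimal sketch of the pattern (hypothetical type, not base::Callback):

#include <functional>
#include <utility>

// A run-once task: Run() is rvalue-ref-qualified, so it can only be invoked
// on an expiring value and may consume its bound state.
class OnceTask {
 public:
  explicit OnceTask(std::function<void()> fn) : fn_(std::move(fn)) {}
  void Run() && {
    std::function<void()> fn = std::move(fn_);
    fn();  // The stored state is released by the call.
  }

 private:
  std::function<void()> fn_;
};

// Usage mirrors the diff: OnceTask t([] { /* work */ }); std::move(t).Run();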
diff --git a/chromium/base/time/time.cc b/chromium/base/time/time.cc
index 3670f557589..4e942015fcf 100644
--- a/chromium/base/time/time.cc
+++ b/chromium/base/time/time.cc
@@ -104,27 +104,23 @@ namespace time_internal {
int64_t SaturatedAdd(TimeDelta delta, int64_t value) {
CheckedNumeric<int64_t> rv(delta.delta_);
rv += value;
- return FromCheckedNumeric(rv);
+ if (rv.IsValid())
+ return rv.ValueOrDie();
+ // Positive RHS overflows. Negative RHS underflows.
+ if (value < 0)
+ return -std::numeric_limits<int64_t>::max();
+ return std::numeric_limits<int64_t>::max();
}
int64_t SaturatedSub(TimeDelta delta, int64_t value) {
CheckedNumeric<int64_t> rv(delta.delta_);
rv -= value;
- return FromCheckedNumeric(rv);
-}
-
-int64_t FromCheckedNumeric(const CheckedNumeric<int64_t> value) {
- if (value.IsValid())
- return value.ValueUnsafe();
-
- // We could return max/min but we don't really expose what the maximum delta
- // is. Instead, return max/(-max), which is something that clients can reason
- // about.
- // TODO(rvargas) crbug.com/332611: don't use internal values.
- int64_t limit = std::numeric_limits<int64_t>::max();
- if (value.validity() == internal::RANGE_UNDERFLOW)
- limit = -limit;
- return value.ValueOrDefault(limit);
+ if (rv.IsValid())
+ return rv.ValueOrDie();
+ // Negative RHS overflows. Positive RHS underflows.
+ if (value < 0)
+ return std::numeric_limits<int64_t>::max();
+ return -std::numeric_limits<int64_t>::max();
}
} // namespace time_internal
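The saturation logic above clamps to +/-max rather than min, so clients can reason about a symmetric limit. A sketch of SaturatedAdd's branch structure, assuming GCC/Clang overflow builtins:

#include <cstdint>
#include <limits>

int64_t SaturatedAddSketch(int64_t a, int64_t b) {
  int64_t sum;
  if (!__builtin_add_overflow(a, b, &sum))
    return sum;
  // A positive RHS can only overflow, a negative RHS can only underflow;
  // note the clamp is -max rather than min, matching SaturatedAdd() above.
  return b < 0 ? -std::numeric_limits<int64_t>::max()
               : std::numeric_limits<int64_t>::max();
}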
diff --git a/chromium/base/time/time.h b/chromium/base/time/time.h
index 12e43d9e7a0..1084e951794 100644
--- a/chromium/base/time/time.h
+++ b/chromium/base/time/time.h
@@ -95,10 +95,6 @@ namespace time_internal {
BASE_EXPORT int64_t SaturatedAdd(TimeDelta delta, int64_t value);
BASE_EXPORT int64_t SaturatedSub(TimeDelta delta, int64_t value);
-// Clamp |value| on overflow and underflow conditions. The int64_t argument and
-// return value are in terms of a microsecond timebase.
-BASE_EXPORT int64_t FromCheckedNumeric(const CheckedNumeric<int64_t> value);
-
} // namespace time_internal
// TimeDelta ------------------------------------------------------------------
@@ -205,13 +201,24 @@ class BASE_EXPORT TimeDelta {
TimeDelta operator*(T a) const {
CheckedNumeric<int64_t> rv(delta_);
rv *= a;
- return TimeDelta(time_internal::FromCheckedNumeric(rv));
+ if (rv.IsValid())
+ return TimeDelta(rv.ValueOrDie());
+ // Matched sign overflows. Mismatched sign underflows.
+ if ((delta_ < 0) ^ (a < 0))
+ return TimeDelta(-std::numeric_limits<int64_t>::max());
+ return TimeDelta(std::numeric_limits<int64_t>::max());
}
template<typename T>
TimeDelta operator/(T a) const {
CheckedNumeric<int64_t> rv(delta_);
rv /= a;
- return TimeDelta(time_internal::FromCheckedNumeric(rv));
+ if (rv.IsValid())
+ return TimeDelta(rv.ValueOrDie());
+ // Matched sign overflows. Mismatched sign underflows.
+ // Special case to catch divide by zero.
+ if ((delta_ < 0) ^ (a <= 0))
+ return TimeDelta(-std::numeric_limits<int64_t>::max());
+ return TimeDelta(std::numeric_limits<int64_t>::max());
}
template<typename T>
TimeDelta& operator*=(T a) {
@@ -545,7 +552,7 @@ class BASE_EXPORT Time : public time_internal::TimeBase<Time> {
// Converts an exploded structure representing either the local time or UTC
// into a Time class. Returns false on a failure when, for example, a day of
- // month is set to 31 on a 28-30 day month.
+ // month is set to 31 on a 28-30 day month. Returns Time(0) on overflow.
static bool FromUTCExploded(const Exploded& exploded,
Time* time) WARN_UNUSED_RESULT {
return FromExploded(false, exploded, time);
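The sign test used by operator* above is a plain XOR of the operand signs: mismatched signs can only underflow, matched signs can only overflow. A worked sketch, again assuming GCC/Clang builtins:

#include <cstdint>
#include <limits>

int64_t SaturatedMulSketch(int64_t delta, int64_t a) {
  int64_t product;
  if (!__builtin_mul_overflow(delta, a, &product))
    return product;
  // (delta < 0) ^ (a < 0) is true exactly when the signs differ; the true
  // result is then negative, so clamp to -max instead of max.
  return ((delta < 0) ^ (a < 0)) ? -std::numeric_limits<int64_t>::max()
                                 : std::numeric_limits<int64_t>::max();
}

operator/ widens its test to (a <= 0) so that division by zero, which also invalidates the CheckedNumeric, falls into one of the two saturated branches instead of being left unhandled.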
diff --git a/chromium/base/time/time_mac.cc b/chromium/base/time/time_mac.cc
index 5803acd3511..c75423df9c2 100644
--- a/chromium/base/time/time_mac.cc
+++ b/chromium/base/time/time_mac.cc
@@ -190,9 +190,18 @@ bool Time::FromExploded(bool is_local, const Exploded& exploded, Time* time) {
exploded.millisecond);
CFAbsoluteTime seconds = absolute_time + kCFAbsoluteTimeIntervalSince1970;
- base::Time converted_time =
- Time(static_cast<int64_t>(seconds * kMicrosecondsPerSecond) +
- kWindowsEpochDeltaMicroseconds);
+ // CFAbsoluteTime is a typedef of double. Convert seconds to microseconds
+ // and then cast to int64_t. If the value does not fit in an int64_t, fail
+ // to avoid overflow.
+ double microseconds =
+ (seconds * kMicrosecondsPerSecond) + kWindowsEpochDeltaMicroseconds;
+ if (microseconds > std::numeric_limits<int64_t>::max() ||
+ microseconds < std::numeric_limits<int64_t>::min()) {
+ *time = Time(0);
+ return false;
+ }
+
+ base::Time converted_time = Time(static_cast<int64_t>(microseconds));
// If |exploded.day_of_month| is set to 31
// on a 28-30 day month, it will return the first day of the next month.
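The range check above guards the double-to-int64_t cast, which is undefined on overflow. A standalone sketch of the same guard, slightly tightened at the upper bound (an assumption of this sketch, not part of the change above):

#include <cmath>
#include <cstdint>
#include <limits>

// static_cast<double>(INT64_MAX) rounds up to exactly 2^63, so the upper
// comparison uses >=; INT64_MIN is exactly representable, so < suffices.
bool DoubleFitsInt64(double v, int64_t* out) {
  if (std::isnan(v) ||
      v >= static_cast<double>(std::numeric_limits<int64_t>::max()) ||
      v < static_cast<double>(std::numeric_limits<int64_t>::min()))
    return false;
  *out = static_cast<int64_t>(v);
  return true;
}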
diff --git a/chromium/base/time/time_posix.cc b/chromium/base/time/time_posix.cc
index ac0e99f7eb6..4caf3866c52 100644
--- a/chromium/base/time/time_posix.cc
+++ b/chromium/base/time/time_posix.cc
@@ -242,7 +242,6 @@ bool Time::FromExploded(bool is_local, const Exploded& exploded, Time* time) {
timestruct.tm_zone = NULL; // not a POSIX field, so mktime/timegm ignore
#endif
- int64_t milliseconds;
SysTime seconds;
// Certain exploded dates do not really exist due to daylight saving times,
@@ -280,6 +279,7 @@ bool Time::FromExploded(bool is_local, const Exploded& exploded, Time* time) {
// return is the best that can be done here. It's not ideal, but it's better
// than failing here or ignoring the overflow case and treating each time
// overflow as one second prior to the epoch.
+ int64_t milliseconds = 0;
if (seconds == -1 &&
(exploded.year < 1969 || exploded.year > 1970)) {
// If exploded.year is 1969 or 1970, take -1 as correct, with the
@@ -312,13 +312,25 @@ bool Time::FromExploded(bool is_local, const Exploded& exploded, Time* time) {
milliseconds += (kMillisecondsPerSecond - 1);
}
} else {
- milliseconds = seconds * kMillisecondsPerSecond + exploded.millisecond;
+ base::CheckedNumeric<int64_t> checked_millis = seconds;
+ checked_millis *= kMillisecondsPerSecond;
+ checked_millis += exploded.millisecond;
+ if (!checked_millis.IsValid()) {
+ *time = base::Time(0);
+ return false;
+ }
+ milliseconds = checked_millis.ValueOrDie();
}
- // Adjust from Unix (1970) to Windows (1601) epoch.
- base::Time converted_time =
- Time((milliseconds * kMicrosecondsPerMillisecond) +
- kWindowsEpochDeltaMicroseconds);
+ // Adjust from Unix (1970) to Windows (1601) epoch avoiding overflows.
+ base::CheckedNumeric<int64_t> checked_microseconds_win_epoch = milliseconds;
+ checked_microseconds_win_epoch *= kMicrosecondsPerMillisecond;
+ checked_microseconds_win_epoch += kWindowsEpochDeltaMicroseconds;
+ if (!checked_microseconds_win_epoch.IsValid()) {
+ *time = base::Time(0);
+ return false;
+ }
+ base::Time converted_time(checked_microseconds_win_epoch.ValueOrDie());
// If |exploded.day_of_month| is set to 31 on a 28-30 day month, it will
// return the first day of the next month. Thus round-trip the time and
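The two CheckedNumeric chains above amount to: ms = seconds * 1000 + exploded.millisecond, then us = ms * 1000 + kWindowsEpochDeltaMicroseconds, failing on any intermediate overflow. A sketch with overflow builtins; the constant below is written out for illustration and is assumed to match the value used by base::Time:

#include <cstdint>

// Microseconds between the Windows epoch (1601-01-01) and the Unix epoch
// (1970-01-01); assumed equal to Time::kWindowsEpochDeltaMicroseconds.
constexpr int64_t kEpochDeltaMicros = INT64_C(11644473600000000);

bool UnixToWindowsMicros(int64_t seconds, int64_t millis, int64_t* out) {
  int64_t ms, us;
  if (__builtin_mul_overflow(seconds, INT64_C(1000), &ms) ||
      __builtin_add_overflow(ms, millis, &ms) ||
      __builtin_mul_overflow(ms, INT64_C(1000), &us) ||
      __builtin_add_overflow(us, kEpochDeltaMicros, &us))
    return false;  // The caller reports failure and sets Time(0), as above.
  *out = us;
  return true;
}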
diff --git a/chromium/base/time/time_unittest.cc b/chromium/base/time/time_unittest.cc
index 313eaea51f7..a42d701f25a 100644
--- a/chromium/base/time/time_unittest.cc
+++ b/chromium/base/time/time_unittest.cc
@@ -54,6 +54,11 @@ TEST(TimeTestOutOfBounds, FromExplodedOutOfBoundsTime) {
{{2016, 10, 0, 25, 7, 47, 234, 0}, false},
// Milliseconds are too large
{{2016, 10, 0, 25, 6, 31, 23, 1643}, false},
+ // Test overflow. Time is valid, but overflow case
+ // results in Time(0).
+ {{9840633, 1, 0, 1, 1, 1, 0, 0}, true},
+ // Underflow will fail as well.
+ {{-9840633, 1, 0, 1, 1, 1, 0, 0}, true},
};
for (const auto& test : kDateTestData) {
diff --git a/chromium/base/time/time_win.cc b/chromium/base/time/time_win.cc
index ba6fc1da3bf..19144cb2792 100644
--- a/chromium/base/time/time_win.cc
+++ b/chromium/base/time/time_win.cc
@@ -37,6 +37,7 @@
#include <mmsystem.h>
#include <stdint.h>
+#include "base/atomicops.h"
#include "base/bit_cast.h"
#include "base/cpu.h"
#include "base/lazy_instance.h"
@@ -109,6 +110,12 @@ uint64_t QPCNowRaw() {
return perf_counter_now.QuadPart;
}
+bool SafeConvertToWord(int in, WORD* out) {
+ base::CheckedNumeric<WORD> result = in;
+ *out = result.ValueOrDefault(std::numeric_limits<WORD>::max());
+ return result.IsValid();
+}
+
} // namespace
// Time -----------------------------------------------------------------------
@@ -237,16 +244,20 @@ bool Time::IsHighResolutionTimerInUse() {
// static
bool Time::FromExploded(bool is_local, const Exploded& exploded, Time* time) {
// Create the system struct representing our exploded time. It will either be
- // in local time or UTC.
+ // in local time or UTC. If casting from int to WORD results in overflow,
+ // fail and return Time(0).
SYSTEMTIME st;
- st.wYear = static_cast<WORD>(exploded.year);
- st.wMonth = static_cast<WORD>(exploded.month);
- st.wDayOfWeek = static_cast<WORD>(exploded.day_of_week);
- st.wDay = static_cast<WORD>(exploded.day_of_month);
- st.wHour = static_cast<WORD>(exploded.hour);
- st.wMinute = static_cast<WORD>(exploded.minute);
- st.wSecond = static_cast<WORD>(exploded.second);
- st.wMilliseconds = static_cast<WORD>(exploded.millisecond);
+ if (!SafeConvertToWord(exploded.year, &st.wYear) ||
+ !SafeConvertToWord(exploded.month, &st.wMonth) ||
+ !SafeConvertToWord(exploded.day_of_week, &st.wDayOfWeek) ||
+ !SafeConvertToWord(exploded.day_of_month, &st.wDay) ||
+ !SafeConvertToWord(exploded.hour, &st.wHour) ||
+ !SafeConvertToWord(exploded.minute, &st.wMinute) ||
+ !SafeConvertToWord(exploded.second, &st.wSecond) ||
+ !SafeConvertToWord(exploded.millisecond, &st.wMilliseconds)) {
+ *time = base::Time(0);
+ return false;
+ }
FILETIME ft;
bool success = true;
@@ -323,34 +334,69 @@ DWORD timeGetTimeWrapper() {
DWORD (*g_tick_function)(void) = &timeGetTimeWrapper;
-// Accumulation of time lost due to rollover (in milliseconds).
-int64_t g_rollover_ms = 0;
-
-// The last timeGetTime value we saw, to detect rollover.
-DWORD g_last_seen_now = 0;
-
-// Lock protecting rollover_ms and last_seen_now.
-// Note: this is a global object, and we usually avoid these. However, the time
-// code is low-level, and we don't want to use Singletons here (it would be too
-// easy to use a Singleton without even knowing it, and that may lead to many
-// gotchas). Its impact on startup time should be negligible due to low-level
-// nature of time code.
-base::Lock g_rollover_lock;
+// A structure holding the most significant bits of "last seen" and a
+// "rollover" counter.
+union LastTimeAndRolloversState {
+ // The state as a single 32-bit opaque value.
+ base::subtle::Atomic32 as_opaque_32;
+
+ // The state as usable values.
+ struct {
+ // The top 8 bits of the "last" time. This is enough to check for rollovers
+ // and the small bit-size means fewer CompareAndSwap operations to store
+ // changes in state, which in turn makes for fewer retries.
+ uint8_t last_8;
+ // A count of the number of detected rollovers. Using this as bits 47-32
+ // of the upper half of a 64-bit value results in a 48-bit tick counter.
+ // This extends the total rollover period from about 49 days to about 8800
+ // years while still allowing it to be stored with last_8 in a single
+ // 32-bit value.
+ uint16_t rollovers;
+ } as_values;
+};
+base::subtle::Atomic32 g_last_time_and_rollovers = 0;
+static_assert(
+ sizeof(LastTimeAndRolloversState) <= sizeof(g_last_time_and_rollovers),
+ "LastTimeAndRolloversState does not fit in a single atomic word");
// We use timeGetTime() to implement TimeTicks::Now(). This can be problematic
// because it returns the number of milliseconds since Windows has started,
// which will roll over the 32-bit value every ~49 days. We try to track
// rollover ourselves, which works if TimeTicks::Now() is called at least every
-// 49 days.
+// 48.8 days (not 49 days because only changes in the top 8 bits get noticed).
TimeDelta RolloverProtectedNow() {
- base::AutoLock locked(g_rollover_lock);
- // We should hold the lock while calling tick_function to make sure that
- // we keep last_seen_now stay correctly in sync.
- DWORD now = g_tick_function();
- if (now < g_last_seen_now)
- g_rollover_ms += 0x100000000I64; // ~49.7 days.
- g_last_seen_now = now;
- return TimeDelta::FromMilliseconds(now + g_rollover_ms);
+ LastTimeAndRolloversState state;
+ DWORD now; // DWORD is always unsigned 32 bits.
+
+ while (true) {
+ // Fetch the "now" and "last" tick values, updating "last" with "now" and
+ // incrementing the "rollovers" counter if the tick-value has wrapped back
+ // around. Atomic operations ensure that both "last" and "rollovers" are
+ // always updated together.
+ int32_t original = base::subtle::Acquire_Load(&g_last_time_and_rollovers);
+ state.as_opaque_32 = original;
+ now = g_tick_function();
+ uint8_t now_8 = static_cast<uint8_t>(now >> 24);
+ if (now_8 < state.as_values.last_8)
+ ++state.as_values.rollovers;
+ state.as_values.last_8 = now_8;
+
+ // If the state hasn't changed, exit the loop.
+ if (state.as_opaque_32 == original)
+ break;
+
+ // Save the changed state. If the existing value is unchanged from the
+ // original, exit the loop.
+ int32_t check = base::subtle::Release_CompareAndSwap(
+ &g_last_time_and_rollovers, original, state.as_opaque_32);
+ if (check == original)
+ break;
+
+ // Another thread has done something in between, so retry from the top.
+ }
+
+ return TimeDelta::FromMilliseconds(
+ now + (static_cast<uint64_t>(state.as_values.rollovers) << 32));
}
// Discussion of tick counter options on Windows:
@@ -483,11 +529,9 @@ TimeDelta InitialNowFunction() {
// static
TimeTicks::TickFunctionType TimeTicks::SetMockTickFunction(
TickFunctionType ticker) {
- base::AutoLock locked(g_rollover_lock);
TickFunctionType old = g_tick_function;
g_tick_function = ticker;
- g_rollover_ms = 0;
- g_last_seen_now = 0;
+ base::subtle::NoBarrier_Store(&g_last_time_and_rollovers, 0);
return old;
}
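The lock-free scheme above packs the top 8 bits of the last tick and a 16-bit rollover count into one atomic word; the 64-bit tick value is reconstructed as now + (rollovers << 32). A sketch of just the rollover detection and reconstruction (single-threaded for clarity, no atomics):

#include <cstdint>

struct RolloverState {
  uint8_t last_8;      // Top 8 bits of the last observed 32-bit tick value.
  uint16_t rollovers;  // Count of detected 32-bit wraparounds.
};

uint64_t TicksWithRollover(RolloverState* s, uint32_t now) {
  uint8_t now_8 = static_cast<uint8_t>(now >> 24);
  if (now_8 < s->last_8)  // The 32-bit tick counter wrapped around.
    ++s->rollovers;
  s->last_8 = now_8;
  return now + (static_cast<uint64_t>(s->rollovers) << 32);
}

Comparing only the top 8 bits is what shrinks the guaranteed call interval from ~49 days to ~48.8 days: a wrap is only noticed once it changes those bits.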
diff --git a/chromium/base/timer/timer.cc b/chromium/base/timer/timer.cc
index e554905ffff..aba8fc8d9fb 100644
--- a/chromium/base/timer/timer.cc
+++ b/chromium/base/timer/timer.cc
@@ -6,11 +6,15 @@
#include <stddef.h>
+#include <utility>
+
#include "base/logging.h"
+#include "base/memory/ptr_util.h"
#include "base/memory/ref_counted.h"
#include "base/single_thread_task_runner.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_task_runner_handle.h"
+#include "base/time/tick_clock.h"
namespace base {
@@ -60,26 +64,36 @@ class BaseTimerTaskInternal {
};
Timer::Timer(bool retain_user_task, bool is_repeating)
- : scheduled_task_(NULL),
+ : Timer(retain_user_task, is_repeating, nullptr) {}
+
+Timer::Timer(bool retain_user_task, bool is_repeating, TickClock* tick_clock)
+ : scheduled_task_(nullptr),
thread_id_(0),
is_repeating_(is_repeating),
retain_user_task_(retain_user_task),
- is_running_(false) {
-}
+ tick_clock_(tick_clock),
+ is_running_(false) {}
Timer::Timer(const tracked_objects::Location& posted_from,
TimeDelta delay,
const base::Closure& user_task,
bool is_repeating)
- : scheduled_task_(NULL),
+ : Timer(posted_from, delay, user_task, is_repeating, nullptr) {}
+
+Timer::Timer(const tracked_objects::Location& posted_from,
+ TimeDelta delay,
+ const base::Closure& user_task,
+ bool is_repeating,
+ TickClock* tick_clock)
+ : scheduled_task_(nullptr),
posted_from_(posted_from),
delay_(delay),
user_task_(user_task),
thread_id_(0),
is_repeating_(is_repeating),
retain_user_task_(true),
- is_running_(false) {
-}
+ tick_clock_(tick_clock),
+ is_running_(false) {}
Timer::~Timer() {
StopAndAbandon();
@@ -123,7 +137,7 @@ void Timer::Reset() {
// Set the new desired_run_time_.
if (delay_ > TimeDelta::FromMicroseconds(0))
- desired_run_time_ = TimeTicks::Now() + delay_;
+ desired_run_time_ = Now() + delay_;
else
desired_run_time_ = TimeTicks();
@@ -139,6 +153,10 @@ void Timer::Reset() {
PostNewScheduledTask(delay_);
}
+TimeTicks Timer::Now() const {
+ return tick_clock_ ? tick_clock_->NowTicks() : TimeTicks::Now();
+}
+
void Timer::SetTaskInfo(const tracked_objects::Location& posted_from,
TimeDelta delay,
const base::Closure& user_task) {
@@ -155,7 +173,7 @@ void Timer::PostNewScheduledTask(TimeDelta delay) {
GetTaskRunner()->PostDelayedTask(posted_from_,
base::Bind(&BaseTimerTaskInternal::Run, base::Owned(scheduled_task_)),
delay);
- scheduled_run_time_ = desired_run_time_ = TimeTicks::Now() + delay;
+ scheduled_run_time_ = desired_run_time_ = Now() + delay;
} else {
GetTaskRunner()->PostTask(posted_from_,
base::Bind(&BaseTimerTaskInternal::Run, base::Owned(scheduled_task_)));
@@ -189,9 +207,9 @@ void Timer::RunScheduledTask() {
// First check if we need to delay the task because of a new target time.
if (desired_run_time_ > scheduled_run_time_) {
- // TimeTicks::Now() can be expensive, so only call it if we know the user
- // has changed the desired_run_time_.
- TimeTicks now = TimeTicks::Now();
+ // Now() can be expensive, so only call it if we know the user has changed
+ // the desired_run_time_.
+ TimeTicks now = Now();
// Task runner may have called us late anyway, so only post a continuation
// task if the desired_run_time_ is in the future.
if (desired_run_time_ > now) {
diff --git a/chromium/base/timer/timer.h b/chromium/base/timer/timer.h
index 661829b513d..50aedbd4cec 100644
--- a/chromium/base/timer/timer.h
+++ b/chromium/base/timer/timer.h
@@ -49,6 +49,8 @@
// because they're flaky on the buildbot, but when you run them locally you
// should be able to tell the difference.
+#include <memory>
+
#include "base/base_export.h"
#include "base/bind.h"
#include "base/bind_helpers.h"
@@ -61,6 +63,7 @@ namespace base {
class BaseTimerTaskInternal;
class SingleThreadTaskRunner;
+class TickClock;
//-----------------------------------------------------------------------------
// This class wraps MessageLoop::PostDelayedTask to manage delayed and repeating
@@ -71,14 +74,23 @@ class BASE_EXPORT Timer {
public:
// Construct a timer in repeating or one-shot mode. Start or SetTaskInfo must
// be called later to set task info. |retain_user_task| determines whether the
- // user_task is retained or reset when it runs or stops.
+ // user_task is retained or reset when it runs or stops. If |tick_clock| is
+ // provided, it is used instead of TimeTicks::Now() to get TimeTicks when
+ // scheduling tasks.
Timer(bool retain_user_task, bool is_repeating);
+ Timer(bool retain_user_task, bool is_repeating, TickClock* tick_clock);
- // Construct a timer with retained task info.
+ // Construct a timer with retained task info. If |tick_clock| is provided, it
+ // is used instead of TimeTicks::Now() to get TimeTicks when scheduling tasks.
Timer(const tracked_objects::Location& posted_from,
TimeDelta delay,
const base::Closure& user_task,
bool is_repeating);
+ Timer(const tracked_objects::Location& posted_from,
+ TimeDelta delay,
+ const base::Closure& user_task,
+ bool is_repeating,
+ TickClock* tick_clock);
virtual ~Timer();
@@ -111,6 +123,9 @@ class BASE_EXPORT Timer {
const TimeTicks& desired_run_time() const { return desired_run_time_; }
protected:
+ // Returns the current tick count.
+ TimeTicks Now() const;
+
// Used to initiate a new delayed task. This has the side-effect of disabling
// scheduled_task_ if it is non-null.
void SetTaskInfo(const tracked_objects::Location& posted_from,
@@ -191,6 +206,9 @@ class BASE_EXPORT Timer {
// If true, hold on to the user_task_ closure object for reuse.
const bool retain_user_task_;
+ // The tick clock used to calculate the run time for scheduled tasks.
+ TickClock* const tick_clock_;
+
// If true, user_task_ is scheduled to run sometime in the future.
bool is_running_;
@@ -210,8 +228,8 @@ class BaseTimerMethodPointer : public Timer {
using Timer::Start;
enum RepeatMode { ONE_SHOT, REPEATING };
- BaseTimerMethodPointer(RepeatMode mode)
- : Timer(mode == REPEATING, mode == REPEATING) {}
+ BaseTimerMethodPointer(RepeatMode mode, TickClock* tick_clock)
+ : Timer(mode == REPEATING, mode == REPEATING, tick_clock) {}
// Start the timer to run at the given |delay| from now. If the timer is
// already running, it will be replaced to call a task formed from
@@ -230,14 +248,18 @@ class BaseTimerMethodPointer : public Timer {
// A simple, one-shot timer. See usage notes at the top of the file.
class OneShotTimer : public BaseTimerMethodPointer {
public:
- OneShotTimer() : BaseTimerMethodPointer(ONE_SHOT) {}
+ OneShotTimer() : OneShotTimer(nullptr) {}
+ explicit OneShotTimer(TickClock* tick_clock)
+ : BaseTimerMethodPointer(ONE_SHOT, tick_clock) {}
};
//-----------------------------------------------------------------------------
// A simple, repeating timer. See usage notes at the top of the file.
class RepeatingTimer : public BaseTimerMethodPointer {
public:
- RepeatingTimer() : BaseTimerMethodPointer(REPEATING) {}
+ RepeatingTimer() : RepeatingTimer(nullptr) {}
+ explicit RepeatingTimer(TickClock* tick_clock)
+ : BaseTimerMethodPointer(REPEATING, tick_clock) {}
};
//-----------------------------------------------------------------------------
@@ -258,10 +280,19 @@ class DelayTimer : protected Timer {
TimeDelta delay,
Receiver* receiver,
void (Receiver::*method)())
+ : DelayTimer(posted_from, delay, receiver, method, nullptr) {}
+
+ template <class Receiver>
+ DelayTimer(const tracked_objects::Location& posted_from,
+ TimeDelta delay,
+ Receiver* receiver,
+ void (Receiver::*method)(),
+ TickClock* tick_clock)
: Timer(posted_from,
delay,
base::Bind(method, base::Unretained(receiver)),
- false) {}
+ false,
+ tick_clock) {}
void Reset() override;
};
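The TickClock parameter turns the timer's notion of "now" into an injected dependency. A minimal sketch of a fake clock with stand-in types (illustrative only; the unit tests below use the real TestMockTimeTaskRunner::GetMockTickClock()):

#include <cstdint>

// Stand-ins for base::TimeTicks / base::TickClock, for illustration only.
struct FakeTicks {
  int64_t us = 0;
};

class FakeTickClock {
 public:
  FakeTicks NowTicks() const { return now_; }
  void Advance(int64_t us) { now_.us += us; }

 private:
  FakeTicks now_;
};

A Timer constructed with a non-null tick clock computes desired_run_time_ from tick_clock->NowTicks() instead of TimeTicks::Now(), which is what lets tests fast-forward time deterministically.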
diff --git a/chromium/base/timer/timer_unittest.cc b/chromium/base/timer/timer_unittest.cc
index 6fcd25b93a3..b34da20ba3f 100644
--- a/chromium/base/timer/timer_unittest.cc
+++ b/chromium/base/timer/timer_unittest.cc
@@ -8,11 +8,15 @@
#include <memory>
+#include "base/bind.h"
+#include "base/bind_helpers.h"
#include "base/macros.h"
#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
+#include "base/test/test_mock_time_task_runner.h"
#include "base/test/test_simple_task_runner.h"
#include "base/threading/thread_task_runner_handle.h"
+#include "base/time/tick_clock.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -32,6 +36,17 @@ const base::MessageLoop::Type testing_message_loops[] = {
const int kNumTestingMessageLoops = arraysize(testing_message_loops);
+class Receiver {
+ public:
+ Receiver() : count_(0) {}
+ void OnCalled() { count_++; }
+ bool WasCalled() { return count_ > 0; }
+ int TimesCalled() { return count_; }
+
+ private:
+ int count_;
+};
+
class OneShotTimerTester {
public:
explicit OneShotTimerTester(bool* did_run, unsigned milliseconds = 10)
@@ -341,6 +356,21 @@ TEST(TimerTest, OneShotTimer_CustomTaskRunner) {
EXPECT_TRUE(did_run);
}
+TEST(TimerTest, OneShotTimerWithTickClock) {
+ scoped_refptr<base::TestMockTimeTaskRunner> task_runner(
+ new base::TestMockTimeTaskRunner(base::Time::Now(),
+ base::TimeTicks::Now()));
+ std::unique_ptr<base::TickClock> tick_clock(task_runner->GetMockTickClock());
+ base::MessageLoop message_loop;
+ message_loop.SetTaskRunner(task_runner);
+ Receiver receiver;
+ base::OneShotTimer timer(tick_clock.get());
+ timer.Start(FROM_HERE, base::TimeDelta::FromSeconds(1),
+ base::Bind(&Receiver::OnCalled, base::Unretained(&receiver)));
+ task_runner->FastForwardBy(base::TimeDelta::FromSeconds(1));
+ EXPECT_TRUE(receiver.WasCalled());
+}
+
TEST(TimerTest, RepeatingTimer) {
for (int i = 0; i < kNumTestingMessageLoops; i++) {
RunTest_RepeatingTimer(testing_message_loops[i],
@@ -369,6 +399,24 @@ TEST(TimerTest, RepeatingTimerZeroDelay_Cancel) {
}
}
+TEST(TimerTest, RepeatingTimerWithTickClock) {
+ scoped_refptr<base::TestMockTimeTaskRunner> task_runner(
+ new base::TestMockTimeTaskRunner(base::Time::Now(),
+ base::TimeTicks::Now()));
+ std::unique_ptr<base::TickClock> tick_clock(task_runner->GetMockTickClock());
+ base::MessageLoop message_loop;
+ message_loop.SetTaskRunner(task_runner);
+ Receiver receiver;
+ const int expected_times_called = 10;
+ base::RepeatingTimer timer(tick_clock.get());
+ timer.Start(FROM_HERE, base::TimeDelta::FromSeconds(1),
+ base::Bind(&Receiver::OnCalled, base::Unretained(&receiver)));
+ task_runner->FastForwardBy(
+ base::TimeDelta::FromSeconds(expected_times_called));
+ timer.Stop();
+ EXPECT_EQ(expected_times_called, receiver.TimesCalled());
+}
+
TEST(TimerTest, DelayTimer_NoCall) {
for (int i = 0; i < kNumTestingMessageLoops; i++) {
RunTest_DelayTimer_NoCall(testing_message_loops[i]);
@@ -394,6 +442,26 @@ TEST(TimerTest, DelayTimer_Deleted) {
}
}
+TEST(TimerTest, DelayTimerWithTickClock) {
+ scoped_refptr<base::TestMockTimeTaskRunner> task_runner(
+ new base::TestMockTimeTaskRunner(base::Time::Now(),
+ base::TimeTicks::Now()));
+ std::unique_ptr<base::TickClock> tick_clock(task_runner->GetMockTickClock());
+ base::MessageLoop message_loop;
+ message_loop.SetTaskRunner(task_runner);
+ Receiver receiver;
+ base::DelayTimer timer(FROM_HERE, base::TimeDelta::FromSeconds(1), &receiver,
+ &Receiver::OnCalled, tick_clock.get());
+ task_runner->FastForwardBy(base::TimeDelta::FromMilliseconds(999));
+ EXPECT_FALSE(receiver.WasCalled());
+ timer.Reset();
+ task_runner->FastForwardBy(base::TimeDelta::FromMilliseconds(999));
+ EXPECT_FALSE(receiver.WasCalled());
+ timer.Reset();
+ task_runner->FastForwardBy(base::TimeDelta::FromSeconds(1));
+ EXPECT_TRUE(receiver.WasCalled());
+}
+
TEST(TimerTest, MessageLoopShutdown) {
// This test is designed to verify that shutdown of the
// message loop does not cause crashes if there were pending
diff --git a/chromium/base/trace_event/auto_open_close_event.cc b/chromium/base/trace_event/auto_open_close_event.cc
new file mode 100644
index 00000000000..f2794f497cb
--- /dev/null
+++ b/chromium/base/trace_event/auto_open_close_event.cc
@@ -0,0 +1,52 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/auto_open_close_event.h"
+
+#include "base/macros.h"
+#include "base/time/time.h"
+#include "base/trace_event/trace_event.h"
+
+namespace base {
+namespace trace_event {
+
+AutoOpenCloseEvent::AutoOpenCloseEvent(AutoOpenCloseEvent::Type type,
+ const char* category, const char* event_name):
+ category_(category),
+ event_name_(event_name),
+ weak_factory_(this) {
+ base::trace_event::TraceLog::GetInstance()->AddAsyncEnabledStateObserver(
+ weak_factory_.GetWeakPtr());
+}
+
+AutoOpenCloseEvent::~AutoOpenCloseEvent() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ base::trace_event::TraceLog::GetInstance()->RemoveAsyncEnabledStateObserver(
+ this);
+}
+
+void AutoOpenCloseEvent::Begin() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ start_time_ = base::TimeTicks::Now();
+ TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP0(
+ category_, event_name_, static_cast<void*>(this), start_time_);
+}
+
+void AutoOpenCloseEvent::End() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ TRACE_EVENT_ASYNC_END0(category_, event_name_, static_cast<void*>(this));
+ start_time_ = base::TimeTicks();
+}
+
+void AutoOpenCloseEvent::OnTraceLogEnabled() {
+ DCHECK(thread_checker_.CalledOnValidThread());
+ if (start_time_.ToInternalValue() != 0)
+ TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP0(
+ category_, event_name_, static_cast<void*>(this), start_time_);
+}
+
+void AutoOpenCloseEvent::OnTraceLogDisabled() {}
+
+} // namespace trace_event
+} // namespace base
\ No newline at end of file
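A hedged usage sketch of the class added above; the category and event names here are illustrative, and per the header the strings must have indefinite lifetime:

#include "base/trace_event/auto_open_close_event.h"

void TraceLongRunningSetup() {
  // "startup" / "LongRunningSetup" are hypothetical literals.
  static base::trace_event::AutoOpenCloseEvent event(
      base::trace_event::AutoOpenCloseEvent::ASYNC, "startup",
      "LongRunningSetup");
  event.Begin();  // Re-emitted with the recorded start time if tracing is
                  // enabled after this point ("auto-open").
  // ... long-running work ...
  event.End();    // Per the header, an un-ended event is closed
                  // automatically when tracing ends ("auto-close").
}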
diff --git a/chromium/base/trace_event/auto_open_close_event.h b/chromium/base/trace_event/auto_open_close_event.h
new file mode 100644
index 00000000000..795a4948ac0
--- /dev/null
+++ b/chromium/base/trace_event/auto_open_close_event.h
@@ -0,0 +1,57 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_AUTO_OPEN_CLOSE_EVENT_H_
+#define BASE_AUTO_OPEN_CLOSE_EVENT_H_
+
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+#include "base/threading/thread_checker.h"
+#include "base/time/time.h"
+#include "base/trace_event/trace_event.h"
+
+namespace base {
+namespace trace_event {
+
+// Class for tracing events that support "auto-opening" and "auto-closing".
+// "auto-opening" = if the trace event is started (call Begin() before
+// tracing is started,the trace event will be opened, with the start time
+// being the time that the trace event was actually started.
+// "auto-closing" = if the trace event is started but not ended by the time
+// tracing ends, then the trace event will be automatically closed at the
+// end of tracing.
+class BASE_EXPORT AutoOpenCloseEvent
+ : public TraceLog::AsyncEnabledStateObserver {
+ public:
+ enum Type {
+ ASYNC
+ };
+
+ // As in the rest of the tracing macros, the const char* arguments here
+ // must be pointers to indefinitely lived strings (e.g. hard-coded string
+ // literals are okay, but not strings created by c_str())
+ AutoOpenCloseEvent(Type type, const char* category, const char* event_name);
+ ~AutoOpenCloseEvent() override;
+
+ void Begin();
+ void End();
+
+ // AsyncEnabledStateObserver implementation
+ void OnTraceLogEnabled() override;
+ void OnTraceLogDisabled() override;
+
+ private:
+ const char* const category_;
+ const char* const event_name_;
+ base::TimeTicks start_time_;
+ base::ThreadChecker thread_checker_;
+ WeakPtrFactory<AutoOpenCloseEvent> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(AutoOpenCloseEvent);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_AUTO_OPEN_CLOSE_EVENT_H_
\ No newline at end of file
diff --git a/chromium/base/trace_event/category_registry.cc b/chromium/base/trace_event/category_registry.cc
new file mode 100644
index 00000000000..87715fc806a
--- /dev/null
+++ b/chromium/base/trace_event/category_registry.cc
@@ -0,0 +1,162 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/category_registry.h"
+
+#include <string.h>
+
+#include <type_traits>
+
+#include "base/atomicops.h"
+#include "base/debug/leak_annotations.h"
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/synchronization/lock.h"
+#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
+#include "base/trace_event/trace_category.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+constexpr size_t kMaxCategories = 200;
+const int kNumBuiltinCategories = 4;
+
+// |g_categories| might end up creating dynamic initializers if not POD.
+static_assert(std::is_pod<TraceCategory>::value, "TraceCategory must be POD");
+
+// These entries must be kept consistent with the kCategory* consts below.
+TraceCategory g_categories[kMaxCategories] = {
+ {0, 0, "tracing categories exhausted; must increase kMaxCategories"},
+ {0, 0, "tracing already shutdown"}, // See kCategoryAlreadyShutdown below.
+ {0, 0, "__metadata"}, // See kCategoryMetadata below.
+ {0, 0, "toplevel"}, // Warmup the toplevel category.
+};
+
+base::subtle::AtomicWord g_category_index = kNumBuiltinCategories;
+
+base::LazyInstance<base::Lock>::Leaky g_category_lock =
+ LAZY_INSTANCE_INITIALIZER;
+
+bool IsValidCategoryPtr(const TraceCategory* category) {
+ // If any of these are hit, something has cached a corrupt category pointer.
+ uintptr_t ptr = reinterpret_cast<uintptr_t>(category);
+ return ptr % sizeof(void*) == 0 &&
+ ptr >= reinterpret_cast<uintptr_t>(&g_categories[0]) &&
+ ptr <= reinterpret_cast<uintptr_t>(&g_categories[kMaxCategories - 1]);
+}
+
+} // namespace
+
+// static
+TraceCategory* const CategoryRegistry::kCategoryExhausted = &g_categories[0];
+TraceCategory* const CategoryRegistry::kCategoryAlreadyShutdown =
+ &g_categories[1];
+TraceCategory* const CategoryRegistry::kCategoryMetadata = &g_categories[2];
+
+// static
+void CategoryRegistry::Initialize() {
+ // Trace is enabled or disabled on one thread while other threads are
+ // accessing the enabled flag. We don't care whether edge-case events are
+ // traced or not, so we allow races on the enabled flag to keep the trace
+ // macros fast.
+ for (size_t i = 0; i < kMaxCategories; ++i) {
+ ANNOTATE_BENIGN_RACE(g_categories[i].state_ptr(),
+ "trace_event category enabled");
+ // If this DCHECK is hit in a test it means that ResetForTesting() is not
+ // called and the categories state leaks between test fixtures.
+ DCHECK(!g_categories[i].is_enabled());
+ }
+}
+
+// static
+void CategoryRegistry::ResetForTesting() {
+ AutoLock lock(g_category_lock.Get());
+ for (size_t i = 0; i < kMaxCategories; ++i)
+ g_categories[i].reset_for_testing();
+}
+
+// static
+bool CategoryRegistry::GetOrCreateCategoryByName(const char* category_name,
+ TraceCategory** category) {
+ DCHECK(!strchr(category_name, '"'))
+ << "Category names may not contain double quote";
+
+ // |g_categories| is append-only; avoid taking a lock on the fast path.
+ size_t category_index = base::subtle::Acquire_Load(&g_category_index);
+
+ // Search for pre-existing category group.
+ for (size_t i = 0; i < category_index; ++i) {
+ if (strcmp(g_categories[i].name(), category_name) == 0) {
+ *category = &g_categories[i];
+ return false;
+ }
+ }
+
+ // This is the slow path: the lock is not held in the case above, so more
+ // than one thread could have reached here trying to add the same category.
+ // Only hold the lock when actually appending a new category, and check the
+ // categories groups again.
+ // TODO(primiano): there should be no need for the acquire/release semantics
+ // on g_category_index below, the outer lock implies that. Remove once the
+ // tracing refactoring reaches a quieter state and we can afford the risk.
+ AutoLock lock(g_category_lock.Get());
+ category_index = base::subtle::Acquire_Load(&g_category_index);
+ for (size_t i = 0; i < category_index; ++i) {
+ if (strcmp(g_categories[i].name(), category_name) == 0) {
+ *category = &g_categories[i];
+ return false;
+ }
+ }
+
+ // Create a new category.
+ if (category_index >= kMaxCategories) {
+ NOTREACHED() << "must increase kMaxCategories";
+ *category = kCategoryExhausted;
+ return false;
+ }
+
+ // TODO(primiano): this strdup should be removed. The only documented reason
+ // for it was TraceWatchEvent, which is gone. However, something might have
+ // ended up relying on this. Needs some auditing before removal.
+ const char* category_name_copy = strdup(category_name);
+ ANNOTATE_LEAKING_OBJECT_PTR(category_name_copy);
+
+ *category = &g_categories[category_index];
+ DCHECK(!(*category)->is_valid());
+ DCHECK(!(*category)->is_enabled());
+ (*category)->set_name(category_name_copy);
+
+ // Update the max index now.
+ base::subtle::Release_Store(&g_category_index, category_index + 1);
+ return true;
+}
+
+// static
+const TraceCategory* CategoryRegistry::GetCategoryByStatePtr(
+ const uint8_t* category_state) {
+ const TraceCategory* category = TraceCategory::FromStatePtr(category_state);
+ DCHECK(IsValidCategoryPtr(category));
+ return category;
+}
+
+// static
+bool CategoryRegistry::IsBuiltinCategory(const TraceCategory* category) {
+ DCHECK(IsValidCategoryPtr(category));
+ return category < &g_categories[kNumBuiltinCategories];
+}
+
+// static
+CategoryRegistry::Range CategoryRegistry::GetAllCategories() {
+ // The |g_categories| array is append only. We have to only guarantee to
+ // not return an index to a category which is being initialized by
+ // GetOrCreateCategoryByName().
+ size_t category_index = base::subtle::Acquire_Load(&g_category_index);
+ return CategoryRegistry::Range(&g_categories[0],
+ &g_categories[category_index]);
+}
+
+} // namespace trace_event
+} // namespace base
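GetOrCreateCategoryByName() above is the classic lock-free-read / locked-append pattern over an append-only array. A distilled sketch of the same control flow with standard-library primitives (hypothetical types; the strdup of |name| and the exhaustion sentinel used by the real code are elided):

#include <atomic>
#include <cstring>
#include <mutex>

struct Entry {
  const char* name;
};

Entry g_entries[200];
std::atomic<size_t> g_count{0};
std::mutex g_mutex;

// Readers scan lock-free; writers append under a lock and re-check first.
Entry* GetOrCreate(const char* name) {
  size_t count = g_count.load(std::memory_order_acquire);
  for (size_t i = 0; i < count; ++i) {  // Fast path: no lock taken.
    if (std::strcmp(g_entries[i].name, name) == 0)
      return &g_entries[i];
  }
  std::lock_guard<std::mutex> lock(g_mutex);  // Slow path.
  count = g_count.load(std::memory_order_acquire);
  for (size_t i = 0; i < count; ++i) {  // Another thread may have appended.
    if (std::strcmp(g_entries[i].name, name) == 0)
      return &g_entries[i];
  }
  g_entries[count].name = name;  // Assumes |name| outlives the registry.
  g_count.store(count + 1, std::memory_order_release);
  return &g_entries[count];
}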
diff --git a/chromium/base/trace_event/category_registry.h b/chromium/base/trace_event/category_registry.h
new file mode 100644
index 00000000000..da998993c4e
--- /dev/null
+++ b/chromium/base/trace_event/category_registry.h
@@ -0,0 +1,83 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_CATEGORY_H_
+#define BASE_TRACE_EVENT_CATEGORY_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/base_export.h"
+#include "base/logging.h"
+
+namespace base {
+namespace trace_event {
+
+struct TraceCategory;
+class TraceCategoryTest;
+class TraceLog;
+
+// Keeps track of the state of all tracing categories. The reason why this
+// is a fully static class with global state is to allow to statically define
+// known categories as global linker-initialized structs, without requiring
+// static initializers.
+class BASE_EXPORT CategoryRegistry {
+ public:
+ // Allows for-each iterations over a slice of the categories array.
+ class Range {
+ public:
+ Range(TraceCategory* begin, TraceCategory* end) : begin_(begin), end_(end) {
+ DCHECK_LE(begin, end);
+ }
+ TraceCategory* begin() const { return begin_; }
+ TraceCategory* end() const { return end_; }
+
+ private:
+ TraceCategory* const begin_;
+ TraceCategory* const end_;
+ };
+
+ // Known categories.
+ static TraceCategory* const kCategoryExhausted;
+ static TraceCategory* const kCategoryMetadata;
+ static TraceCategory* const kCategoryAlreadyShutdown;
+
+ // Returns a category entry from the Category.state_ptr() pointer.
+ // TODO(primiano): trace macros should just keep a pointer to the entire
+ // TraceCategory, not just the enabled state pointer. That would remove the
+ // need for this function and make everything cleaner at no extra cost (as
+ // long as the |state_| is the first field of the struct, which can be
+ // guaranteed via static_assert, see TraceCategory ctor).
+ static const TraceCategory* GetCategoryByStatePtr(
+ const uint8_t* category_state);
+
+ static bool IsBuiltinCategory(const TraceCategory*);
+
+ private:
+ friend class TraceCategoryTest;
+ friend class TraceLog;
+
+ // Only for debugging/testing purposes; this is a no-op on release builds.
+ static void Initialize();
+
+ // Resets the state of all categories, to clear up the state between tests.
+ static void ResetForTesting();
+
+ // The output |category| argument is an indefinitely lived pointer to the
+ // TraceCategory owned by the registry. TRACE_EVENTx macros will cache this
+ // pointer and use it for checks in their fast-paths.
+ // Returns false if the category was already present, true if the category
+ // has just been added and hence requires initialization.
+ static bool GetOrCreateCategoryByName(const char* category_name,
+ TraceCategory** category);
+
+ // Allows iterating over the valid categories in a for-each loop.
+ // This includes builtin categories such as __metadata.
+ static Range GetAllCategories();
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_CATEGORY_H_
diff --git a/chromium/base/trace_event/common/trace_event_common.h b/chromium/base/trace_event/common/trace_event_common.h
index 0db92692a0f..e87665b8cdb 100644
--- a/chromium/base/trace_event/common/trace_event_common.h
+++ b/chromium/base/trace_event/common/trace_event_common.h
@@ -223,49 +223,6 @@
flow_flags, arg1_name, arg1_val, \
arg2_name, arg2_val)
-// UNSHIPPED_TRACE_EVENT* are like TRACE_EVENT* except that they are not
-// included in official builds.
-
-#if OFFICIAL_BUILD
-#undef TRACING_IS_OFFICIAL_BUILD
-#define TRACING_IS_OFFICIAL_BUILD 1
-#elif !defined(TRACING_IS_OFFICIAL_BUILD)
-#define TRACING_IS_OFFICIAL_BUILD 0
-#endif
-
-#if TRACING_IS_OFFICIAL_BUILD
-#define UNSHIPPED_TRACE_EVENT0(category_group, name) (void)0
-#define UNSHIPPED_TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
- (void)0
-#define UNSHIPPED_TRACE_EVENT2(category_group, name, arg1_name, arg1_val, \
- arg2_name, arg2_val) \
- (void)0
-#define UNSHIPPED_TRACE_EVENT_INSTANT0(category_group, name, scope) (void)0
-#define UNSHIPPED_TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, \
- arg1_val) \
- (void)0
-#define UNSHIPPED_TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, \
- arg1_val, arg2_name, arg2_val) \
- (void)0
-#else
-#define UNSHIPPED_TRACE_EVENT0(category_group, name) \
- TRACE_EVENT0(category_group, name)
-#define UNSHIPPED_TRACE_EVENT1(category_group, name, arg1_name, arg1_val) \
- TRACE_EVENT1(category_group, name, arg1_name, arg1_val)
-#define UNSHIPPED_TRACE_EVENT2(category_group, name, arg1_name, arg1_val, \
- arg2_name, arg2_val) \
- TRACE_EVENT2(category_group, name, arg1_name, arg1_val, arg2_name, arg2_val)
-#define UNSHIPPED_TRACE_EVENT_INSTANT0(category_group, name, scope) \
- TRACE_EVENT_INSTANT0(category_group, name, scope)
-#define UNSHIPPED_TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, \
- arg1_val) \
- TRACE_EVENT_INSTANT1(category_group, name, scope, arg1_name, arg1_val)
-#define UNSHIPPED_TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, \
- arg1_val, arg2_name, arg2_val) \
- TRACE_EVENT_INSTANT2(category_group, name, scope, arg1_name, arg1_val, \
- arg2_name, arg2_val)
-#endif
-
// Records a single event called "name" immediately, with 0, 1 or 2
// associated arguments. If the category is not enabled, then this
// does nothing.
@@ -301,16 +258,6 @@
TRACE_EVENT_PHASE_INSTANT, category_group, name, timestamp, \
TRACE_EVENT_FLAG_NONE | scope)
-// Syntactic sugars for the sampling tracing in the main thread.
-#define TRACE_EVENT_SCOPED_SAMPLING_STATE(category, name) \
- TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(0, category, name)
-#define TRACE_EVENT_GET_SAMPLING_STATE() \
- TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(0)
-#define TRACE_EVENT_SET_SAMPLING_STATE(category, name) \
- TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(0, category, name)
-#define TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(category_and_name) \
- TRACE_EVENT_SET_NONCONST_SAMPLING_STATE_FOR_BUCKET(0, category_and_name)
-
// Records a single BEGIN event called "name" immediately, with 0, 1 or 2
// associated arguments. If the category is not enabled, then this
// does nothing.
@@ -1006,15 +953,15 @@
INTERNAL_TRACE_EVENT_SCOPED_CONTEXT(category_group, name, context)
// Macro to specify that two trace IDs are identical. For example,
-// TRACE_BIND_IDS(
+// TRACE_LINK_IDS(
// "category", "name",
// TRACE_ID_WITH_SCOPE("net::URLRequest", 0x1000),
// TRACE_ID_WITH_SCOPE("blink::ResourceFetcher::FetchRequest", 0x2000))
// tells the trace consumer that events with ID ("net::URLRequest", 0x1000) from
// the current process have the same ID as events with ID
// ("blink::ResourceFetcher::FetchRequest", 0x2000).
-#define TRACE_BIND_IDS(category_group, name, id, bind_id) \
- INTERNAL_TRACE_EVENT_ADD_BIND_IDS(category_group, name, id, bind_id);
+#define TRACE_LINK_IDS(category_group, name, id, linked_id) \
+ INTERNAL_TRACE_EVENT_ADD_LINK_IDS(category_group, name, id, linked_id);
// Macro to efficiently determine if a given category group is enabled.
#define TRACE_EVENT_CATEGORY_GROUP_ENABLED(category_group, ret) \
@@ -1081,7 +1028,7 @@
#define TRACE_EVENT_PHASE_CLOCK_SYNC ('c')
#define TRACE_EVENT_PHASE_ENTER_CONTEXT ('(')
#define TRACE_EVENT_PHASE_LEAVE_CONTEXT (')')
-#define TRACE_EVENT_PHASE_BIND_IDS ('=')
+#define TRACE_EVENT_PHASE_LINK_IDS ('=')
// Flags for changing the behavior of TRACE_EVENT_API_ADD_TRACE_EVENT.
#define TRACE_EVENT_FLAG_NONE (static_cast<unsigned int>(0))
diff --git a/chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc b/chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc
index cbece2b14ff..f3a03fe90d0 100644
--- a/chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc
+++ b/chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc
@@ -129,7 +129,9 @@ void AllocationContextTracker::PopPseudoStackFrame(
// hit if some TRACE_EVENT macro is unbalanced (a TRACE_EVENT_END* call
// without a corresponding TRACE_EVENT_BEGIN).
DCHECK(stack_frame == pseudo_stack_.back())
- << "Encountered an unmatched TRACE_EVENT_END";
+ << "Encountered an unmatched TRACE_EVENT_END: "
+ << stack_frame.trace_event_name
+ << " vs event in stack: " << pseudo_stack_.back().trace_event_name;
pseudo_stack_.pop_back();
}
diff --git a/chromium/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc b/chromium/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
index de19ea25b86..056aa2c001d 100644
--- a/chromium/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
+++ b/chromium/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
@@ -27,6 +27,18 @@ const char kEclair[] = "Eclair";
const char kFroyo[] = "Froyo";
const char kGingerbread[] = "Gingerbread";
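+// A minimal filtering trace config (sketch): a single event filter applying
+// the heap profiler predicate to all categories. Used below to enable
+// TraceLog in FILTERING_MODE only, with no recording.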
+const char kFilteringTraceConfig[] =
+ "{"
+ " \"event_filters\": ["
+ " {"
+ " \"excluded_categories\": [],"
+ " \"filter_args\": {},"
+ " \"filter_predicate\": \"heap_profiler_predicate\","
+ " \"included_categories\": [\"*\"]"
+ " }"
+ " ]"
+ "}";
+
// Asserts that the fixed-size array |expected_backtrace| matches the backtrace
// in |AllocationContextTracker::GetContextSnapshot|.
template <size_t N>
@@ -68,15 +80,15 @@ class AllocationContextTrackerTest : public testing::Test {
AllocationContextTracker::CaptureMode::PSEUDO_STACK);
// Enabling the filtering trace config installs the heap profiler event
// filter used for capturing the pseudo stack.
- TraceConfig config(MemoryDumpManager::kTraceCategory, "");
- TraceLog::GetInstance()->SetEnabled(config, TraceLog::RECORDING_MODE);
+ TraceConfig config(kFilteringTraceConfig);
+ TraceLog::GetInstance()->SetEnabled(config, TraceLog::FILTERING_MODE);
AllocationContextTracker::SetCurrentThreadName(kThreadName);
}
void TearDown() override {
AllocationContextTracker::SetCaptureMode(
AllocationContextTracker::CaptureMode::DISABLED);
- TraceLog::GetInstance()->SetDisabled();
+ TraceLog::GetInstance()->SetDisabled(TraceLog::FILTERING_MODE);
}
};
diff --git a/chromium/base/trace_event/malloc_dump_provider.cc b/chromium/base/trace_event/malloc_dump_provider.cc
index 26eb92319a2..7d0cb579315 100644
--- a/chromium/base/trace_event/malloc_dump_provider.cc
+++ b/chromium/base/trace_event/malloc_dump_provider.cc
@@ -105,7 +105,18 @@ struct WinHeapInfo {
bool GetHeapInformation(WinHeapInfo* heap_info,
const std::set<void*>& block_to_skip) {
- CHECK(::HeapLock(heap_info->heap_id) == TRUE);
+ // NOTE: crbug.com/464430
+ // The Client/Server Runtime Subsystem (CSRSS) lockdown described in the
+ // referenced bug invalidates the heap used by CSRSS. The author has not
+ // found a way to clean up an invalid heap handle, so it is left in the
+ // process's heap list. Therefore we need to handle the case where this
+ // invalid heap handle is present in the heap list.
+ // HeapLock implicitly checks certain aspects of the HEAP structure, such as
+ // the signature. If this passes, we assume that this heap is valid and is
+ // not the one owned by CSRSS.
+ if (!::HeapLock(heap_info->heap_id)) {
+ return false;
+ }
PROCESS_HEAP_ENTRY heap_entry;
heap_entry.lpData = nullptr;
// Walk over all the entries in this heap.
@@ -161,15 +172,21 @@ void WinHeapMemoryDumpImpl(WinHeapInfo* all_heap_info) {
block_to_skip.insert(all_heaps.get());
// Retrieves some metrics about each heap.
+ size_t heap_info_errors = 0;
for (size_t i = 0; i < number_of_heaps; ++i) {
WinHeapInfo heap_info = {0};
heap_info.heap_id = all_heaps[i];
- GetHeapInformation(&heap_info, block_to_skip);
-
- all_heap_info->allocated_size += heap_info.allocated_size;
- all_heap_info->committed_size += heap_info.committed_size;
- all_heap_info->uncommitted_size += heap_info.uncommitted_size;
- all_heap_info->block_count += heap_info.block_count;
+ if (GetHeapInformation(&heap_info, block_to_skip)) {
+ all_heap_info->allocated_size += heap_info.allocated_size;
+ all_heap_info->committed_size += heap_info.committed_size;
+ all_heap_info->uncommitted_size += heap_info.uncommitted_size;
+ all_heap_info->block_count += heap_info.block_count;
+ } else {
+ ++heap_info_errors;
+ // See the note in GetHeapInformation(); we expect only one heap (the one
+ // owned by CSRSS) to be unreadable.
+ CHECK_EQ(1u, heap_info_errors);
+ }
}
}
#endif // defined(OS_WIN)
diff --git a/chromium/base/trace_event/memory_allocator_dump.h b/chromium/base/trace_event/memory_allocator_dump.h
index 7d1023606bd..2e6b08ab32f 100644
--- a/chromium/base/trace_event/memory_allocator_dump.h
+++ b/chromium/base/trace_event/memory_allocator_dump.h
@@ -19,7 +19,6 @@
namespace base {
namespace trace_event {
-class MemoryDumpManager;
class ProcessMemoryDump;
class TracedValue;
diff --git a/chromium/base/trace_event/memory_dump_manager.cc b/chromium/base/trace_event/memory_dump_manager.cc
index 9719a6c6720..28d5d56c59d 100644
--- a/chromium/base/trace_event/memory_dump_manager.cc
+++ b/chromium/base/trace_event/memory_dump_manager.cc
@@ -201,10 +201,33 @@ void MemoryDumpManager::Initialize(MemoryDumpManagerDelegate* delegate,
nullptr);
#endif
+ TRACE_EVENT_WARMUP_CATEGORY(kTraceCategory);
+
+ // TODO(ssid): This should be done in EnableHeapProfiling so that we capture
+ // more allocations (crbug.com/625170).
+ if (AllocationContextTracker::capture_mode() ==
+ AllocationContextTracker::CaptureMode::PSEUDO_STACK &&
+ !(TraceLog::GetInstance()->enabled_modes() & TraceLog::FILTERING_MODE)) {
+ // Create trace config with heap profiling filter.
+ TraceConfig::EventFilterConfig heap_profiler_filter_config(
+ TraceLog::TraceEventFilter::kHeapProfilerPredicate);
+ heap_profiler_filter_config.AddIncludedCategory("*");
+ heap_profiler_filter_config.AddIncludedCategory(
+ MemoryDumpManager::kTraceCategory);
+ TraceConfig::EventFilters filters;
+ filters.push_back(heap_profiler_filter_config);
+ TraceConfig filtering_trace_config;
+ filtering_trace_config.SetEventFilters(filters);
+
+ TraceLog::GetInstance()->SetEnabled(filtering_trace_config,
+ TraceLog::FILTERING_MODE);
+ }
+
// If tracing was enabled before initializing MemoryDumpManager, we missed the
// OnTraceLogEnabled() event. Synthesize it so we can late-join the party.
+ // IsEnabled is called before adding the observer to avoid calling
+ // OnTraceLogEnabled() twice.
bool is_tracing_already_enabled = TraceLog::GetInstance()->IsEnabled();
- TRACE_EVENT0(kTraceCategory, "init"); // Add to trace-viewer category list.
TraceLog::GetInstance()->AddEnabledStateObserver(this);
if (is_tracing_already_enabled)
OnTraceLogEnabled();
diff --git a/chromium/base/trace_event/memory_dump_manager_unittest.cc b/chromium/base/trace_event/memory_dump_manager_unittest.cc
index 3f4e921a06e..3ea8ac28fb8 100644
--- a/chromium/base/trace_event/memory_dump_manager_unittest.cc
+++ b/chromium/base/trace_event/memory_dump_manager_unittest.cc
@@ -16,6 +16,7 @@
#include "base/run_loop.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/waitable_event.h"
+#include "base/test/sequenced_worker_pool_owner.h"
#include "base/test/test_io_thread.h"
#include "base/test/trace_event_analyzer.h"
#include "base/threading/platform_thread.h"
@@ -162,9 +163,7 @@ class MockMemoryDumpProvider : public MemoryDumpProvider {
class TestSequencedTaskRunner : public SequencedTaskRunner {
public:
TestSequencedTaskRunner()
- : worker_pool_(new SequencedWorkerPool(2 /* max_threads */,
- "Test Task Runner",
- base::TaskPriority::USER_VISIBLE)),
+ : worker_pool_(2 /* max_threads */, "Test Task Runner"),
enabled_(true),
num_of_post_tasks_(0) {}
@@ -182,19 +181,21 @@ class TestSequencedTaskRunner : public SequencedTaskRunner {
const Closure& task,
TimeDelta delay) override {
num_of_post_tasks_++;
- if (enabled_)
- return worker_pool_->PostSequencedWorkerTask(token_, from_here, task);
+ if (enabled_) {
+ return worker_pool_.pool()->PostSequencedWorkerTask(token_, from_here,
+ task);
+ }
return false;
}
bool RunsTasksOnCurrentThread() const override {
- return worker_pool_->RunsTasksOnCurrentThread();
+ return worker_pool_.pool()->RunsTasksOnCurrentThread();
}
private:
~TestSequencedTaskRunner() override {}
- scoped_refptr<SequencedWorkerPool> worker_pool_;
+ SequencedWorkerPoolOwner worker_pool_;
const SequencedWorkerPool::SequenceToken token_;
bool enabled_;
unsigned num_of_post_tasks_;
diff --git a/chromium/base/trace_event/memory_infra_background_whitelist.cc b/chromium/base/trace_event/memory_infra_background_whitelist.cc
index 37ba1c77e78..21bff690456 100644
--- a/chromium/base/trace_event/memory_infra_background_whitelist.cc
+++ b/chromium/base/trace_event/memory_infra_background_whitelist.cc
@@ -17,6 +17,7 @@ namespace {
// providers can be added here only if the background mode dump has very
// low performance and memory overhead.
const char* const kDumpProviderWhitelist[] = {
+ "android::ResourceManagerImpl",
"BlinkGC",
"ChildDiscardableSharedMemoryManager",
"DOMStorage",
@@ -49,6 +50,12 @@ const char* const kAllocatorDumpNameWhitelist[] = {
"java_heap/allocated_objects",
"leveldb/index_db/0x?",
"leveldb/leveldb_proto/0x?",
+ "leveldb/leveldb_proto/BudgetManager/0x?",
+ "leveldb/leveldb_proto/DomDistillerStore/0x?",
+ "leveldb/leveldb_proto/GCMKeyStore/0x?",
+ "leveldb/leveldb_proto/ImageManager/0x?",
+ "leveldb/leveldb_proto/NTPSnippetImages/0x?",
+ "leveldb/leveldb_proto/NTPSnippets/0x?",
"leveldb/value_store/Extensions.Database.Open.Settings/0x?",
"leveldb/value_store/Extensions.Database.Open.Rules/0x?",
"leveldb/value_store/Extensions.Database.Open.State/0x?",
@@ -72,6 +79,7 @@ const char* const kAllocatorDumpNameWhitelist[] = {
"skia/sk_glyph_cache",
"skia/sk_resource_cache",
"sqlite",
+ "ui/resource_manager_0x?",
"v8/isolate_0x?/heap_spaces",
"v8/isolate_0x?/heap_spaces/code_space",
"v8/isolate_0x?/heap_spaces/large_object_space",
diff --git a/chromium/base/trace_event/memory_usage_estimator.cc b/chromium/base/trace_event/memory_usage_estimator.cc
new file mode 100644
index 00000000000..c769d5b6f1e
--- /dev/null
+++ b/chromium/base/trace_event/memory_usage_estimator.cc
@@ -0,0 +1,14 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_usage_estimator.h"
+
+namespace base {
+namespace trace_event {
+
+template size_t EstimateMemoryUsage(const std::string&);
+template size_t EstimateMemoryUsage(const string16&);
+
+} // namespace trace_event
+} // namespace base
diff --git a/chromium/base/trace_event/memory_usage_estimator.h b/chromium/base/trace_event/memory_usage_estimator.h
new file mode 100644
index 00000000000..c089b0ee614
--- /dev/null
+++ b/chromium/base/trace_event/memory_usage_estimator.h
@@ -0,0 +1,418 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_MEMORY_USAGE_ESTIMATOR_H_
+#define BASE_TRACE_EVENT_MEMORY_USAGE_ESTIMATOR_H_
+
+#include <stdint.h>
+
+#include <array>
+#include <list>
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <type_traits>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/strings/string16.h"
+#include "base/template_util.h"
+
+// Composable memory usage estimators.
+//
+// This file defines a set of EstimateMemoryUsage(object) functions that
+// return the approximate memory usage of their argument.
+//
+// The ultimate goal is to make memory usage estimation for a class simply a
+// matter of aggregating EstimateMemoryUsage() results over all fields.
+//
+// That is achieved via composability: if EstimateMemoryUsage() is defined
+// for T then EstimateMemoryUsage() is also defined for any combination of
+// containers holding T (e.g. std::map<int, std::vector<T>>).
+//
+// There are two ways of defining EstimateMemoryUsage() for a type:
+//
+// 1. As a global function 'size_t EstimateMemoryUsage(T)' in
+//    the base::trace_event namespace.
+//
+// 2. As a 'size_t T::EstimateMemoryUsage() const' method. In this case the
+//    EstimateMemoryUsage(T) function in the base::trace_event namespace is
+//    provided automatically.
+//
+// Here is an example implementation:
+//
+// size_t foo::bar::MyClass::EstimateMemoryUsage() const {
+// return base::trace_event::EstimateMemoryUsage(name_) +
+// base::trace_event::EstimateMemoryUsage(id_) +
+// base::trace_event::EstimateMemoryUsage(items_);
+// }
+//
+// The approach is simple: first call EstimateMemoryUsage() on all members,
+// then recursively fix compilation errors that are caused by types not
+// implementing EstimateMemoryUsage().
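+//
+// As a usage sketch (names here are illustrative, not part of this API):
+// once every field type is covered, estimating a whole container of objects
+// is a single call:
+//
+//   std::map<int, std::vector<MyClass>> items;
+//   size_t usage = base::trace_event::EstimateMemoryUsage(items);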
+
+namespace base {
+namespace trace_event {
+
+// Declarations
+
+// If T declares 'EstimateMemoryUsage() const' member function, then
+// global function EstimateMemoryUsage(T) is available, and just calls
+// the member function.
+template <class T>
+auto EstimateMemoryUsage(const T& object)
+ -> decltype(object.EstimateMemoryUsage());
+
+// String
+
+template <class C, class T, class A>
+size_t EstimateMemoryUsage(const std::basic_string<C, T, A>& string);
+
+// Arrays
+
+template <class T, size_t N>
+size_t EstimateMemoryUsage(const std::array<T, N>& array);
+
+template <class T, size_t N>
+size_t EstimateMemoryUsage(T (&array)[N]);
+
+template <class T>
+size_t EstimateMemoryUsage(const T* array, size_t array_length);
+
+// std::unique_ptr
+
+template <class T, class D>
+size_t EstimateMemoryUsage(const std::unique_ptr<T, D>& ptr);
+
+template <class T, class D>
+size_t EstimateMemoryUsage(const std::unique_ptr<T[], D>& array,
+ size_t array_length);
+
+// Containers
+
+template <class F, class S>
+size_t EstimateMemoryUsage(const std::pair<F, S>& pair);
+
+template <class T, class A>
+size_t EstimateMemoryUsage(const std::vector<T, A>& vector);
+
+template <class T, class A>
+size_t EstimateMemoryUsage(const std::list<T, A>& list);
+
+template <class T, class C, class A>
+size_t EstimateMemoryUsage(const std::set<T, C, A>& set);
+
+template <class T, class C, class A>
+size_t EstimateMemoryUsage(const std::multiset<T, C, A>& set);
+
+template <class K, class V, class C, class A>
+size_t EstimateMemoryUsage(const std::map<K, V, C, A>& map);
+
+template <class K, class V, class C, class A>
+size_t EstimateMemoryUsage(const std::multimap<K, V, C, A>& map);
+
+template <class T, class H, class KE, class A>
+size_t EstimateMemoryUsage(const std::unordered_set<T, H, KE, A>& set);
+
+template <class T, class H, class KE, class A>
+size_t EstimateMemoryUsage(const std::unordered_multiset<T, H, KE, A>& set);
+
+template <class K, class V, class H, class KE, class A>
+size_t EstimateMemoryUsage(const std::unordered_map<K, V, H, KE, A>& map);
+
+template <class K, class V, class H, class KE, class A>
+size_t EstimateMemoryUsage(const std::unordered_multimap<K, V, H, KE, A>& map);
+
+// TODO(dskiba):
+// std::forward_list
+// std::deque
+// std::queue
+// std::stack
+// std::queue
+// std::priority_queue
+
+// Definitions
+
+namespace internal {
+
+// HasEMU<T>::value is true iff EstimateMemoryUsage(T) is available.
+// (This is the default version, which is false.)
+template <class T, class X = void>
+struct HasEMU : std::false_type {};
+
+// This HasEMU specialization is only picked up if there exists a function
+// EstimateMemoryUsage(const T&) that returns size_t. Simpler ways to
+// achieve this don't work on MSVC.
+template <class T>
+struct HasEMU<
+ T,
+ typename std::enable_if<std::is_same<
+ size_t,
+ decltype(EstimateMemoryUsage(std::declval<const T&>()))>::value>::type>
+ : std::true_type {};
+
+// EMUCaller<T> does three things:
+// 1. Defines a Call() method that calls EstimateMemoryUsage(T) if it's
+//    available.
+// 2. If EstimateMemoryUsage(T) is not available, but T has a trivial dtor
+//    (i.e. it's a POD, integer, pointer, enum, etc.), then it defines a
+//    Call() method that returns 0. This is useful for containers, which
+//    allocate memory regardless of T (also for cases like
+//    std::map<int, MyClass>).
+// 3. Finally, if EstimateMemoryUsage(T) is not available, it triggers a
+//    static_assert with a helpful message. That cuts the number of errors
+//    considerably - if you just call EstimateMemoryUsage(T) and it's not
+//    available for T, the compiler will helpfully list *all* possible
+//    variants of it, with an explanation for each.
+template <class T, class X = void>
+struct EMUCaller {
+ // std::is_same<> below makes the static_assert depend on T, in order to
+ // prevent it from asserting regardless of instantiation.
+ static_assert(std::is_same<T, std::false_type>::value,
+ "Neither global function 'size_t EstimateMemoryUsage(T)' "
+ "nor member function 'size_t T::EstimateMemoryUsage() const' "
+ "is defined for the type.");
+
+ static size_t Call(const T&) { return 0; }
+};
+
+template <class T>
+struct EMUCaller<T, typename std::enable_if<HasEMU<T>::value>::type> {
+ static size_t Call(const T& value) { return EstimateMemoryUsage(value); }
+};
+
+template <class T>
+struct EMUCaller<
+ T,
+ typename std::enable_if<!HasEMU<T>::value &&
+ is_trivially_destructible<T>::value>::type> {
+ static size_t Call(const T&) { return 0; }
+};
+
+} // namespace internal
+
+// Proxy that deduces T and calls EMUCaller<T>.
+// To be used by EstimateMemoryUsage() implementations for containers.
+template <class T>
+size_t EstimateItemMemoryUsage(const T& value) {
+ return internal::EMUCaller<T>::Call(value);
+}
+
+template <class I>
+size_t EstimateIterableMemoryUsage(const I& iterable) {
+ size_t memory_usage = 0;
+ for (const auto& item : iterable) {
+ memory_usage += EstimateItemMemoryUsage(item);
+ }
+ return memory_usage;
+}
+
+// Global EstimateMemoryUsage(T) that just calls T::EstimateMemoryUsage().
+template <class T>
+auto EstimateMemoryUsage(const T& object)
+ -> decltype(object.EstimateMemoryUsage()) {
+ static_assert(
+ std::is_same<decltype(object.EstimateMemoryUsage()), size_t>::value,
+ "'T::EstimateMemoryUsage() const' must return size_t.");
+ return object.EstimateMemoryUsage();
+}
+
+// String
+
+template <class C, class T, class A>
+size_t EstimateMemoryUsage(const std::basic_string<C, T, A>& string) {
+ using string_type = std::basic_string<C, T, A>;
+ using value_type = typename string_type::value_type;
+ // C++11 doesn't leave much room for implementors - std::string can
+ // use short string optimization, but that's about it. We detect SSO
+ // by checking that c_str() points inside |string|.
+ const uint8_t* cstr = reinterpret_cast<const uint8_t*>(string.c_str());
+ const uint8_t* inline_cstr = reinterpret_cast<const uint8_t*>(&string);
+ if (cstr >= inline_cstr && cstr < inline_cstr + sizeof(string)) {
+ // SSO string
+ return 0;
+ }
+ return (string.capacity() + 1) * sizeof(value_type);
+}
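+// For example (see the unittest): std::string s(777, 'a') is heap-allocated,
+// so the estimate is s.capacity() + 1 bytes, while a short SSO string
+// reports 0 additional bytes.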
+
+// Use explicit instantiations from the .cc file (reduces bloat).
+extern template BASE_EXPORT size_t EstimateMemoryUsage(const std::string&);
+extern template BASE_EXPORT size_t EstimateMemoryUsage(const string16&);
+
+// Arrays
+
+template <class T, size_t N>
+size_t EstimateMemoryUsage(const std::array<T, N>& array) {
+ return EstimateIterableMemoryUsage(array);
+}
+
+template <class T, size_t N>
+size_t EstimateMemoryUsage(T (&array)[N]) {
+ return EstimateIterableMemoryUsage(array);
+}
+
+template <class T>
+size_t EstimateMemoryUsage(const T* array, size_t array_length) {
+ size_t memory_usage = sizeof(T) * array_length;
+ for (size_t i = 0; i != array_length; ++i) {
+ memory_usage += EstimateItemMemoryUsage(array[i]);
+ }
+ return memory_usage;
+}
+
+// std::unique_ptr
+
+template <class T, class D>
+size_t EstimateMemoryUsage(const std::unique_ptr<T, D>& ptr) {
+ return ptr ? (sizeof(T) + EstimateItemMemoryUsage(*ptr)) : 0;
+}
+
+template <class T, class D>
+size_t EstimateMemoryUsage(const std::unique_ptr<T[], D>& array,
+ size_t array_length) {
+ return EstimateMemoryUsage(array.get(), array_length);
+}
+
+// std::pair
+
+template <class F, class S>
+size_t EstimateMemoryUsage(const std::pair<F, S>& pair) {
+ return EstimateItemMemoryUsage(pair.first) +
+ EstimateItemMemoryUsage(pair.second);
+}
+
+// std::vector
+
+template <class T, class A>
+size_t EstimateMemoryUsage(const std::vector<T, A>& vector) {
+ return sizeof(T) * vector.capacity() + EstimateIterableMemoryUsage(vector);
+}
+
+// std::list
+
+template <class T, class A>
+size_t EstimateMemoryUsage(const std::list<T, A>& list) {
+ using value_type = typename std::list<T, A>::value_type;
+ struct Node {
+ Node* prev;
+ Node* next;
+ value_type value;
+ };
+ return sizeof(Node) * list.size() +
+ EstimateIterableMemoryUsage(list);
+}
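+// As a sanity check against the unittest: under an LP64 ABI a node holding a
+// 2-byte value is two 8-byte pointers plus the value, padded to 24 bytes, so
+// a 1000-element list is estimated at 24000 bytes (12000 on a typical 32-bit
+// ABI, where the node pads to 12 bytes).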
+
+// Tree containers
+
+template <class V>
+size_t EstimateTreeMemoryUsage(size_t size) {
+ // Tree containers are modeled after libc++
+ // (__tree_node from include/__tree)
+ struct Node {
+ Node* left;
+ Node* right;
+ Node* parent;
+ bool is_black;
+ V value;
+ };
+ return sizeof(Node) * size;
+}
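+// For instance, for the std::multiset<bool> case in the unittest: three
+// pointers plus the is_black flag and the bool value pad the node to 32
+// bytes on LP64 (16 on a typical 32-bit ABI), so 1000 elements come out at
+// 32000 (respectively 16000) bytes.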
+
+template <class T, class C, class A>
+size_t EstimateMemoryUsage(const std::set<T, C, A>& set) {
+ using value_type = typename std::set<T, C, A>::value_type;
+ return EstimateTreeMemoryUsage<value_type>(set.size()) +
+ EstimateIterableMemoryUsage(set);
+}
+
+template <class T, class C, class A>
+size_t EstimateMemoryUsage(const std::multiset<T, C, A>& set) {
+ using value_type = typename std::multiset<T, C, A>::value_type;
+ return EstimateTreeMemoryUsage<value_type>(set.size()) +
+ EstimateIterableMemoryUsage(set);
+}
+
+template <class K, class V, class C, class A>
+size_t EstimateMemoryUsage(const std::map<K, V, C, A>& map) {
+ using value_type = typename std::map<K, V, C, A>::value_type;
+ return EstimateTreeMemoryUsage<value_type>(map.size()) +
+ EstimateIterableMemoryUsage(map);
+}
+
+template <class K, class V, class C, class A>
+size_t EstimateMemoryUsage(const std::multimap<K, V, C, A>& map) {
+ using value_type = typename std::multimap<K, V, C, A>::value_type;
+ return EstimateTreeMemoryUsage<value_type>(map.size()) +
+ EstimateIterableMemoryUsage(map);
+}
+
+// HashMap containers
+
+namespace internal {
+
+// While the hashtable containers' model doesn't depend on the STL
+// implementation, one detail still crept in: bucket_count. It's used in size
+// estimation, but its value after inserting N items is not predictable.
+// This function is specialized by unittests to return a constant value, thus
+// excluding bucket_count from testing.
+template <class V>
+size_t HashMapBucketCountForTesting(size_t bucket_count) {
+ return bucket_count;
+}
+
+} // namespace internal
+
+template <class V>
+size_t EstimateHashMapMemoryUsage(size_t bucket_count, size_t size) {
+ // Hashtable containers are modeled after libc++
+ // (__hash_node from include/__hash_table)
+ struct Node {
+ void* next;
+ size_t hash;
+ V value;
+ };
+ using Bucket = void*;
+ bucket_count = internal::HashMapBucketCountForTesting<V>(bucket_count);
+ return sizeof(Bucket) * bucket_count + sizeof(Node) * size;
+}
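+// Worked example (matches the UnorderedSet unittest): on LP64 a
+// std::unordered_set<Data> node is a next pointer + hash + the pointer-sized
+// Data, i.e. 24 bytes. With the bucket count pinned to 10 for testing, 1000
+// elements cost 24 * 1000 + 8 * 10 = 24080 bytes of container overhead, plus
+// sum(0..999) = 499500 bytes reported by the Data items themselves, for a
+// total of 523580 bytes.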
+
+template <class K, class H, class KE, class A>
+size_t EstimateMemoryUsage(const std::unordered_set<K, H, KE, A>& set) {
+ using value_type = typename std::unordered_set<K, H, KE, A>::value_type;
+ return EstimateHashMapMemoryUsage<value_type>(set.bucket_count(),
+ set.size()) +
+ EstimateIterableMemoryUsage(set);
+}
+
+template <class K, class H, class KE, class A>
+size_t EstimateMemoryUsage(const std::unordered_multiset<K, H, KE, A>& set) {
+ using value_type = typename std::unordered_multiset<K, H, KE, A>::value_type;
+ return EstimateHashMapMemoryUsage<value_type>(set.bucket_count(),
+ set.size()) +
+ EstimateIterableMemoryUsage(set);
+}
+
+template <class K, class V, class H, class KE, class A>
+size_t EstimateMemoryUsage(const std::unordered_map<K, V, H, KE, A>& map) {
+ using value_type = typename std::unordered_map<K, V, H, KE, A>::value_type;
+ return EstimateHashMapMemoryUsage<value_type>(map.bucket_count(),
+ map.size()) +
+ EstimateIterableMemoryUsage(map);
+}
+
+template <class K, class V, class H, class KE, class A>
+size_t EstimateMemoryUsage(const std::unordered_multimap<K, V, H, KE, A>& map) {
+ using value_type =
+ typename std::unordered_multimap<K, V, H, KE, A>::value_type;
+ return EstimateHashMapMemoryUsage<value_type>(map.bucket_count(),
+ map.size()) +
+ EstimateIterableMemoryUsage(map);
+}
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_MEMORY_USAGE_ESTIMATOR_H_
diff --git a/chromium/base/trace_event/memory_usage_estimator_unittest.cc b/chromium/base/trace_event/memory_usage_estimator_unittest.cc
new file mode 100644
index 00000000000..b77d5fdc979
--- /dev/null
+++ b/chromium/base/trace_event/memory_usage_estimator_unittest.cc
@@ -0,0 +1,229 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/memory_usage_estimator.h"
+
+#include <stdlib.h>
+
+#include "base/memory/ptr_util.h"
+#include "base/strings/string16.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
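+// Picks the expected value by architecture: the first argument is the
+// 32-bit expectation, the second the 64-bit one; the unused one is ignored.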
+#if defined(ARCH_CPU_64_BITS)
+#define EXPECT_EQ_32_64(_, e, a) EXPECT_EQ(e, a)
+#else
+#define EXPECT_EQ_32_64(e, _, a) EXPECT_EQ(e, a)
+#endif
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+// Test class with predictable memory usage.
+class Data {
+ public:
+ explicit Data(size_t size = 17): size_(size) {
+ }
+
+ size_t size() const { return size_; }
+
+ size_t EstimateMemoryUsage() const {
+ return size_;
+ }
+
+ bool operator < (const Data& other) const {
+ return size_ < other.size_;
+ }
+ bool operator == (const Data& other) const {
+ return size_ == other.size_;
+ }
+
+ struct Hasher {
+ size_t operator () (const Data& data) const {
+ return data.size();
+ }
+ };
+
+ private:
+ size_t size_;
+};
+
+} // namespace
+
+namespace internal {
+
+// This kills the variance of bucket_count across STL implementations.
+template <>
+size_t HashMapBucketCountForTesting<Data>(size_t) {
+ return 10;
+}
+template <>
+size_t HashMapBucketCountForTesting<std::pair<const Data, short>>(size_t) {
+ return 10;
+}
+
+} // namespace internal
+
+TEST(EstimateMemoryUsageTest, String) {
+ std::string string(777, 'a');
+ EXPECT_EQ(string.capacity() + 1, EstimateMemoryUsage(string));
+}
+
+TEST(EstimateMemoryUsageTest, String16) {
+ string16 string(777, 'a');
+ EXPECT_EQ(sizeof(char16) * (string.capacity() + 1),
+ EstimateMemoryUsage(string));
+}
+
+TEST(EstimateMemoryUsageTest, Arrays) {
+ // std::array
+ {
+ std::array<Data, 10> array;
+ EXPECT_EQ(170u, EstimateMemoryUsage(array));
+ }
+
+ // T[N]
+ {
+ Data array[10];
+ EXPECT_EQ(170u, EstimateMemoryUsage(array));
+ }
+
+ // C array
+ {
+ struct Item {
+ char payload[10];
+ };
+ Item* array = new Item[7];
+ EXPECT_EQ(70u, EstimateMemoryUsage(array, 7));
+ delete[] array;
+ }
+}
+
+TEST(EstimateMemoryUsageTest, UniquePtr) {
+ // Empty
+ {
+ std::unique_ptr<Data> ptr;
+ EXPECT_EQ(0u, EstimateMemoryUsage(ptr));
+ }
+
+ // Not empty
+ {
+ std::unique_ptr<Data> ptr(new Data());
+ EXPECT_EQ_32_64(21u, 25u, EstimateMemoryUsage(ptr));
+ }
+
+ // With a pointer
+ {
+ std::unique_ptr<Data*> ptr(new Data*());
+ EXPECT_EQ(sizeof(void*), EstimateMemoryUsage(ptr));
+ }
+
+ // With an array
+ {
+ struct Item {
+ uint32_t payload[10];
+ };
+ std::unique_ptr<Item[]> ptr(new Item[7]);
+ EXPECT_EQ(280u, EstimateMemoryUsage(ptr, 7));
+ }
+}
+
+TEST(EstimateMemoryUsageTest, Vector) {
+ std::vector<Data> vector;
+ vector.reserve(1000);
+
+ // For an empty vector we should return the memory usage of its buffer.
+ size_t capacity = vector.capacity();
+ size_t expected_size = capacity * sizeof(Data);
+ EXPECT_EQ(expected_size, EstimateMemoryUsage(vector));
+
+ // If the vector is not empty, the estimate should also include the memory
+ // usage of all elements.
+ for (size_t i = 0; i != capacity / 2; ++i) {
+ vector.push_back(Data(i));
+ expected_size += EstimateMemoryUsage(vector.back());
+ }
+ EXPECT_EQ(expected_size, EstimateMemoryUsage(vector));
+}
+
+TEST(EstimateMemoryUsageTest, List) {
+ struct POD {
+ short data;
+ };
+ std::list<POD> list;
+ for (int i = 0; i != 1000; ++i) {
+ list.push_back(POD());
+ }
+ EXPECT_EQ_32_64(12000u, 24000u, EstimateMemoryUsage(list));
+}
+
+TEST(EstimateMemoryUsageTest, Set) {
+ std::set<std::pair<int, Data>> set;
+ for (int i = 0; i != 1000; ++i) {
+ set.insert({i, Data(i)});
+ }
+ EXPECT_EQ_32_64(523500u, 547500u, EstimateMemoryUsage(set));
+}
+
+TEST(EstimateMemoryUsageTest, MultiSet) {
+ std::multiset<bool> set;
+ for (int i = 0; i != 1000; ++i) {
+ set.insert((i & 1) != 0);
+ }
+ EXPECT_EQ_32_64(16000u, 32000u, EstimateMemoryUsage(set));
+}
+
+TEST(EstimateMemoryUsageTest, Map) {
+ std::map<Data, int> map;
+ for (int i = 0; i != 1000; ++i) {
+ map.insert({Data(i), i});
+ }
+ EXPECT_EQ_32_64(523500u, 547500u, EstimateMemoryUsage(map));
+}
+
+TEST(EstimateMemoryUsageTest, MultiMap) {
+ std::multimap<char, Data> map;
+ for (int i = 0; i != 1000; ++i) {
+ map.insert({static_cast<char>(i), Data(i)});
+ }
+ EXPECT_EQ_32_64(523500u, 547500u, EstimateMemoryUsage(map));
+}
+
+TEST(EstimateMemoryUsageTest, UnorderedSet) {
+ std::unordered_set<Data, Data::Hasher> set;
+ for (int i = 0; i != 1000; ++i) {
+ set.insert(Data(i));
+ }
+ EXPECT_EQ_32_64(511540u, 523580u, EstimateMemoryUsage(set));
+}
+
+TEST(EstimateMemoryUsageTest, UnorderedMultiSet) {
+ std::unordered_multiset<Data, Data::Hasher> set;
+ for (int i = 0; i != 500; ++i) {
+ set.insert(Data(i));
+ set.insert(Data(i));
+ }
+ EXPECT_EQ_32_64(261540u, 273580u, EstimateMemoryUsage(set));
+}
+
+TEST(EstimateMemoryUsageTest, UnorderedMap) {
+ std::unordered_map<Data, short, Data::Hasher> map;
+ for (int i = 0; i != 1000; ++i) {
+ map.insert({Data(i), static_cast<short>(i)});
+ }
+ EXPECT_EQ_32_64(515540u, 531580u, EstimateMemoryUsage(map));
+}
+
+TEST(EstimateMemoryUsageTest, UnorderedMultiMap) {
+ std::unordered_multimap<Data, short, Data::Hasher> map;
+ for (int i = 0; i != 1000; ++i) {
+ map.insert({Data(i), static_cast<short>(i)});
+ }
+ EXPECT_EQ_32_64(515540u, 531580u, EstimateMemoryUsage(map));
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/chromium/base/trace_event/process_memory_dump.h b/chromium/base/trace_event/process_memory_dump.h
index d020c7d652f..6f8d1672733 100644
--- a/chromium/base/trace_event/process_memory_dump.h
+++ b/chromium/base/trace_event/process_memory_dump.h
@@ -31,7 +31,6 @@
namespace base {
namespace trace_event {
-class MemoryDumpManager;
class MemoryDumpSessionState;
class TracedValue;
diff --git a/chromium/base/trace_event/trace_category.h b/chromium/base/trace_event/trace_category.h
new file mode 100644
index 00000000000..5a7915ac038
--- /dev/null
+++ b/chromium/base/trace_event/trace_category.h
@@ -0,0 +1,109 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_CATEGORY_H_
+#define BASE_TRACE_EVENT_TRACE_CATEGORY_H_
+
+#include <stdint.h>
+
+namespace base {
+namespace trace_event {
+
+// Captures the state of an individual trace category. Nothing except tracing
+// internals (e.g., TraceLog) is supposed to have non-const Category pointers.
+struct TraceCategory {
+ // The TRACE_EVENT macros should only use this value as a bool.
+ // These enum values are effectively a public API and third_party projects
+ // depend on their value. Hence, never remove or recycle existing bits, unless
+ // you are sure that all the third-party projects that depend on this have
+ // been updated.
+ enum StateFlags : uint8_t {
+ ENABLED_FOR_RECORDING = 1 << 0,
+
+ // Not used anymore.
+ DEPRECATED_ENABLED_FOR_MONITORING = 1 << 1,
+ DEPRECATED_ENABLED_FOR_EVENT_CALLBACK = 1 << 2,
+
+ ENABLED_FOR_ETW_EXPORT = 1 << 3,
+ ENABLED_FOR_FILTERING = 1 << 4
+ };
+
+ static const TraceCategory* FromStatePtr(const uint8_t* state_ptr) {
+ static_assert(
+ offsetof(TraceCategory, state_) == 0,
+ "|state_| must be the first field of the TraceCategory class.");
+ return reinterpret_cast<const TraceCategory*>(state_ptr);
+ }
+
+ bool is_valid() const { return name_ != nullptr; }
+ void set_name(const char* name) { name_ = name; }
+ const char* name() const {
+ DCHECK(is_valid());
+ return name_;
+ }
+
+ // TODO(primiano): This is an intermediate solution to deal with the fact
+ // that today the TRACE_EVENT* macros cache the state ptr. They should just
+ // cache the full TraceCategory ptr, which is immutable, and use the helper
+ // functions here. That will get rid of the need for this awkward ptr getter
+ // completely.
+ const uint8_t* state_ptr() const {
+ return const_cast<const uint8_t*>(&state_);
+ }
+
+ uint8_t state() const {
+ return *const_cast<volatile const uint8_t*>(&state_);
+ }
+
+ bool is_enabled() const { return state() != 0; }
+
+ void set_state(uint8_t state) {
+ *const_cast<volatile uint8_t*>(&state_) = state;
+ }
+
+ void clear_state_flag(StateFlags flag) { set_state(state() & (~flag)); }
+ void set_state_flag(StateFlags flag) { set_state(state() | flag); }
+
+ uint32_t enabled_filters() const {
+ return *const_cast<volatile const uint32_t*>(&enabled_filters_);
+ }
+
+ bool is_filter_enabled(size_t index) const {
+ DCHECK(index < sizeof(enabled_filters_) * 8);
+ return (enabled_filters() & (1 << index)) != 0;
+ }
+
+ void set_enabled_filters(uint32_t enabled_filters) {
+ *const_cast<volatile uint32_t*>(&enabled_filters_) = enabled_filters;
+ }
+
+ void reset_for_testing() {
+ set_state(0);
+ set_enabled_filters(0);
+ }
+
+ // These fields should not be accessed directly, not even by tracing code.
+ // The only reason they are not private is that making them private would
+ // make it impossible to have a global array of TraceCategory in
+ // category_registry.cc without creating static initializers. See the
+ // discussion on goo.gl/qhZN94 and crbug.com/{660967,660828}.
+
+ // The enabled state. TRACE_EVENT* macros will capture events if any of the
+ // flags here are set. Since TRACE_EVENTx macros are used in a lot of
+ // fast-paths, accesses to this field are non-barriered and racy by design.
+ // This field is mutated when starting/stopping tracing and we don't care
+ // about missing some events.
+ uint8_t state_;
+
+ // When ENABLED_FOR_FILTERING is set, this contains a bitmap of the
+ // corresponding filters (see event_filters.h).
+ uint32_t enabled_filters_;
+
+ // TraceCategory group names are long lived static strings.
+ const char* name_;
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_TRACE_CATEGORY_H_
diff --git a/chromium/base/trace_event/trace_category_unittest.cc b/chromium/base/trace_event/trace_category_unittest.cc
new file mode 100644
index 00000000000..6fc9bb3dc5d
--- /dev/null
+++ b/chromium/base/trace_event/trace_category_unittest.cc
@@ -0,0 +1,137 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string.h>
+
+#include <memory>
+
+#include "base/bind.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/thread.h"
+#include "base/trace_event/category_registry.h"
+#include "base/trace_event/trace_category.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace trace_event {
+
+// Static initializers are generally forbidden. However, in the past we ran
+// into cases of tests using tracing in a static initializer. This test checks
+// that the category registry doesn't rely on static initializers itself and
+// is functional even if called from another static initializer.
+bool Initializer() {
+ return CategoryRegistry::kCategoryMetadata &&
+ CategoryRegistry::kCategoryMetadata->is_valid();
+}
+bool g_initializer_check = Initializer();
+
+class TraceCategoryTest : public testing::Test {
+ public:
+ void SetUp() override { CategoryRegistry::Initialize(); }
+
+ void TearDown() override { CategoryRegistry::ResetForTesting(); }
+
+ static bool GetOrCreateCategoryByName(const char* name, TraceCategory** cat) {
+ return CategoryRegistry::GetOrCreateCategoryByName(name, cat);
+ }
+
+ static CategoryRegistry::Range GetAllCategories() {
+ return CategoryRegistry::GetAllCategories();
+ }
+
+ static void TestRaceThreadMain(WaitableEvent* event) {
+ TraceCategory* cat = nullptr;
+ event->Wait();
+ GetOrCreateCategoryByName("__test_race", &cat);
+ EXPECT_NE(nullptr, cat);
+ }
+};
+
+TEST_F(TraceCategoryTest, Basic) {
+ ASSERT_NE(nullptr, CategoryRegistry::kCategoryMetadata);
+ ASSERT_TRUE(CategoryRegistry::kCategoryMetadata->is_valid());
+ ASSERT_FALSE(CategoryRegistry::kCategoryMetadata->is_enabled());
+
+ // Metadata category is built-in and should not create a new category.
+ TraceCategory* cat_meta = nullptr;
+ const char* kMetadataName = CategoryRegistry::kCategoryMetadata->name();
+ ASSERT_FALSE(GetOrCreateCategoryByName(kMetadataName, &cat_meta));
+ ASSERT_EQ(CategoryRegistry::kCategoryMetadata, cat_meta);
+
+ TraceCategory* cat_1 = nullptr;
+ ASSERT_TRUE(GetOrCreateCategoryByName("__test_ab", &cat_1));
+ ASSERT_FALSE(cat_1->is_enabled());
+ ASSERT_EQ(0u, cat_1->enabled_filters());
+ cat_1->set_state_flag(TraceCategory::ENABLED_FOR_RECORDING);
+ cat_1->set_state_flag(TraceCategory::ENABLED_FOR_FILTERING);
+ ASSERT_EQ(TraceCategory::ENABLED_FOR_RECORDING |
+ TraceCategory::ENABLED_FOR_FILTERING,
+ cat_1->state());
+
+ cat_1->set_enabled_filters(129);
+ ASSERT_EQ(129u, cat_1->enabled_filters());
+ ASSERT_EQ(cat_1, CategoryRegistry::GetCategoryByStatePtr(cat_1->state_ptr()));
+
+ cat_1->clear_state_flag(TraceCategory::ENABLED_FOR_FILTERING);
+ ASSERT_EQ(TraceCategory::ENABLED_FOR_RECORDING, cat_1->state());
+ ASSERT_EQ(TraceCategory::ENABLED_FOR_RECORDING, *cat_1->state_ptr());
+ ASSERT_TRUE(cat_1->is_enabled());
+
+ TraceCategory* cat_2 = nullptr;
+ ASSERT_TRUE(GetOrCreateCategoryByName("__test_a", &cat_2));
+ ASSERT_FALSE(cat_2->is_enabled());
+ cat_2->set_state_flag(TraceCategory::ENABLED_FOR_RECORDING);
+
+ TraceCategory* cat_2_copy = nullptr;
+ ASSERT_FALSE(GetOrCreateCategoryByName("__test_a", &cat_2_copy));
+ ASSERT_EQ(cat_2, cat_2_copy);
+
+ TraceCategory* cat_3 = nullptr;
+ ASSERT_TRUE(GetOrCreateCategoryByName("__test_ab,__test_a", &cat_3));
+ ASSERT_FALSE(cat_3->is_enabled());
+ ASSERT_EQ(0u, cat_3->enabled_filters());
+
+ int num_test_categories_seen = 0;
+ for (const TraceCategory& cat : GetAllCategories()) {
+ if (strcmp(cat.name(), kMetadataName) == 0)
+ ASSERT_TRUE(CategoryRegistry::IsBuiltinCategory(&cat));
+
+ if (strncmp(cat.name(), "__test", 6) == 0) {
+ ASSERT_FALSE(CategoryRegistry::IsBuiltinCategory(&cat));
+ num_test_categories_seen++;
+ }
+ }
+ ASSERT_EQ(3, num_test_categories_seen);
+ ASSERT_TRUE(g_initializer_check);
+}
+
+// Tries to cover the case of multiple threads creating the same category
+// simultaneously. Should never end up with distinct entries with the same
+// name.
+TEST_F(TraceCategoryTest, ThreadRaces) {
+ const int kNumThreads = 32;
+ std::unique_ptr<Thread> threads[kNumThreads];
+ for (int i = 0; i < kNumThreads; i++) {
+ threads[i].reset(new Thread("test thread"));
+ threads[i]->Start();
+ }
+ WaitableEvent sync_event(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ for (int i = 0; i < kNumThreads; i++) {
+ threads[i]->task_runner()->PostTask(
+ FROM_HERE, Bind(&TestRaceThreadMain, Unretained(&sync_event)));
+ }
+ sync_event.Signal();
+ for (int i = 0; i < kNumThreads; i++)
+ threads[i]->Stop();
+
+ int num_times_seen = 0;
+ for (const TraceCategory& cat : GetAllCategories()) {
+ if (strcmp(cat.name(), "__test_race") == 0)
+ num_times_seen++;
+ }
+ ASSERT_EQ(1, num_times_seen);
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/chromium/base/trace_event/trace_config.cc b/chromium/base/trace_event/trace_config.cc
index 96c4dc70646..9a17adb969a 100644
--- a/chromium/base/trace_event/trace_config.cc
+++ b/chromium/base/trace_event/trace_config.cc
@@ -30,13 +30,11 @@ const char kRecordUntilFull[] = "record-until-full";
const char kRecordContinuously[] = "record-continuously";
const char kRecordAsMuchAsPossible[] = "record-as-much-as-possible";
const char kTraceToConsole[] = "trace-to-console";
-const char kEnableSampling[] = "enable-sampling";
const char kEnableSystrace[] = "enable-systrace";
const char kEnableArgumentFilter[] = "enable-argument-filter";
// String parameters that can be used to parse the trace config string.
const char kRecordModeParam[] = "record_mode";
-const char kEnableSamplingParam[] = "enable_sampling";
const char kEnableSystraceParam[] = "enable_systrace";
const char kEnableArgumentFilterParam[] = "enable_argument_filter";
const char kIncludedCategoriesParam[] = "included_categories";
@@ -121,6 +119,17 @@ void TraceConfig::MemoryDumpConfig::Clear() {
heap_profiler_options.Clear();
}
+void TraceConfig::MemoryDumpConfig::Merge(
+ const TraceConfig::MemoryDumpConfig& config) {
+ triggers.insert(triggers.end(), config.triggers.begin(),
+ config.triggers.end());
+ allowed_dump_modes.insert(config.allowed_dump_modes.begin(),
+ config.allowed_dump_modes.end());
+ heap_profiler_options.breakdown_threshold_bytes =
+ std::min(heap_profiler_options.breakdown_threshold_bytes,
+ config.heap_profiler_options.breakdown_threshold_bytes);
+}
+
TraceConfig::EventFilterConfig::EventFilterConfig(
const std::string& predicate_name)
: predicate_name_(predicate_name) {}
@@ -228,7 +237,6 @@ TraceConfig::TraceConfig(StringPiece config_string) {
TraceConfig::TraceConfig(const TraceConfig& tc)
: record_mode_(tc.record_mode_),
- enable_sampling_(tc.enable_sampling_),
enable_systrace_(tc.enable_systrace_),
enable_argument_filter_(tc.enable_argument_filter_),
memory_dump_config_(tc.memory_dump_config_),
@@ -246,7 +254,6 @@ TraceConfig& TraceConfig::operator=(const TraceConfig& rhs) {
return *this;
record_mode_ = rhs.record_mode_;
- enable_sampling_ = rhs.enable_sampling_;
enable_systrace_ = rhs.enable_systrace_;
enable_argument_filter_ = rhs.enable_argument_filter_;
memory_dump_config_ = rhs.memory_dump_config_;
@@ -342,7 +349,6 @@ bool TraceConfig::IsCategoryGroupEnabled(
void TraceConfig::Merge(const TraceConfig& config) {
if (record_mode_ != config.record_mode_
- || enable_sampling_ != config.enable_sampling_
|| enable_systrace_ != config.enable_systrace_
|| enable_argument_filter_ != config.enable_argument_filter_) {
DLOG(ERROR) << "Attempting to merge trace config with a different "
@@ -360,9 +366,7 @@ void TraceConfig::Merge(const TraceConfig& config) {
included_categories_.clear();
}
- memory_dump_config_.triggers.insert(memory_dump_config_.triggers.end(),
- config.memory_dump_config_.triggers.begin(),
- config.memory_dump_config_.triggers.end());
+ memory_dump_config_.Merge(config.memory_dump_config_);
disabled_categories_.insert(disabled_categories_.end(),
config.disabled_categories_.begin(),
@@ -373,11 +377,12 @@ void TraceConfig::Merge(const TraceConfig& config) {
synthetic_delays_.insert(synthetic_delays_.end(),
config.synthetic_delays_.begin(),
config.synthetic_delays_.end());
+ event_filters_.insert(event_filters_.end(), config.event_filters().begin(),
+ config.event_filters().end());
}
void TraceConfig::Clear() {
record_mode_ = RECORD_UNTIL_FULL;
- enable_sampling_ = false;
enable_systrace_ = false;
enable_argument_filter_ = false;
included_categories_.clear();
@@ -390,7 +395,6 @@ void TraceConfig::Clear() {
void TraceConfig::InitializeDefault() {
record_mode_ = RECORD_UNTIL_FULL;
- enable_sampling_ = false;
enable_systrace_ = false;
enable_argument_filter_ = false;
}
@@ -411,7 +415,6 @@ void TraceConfig::InitializeFromConfigDict(const DictionaryValue& dict) {
}
bool val;
- enable_sampling_ = dict.GetBoolean(kEnableSamplingParam, &val) ? val : false;
enable_systrace_ = dict.GetBoolean(kEnableSystraceParam, &val) ? val : false;
enable_argument_filter_ =
dict.GetBoolean(kEnableArgumentFilterParam, &val) ? val : false;
@@ -424,6 +427,10 @@ void TraceConfig::InitializeFromConfigDict(const DictionaryValue& dict) {
if (dict.GetList(kSyntheticDelaysParam, &category_list))
SetSyntheticDelaysFromList(*category_list);
+ const base::ListValue* category_event_filters = nullptr;
+ if (dict.GetList(kEventFiltersParam, &category_event_filters))
+ SetEventFiltersFromConfigList(*category_event_filters);
+
if (IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
// If dump triggers are not set, the client is using the legacy mode with
// just the category enabled. So, use the default periodic dump config.
@@ -433,10 +440,6 @@ void TraceConfig::InitializeFromConfigDict(const DictionaryValue& dict) {
else
SetDefaultMemoryDumpConfig();
}
-
- const base::ListValue* category_event_filters = nullptr;
- if (dict.GetList(kEventFiltersParam, &category_event_filters))
- SetEventFilters(*category_event_filters);
}
void TraceConfig::InitializeFromConfigString(StringPiece config_string) {
@@ -482,7 +485,6 @@ void TraceConfig::InitializeFromStrings(StringPiece category_filter_string,
}
record_mode_ = RECORD_UNTIL_FULL;
- enable_sampling_ = false;
enable_systrace_ = false;
enable_argument_filter_ = false;
if (!trace_options_string.empty()) {
@@ -497,8 +499,6 @@ void TraceConfig::InitializeFromStrings(StringPiece category_filter_string,
record_mode_ = ECHO_TO_CONSOLE;
} else if (token == kRecordAsMuchAsPossible) {
record_mode_ = RECORD_AS_MUCH_AS_POSSIBLE;
- } else if (token == kEnableSampling) {
- enable_sampling_ = true;
} else if (token == kEnableSystrace) {
enable_systrace_ = true;
} else if (token == kEnableArgumentFilter) {
@@ -629,25 +629,9 @@ void TraceConfig::SetDefaultMemoryDumpConfig() {
memory_dump_config_.triggers.push_back(kDefaultHeavyMemoryDumpTrigger);
memory_dump_config_.triggers.push_back(kDefaultLightMemoryDumpTrigger);
memory_dump_config_.allowed_dump_modes = GetDefaultAllowedMemoryDumpModes();
-
- if (AllocationContextTracker::capture_mode() ==
- AllocationContextTracker::CaptureMode::PSEUDO_STACK) {
- for (const auto& filter : event_filters_) {
- if (filter.predicate_name() ==
- TraceLog::TraceEventFilter::kHeapProfilerPredicate)
- return;
- }
- // Adds a filter predicate to filter all categories for the heap profiler.
- // Note that the heap profiler predicate does not filter-out any events.
- EventFilterConfig heap_profiler_config(
- TraceLog::TraceEventFilter::kHeapProfilerPredicate);
- heap_profiler_config.AddIncludedCategory("*");
- heap_profiler_config.AddIncludedCategory(MemoryDumpManager::kTraceCategory);
- event_filters_.push_back(heap_profiler_config);
- }
}
-void TraceConfig::SetEventFilters(
+void TraceConfig::SetEventFiltersFromConfigList(
const base::ListValue& category_event_filters) {
event_filters_.clear();
@@ -710,7 +694,6 @@ std::unique_ptr<DictionaryValue> TraceConfig::ToDict() const {
NOTREACHED();
}
- dict->SetBoolean(kEnableSamplingParam, enable_sampling_);
dict->SetBoolean(kEnableSystraceParam, enable_systrace_);
dict->SetBoolean(kEnableArgumentFilterParam, enable_argument_filter_);
@@ -810,8 +793,6 @@ std::string TraceConfig::ToTraceOptionsString() const {
default:
NOTREACHED();
}
- if (enable_sampling_)
- ret = ret + "," + kEnableSampling;
if (enable_systrace_)
ret = ret + "," + kEnableSystrace;
if (enable_argument_filter_)
diff --git a/chromium/base/trace_event/trace_config.h b/chromium/base/trace_event/trace_config.h
index a5f8315bca6..c10ed47f5be 100644
--- a/chromium/base/trace_event/trace_config.h
+++ b/chromium/base/trace_event/trace_config.h
@@ -71,6 +71,8 @@ class BASE_EXPORT TraceConfig {
// Reset the values in the config.
void Clear();
+ void Merge(const MemoryDumpConfig& config);
+
// Set of memory dump modes allowed for the tracing session. The explicitly
// triggered dumps will be successful only if the dump mode is allowed in
// the config.
@@ -125,22 +127,22 @@ class BASE_EXPORT TraceConfig {
//
// |trace_options_string| is a comma-delimited list of trace options.
// Possible options are: "record-until-full", "record-continuously",
- // "record-as-much-as-possible", "trace-to-console", "enable-sampling",
- // "enable-systrace" and "enable-argument-filter".
+ // "record-as-much-as-possible", "trace-to-console", "enable-systrace" and
+ // "enable-argument-filter".
// The first 4 options are trace recording modes and hence
// mutually exclusive. If more than one trace recording mode appears in the
// options_string, the last one takes precedence. If no trace recording
// mode is specified, the recording mode is RECORD_UNTIL_FULL.
//
// The trace option will first be reset to the default option
- // (record_mode set to RECORD_UNTIL_FULL, enable_sampling, enable_systrace,
- // and enable_argument_filter set to false) before options parsed from
+ // (record_mode set to RECORD_UNTIL_FULL, enable_systrace and
+ // enable_argument_filter set to false) before options parsed from
// |trace_options_string| are applied on it. If |trace_options_string| is
// invalid, the final state of trace options is undefined.
//
// Example: TraceConfig("test_MyTest*", "record-until-full");
// Example: TraceConfig("test_MyTest*,test_OtherStuff",
- // "record-continuously, enable-sampling");
+ // "record-continuously");
// Example: TraceConfig("-excluded_category1,-excluded_category2",
// "record-until-full, trace-to-console");
// would set ECHO_TO_CONSOLE as the recording mode.
@@ -170,7 +172,6 @@ class BASE_EXPORT TraceConfig {
// Example:
// {
// "record_mode": "record-continuously",
- // "enable_sampling": true,
// "enable_systrace": true,
// "enable_argument_filter": true,
// "included_categories": ["included",
@@ -206,12 +207,10 @@ class BASE_EXPORT TraceConfig {
const StringList& GetSyntheticDelayValues() const;
TraceRecordMode GetTraceRecordMode() const { return record_mode_; }
- bool IsSamplingEnabled() const { return enable_sampling_; }
bool IsSystraceEnabled() const { return enable_systrace_; }
bool IsArgumentFilterEnabled() const { return enable_argument_filter_; }
void SetTraceRecordMode(TraceRecordMode mode) { record_mode_ = mode; }
- void EnableSampling() { enable_sampling_ = true; }
void EnableSystrace() { enable_systrace_ = true; }
void EnableArgumentFilter() { enable_argument_filter_ = true; }
@@ -243,6 +242,9 @@ class BASE_EXPORT TraceConfig {
}
const EventFilters& event_filters() const { return event_filters_; }
+ void SetEventFilters(const EventFilters& filter_configs) {
+ event_filters_ = filter_configs;
+ }
private:
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromValidLegacyFormat);
@@ -284,7 +286,7 @@ class BASE_EXPORT TraceConfig {
const DictionaryValue& memory_dump_config);
void SetDefaultMemoryDumpConfig();
- void SetEventFilters(const base::ListValue& event_filters);
+ void SetEventFiltersFromConfigList(const base::ListValue& event_filters);
std::unique_ptr<DictionaryValue> ToDict() const;
std::string ToTraceOptionsString() const;
@@ -306,7 +308,6 @@ class BASE_EXPORT TraceConfig {
bool HasIncludedPatterns() const;
TraceRecordMode record_mode_;
- bool enable_sampling_ : 1;
bool enable_systrace_ : 1;
bool enable_argument_filter_ : 1;
diff --git a/chromium/base/trace_event/trace_config_memory_test_util.h b/chromium/base/trace_event/trace_config_memory_test_util.h
index 6b47f8dc550..0ecdb3c15ca 100644
--- a/chromium/base/trace_event/trace_config_memory_test_util.h
+++ b/chromium/base/trace_event/trace_config_memory_test_util.h
@@ -18,7 +18,6 @@ class TraceConfigMemoryTestUtil {
return StringPrintf(
"{"
"\"enable_argument_filter\":false,"
- "\"enable_sampling\":false,"
"\"enable_systrace\":false,"
"\"included_categories\":["
"\"%s\""
@@ -47,7 +46,6 @@ class TraceConfigMemoryTestUtil {
return StringPrintf(
"{"
"\"enable_argument_filter\":false,"
- "\"enable_sampling\":false,"
"\"enable_systrace\":false,"
"\"included_categories\":["
"\"%s\""
@@ -65,7 +63,6 @@ class TraceConfigMemoryTestUtil {
return StringPrintf(
"{"
"\"enable_argument_filter\":false,"
- "\"enable_sampling\":false,"
"\"enable_systrace\":false,"
"\"included_categories\":["
"\"%s\""
@@ -78,7 +75,6 @@ class TraceConfigMemoryTestUtil {
return StringPrintf(
"{"
"\"enable_argument_filter\":false,"
- "\"enable_sampling\":false,"
"\"enable_systrace\":false,"
"\"included_categories\":["
"\"%s\""
diff --git a/chromium/base/trace_event/trace_config_unittest.cc b/chromium/base/trace_event/trace_config_unittest.cc
index fa4fa7c3e05..c04029909cc 100644
--- a/chromium/base/trace_event/trace_config_unittest.cc
+++ b/chromium/base/trace_event/trace_config_unittest.cc
@@ -20,7 +20,6 @@ namespace {
const char kDefaultTraceConfigString[] =
"{"
"\"enable_argument_filter\":false,"
- "\"enable_sampling\":false,"
"\"enable_systrace\":false,"
"\"record_mode\":\"record-until-full\""
"}";
@@ -28,7 +27,6 @@ const char kDefaultTraceConfigString[] =
const char kCustomTraceConfigString[] =
"{"
"\"enable_argument_filter\":true,"
- "\"enable_sampling\":true,"
"\"enable_systrace\":true,"
"\"event_filters\":["
"{"
@@ -59,7 +57,6 @@ const char kCustomTraceConfigString[] =
void CheckDefaultTraceConfigBehavior(const TraceConfig& tc) {
EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
- EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
@@ -81,44 +78,31 @@ TEST(TraceConfigTest, TraceConfigFromValidLegacyFormat) {
// From trace options strings
TraceConfig config("", "record-until-full");
EXPECT_EQ(RECORD_UNTIL_FULL, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("record-until-full", config.ToTraceOptionsString().c_str());
config = TraceConfig("", "record-continuously");
EXPECT_EQ(RECORD_CONTINUOUSLY, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("record-continuously", config.ToTraceOptionsString().c_str());
config = TraceConfig("", "trace-to-console");
EXPECT_EQ(ECHO_TO_CONSOLE, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("trace-to-console", config.ToTraceOptionsString().c_str());
config = TraceConfig("", "record-as-much-as-possible");
EXPECT_EQ(RECORD_AS_MUCH_AS_POSSIBLE, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("record-as-much-as-possible",
config.ToTraceOptionsString().c_str());
- config = TraceConfig("", "record-until-full, enable-sampling");
- EXPECT_EQ(RECORD_UNTIL_FULL, config.GetTraceRecordMode());
- EXPECT_TRUE(config.IsSamplingEnabled());
- EXPECT_FALSE(config.IsSystraceEnabled());
- EXPECT_FALSE(config.IsArgumentFilterEnabled());
- EXPECT_STREQ("record-until-full,enable-sampling",
- config.ToTraceOptionsString().c_str());
-
config = TraceConfig("", "enable-systrace, record-continuously");
EXPECT_EQ(RECORD_CONTINUOUSLY, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_TRUE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("record-continuously,enable-systrace",
@@ -126,7 +110,6 @@ TEST(TraceConfigTest, TraceConfigFromValidLegacyFormat) {
config = TraceConfig("", "enable-argument-filter,record-as-much-as-possible");
EXPECT_EQ(RECORD_AS_MUCH_AS_POSSIBLE, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsSystraceEnabled());
EXPECT_TRUE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("record-as-much-as-possible,enable-argument-filter",
@@ -134,19 +117,17 @@ TEST(TraceConfigTest, TraceConfigFromValidLegacyFormat) {
config = TraceConfig(
"",
- "enable-systrace,trace-to-console,enable-sampling,enable-argument-filter");
+ "enable-systrace,trace-to-console,enable-argument-filter");
EXPECT_EQ(ECHO_TO_CONSOLE, config.GetTraceRecordMode());
- EXPECT_TRUE(config.IsSamplingEnabled());
EXPECT_TRUE(config.IsSystraceEnabled());
EXPECT_TRUE(config.IsArgumentFilterEnabled());
EXPECT_STREQ(
- "trace-to-console,enable-sampling,enable-systrace,enable-argument-filter",
+ "trace-to-console,enable-systrace,enable-argument-filter",
config.ToTraceOptionsString().c_str());
config = TraceConfig(
"", "record-continuously, record-until-full, trace-to-console");
EXPECT_EQ(ECHO_TO_CONSOLE, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("trace-to-console", config.ToTraceOptionsString().c_str());
@@ -154,28 +135,24 @@ TEST(TraceConfigTest, TraceConfigFromValidLegacyFormat) {
// From TraceRecordMode
config = TraceConfig("", RECORD_UNTIL_FULL);
EXPECT_EQ(RECORD_UNTIL_FULL, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("record-until-full", config.ToTraceOptionsString().c_str());
config = TraceConfig("", RECORD_CONTINUOUSLY);
EXPECT_EQ(RECORD_CONTINUOUSLY, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("record-continuously", config.ToTraceOptionsString().c_str());
config = TraceConfig("", ECHO_TO_CONSOLE);
EXPECT_EQ(ECHO_TO_CONSOLE, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("trace-to-console", config.ToTraceOptionsString().c_str());
config = TraceConfig("", RECORD_AS_MUCH_AS_POSSIBLE);
EXPECT_EQ(RECORD_AS_MUCH_AS_POSSIBLE, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("record-as-much-as-possible",
@@ -207,33 +184,30 @@ TEST(TraceConfigTest, TraceConfigFromValidLegacyFormat) {
// From both trace options and category filter strings
config = TraceConfig("", "");
EXPECT_EQ(RECORD_UNTIL_FULL, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("", config.ToCategoryFilterString().c_str());
EXPECT_STREQ("record-until-full", config.ToTraceOptionsString().c_str());
config = TraceConfig("included,-excluded,inc_pattern*,-exc_pattern*",
- "enable-systrace, trace-to-console, enable-sampling");
+ "enable-systrace, trace-to-console");
EXPECT_EQ(ECHO_TO_CONSOLE, config.GetTraceRecordMode());
- EXPECT_TRUE(config.IsSamplingEnabled());
EXPECT_TRUE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("included,inc_pattern*,-excluded,-exc_pattern*",
config.ToCategoryFilterString().c_str());
- EXPECT_STREQ("trace-to-console,enable-sampling,enable-systrace",
+ EXPECT_STREQ("trace-to-console,enable-systrace",
config.ToTraceOptionsString().c_str());
// From both trace options and category filter strings with spaces.
config = TraceConfig(" included , -excluded, inc_pattern*, ,-exc_pattern* ",
- "enable-systrace, ,trace-to-console, enable-sampling ");
+ "enable-systrace, ,trace-to-console ");
EXPECT_EQ(ECHO_TO_CONSOLE, config.GetTraceRecordMode());
- EXPECT_TRUE(config.IsSamplingEnabled());
EXPECT_TRUE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("included,inc_pattern*,-excluded,-exc_pattern*",
config.ToCategoryFilterString().c_str());
- EXPECT_STREQ("trace-to-console,enable-sampling,enable-systrace",
+ EXPECT_STREQ("trace-to-console,enable-systrace",
config.ToTraceOptionsString().c_str());
// From category filter string and TraceRecordMode
@@ -241,7 +215,6 @@ TEST(TraceConfigTest, TraceConfigFromValidLegacyFormat) {
RECORD_CONTINUOUSLY);
EXPECT_EQ(RECORD_CONTINUOUSLY, config.GetTraceRecordMode());
EXPECT_FALSE(config.IsSystraceEnabled());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("included,inc_pattern*,-excluded,-exc_pattern*",
config.ToCategoryFilterString().c_str());
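
For orientation, a minimal sketch (not part of this patch) of the legacy two-string constructor these assertions exercise, using only the base::trace_event API visible in this diff; note that after this patch the "enable-sampling" token is simply no longer recognized:

  #include "base/trace_event/trace_config.h"

  void EnableLegacyStyleConfig() {
    // First argument: category filter; second: comma-separated options.
    base::trace_event::TraceConfig config(
        "included,-excluded,inc_pattern*",
        "record-continuously,enable-systrace");
    // Per the assertions above:
    //   config.GetTraceRecordMode() == RECORD_CONTINUOUSLY
    //   config.IsSystraceEnabled() == true
    //   config.IsArgumentFilterEnabled() == false
  }
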
@@ -251,7 +224,6 @@ TEST(TraceConfigTest, TraceConfigFromValidLegacyFormat) {
TEST(TraceConfigTest, TraceConfigFromInvalidLegacyStrings) {
TraceConfig config("", "foo-bar-baz");
EXPECT_EQ(RECORD_UNTIL_FULL, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_FALSE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("", config.ToCategoryFilterString().c_str());
@@ -259,7 +231,6 @@ TEST(TraceConfigTest, TraceConfigFromInvalidLegacyStrings) {
config = TraceConfig("arbitrary-category", "foo-bar-baz, enable-systrace");
EXPECT_EQ(RECORD_UNTIL_FULL, config.GetTraceRecordMode());
- EXPECT_FALSE(config.IsSamplingEnabled());
EXPECT_TRUE(config.IsSystraceEnabled());
EXPECT_FALSE(config.IsArgumentFilterEnabled());
EXPECT_STREQ("arbitrary-category", config.ToCategoryFilterString().c_str());
@@ -356,7 +327,6 @@ TEST(TraceConfigTest, TraceConfigFromDict) {
TraceConfig tc(dict);
EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
- EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
@@ -370,7 +340,6 @@ TEST(TraceConfigTest, TraceConfigFromDict) {
TraceConfig default_tc(*default_dict);
EXPECT_STREQ(kDefaultTraceConfigString, default_tc.ToString().c_str());
EXPECT_EQ(RECORD_UNTIL_FULL, default_tc.GetTraceRecordMode());
- EXPECT_FALSE(default_tc.IsSamplingEnabled());
EXPECT_FALSE(default_tc.IsSystraceEnabled());
EXPECT_FALSE(default_tc.IsArgumentFilterEnabled());
EXPECT_STREQ("", default_tc.ToCategoryFilterString().c_str());
@@ -384,7 +353,6 @@ TEST(TraceConfigTest, TraceConfigFromDict) {
TraceConfig custom_tc(*custom_dict);
EXPECT_STREQ(kCustomTraceConfigString, custom_tc.ToString().c_str());
EXPECT_EQ(RECORD_CONTINUOUSLY, custom_tc.GetTraceRecordMode());
- EXPECT_TRUE(custom_tc.IsSamplingEnabled());
EXPECT_TRUE(custom_tc.IsSystraceEnabled());
EXPECT_TRUE(custom_tc.IsArgumentFilterEnabled());
EXPECT_STREQ("included,inc_pattern*,"
@@ -399,7 +367,6 @@ TEST(TraceConfigTest, TraceConfigFromValidString) {
const char config_string[] =
"{"
"\"enable_argument_filter\":true,"
- "\"enable_sampling\":true,"
"\"enable_systrace\":true,"
"\"event_filters\":["
"{"
@@ -420,7 +387,6 @@ TEST(TraceConfigTest, TraceConfigFromValidString) {
EXPECT_STREQ(config_string, tc.ToString().c_str());
EXPECT_EQ(RECORD_CONTINUOUSLY, tc.GetTraceRecordMode());
- EXPECT_TRUE(tc.IsSamplingEnabled());
EXPECT_TRUE(tc.IsSystraceEnabled());
EXPECT_TRUE(tc.IsArgumentFilterEnabled());
EXPECT_STREQ("included,inc_pattern*,disabled-by-default-cc,-excluded,"
@@ -479,7 +445,6 @@ TEST(TraceConfigTest, TraceConfigFromValidString) {
EXPECT_STREQ(tc.ToString().c_str(),
"{"
"\"enable_argument_filter\":false,"
- "\"enable_sampling\":false,"
"\"enable_systrace\":false,"
"\"record_mode\":\"record-until-full\""
"}");
@@ -491,7 +456,6 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
TraceConfig tc("");
EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
- EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
@@ -500,7 +464,6 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
tc = TraceConfig("This is an invalid config string.");
EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
- EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
@@ -509,7 +472,6 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
tc = TraceConfig("[\"This\", \"is\", \"not\", \"a\", \"dictionary\"]");
EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
- EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
@@ -518,7 +480,6 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
tc = TraceConfig("{\"record_mode\": invalid-value-needs-double-quote}");
EXPECT_STREQ(kDefaultTraceConfigString, tc.ToString().c_str());
EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
- EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
@@ -528,7 +489,6 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
// initialize TraceConfig with best effort.
tc = TraceConfig("{}");
EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
- EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
@@ -536,7 +496,6 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
tc = TraceConfig("{\"arbitrary-key\":\"arbitrary-value\"}");
EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
- EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
EXPECT_STREQ("", tc.ToCategoryFilterString().c_str());
@@ -544,7 +503,6 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
const char invalid_config_string[] =
"{"
- "\"enable_sampling\":\"true\","
"\"enable_systrace\":1,"
"\"excluded_categories\":[\"excluded\"],"
"\"included_categories\":\"not a list\","
@@ -555,7 +513,6 @@ TEST(TraceConfigTest, TraceConfigFromInvalidString) {
"}";
tc = TraceConfig(invalid_config_string);
EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
- EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
EXPECT_FALSE(tc.IsArgumentFilterEnabled());
EXPECT_STREQ("-excluded,DELAY(test.Delay1;16),DELAY(test.Delay2;32)",
@@ -580,7 +537,6 @@ TEST(TraceConfigTest, MergingTraceConfigs) {
tc.Merge(tc2);
EXPECT_STREQ("{"
"\"enable_argument_filter\":false,"
- "\"enable_sampling\":false,"
"\"enable_systrace\":false,"
"\"excluded_categories\":[\"excluded\",\"exc_pattern*\"],"
"\"record_mode\":\"record-until-full\""
@@ -647,15 +603,11 @@ TEST(TraceConfigTest, IsEmptyOrContainsLeadingOrTrailingWhitespace) {
TEST(TraceConfigTest, SetTraceOptionValues) {
TraceConfig tc;
EXPECT_EQ(RECORD_UNTIL_FULL, tc.GetTraceRecordMode());
- EXPECT_FALSE(tc.IsSamplingEnabled());
EXPECT_FALSE(tc.IsSystraceEnabled());
tc.SetTraceRecordMode(RECORD_AS_MUCH_AS_POSSIBLE);
EXPECT_EQ(RECORD_AS_MUCH_AS_POSSIBLE, tc.GetTraceRecordMode());
- tc.EnableSampling();
- EXPECT_TRUE(tc.IsSamplingEnabled());
-
tc.EnableSystrace();
EXPECT_TRUE(tc.IsSystraceEnabled());
}
diff --git a/chromium/base/trace_event/trace_event.h b/chromium/base/trace_event/trace_event.h
index 4de64837161..0299ddd0cb7 100644
--- a/chromium/base/trace_event/trace_event.h
+++ b/chromium/base/trace_event/trace_event.h
@@ -19,6 +19,7 @@
#include "base/time/time.h"
#include "base/trace_event/common/trace_event_common.h"
#include "base/trace_event/heap_profiler.h"
+#include "base/trace_event/trace_category.h"
#include "base/trace_event/trace_event_system_stats_monitor.h"
#include "base/trace_event/trace_log.h"
#include "build/build_config.h"
@@ -49,54 +50,19 @@
#define TRACE_ID_GLOBAL(id) trace_event_internal::TraceID::GlobalId(id)
#define TRACE_ID_LOCAL(id) trace_event_internal::TraceID::LocalId(id)
-// Sets the current sample state to the given category and name (both must be
-// constant strings). These states are intended for a sampling profiler.
-// Implementation note: we store category and name together because we don't
-// want the inconsistency/expense of storing two pointers.
-// |thread_bucket| is [0..2] and is used to statically isolate samples in one
-// thread from others.
-#define TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET( \
- bucket_number, category, name) \
- trace_event_internal:: \
- TraceEventSamplingStateScope<bucket_number>::Set(category "\0" name)
-
-// Returns a current sampling state of the given bucket.
-#define TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(bucket_number) \
- trace_event_internal::TraceEventSamplingStateScope<bucket_number>::Current()
-
-// Sets a current sampling state of the given bucket.
-// |category_and_name| doesn't need to be a constant string.
-// The format of the string is "category\0name".
-#define TRACE_EVENT_SET_NONCONST_SAMPLING_STATE_FOR_BUCKET( \
- bucket_number, category_and_name) \
- trace_event_internal:: \
- TraceEventSamplingStateScope<bucket_number>::Set(category_and_name)
-
-// Creates a scope of a sampling state of the given bucket.
-//
-// { // The sampling state is set within this scope.
-// TRACE_EVENT_SAMPLING_STATE_SCOPE_FOR_BUCKET(0, "category", "name");
-// ...;
-// }
-#define TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET( \
- bucket_number, category, name) \
- trace_event_internal::TraceEventSamplingStateScope<bucket_number> \
- traceEventSamplingScope(category "\0" name);
-
#define TRACE_EVENT_API_CURRENT_THREAD_ID \
static_cast<int>(base::PlatformThread::CurrentId())
#define INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE() \
UNLIKELY(*INTERNAL_TRACE_EVENT_UID(category_group_enabled) & \
- (base::trace_event::TraceLog::ENABLED_FOR_RECORDING | \
- base::trace_event::TraceLog::ENABLED_FOR_EVENT_CALLBACK | \
- base::trace_event::TraceLog::ENABLED_FOR_ETW_EXPORT | \
- base::trace_event::TraceLog::ENABLED_FOR_FILTERING))
+ (base::trace_event::TraceCategory::ENABLED_FOR_RECORDING | \
+ base::trace_event::TraceCategory::ENABLED_FOR_ETW_EXPORT | \
+ base::trace_event::TraceCategory::ENABLED_FOR_FILTERING))
#define INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_FILTERING_MODE( \
category_group_enabled) \
UNLIKELY(category_group_enabled& \
- base::trace_event::TraceLog::ENABLED_FOR_FILTERING)
+ base::trace_event::TraceCategory::ENABLED_FOR_FILTERING)
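
Two things change in the enabled-for-recording/filtering checks above: the category state bits now live on base::trace_event::TraceCategory instead of TraceLog, and ENABLED_FOR_EVENT_CALLBACK disappears entirely; the event-callback API it served is removed later in this patch (see the deleted TraceEventCallbackTest suite in trace_event_unittest.cc).
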
////////////////////////////////////////////////////////////////////////////////
// Implementation specific tracing API definitions.
@@ -232,13 +198,6 @@
// Defines visibility for classes in trace_event.h
#define TRACE_EVENT_API_CLASS_EXPORT BASE_EXPORT
-// The thread buckets for the sampling profiler.
-TRACE_EVENT_API_CLASS_EXPORT extern \
- TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
-
-#define TRACE_EVENT_API_THREAD_BUCKET(thread_bucket) \
- g_trace_state[thread_bucket]
-
////////////////////////////////////////////////////////////////////////////////
// Implementation detail: trace event macros create temporary variables
@@ -376,29 +335,20 @@ TRACE_EVENT_API_CLASS_EXPORT extern \
} \
} while (0)
-// This macro ignores whether the bind_id is local, global, or mangled.
-#define INTERNAL_TRACE_EVENT_ADD_BIND_IDS(category_group, name, id, bind_id, \
- ...) \
+// The linked ID will not be mangled.
+#define INTERNAL_TRACE_EVENT_ADD_LINK_IDS(category_group, name, id1, id2) \
do { \
INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
- trace_event_internal::TraceID source_id((id)); \
+ trace_event_internal::TraceID source_id((id1)); \
unsigned int source_flags = source_id.id_flags(); \
- trace_event_internal::TraceID target_id((bind_id)); \
- if (target_id.scope() == trace_event_internal::kGlobalScope) { \
- trace_event_internal::AddTraceEvent( \
- TRACE_EVENT_PHASE_BIND_IDS, \
- INTERNAL_TRACE_EVENT_UID(category_group_enabled), \
- name, source_id.scope(), source_id.raw_id(), \
- source_flags, target_id.raw_id(), ##__VA_ARGS__); \
- } else { \
- trace_event_internal::AddTraceEvent( \
- TRACE_EVENT_PHASE_BIND_IDS, \
- INTERNAL_TRACE_EVENT_UID(category_group_enabled), \
- name, source_id.scope(), source_id.raw_id(), \
- source_flags, target_id.raw_id(), \
- "bind_scope", target_id.scope(), ##__VA_ARGS__); \
- } \
+ trace_event_internal::TraceID target_id((id2)); \
+ trace_event_internal::AddTraceEvent( \
+ TRACE_EVENT_PHASE_LINK_IDS, \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), \
+ name, source_id.scope(), source_id.raw_id(), source_flags, \
+ trace_event_internal::kNoId, \
+ "linked_id", target_id.AsConvertableToTraceFormat()); \
} \
} while (0)
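
For reference, the public TRACE_LINK_IDS macro that expands to the helper above is invoked like this in the unit tests later in this patch (excerpted, not new code):

  TRACE_LINK_IDS("all", "TRACE_LINK_IDS simple call", 0x1000, 0x2000);
  TRACE_LINK_IDS("all", "TRACE_LINK_IDS to a local ID", 0x1000,
                 TRACE_ID_LOCAL(0x2000));
  TRACE_LINK_IDS("all", "TRACE_LINK_IDS to a global ID", 0x1000,
                 TRACE_ID_GLOBAL(0x2000));
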
@@ -456,7 +406,7 @@ const unsigned long long kNoId = 0;
// TraceID encapsulates an ID that can either be an integer or pointer. Pointers
// are by default mangled with the Process ID so that they are unlikely to
// collide when the same pointer is used on different processes.
-class TraceID {
+class BASE_EXPORT TraceID {
public:
// Can be combined with WithScope.
class LocalId {
@@ -582,6 +532,9 @@ class TraceID {
const char* scope() const { return scope_; }
unsigned int id_flags() const { return id_flags_; }
+ std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
+ AsConvertableToTraceFormat() const;
+
private:
const char* scope_ = nullptr;
unsigned long long raw_id_;
@@ -1100,37 +1053,6 @@ class TRACE_EVENT_API_CLASS_EXPORT ScopedTraceBinaryEfficient {
trace_event_internal::ScopedTraceBinaryEfficient \
INTERNAL_TRACE_EVENT_UID(scoped_trace)(category_group, name);
-// TraceEventSamplingStateScope records the current sampling state
-// and sets a new sampling state. When the scope exits, it restores
-// the previously recorded sampling state.
-template<size_t BucketNumber>
-class TraceEventSamplingStateScope {
- public:
- TraceEventSamplingStateScope(const char* category_and_name) {
- previous_state_ = TraceEventSamplingStateScope<BucketNumber>::Current();
- TraceEventSamplingStateScope<BucketNumber>::Set(category_and_name);
- }
-
- ~TraceEventSamplingStateScope() {
- TraceEventSamplingStateScope<BucketNumber>::Set(previous_state_);
- }
-
- static inline const char* Current() {
- return reinterpret_cast<const char*>(TRACE_EVENT_API_ATOMIC_LOAD(
- g_trace_state[BucketNumber]));
- }
-
- static inline void Set(const char* category_and_name) {
- TRACE_EVENT_API_ATOMIC_STORE(
- g_trace_state[BucketNumber],
- reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>(
- const_cast<char*>(category_and_name)));
- }
-
- private:
- const char* previous_state_;
-};
-
} // namespace trace_event_internal
namespace base {
diff --git a/chromium/base/trace_event/trace_event_impl.cc b/chromium/base/trace_event/trace_event_impl.cc
index d41500dc06f..f9792d0d6df 100644
--- a/chromium/base/trace_event/trace_event_impl.cc
+++ b/chromium/base/trace_event/trace_event_impl.cc
@@ -8,6 +8,7 @@
#include "base/format_macros.h"
#include "base/json/string_escape.h"
+#include "base/memory/ptr_util.h"
#include "base/process/process_handle.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
@@ -15,6 +16,7 @@
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_event_argument.h"
#include "base/trace_event/trace_log.h"
namespace base {
@@ -391,8 +393,7 @@ void TraceEvent::AppendAsJSON(
StringAppendF(out, ",\"bp\":\"e\"");
if ((flags_ & TRACE_EVENT_FLAG_FLOW_OUT) ||
- (flags_ & TRACE_EVENT_FLAG_FLOW_IN) ||
- phase_ == TRACE_EVENT_PHASE_BIND_IDS) {
+ (flags_ & TRACE_EVENT_FLAG_FLOW_IN)) {
StringAppendF(out, ",\"bind_id\":\"0x%" PRIx64 "\"",
static_cast<uint64_t>(bind_id_));
}
@@ -448,3 +449,40 @@ void TraceEvent::AppendPrettyPrinted(std::ostringstream* out) const {
} // namespace trace_event
} // namespace base
+
+namespace trace_event_internal {
+
+std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
+TraceID::AsConvertableToTraceFormat() const {
+ auto value = base::MakeUnique<base::trace_event::TracedValue>();
+
+ if (scope_ != kGlobalScope)
+ value->SetString("scope", scope_);
+ switch (id_flags_) {
+ case TRACE_EVENT_FLAG_HAS_ID:
+ value->SetString(
+ "id",
+ base::StringPrintf("0x%" PRIx64, static_cast<uint64_t>(raw_id_)));
+ break;
+ case TRACE_EVENT_FLAG_HAS_GLOBAL_ID:
+ value->BeginDictionary("id2");
+ value->SetString(
+ "global",
+ base::StringPrintf("0x%" PRIx64, static_cast<uint64_t>(raw_id_)));
+ value->EndDictionary();
+ break;
+ case TRACE_EVENT_FLAG_HAS_LOCAL_ID:
+ value->BeginDictionary("id2");
+ value->SetString(
+ "local",
+ base::StringPrintf("0x%" PRIx64, static_cast<uint64_t>(raw_id_)));
+ value->EndDictionary();
+ break;
+ default:
+ NOTREACHED() << "Unrecognized ID flag";
+ }
+
+ return std::move(value);
+}
+
+} // namespace trace_event_internal
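
Cross-checking the switch above against the unittest expectations later in this patch, the three ID flavors end up in the event's args roughly as the following JSON shapes (a sketch of the serialized output, not literal code):

  "linked_id": {"id": "0x2000"}                 // TRACE_EVENT_FLAG_HAS_ID
  "linked_id": {"id2": {"global": "0x2000"}}    // TRACE_EVENT_FLAG_HAS_GLOBAL_ID
  "linked_id": {"id2": {"local": "0x2000"}}     // TRACE_EVENT_FLAG_HAS_LOCAL_ID

with a sibling "scope" key added when the ID carries a non-global scope.
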
diff --git a/chromium/base/trace_event/trace_event_impl.h b/chromium/base/trace_event/trace_event_impl.h
index 0b23d31f46e..5eef702fb90 100644
--- a/chromium/base/trace_event/trace_event_impl.h
+++ b/chromium/base/trace_event/trace_event_impl.h
@@ -28,10 +28,6 @@
#include "build/build_config.h"
namespace base {
-
-class WaitableEvent;
-class MessageLoop;
-
namespace trace_event {
typedef base::Callback<bool(const char* arg_name)> ArgumentNameFilterPredicate;
diff --git a/chromium/base/trace_event/trace_event_synthetic_delay.h b/chromium/base/trace_event/trace_event_synthetic_delay.h
index 59e2842f71a..e86f9eee2c6 100644
--- a/chromium/base/trace_event/trace_event_synthetic_delay.h
+++ b/chromium/base/trace_event/trace_event_synthetic_delay.h
@@ -62,9 +62,6 @@
trace_event_internal::GetOrCreateDelay(name, &impl_ptr)->End(); \
} while (false)
-template <typename Type>
-struct DefaultSingletonTraits;
-
namespace base {
namespace trace_event {
diff --git a/chromium/base/trace_event/trace_event_unittest.cc b/chromium/base/trace_event/trace_event_unittest.cc
index 92fc7bc80dc..81d043ac394 100644
--- a/chromium/base/trace_event/trace_event_unittest.cc
+++ b/chromium/base/trace_event/trace_event_unittest.cc
@@ -31,6 +31,7 @@
#include "base/threading/thread.h"
#include "base/time/time.h"
#include "base/trace_event/trace_buffer.h"
+#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_synthetic_delay.h"
#include "base/values.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -68,9 +69,6 @@ class TraceEventTestFixture : public testing::Test {
WaitableEvent* flush_complete_event,
const scoped_refptr<base::RefCountedString>& events_str,
bool has_more_events);
- void OnWatchEventMatched() {
- ++event_watch_notification_;
- }
DictionaryValue* FindMatchingTraceEntry(const JsonKeyValue* key_values);
DictionaryValue* FindNamePhase(const char* name, const char* phase);
DictionaryValue* FindNamePhaseKeyValue(const char* name,
@@ -92,7 +90,6 @@ class TraceEventTestFixture : public testing::Test {
}
void BeginSpecificTrace(const std::string& filter) {
- event_watch_notification_ = 0;
TraceLog::GetInstance()->SetEnabled(TraceConfig(filter, ""),
TraceLog::RECORDING_MODE);
}
@@ -136,7 +133,8 @@ class TraceEventTestFixture : public testing::Test {
}
void EndTraceAndFlushAsync(WaitableEvent* flush_complete_event) {
- TraceLog::GetInstance()->SetDisabled();
+ TraceLog::GetInstance()->SetDisabled(TraceLog::RECORDING_MODE |
+ TraceLog::FILTERING_MODE);
TraceLog::GetInstance()->Flush(
base::Bind(&TraceEventTestFixture::OnTraceDataCollected,
base::Unretained(static_cast<TraceEventTestFixture*>(this)),
@@ -152,7 +150,6 @@ class TraceEventTestFixture : public testing::Test {
ASSERT_TRUE(tracelog);
ASSERT_FALSE(tracelog->IsEnabled());
trace_buffer_.SetOutputCallback(json_output_.GetCallback());
- event_watch_notification_ = 0;
num_flush_callbacks_ = 0;
}
void TearDown() override {
@@ -169,7 +166,6 @@ class TraceEventTestFixture : public testing::Test {
ListValue trace_parsed_;
TraceResultBuffer trace_buffer_;
TraceResultBuffer::SimpleOutput json_output_;
- int event_watch_notification_;
size_t num_flush_callbacks_;
private:
@@ -520,10 +516,14 @@ void TraceWithAllMacroVariants(WaitableEvent* task_complete_event) {
TRACE_EVENT_SCOPED_CONTEXT("all", "TRACE_EVENT_SCOPED_CONTEXT call",
context_id);
- TRACE_BIND_IDS("all", "TRACE_BIND_IDS simple call", 0x1000, 0x2000);
- TRACE_BIND_IDS("all", "TRACE_BIND_IDS scoped call",
+ TRACE_LINK_IDS("all", "TRACE_LINK_IDS simple call", 0x1000, 0x2000);
+ TRACE_LINK_IDS("all", "TRACE_LINK_IDS scoped call",
TRACE_ID_WITH_SCOPE("scope 1", 0x1000),
TRACE_ID_WITH_SCOPE("scope 2", 0x2000));
+ TRACE_LINK_IDS("all", "TRACE_LINK_IDS to a local ID", 0x1000,
+ TRACE_ID_LOCAL(0x2000));
+ TRACE_LINK_IDS("all", "TRACE_LINK_IDS to a global ID", 0x1000,
+ TRACE_ID_GLOBAL(0x2000));
TRACE_EVENT_ASYNC_BEGIN0("all", "async default process scope", 0x1000);
TRACE_EVENT_ASYNC_BEGIN0("all", "async local id", TRACE_ID_LOCAL(0x2000));
@@ -972,42 +972,76 @@ void ValidateAllTraceMacrosCreatedData(const ListValue& trace_parsed) {
EXPECT_EQ("0x20151021", id);
}
- EXPECT_FIND_("TRACE_BIND_IDS simple call");
+ EXPECT_FIND_("TRACE_LINK_IDS simple call");
{
std::string ph;
EXPECT_TRUE((item && item->GetString("ph", &ph)));
EXPECT_EQ("=", ph);
EXPECT_FALSE((item && item->HasKey("scope")));
- std::string id;
- EXPECT_TRUE((item && item->GetString("id", &id)));
- EXPECT_EQ("0x1000", id);
+ std::string id1;
+ EXPECT_TRUE((item && item->GetString("id", &id1)));
+ EXPECT_EQ("0x1000", id1);
- EXPECT_FALSE((item && item->HasKey("args.bind_scope")));
- std::string bind_id;
- EXPECT_TRUE((item && item->GetString("bind_id", &id)));
- EXPECT_EQ("0x2000", id);
+ EXPECT_FALSE((item && item->HasKey("args.linked_id.scope")));
+ std::string id2;
+ EXPECT_TRUE((item && item->GetString("args.linked_id.id", &id2)));
+ EXPECT_EQ("0x2000", id2);
}
- EXPECT_FIND_("TRACE_BIND_IDS scoped call");
+ EXPECT_FIND_("TRACE_LINK_IDS scoped call");
{
std::string ph;
EXPECT_TRUE((item && item->GetString("ph", &ph)));
EXPECT_EQ("=", ph);
- std::string id_scope;
- EXPECT_TRUE((item && item->GetString("scope", &id_scope)));
- EXPECT_EQ("scope 1", id_scope);
- std::string id;
- EXPECT_TRUE((item && item->GetString("id", &id)));
- EXPECT_EQ("0x1000", id);
+ std::string scope1;
+ EXPECT_TRUE((item && item->GetString("scope", &scope1)));
+ EXPECT_EQ("scope 1", scope1);
+ std::string id1;
+ EXPECT_TRUE((item && item->GetString("id", &id1)));
+ EXPECT_EQ("0x1000", id1);
- std::string bind_scope;
- EXPECT_TRUE((item && item->GetString("args.bind_scope", &bind_scope)));
- EXPECT_EQ("scope 2", bind_scope);
- std::string bind_id;
- EXPECT_TRUE((item && item->GetString("bind_id", &id)));
- EXPECT_EQ("0x2000", id);
+ std::string scope2;
+ EXPECT_TRUE((item && item->GetString("args.linked_id.scope", &scope2)));
+ EXPECT_EQ("scope 2", scope2);
+ std::string id2;
+ EXPECT_TRUE((item && item->GetString("args.linked_id.id", &id2)));
+ EXPECT_EQ("0x2000", id2);
+ }
+
+ EXPECT_FIND_("TRACE_LINK_IDS to a local ID");
+ {
+ std::string ph;
+ EXPECT_TRUE((item && item->GetString("ph", &ph)));
+ EXPECT_EQ("=", ph);
+
+ EXPECT_FALSE((item && item->HasKey("scope")));
+ std::string id1;
+ EXPECT_TRUE((item && item->GetString("id", &id1)));
+ EXPECT_EQ("0x1000", id1);
+
+ EXPECT_FALSE((item && item->HasKey("args.linked_id.scope")));
+ std::string id2;
+ EXPECT_TRUE((item && item->GetString("args.linked_id.id2.local", &id2)));
+ EXPECT_EQ("0x2000", id2);
+ }
+
+ EXPECT_FIND_("TRACE_LINK_IDS to a global ID");
+ {
+ std::string ph;
+ EXPECT_TRUE((item && item->GetString("ph", &ph)));
+ EXPECT_EQ("=", ph);
+
+ EXPECT_FALSE((item && item->HasKey("scope")));
+ std::string id1;
+ EXPECT_TRUE((item && item->GetString("id", &id1)));
+ EXPECT_EQ("0x1000", id1);
+
+ EXPECT_FALSE((item && item->HasKey("args.linked_id.scope")));
+ std::string id2;
+ EXPECT_TRUE((item && item->GetString("args.linked_id.id2.global", &id2)));
+ EXPECT_EQ("0x2000", id2);
}
EXPECT_FIND_("async default process scope");
@@ -1529,59 +1563,6 @@ TEST_F(TraceEventTestFixture, Categories) {
}
-// Test EVENT_WATCH_NOTIFICATION
-TEST_F(TraceEventTestFixture, EventWatchNotification) {
- // Basic one occurrence.
- BeginTrace();
- TraceLog::WatchEventCallback callback =
- base::Bind(&TraceEventTestFixture::OnWatchEventMatched,
- base::Unretained(this));
- TraceLog::GetInstance()->SetWatchEvent("cat", "event", callback);
- TRACE_EVENT_INSTANT0("cat", "event", TRACE_EVENT_SCOPE_THREAD);
- EndTraceAndFlush();
- EXPECT_EQ(event_watch_notification_, 1);
-
- // Auto-reset after end trace.
- BeginTrace();
- TraceLog::GetInstance()->SetWatchEvent("cat", "event", callback);
- EndTraceAndFlush();
- BeginTrace();
- TRACE_EVENT_INSTANT0("cat", "event", TRACE_EVENT_SCOPE_THREAD);
- EndTraceAndFlush();
- EXPECT_EQ(event_watch_notification_, 0);
-
- // Multiple occurrence.
- BeginTrace();
- int num_occurrences = 5;
- TraceLog::GetInstance()->SetWatchEvent("cat", "event", callback);
- for (int i = 0; i < num_occurrences; ++i)
- TRACE_EVENT_INSTANT0("cat", "event", TRACE_EVENT_SCOPE_THREAD);
- EndTraceAndFlush();
- EXPECT_EQ(event_watch_notification_, num_occurrences);
-
- // Wrong category.
- BeginTrace();
- TraceLog::GetInstance()->SetWatchEvent("cat", "event", callback);
- TRACE_EVENT_INSTANT0("wrong_cat", "event", TRACE_EVENT_SCOPE_THREAD);
- EndTraceAndFlush();
- EXPECT_EQ(event_watch_notification_, 0);
-
- // Wrong name.
- BeginTrace();
- TraceLog::GetInstance()->SetWatchEvent("cat", "event", callback);
- TRACE_EVENT_INSTANT0("cat", "wrong_event", TRACE_EVENT_SCOPE_THREAD);
- EndTraceAndFlush();
- EXPECT_EQ(event_watch_notification_, 0);
-
- // Canceled.
- BeginTrace();
- TraceLog::GetInstance()->SetWatchEvent("cat", "event", callback);
- TraceLog::GetInstance()->CancelWatchEvent();
- TRACE_EVENT_INSTANT0("cat", "event", TRACE_EVENT_SCOPE_THREAD);
- EndTraceAndFlush();
- EXPECT_EQ(event_watch_notification_, 0);
-}
-
// Test ASYNC_BEGIN/END events
TEST_F(TraceEventTestFixture, AsyncBeginEndEvents) {
BeginTrace();
@@ -2152,56 +2133,6 @@ TEST_F(TraceEventTestFixture, TraceWithDisabledByDefaultCategoryFilters) {
trace_log->SetDisabled();
}
-TEST_F(TraceEventTestFixture, TraceSampling) {
- TraceLog::GetInstance()->SetEnabled(
- TraceConfig(kRecordAllCategoryFilter, "record-until-full,enable-sampling"),
- TraceLog::RECORDING_MODE);
-
- TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(1, "cc", "Stuff");
- TraceLog::GetInstance()->WaitSamplingEventForTesting();
- TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(1, "cc", "Things");
- TraceLog::GetInstance()->WaitSamplingEventForTesting();
-
- EndTraceAndFlush();
-
- // Make sure we hit at least once.
- EXPECT_TRUE(FindNamePhase("Stuff", "P"));
- EXPECT_TRUE(FindNamePhase("Things", "P"));
-}
-
-TEST_F(TraceEventTestFixture, TraceSamplingScope) {
- TraceLog::GetInstance()->SetEnabled(
- TraceConfig(kRecordAllCategoryFilter, "record-until-full,enable-sampling"),
- TraceLog::RECORDING_MODE);
-
- TRACE_EVENT_SCOPED_SAMPLING_STATE("AAA", "name");
- TraceLog::GetInstance()->WaitSamplingEventForTesting();
- {
- EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "AAA");
- TRACE_EVENT_SCOPED_SAMPLING_STATE("BBB", "name");
- TraceLog::GetInstance()->WaitSamplingEventForTesting();
- EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "BBB");
- }
- TraceLog::GetInstance()->WaitSamplingEventForTesting();
- {
- EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "AAA");
- TRACE_EVENT_SCOPED_SAMPLING_STATE("CCC", "name");
- TraceLog::GetInstance()->WaitSamplingEventForTesting();
- EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "CCC");
- }
- TraceLog::GetInstance()->WaitSamplingEventForTesting();
- {
- EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "AAA");
- TRACE_EVENT_SET_SAMPLING_STATE("DDD", "name");
- TraceLog::GetInstance()->WaitSamplingEventForTesting();
- EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "DDD");
- }
- TraceLog::GetInstance()->WaitSamplingEventForTesting();
- EXPECT_STREQ(TRACE_EVENT_GET_SAMPLING_STATE(), "DDD");
-
- EndTraceAndFlush();
-}
-
class MyData : public ConvertableToTraceFormat {
public:
MyData() {}
@@ -2579,233 +2510,6 @@ TEST_F(TraceEventTestFixture, ArgsWhitelisting) {
EXPECT_EQ(args_string, "__stripped__");
}
-class TraceEventCallbackTest : public TraceEventTestFixture {
- public:
- void SetUp() override {
- TraceEventTestFixture::SetUp();
- ASSERT_EQ(NULL, s_instance);
- s_instance = this;
- }
- void TearDown() override {
- TraceLog::GetInstance()->SetDisabled();
- ASSERT_TRUE(s_instance);
- s_instance = NULL;
- TraceEventTestFixture::TearDown();
- }
-
- protected:
- // For TraceEventCallbackAndRecordingX tests.
- void VerifyCallbackAndRecordedEvents(size_t expected_callback_count,
- size_t expected_recorded_count) {
- // Callback events.
- EXPECT_EQ(expected_callback_count, collected_events_names_.size());
- for (size_t i = 0; i < collected_events_names_.size(); ++i) {
- EXPECT_EQ("callback", collected_events_categories_[i]);
- EXPECT_EQ("yes", collected_events_names_[i]);
- }
-
- // Recorded events.
- EXPECT_EQ(expected_recorded_count, trace_parsed_.GetSize());
- EXPECT_TRUE(FindTraceEntry(trace_parsed_, "recording"));
- EXPECT_FALSE(FindTraceEntry(trace_parsed_, "callback"));
- EXPECT_TRUE(FindTraceEntry(trace_parsed_, "yes"));
- EXPECT_FALSE(FindTraceEntry(trace_parsed_, "no"));
- }
-
- void VerifyCollectedEvent(size_t i,
- unsigned phase,
- const std::string& category,
- const std::string& name) {
- EXPECT_EQ(phase, collected_events_phases_[i]);
- EXPECT_EQ(category, collected_events_categories_[i]);
- EXPECT_EQ(name, collected_events_names_[i]);
- }
-
- std::vector<std::string> collected_events_categories_;
- std::vector<std::string> collected_events_names_;
- std::vector<unsigned char> collected_events_phases_;
- std::vector<TimeTicks> collected_events_timestamps_;
-
- static TraceEventCallbackTest* s_instance;
- static void Callback(TimeTicks timestamp,
- char phase,
- const unsigned char* category_group_enabled,
- const char* name,
- const char* scope,
- unsigned long long id,
- int num_args,
- const char* const arg_names[],
- const unsigned char arg_types[],
- const unsigned long long arg_values[],
- unsigned int flags) {
- s_instance->collected_events_phases_.push_back(phase);
- s_instance->collected_events_categories_.push_back(
- TraceLog::GetCategoryGroupName(category_group_enabled));
- s_instance->collected_events_names_.push_back(name);
- s_instance->collected_events_timestamps_.push_back(timestamp);
- }
-};
-
-TraceEventCallbackTest* TraceEventCallbackTest::s_instance;
-
-TEST_F(TraceEventCallbackTest, TraceEventCallback) {
- TRACE_EVENT_INSTANT0("all", "before enable", TRACE_EVENT_SCOPE_THREAD);
- TraceLog::GetInstance()->SetEventCallbackEnabled(
- TraceConfig(kRecordAllCategoryFilter, ""), Callback);
- TRACE_EVENT_INSTANT0("all", "event1", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("all", "event2", TRACE_EVENT_SCOPE_GLOBAL);
- {
- TRACE_EVENT0("all", "duration");
- TRACE_EVENT_INSTANT0("all", "event3", TRACE_EVENT_SCOPE_GLOBAL);
- }
- TraceLog::GetInstance()->SetEventCallbackDisabled();
- TRACE_EVENT_INSTANT0("all", "after callback removed",
- TRACE_EVENT_SCOPE_GLOBAL);
- ASSERT_EQ(5u, collected_events_names_.size());
- EXPECT_EQ("event1", collected_events_names_[0]);
- EXPECT_EQ(TRACE_EVENT_PHASE_INSTANT, collected_events_phases_[0]);
- EXPECT_EQ("event2", collected_events_names_[1]);
- EXPECT_EQ(TRACE_EVENT_PHASE_INSTANT, collected_events_phases_[1]);
- EXPECT_EQ("duration", collected_events_names_[2]);
- EXPECT_EQ(TRACE_EVENT_PHASE_BEGIN, collected_events_phases_[2]);
- EXPECT_EQ("event3", collected_events_names_[3]);
- EXPECT_EQ(TRACE_EVENT_PHASE_INSTANT, collected_events_phases_[3]);
- EXPECT_EQ("duration", collected_events_names_[4]);
- EXPECT_EQ(TRACE_EVENT_PHASE_END, collected_events_phases_[4]);
- for (size_t i = 1; i < collected_events_timestamps_.size(); i++) {
- EXPECT_LE(collected_events_timestamps_[i - 1],
- collected_events_timestamps_[i]);
- }
-}
-
-TEST_F(TraceEventCallbackTest, TraceEventCallbackWhileFull) {
- TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
- TraceLog::RECORDING_MODE);
- do {
- TRACE_EVENT_INSTANT0("all", "badger badger", TRACE_EVENT_SCOPE_GLOBAL);
- } while (!TraceLog::GetInstance()->BufferIsFull());
- TraceLog::GetInstance()->SetEventCallbackEnabled(
- TraceConfig(kRecordAllCategoryFilter, ""), Callback);
- TRACE_EVENT_INSTANT0("all", "a snake", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEventCallbackDisabled();
- ASSERT_EQ(1u, collected_events_names_.size());
- EXPECT_EQ("a snake", collected_events_names_[0]);
-}
-
-// 1: Enable callback, enable recording, disable callback, disable recording.
-TEST_F(TraceEventCallbackTest, TraceEventCallbackAndRecording1) {
- TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEventCallbackEnabled(TraceConfig("callback", ""),
- Callback);
- TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEnabled(TraceConfig("recording", ""),
- TraceLog::RECORDING_MODE);
- TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEventCallbackDisabled();
- TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
- EndTraceAndFlush();
- TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
-
- DropTracedMetadataRecords();
- VerifyCallbackAndRecordedEvents(2, 2);
-}
-
-// 2: Enable callback, enable recording, disable recording, disable callback.
-TEST_F(TraceEventCallbackTest, TraceEventCallbackAndRecording2) {
- TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEventCallbackEnabled(TraceConfig("callback", ""),
- Callback);
- TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEnabled(TraceConfig("recording", ""),
- TraceLog::RECORDING_MODE);
- TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- EndTraceAndFlush();
- TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEventCallbackDisabled();
- TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
-
- DropTracedMetadataRecords();
- VerifyCallbackAndRecordedEvents(3, 1);
-}
-
-// 3: Enable recording, enable callback, disable callback, disable recording.
-TEST_F(TraceEventCallbackTest, TraceEventCallbackAndRecording3) {
- TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEnabled(TraceConfig("recording", ""),
- TraceLog::RECORDING_MODE);
- TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEventCallbackEnabled(TraceConfig("callback", ""),
- Callback);
- TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEventCallbackDisabled();
- TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
- EndTraceAndFlush();
- TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
-
- DropTracedMetadataRecords();
- VerifyCallbackAndRecordedEvents(1, 3);
-}
-
-// 4: Enable recording, enable callback, disable recording, disable callback.
-TEST_F(TraceEventCallbackTest, TraceEventCallbackAndRecording4) {
- TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEnabled(TraceConfig("recording", ""),
- TraceLog::RECORDING_MODE);
- TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEventCallbackEnabled(TraceConfig("callback", ""),
- Callback);
- TRACE_EVENT_INSTANT0("recording", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- EndTraceAndFlush();
- TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "yes", TRACE_EVENT_SCOPE_GLOBAL);
- TraceLog::GetInstance()->SetEventCallbackDisabled();
- TRACE_EVENT_INSTANT0("recording", "no", TRACE_EVENT_SCOPE_GLOBAL);
- TRACE_EVENT_INSTANT0("callback", "no", TRACE_EVENT_SCOPE_GLOBAL);
-
- DropTracedMetadataRecords();
- VerifyCallbackAndRecordedEvents(2, 2);
-}
-
-TEST_F(TraceEventCallbackTest, TraceEventCallbackAndRecordingDuration) {
- TraceLog::GetInstance()->SetEventCallbackEnabled(
- TraceConfig(kRecordAllCategoryFilter, ""), Callback);
- {
- TRACE_EVENT0("callback", "duration1");
- TraceLog::GetInstance()->SetEnabled(
- TraceConfig(kRecordAllCategoryFilter, ""), TraceLog::RECORDING_MODE);
- TRACE_EVENT0("callback", "duration2");
- EndTraceAndFlush();
- TRACE_EVENT0("callback", "duration3");
- }
- TraceLog::GetInstance()->SetEventCallbackDisabled();
-
- ASSERT_EQ(6u, collected_events_names_.size());
- VerifyCollectedEvent(0, TRACE_EVENT_PHASE_BEGIN, "callback", "duration1");
- VerifyCollectedEvent(1, TRACE_EVENT_PHASE_BEGIN, "callback", "duration2");
- VerifyCollectedEvent(2, TRACE_EVENT_PHASE_BEGIN, "callback", "duration3");
- VerifyCollectedEvent(3, TRACE_EVENT_PHASE_END, "callback", "duration3");
- VerifyCollectedEvent(4, TRACE_EVENT_PHASE_END, "callback", "duration2");
- VerifyCollectedEvent(5, TRACE_EVENT_PHASE_END, "callback", "duration1");
-}
-
TEST_F(TraceEventTestFixture, TraceBufferVectorReportFull) {
TraceLog* trace_log = TraceLog::GetInstance();
trace_log->SetEnabled(
@@ -3027,29 +2731,9 @@ TEST_F(TraceEventTestFixture, ConvertTraceConfigToInternalOptions) {
trace_log->GetInternalOptionsFromTraceConfig(
TraceConfig(kRecordAllCategoryFilter, ECHO_TO_CONSOLE)));
- EXPECT_EQ(
- TraceLog::kInternalRecordUntilFull | TraceLog::kInternalEnableSampling,
- trace_log->GetInternalOptionsFromTraceConfig(
- TraceConfig(kRecordAllCategoryFilter,
- "record-until-full,enable-sampling")));
-
- EXPECT_EQ(
- TraceLog::kInternalRecordContinuously | TraceLog::kInternalEnableSampling,
- trace_log->GetInternalOptionsFromTraceConfig(
- TraceConfig(kRecordAllCategoryFilter,
- "record-continuously,enable-sampling")));
-
- EXPECT_EQ(
- TraceLog::kInternalEchoToConsole | TraceLog::kInternalEnableSampling,
- trace_log->GetInternalOptionsFromTraceConfig(
- TraceConfig(kRecordAllCategoryFilter,
- "trace-to-console,enable-sampling")));
-
- EXPECT_EQ(
- TraceLog::kInternalEchoToConsole | TraceLog::kInternalEnableSampling,
- trace_log->GetInternalOptionsFromTraceConfig(
- TraceConfig("*",
- "trace-to-console,enable-sampling,enable-systrace")));
+ EXPECT_EQ(TraceLog::kInternalEchoToConsole,
+ trace_log->GetInternalOptionsFromTraceConfig(
+ TraceConfig("*", "trace-to-console,enable-systrace")));
}
void SetBlockingFlagAndBlockUntilStopped(WaitableEvent* task_start_event,
@@ -3276,30 +2960,142 @@ class TestEventFilter : public TraceLog::TraceEventFilter {
public:
bool FilterTraceEvent(const TraceEvent& trace_event) const override {
filter_trace_event_hit_count_++;
- return true;
+ return filter_return_value_;
}
void EndEvent(const char* category_group, const char* name) override {
end_event_hit_count_++;
}
+ static void set_filter_return_value(bool value) {
+ filter_return_value_ = value;
+ }
+
static size_t filter_trace_event_hit_count() {
return filter_trace_event_hit_count_;
}
static size_t end_event_hit_count() { return end_event_hit_count_; }
+ static void clear_counts() {
+ filter_trace_event_hit_count_ = 0;
+ end_event_hit_count_ = 0;
+ }
+
private:
static size_t filter_trace_event_hit_count_;
static size_t end_event_hit_count_;
+ static bool filter_return_value_;
};
size_t TestEventFilter::filter_trace_event_hit_count_ = 0;
size_t TestEventFilter::end_event_hit_count_ = 0;
+bool TestEventFilter::filter_return_value_ = false;
std::unique_ptr<TraceLog::TraceEventFilter> ConstructTestEventFilter() {
return WrapUnique(new TestEventFilter);
}
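
With this change a filter's FilterTraceEvent return value decides whether an event is kept (true) or dropped from the recorded trace. A minimal illustrative filter under that contract, mirroring the signatures of the test helper above (hypothetical, not part of the patch):

  class DropAllFilter : public TraceLog::TraceEventFilter {
   public:
    // Returning false filters every event out of the recorded trace.
    bool FilterTraceEvent(const TraceEvent& trace_event) const override {
      return false;
    }
    void EndEvent(const char* category_group, const char* name) override {}
  };
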
+TEST_F(TraceEventTestFixture, TraceFilteringMode) {
+ const char config_json[] =
+ "{"
+ " \"event_filters\": ["
+ " {"
+ " \"filter_predicate\": \"testing_predicate\", "
+ " \"included_categories\": [\"*\"]"
+ " }"
+ " ]"
+ "}";
+
+ // Run RECORDING_MODE within FILTERING_MODE:
+ TestEventFilter::set_filter_return_value(true);
+ TraceLog::SetTraceEventFilterConstructorForTesting(ConstructTestEventFilter);
+
+ // Only filtering mode is enabled with test filters.
+ TraceLog::GetInstance()->SetEnabled(TraceConfig(config_json),
+ TraceLog::FILTERING_MODE);
+ EXPECT_EQ(TraceLog::FILTERING_MODE, TraceLog::GetInstance()->enabled_modes());
+ {
+ void* ptr = this;
+ TRACE_EVENT0("c0", "name0");
+ TRACE_EVENT_ASYNC_BEGIN0("c1", "name1", ptr);
+ TRACE_EVENT_INSTANT0("c0", "name0", TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_ASYNC_END0("c1", "name1", ptr);
+ }
+
+  // Recording mode is additionally enabled while filtering mode stays on.
+ TraceLog::GetInstance()->SetEnabled(TraceConfig("", ""),
+ TraceLog::RECORDING_MODE);
+ EXPECT_EQ(TraceLog::RECORDING_MODE | TraceLog::FILTERING_MODE,
+ TraceLog::GetInstance()->enabled_modes());
+ {
+ TRACE_EVENT0("c2", "name2");
+ }
+  // Only recording mode is disabled; filtering mode continues to run.
+ TraceLog::GetInstance()->SetDisabled(TraceLog::RECORDING_MODE);
+ EXPECT_EQ(TraceLog::FILTERING_MODE, TraceLog::GetInstance()->enabled_modes());
+
+ {
+ TRACE_EVENT0("c0", "name0");
+ }
+  // Filtering mode is now disabled as well; no tracing mode remains enabled.
+ TraceLog::GetInstance()->SetDisabled(TraceLog::FILTERING_MODE);
+ EXPECT_EQ(0, TraceLog::GetInstance()->enabled_modes());
+
+ EndTraceAndFlush();
+ EXPECT_FALSE(FindMatchingValue("cat", "c0"));
+ EXPECT_FALSE(FindMatchingValue("cat", "c1"));
+ EXPECT_FALSE(FindMatchingValue("name", "name0"));
+ EXPECT_FALSE(FindMatchingValue("name", "name1"));
+ EXPECT_TRUE(FindMatchingValue("cat", "c2"));
+ EXPECT_TRUE(FindMatchingValue("name", "name2"));
+ EXPECT_EQ(6u, TestEventFilter::filter_trace_event_hit_count());
+ EXPECT_EQ(3u, TestEventFilter::end_event_hit_count());
+ Clear();
+ TestEventFilter::clear_counts();
+
+ // Run FILTERING_MODE within RECORDING_MODE:
+ // Only recording mode is enabled and all events must be recorded.
+ TraceLog::GetInstance()->SetEnabled(TraceConfig("", ""),
+ TraceLog::RECORDING_MODE);
+ EXPECT_EQ(TraceLog::RECORDING_MODE, TraceLog::GetInstance()->enabled_modes());
+ {
+ TRACE_EVENT0("c0", "name0");
+ }
+
+  // Filtering mode is also enabled and all events must be filtered out.
+ TestEventFilter::set_filter_return_value(false);
+ TraceLog::GetInstance()->SetEnabled(TraceConfig(config_json),
+ TraceLog::FILTERING_MODE);
+ EXPECT_EQ(TraceLog::RECORDING_MODE | TraceLog::FILTERING_MODE,
+ TraceLog::GetInstance()->enabled_modes());
+ {
+ TRACE_EVENT0("c1", "name1");
+ }
+  // Only filtering mode is disabled; recording mode continues to run and
+  // records all events.
+ TraceLog::GetInstance()->SetDisabled(TraceLog::FILTERING_MODE);
+ EXPECT_EQ(TraceLog::RECORDING_MODE, TraceLog::GetInstance()->enabled_modes());
+
+ {
+ TRACE_EVENT0("c2", "name2");
+ }
+  // Recording mode is now disabled as well; no tracing mode remains enabled.
+ TraceLog::GetInstance()->SetDisabled(TraceLog::RECORDING_MODE);
+ EXPECT_EQ(0, TraceLog::GetInstance()->enabled_modes());
+
+ EndTraceAndFlush();
+ EXPECT_TRUE(FindMatchingValue("cat", "c0"));
+ EXPECT_TRUE(FindMatchingValue("cat", "c2"));
+ EXPECT_TRUE(FindMatchingValue("name", "name0"));
+ EXPECT_TRUE(FindMatchingValue("name", "name2"));
+ EXPECT_FALSE(FindMatchingValue("cat", "c1"));
+ EXPECT_FALSE(FindMatchingValue("name", "name1"));
+ EXPECT_EQ(1u, TestEventFilter::filter_trace_event_hit_count());
+ EXPECT_EQ(1u, TestEventFilter::end_event_hit_count());
+ Clear();
+ TestEventFilter::clear_counts();
+}
+
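
Condensed, the enable/disable protocol the test above exercises looks like this (a sketch against the new mode-bitmask API, reusing the test's config_json):

  TraceLog* log = TraceLog::GetInstance();
  log->SetEnabled(TraceConfig(config_json), TraceLog::FILTERING_MODE);
  log->SetEnabled(TraceConfig("", ""), TraceLog::RECORDING_MODE);
  // log->enabled_modes() == (TraceLog::RECORDING_MODE | TraceLog::FILTERING_MODE)
  log->SetDisabled(TraceLog::RECORDING_MODE);   // filtering keeps running
  log->SetDisabled(TraceLog::FILTERING_MODE);   // fully disabled again
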
TEST_F(TraceEventTestFixture, EventFiltering) {
const char config_json[] =
"{"
@@ -3315,9 +3111,11 @@ TEST_F(TraceEventTestFixture, EventFiltering) {
" ]"
"}";
+ TestEventFilter::set_filter_return_value(true);
TraceLog::SetTraceEventFilterConstructorForTesting(ConstructTestEventFilter);
TraceConfig trace_config(config_json);
- TraceLog::GetInstance()->SetEnabled(trace_config, TraceLog::RECORDING_MODE);
+ TraceLog::GetInstance()->SetEnabled(
+ trace_config, TraceLog::RECORDING_MODE | TraceLog::FILTERING_MODE);
ASSERT_TRUE(TraceLog::GetInstance()->IsEnabled());
TRACE_EVENT0("filtered_cat", "a snake");
@@ -3331,6 +3129,7 @@ TEST_F(TraceEventTestFixture, EventFiltering) {
EXPECT_EQ(3u, TestEventFilter::filter_trace_event_hit_count());
EXPECT_EQ(1u, TestEventFilter::end_event_hit_count());
+ TestEventFilter::clear_counts();
}
TEST_F(TraceEventTestFixture, EventWhitelistFiltering) {
@@ -3354,7 +3153,8 @@ TEST_F(TraceEventTestFixture, EventWhitelistFiltering) {
TraceLog::TraceEventFilter::kEventWhitelistPredicate);
TraceConfig trace_config(config_json);
- TraceLog::GetInstance()->SetEnabled(trace_config, TraceLog::RECORDING_MODE);
+ TraceLog::GetInstance()->SetEnabled(
+ trace_config, TraceLog::RECORDING_MODE | TraceLog::FILTERING_MODE);
EXPECT_TRUE(TraceLog::GetInstance()->IsEnabled());
TRACE_EVENT0("filtered_cat", "a snake");
@@ -3385,7 +3185,8 @@ TEST_F(TraceEventTestFixture, HeapProfilerFiltering) {
TraceLog::TraceEventFilter::kHeapProfilerPredicate);
TraceConfig trace_config(config_json);
- TraceLog::GetInstance()->SetEnabled(trace_config, TraceLog::RECORDING_MODE);
+ TraceLog::GetInstance()->SetEnabled(
+ trace_config, TraceLog::RECORDING_MODE | TraceLog::FILTERING_MODE);
EXPECT_TRUE(TraceLog::GetInstance()->IsEnabled());
TRACE_EVENT0("filtered_cat", "a snake");
diff --git a/chromium/base/trace_event/trace_log.cc b/chromium/base/trace_event/trace_log.cc
index 9f0b50d460a..f76393cf230 100644
--- a/chromium/base/trace_event/trace_log.cc
+++ b/chromium/base/trace_event/trace_log.cc
@@ -26,12 +26,12 @@
#include "base/strings/string_tokenizer.h"
#include "base/strings/stringprintf.h"
#include "base/sys_info.h"
-#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_id_name_manager.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/threading/worker_pool.h"
#include "base/time/time.h"
+#include "base/trace_event/category_registry.h"
#include "base/trace_event/heap_profiler.h"
#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
#include "base/trace_event/memory_dump_manager.h"
@@ -40,16 +40,12 @@
#include "base/trace_event/trace_buffer.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_synthetic_delay.h"
-#include "base/trace_event/trace_sampling_thread.h"
#include "build/build_config.h"
#if defined(OS_WIN)
#include "base/trace_event/trace_event_etw_export_win.h"
#endif
-// The thread buckets for the sampling profiler.
-BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
-
namespace base {
namespace internal {
@@ -88,23 +84,6 @@ const size_t kEchoToConsoleTraceEventBufferChunks = 256;
const size_t kTraceEventBufferSizeInBytes = 100 * 1024;
const int kThreadFlushTimeoutMs = 3000;
-#define MAX_CATEGORY_GROUPS 200
-
-// Parallel arrays g_category_groups and g_category_group_enabled are separate
-// so that a pointer to a member of g_category_group_enabled can be easily
-// converted to an index into g_category_groups. This allows macros to deal
-// only with char enabled pointers from g_category_group_enabled, and we can
-// convert internally to determine the category name from the char enabled
-// pointer.
-const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
- "toplevel",
- "tracing already shutdown",
- "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
- "__metadata"};
-
-// The enabled flag is char instead of bool so that the API can be used from C.
-unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = {0};
-
const char kEventNameWhitelist[] = "event_name_whitelist";
#define MAX_TRACE_EVENT_FILTERS 32
@@ -113,9 +92,6 @@ const char kEventNameWhitelist[] = "event_name_whitelist";
base::LazyInstance<std::vector<std::unique_ptr<TraceLog::TraceEventFilter>>>::
Leaky g_category_group_filters = LAZY_INSTANCE_INITIALIZER;
-// Stores a bitmap of filters enabled for each category group.
-uint32_t g_category_group_filters_enabled[MAX_CATEGORY_GROUPS] = {0};
-
class EventNameFilter : public TraceLog::TraceEventFilter {
public:
EventNameFilter(const base::DictionaryValue* filter_args) {
@@ -185,14 +161,6 @@ class HeapProfilerFilter : public TraceLog::TraceEventFilter {
TraceLog::TraceEventFilterConstructorForTesting
g_trace_event_filter_constructor_for_testing = nullptr;
-// Indexes here have to match the g_category_groups array indexes above.
-const int kCategoryAlreadyShutdown = 1;
-const int kCategoryCategoriesExhausted = 2;
-const int kCategoryMetadata = 3;
-const int kNumBuiltinCategories = 4;
-// Skip default categories.
-base::subtle::AtomicWord g_category_index = kNumBuiltinCategories;
-
// The name of the current thread. This is used to decide if the current
// thread name has changed. We combine all the seen thread names into the
// output name for the thread.
@@ -221,7 +189,7 @@ void InitializeMetadataEvent(TraceEvent* trace_event,
TimeTicks(),
ThreadTicks(),
TRACE_EVENT_PHASE_METADATA,
- &g_category_group_enabled[kCategoryMetadata],
+ CategoryRegistry::kCategoryMetadata->state_ptr(),
metadata_name,
trace_event_internal::kGlobalScope, // scope
trace_event_internal::kNoId, // id
@@ -263,27 +231,12 @@ void MakeHandle(uint32_t chunk_seq,
handle->event_index = static_cast<uint16_t>(event_index);
}
-uintptr_t GetCategoryIndex(const unsigned char* category_group_enabled) {
- // Calculate the index of the category group by finding
- // category_group_enabled in g_category_group_enabled array.
- uintptr_t category_begin =
- reinterpret_cast<uintptr_t>(g_category_group_enabled);
- uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled);
- DCHECK(category_ptr >= category_begin);
- DCHECK(category_ptr < reinterpret_cast<uintptr_t>(g_category_group_enabled +
- MAX_CATEGORY_GROUPS))
- << "out of bounds category pointer";
- uintptr_t category_index =
- (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]);
-
- return category_index;
-}
-
template <typename Function>
void ForEachCategoryGroupFilter(const unsigned char* category_group_enabled,
Function filter_fn) {
- uint32_t filter_bitmap = g_category_group_filters_enabled[GetCategoryIndex(
- category_group_enabled)];
+ const TraceCategory* category =
+ CategoryRegistry::GetCategoryByStatePtr(category_group_enabled);
+ uint32_t filter_bitmap = category->enabled_filters();
int index = 0;
while (filter_bitmap) {
if (filter_bitmap & 1 && g_category_group_filters.Get()[index])
@@ -466,33 +419,19 @@ TraceLog* TraceLog::GetInstance() {
}
TraceLog::TraceLog()
- : mode_(DISABLED),
+ : enabled_modes_(0),
num_traces_recorded_(0),
- event_callback_(0),
dispatching_to_observer_list_(false),
process_sort_index_(0),
process_id_hash_(0),
process_id_(0),
- watch_category_(0),
trace_options_(kInternalRecordUntilFull),
- sampling_thread_handle_(0),
trace_config_(TraceConfig()),
- event_callback_trace_config_(TraceConfig()),
thread_shared_chunk_index_(0),
generation_(0),
use_worker_thread_(false) {
- // Trace is enabled or disabled on one thread while other threads are
- // accessing the enabled flag. We don't care whether edge-case events are
- // traced or not, so we allow races on the enabled flag to keep the trace
- // macros fast.
- // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots:
- // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled,
- // sizeof(g_category_group_enabled),
- // "trace_event category enabled");
- for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) {
- ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i],
- "trace_event category enabled");
- }
+ CategoryRegistry::Initialize();
+
#if defined(OS_NACL) // NaCl shouldn't expose the process id.
SetProcessID(0);
#else
@@ -551,48 +490,52 @@ const unsigned char* TraceLog::GetCategoryGroupEnabled(
const char* category_group) {
TraceLog* tracelog = GetInstance();
if (!tracelog) {
- DCHECK(!g_category_group_enabled[kCategoryAlreadyShutdown]);
- return &g_category_group_enabled[kCategoryAlreadyShutdown];
+ DCHECK(!CategoryRegistry::kCategoryAlreadyShutdown->is_enabled());
+ return CategoryRegistry::kCategoryAlreadyShutdown->state_ptr();
}
- return tracelog->GetCategoryGroupEnabledInternal(category_group);
+ TraceCategory* category = nullptr;
+ bool is_new_category =
+ CategoryRegistry::GetOrCreateCategoryByName(category_group, &category);
+ if (is_new_category)
+ tracelog->UpdateCategoryState(category);
+ DCHECK(category->state_ptr());
+ return category->state_ptr();
}
const char* TraceLog::GetCategoryGroupName(
const unsigned char* category_group_enabled) {
- return g_category_groups[GetCategoryIndex(category_group_enabled)];
+ return CategoryRegistry::GetCategoryByStatePtr(category_group_enabled)
+ ->name();
}
-void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) {
- unsigned char enabled_flag = 0;
- const char* category_group = g_category_groups[category_index];
- if (mode_ == RECORDING_MODE &&
- trace_config_.IsCategoryGroupEnabled(category_group)) {
- enabled_flag |= ENABLED_FOR_RECORDING;
+void TraceLog::UpdateCategoryState(TraceCategory* category) {
+ DCHECK(category->is_valid());
+ unsigned char state_flags = 0;
+ if (enabled_modes_ & RECORDING_MODE &&
+ trace_config_.IsCategoryGroupEnabled(category->name())) {
+ state_flags |= TraceCategory::ENABLED_FOR_RECORDING;
}
- if (event_callback_ &&
- event_callback_trace_config_.IsCategoryGroupEnabled(category_group)) {
- enabled_flag |= ENABLED_FOR_EVENT_CALLBACK;
+ // TODO(primiano): this is a temporary workaround for catapult:#2341,
+ // to guarantee that metadata events are always added even if the category
+  // filter is "-*". See crbug.com/618054 for more details and the long-term fix.
+ if (enabled_modes_ & RECORDING_MODE &&
+ category == CategoryRegistry::kCategoryMetadata) {
+ state_flags |= TraceCategory::ENABLED_FOR_RECORDING;
}
#if defined(OS_WIN)
if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled(
- category_group)) {
- enabled_flag |= ENABLED_FOR_ETW_EXPORT;
+ category->name())) {
+ state_flags |= TraceCategory::ENABLED_FOR_ETW_EXPORT;
}
#endif
- // TODO(primiano): this is a temporary workaround for catapult:#2341,
- // to guarantee that metadata events are always added even if the category
- // filter is "-*". See crbug.com/618054 for more details and long-term fix.
- if (mode_ == RECORDING_MODE && !strcmp(category_group, "__metadata"))
- enabled_flag |= ENABLED_FOR_RECORDING;
-
uint32_t enabled_filters_bitmap = 0;
int index = 0;
- for (const auto& event_filter : trace_config_.event_filters()) {
- if (event_filter.IsCategoryGroupEnabled(category_group)) {
- enabled_flag |= ENABLED_FOR_FILTERING;
+ for (const auto& event_filter : enabled_event_filters_) {
+ if (event_filter.IsCategoryGroupEnabled(category->name())) {
+ state_flags |= TraceCategory::ENABLED_FOR_FILTERING;
DCHECK(g_category_group_filters.Get()[index]);
enabled_filters_bitmap |= 1 << index;
}
@@ -601,25 +544,27 @@ void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) {
break;
}
}
- g_category_group_filters_enabled[category_index] = enabled_filters_bitmap;
-
- g_category_group_enabled[category_index] = enabled_flag;
+ category->set_enabled_filters(enabled_filters_bitmap);
+ category->set_state(state_flags);
}
-void TraceLog::UpdateCategoryGroupEnabledFlags() {
+void TraceLog::UpdateCategoryRegistry() {
CreateFiltersForTraceConfig();
- size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
- for (size_t i = 0; i < category_index; i++)
- UpdateCategoryGroupEnabledFlag(i);
+ for (TraceCategory& category : CategoryRegistry::GetAllCategories()) {
+ UpdateCategoryState(&category);
+ }
}
void TraceLog::CreateFiltersForTraceConfig() {
+ if (!(enabled_modes_ & FILTERING_MODE))
+ return;
+
// Filters were already added and tracing could be enabled. Filters list
// cannot be changed when trace events are using them.
if (g_category_group_filters.Get().size())
return;
- for (auto& event_filter : trace_config_.event_filters()) {
+ for (auto& event_filter : enabled_event_filters_) {
if (g_category_group_filters.Get().size() >= MAX_TRACE_EVENT_FILTERS) {
NOTREACHED()
<< "Too many trace event filters installed in the current session";
@@ -672,67 +617,16 @@ void TraceLog::UpdateSyntheticDelaysFromTraceConfig() {
}
}
-const unsigned char* TraceLog::GetCategoryGroupEnabledInternal(
- const char* category_group) {
- DCHECK(!strchr(category_group, '"'))
- << "Category groups may not contain double quote";
- // The g_category_groups is append only, avoid using a lock for the fast path.
- size_t current_category_index = base::subtle::Acquire_Load(&g_category_index);
-
- // Search for pre-existing category group.
- for (size_t i = 0; i < current_category_index; ++i) {
- if (strcmp(g_category_groups[i], category_group) == 0) {
- return &g_category_group_enabled[i];
- }
- }
-
- // This is the slow path: the lock is not held in the case above, so more
- // than one thread could have reached here trying to add the same category.
- // Only hold to lock when actually appending a new category, and
- // check the categories groups again.
- AutoLock lock(lock_);
- size_t category_index = base::subtle::Acquire_Load(&g_category_index);
- for (size_t i = 0; i < category_index; ++i) {
- if (strcmp(g_category_groups[i], category_group) == 0) {
- return &g_category_group_enabled[i];
- }
- }
-
- // Create a new category group.
- DCHECK(category_index < MAX_CATEGORY_GROUPS)
- << "must increase MAX_CATEGORY_GROUPS";
- unsigned char* category_group_enabled = nullptr;
- if (category_index < MAX_CATEGORY_GROUPS) {
- // Don't hold on to the category_group pointer, so that we can create
- // category groups with strings not known at compile time (this is
- // required by SetWatchEvent).
- const char* new_group = strdup(category_group);
- ANNOTATE_LEAKING_OBJECT_PTR(new_group);
- g_category_groups[category_index] = new_group;
- DCHECK(!g_category_group_enabled[category_index]);
- // Note that if both included and excluded patterns in the
- // TraceConfig are empty, we exclude nothing,
- // thereby enabling this category group.
- UpdateCategoryGroupEnabledFlag(category_index);
- category_group_enabled = &g_category_group_enabled[category_index];
- // Update the max index now.
- base::subtle::Release_Store(&g_category_index, category_index + 1);
- } else {
- category_group_enabled =
- &g_category_group_enabled[kCategoryCategoriesExhausted];
- }
- return category_group_enabled;
-}
-
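[The function deleted above is exactly the pattern CategoryRegistry now encapsulates: a lock-free fast path over an append-only array, taking the lock only to append and re-checking under it. A condensed sketch of that scheme, with illustrative names and sizes rather than the registry's real internals:]

    #include <atomic>
    #include <cstring>
    #include <mutex>

    constexpr size_t kMaxCategories = 200;  // mirrors MAX_CATEGORY_GROUPS
    const char* g_names[kMaxCategories];
    unsigned char g_states[kMaxCategories];
    std::atomic<size_t> g_count{0};
    std::mutex g_lock;

    unsigned char* GetOrCreateCategoryState(const char* name) {
      // Fast path: entries [0, count) are immutable once published.
      size_t count = g_count.load(std::memory_order_acquire);
      for (size_t i = 0; i < count; ++i)
        if (!strcmp(g_names[i], name)) return &g_states[i];

      std::lock_guard<std::mutex> lock(g_lock);
      count = g_count.load(std::memory_order_relaxed);  // re-check under lock
      for (size_t i = 0; i < count; ++i)
        if (!strcmp(g_names[i], name)) return &g_states[i];
      if (count >= kMaxCategories)
        return &g_states[0];  // exhausted sentinel, simplified
      g_names[count] = name;  // the original strdup'd; assume static lifetime
      g_count.store(count + 1, std::memory_order_release);  // publish
      return &g_states[count];
    }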
void TraceLog::GetKnownCategoryGroups(
std::vector<std::string>* category_groups) {
- AutoLock lock(lock_);
- size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
- for (size_t i = kNumBuiltinCategories; i < category_index; i++)
- category_groups->push_back(g_category_groups[i]);
+ for (const auto& category : CategoryRegistry::GetAllCategories()) {
+ if (!CategoryRegistry::IsBuiltinCategory(&category))
+ category_groups->push_back(category.name());
+ }
}
-void TraceLog::SetEnabled(const TraceConfig& trace_config, Mode mode) {
+void TraceLog::SetEnabled(const TraceConfig& trace_config,
+ uint8_t modes_to_enable) {
std::vector<EnabledStateObserver*> observer_list;
std::map<AsyncEnabledStateObserver*, RegisteredAsyncObserver> observer_map;
{
@@ -746,32 +640,58 @@ void TraceLog::SetEnabled(const TraceConfig& trace_config, Mode mode) {
InternalTraceOptions old_options = trace_options();
- if (IsEnabled()) {
- if (new_options != old_options) {
- DLOG(ERROR) << "Attempting to re-enable tracing with a different "
- << "set of options.";
- }
-
- if (mode != mode_) {
- DLOG(ERROR) << "Attempting to re-enable tracing with a different mode.";
- }
-
- DCHECK(!trace_config.event_filters().size())
- << "Adding new filters while tracing was already enabled is not "
- "supported.";
-
- trace_config_.Merge(trace_config);
- UpdateCategoryGroupEnabledFlags();
- return;
- }
-
if (dispatching_to_observer_list_) {
+ // TODO(ssid): Change to NOTREACHED after fixing crbug.com/625170.
DLOG(ERROR)
<< "Cannot manipulate TraceLog::Enabled state from an observer.";
return;
}
- mode_ = mode;
+  // Clear all filters from the previous tracing session. These filters are
+  // not cleared at the end of tracing because threads that hit trace events
+  // while tracing is being disabled could still be using them.
+ if (!enabled_modes_)
+ g_category_group_filters.Get().clear();
+
+ // Update trace config for recording.
+ const bool already_recording = enabled_modes_ & RECORDING_MODE;
+ if (modes_to_enable & RECORDING_MODE) {
+ if (already_recording) {
+      // TODO(ssid): Stop supporting enabling of RECORDING_MODE when already
+      // enabled. crbug.com/625170.
+ DCHECK_EQ(new_options, old_options) << "Attempting to re-enable "
+ "tracing with a different set "
+ "of options.";
+ trace_config_.Merge(trace_config);
+ } else {
+ trace_config_ = trace_config;
+ }
+ }
+
+ // Update event filters.
+ if (modes_to_enable & FILTERING_MODE) {
+ DCHECK(!trace_config.event_filters().empty())
+ << "Attempting to enable filtering without any filters";
+ DCHECK(enabled_event_filters_.empty()) << "Attempting to re-enable "
+ "filtering when filters are "
+ "already enabled.";
+
+ // Use the given event filters only if filtering was not enabled.
+ if (enabled_event_filters_.empty())
+ enabled_event_filters_ = trace_config.event_filters();
+ }
+ // Keep the |trace_config_| updated with only enabled filters in case anyone
+ // tries to read it using |GetCurrentTraceConfig| (even if filters are
+ // empty).
+ trace_config_.SetEventFilters(enabled_event_filters_);
+
+ enabled_modes_ |= modes_to_enable;
+ UpdateCategoryRegistry();
+
+  // Do not notify observers or create the trace buffer if only enabled for
+ // filtering or if recording was already enabled.
+ if (!(modes_to_enable & RECORDING_MODE) || already_recording)
+ return;
if (new_options != old_options) {
subtle::NoBarrier_Store(&trace_options_, new_options);
@@ -780,32 +700,9 @@ void TraceLog::SetEnabled(const TraceConfig& trace_config, Mode mode) {
num_traces_recorded_++;
- // Clear all filters from previous tracing session. These filters are not
- // cleared at the end of tracing because some threads which hit trace event
- // when disabling, could try to use the filters.
- g_category_group_filters.Get().clear();
-
- trace_config_ = TraceConfig(trace_config);
- UpdateCategoryGroupEnabledFlags();
+ UpdateCategoryRegistry();
UpdateSyntheticDelaysFromTraceConfig();
- if (new_options & kInternalEnableSampling) {
- sampling_thread_.reset(new TraceSamplingThread);
- sampling_thread_->RegisterSampleBucket(
- &g_trace_state[0], "bucket0",
- Bind(&TraceSamplingThread::DefaultSamplingCallback));
- sampling_thread_->RegisterSampleBucket(
- &g_trace_state[1], "bucket1",
- Bind(&TraceSamplingThread::DefaultSamplingCallback));
- sampling_thread_->RegisterSampleBucket(
- &g_trace_state[2], "bucket2",
- Bind(&TraceSamplingThread::DefaultSamplingCallback));
- if (!PlatformThread::Create(0, sampling_thread_.get(),
- &sampling_thread_handle_)) {
- NOTREACHED() << "failed to create thread";
- }
- }
-
dispatching_to_observer_list_ = true;
observer_list = enabled_state_observer_list_;
observer_map = async_observers_;
@@ -835,10 +732,9 @@ void TraceLog::SetArgumentFilterPredicate(
TraceLog::InternalTraceOptions TraceLog::GetInternalOptionsFromTraceConfig(
const TraceConfig& config) {
- InternalTraceOptions ret =
- config.IsSamplingEnabled() ? kInternalEnableSampling : kInternalNone;
- if (config.IsArgumentFilterEnabled())
- ret |= kInternalEnableArgumentFilter;
+ InternalTraceOptions ret = config.IsArgumentFilterEnabled()
+ ? kInternalEnableArgumentFilter
+ : kInternalNone;
switch (config.GetTraceRecordMode()) {
case RECORD_UNTIL_FULL:
return ret | kInternalRecordUntilFull;
@@ -860,37 +756,45 @@ TraceConfig TraceLog::GetCurrentTraceConfig() const {
void TraceLog::SetDisabled() {
AutoLock lock(lock_);
- SetDisabledWhileLocked();
+ SetDisabledWhileLocked(RECORDING_MODE);
+}
+
+void TraceLog::SetDisabled(uint8_t modes_to_disable) {
+ AutoLock lock(lock_);
+ SetDisabledWhileLocked(modes_to_disable);
}
-void TraceLog::SetDisabledWhileLocked() {
+void TraceLog::SetDisabledWhileLocked(uint8_t modes_to_disable) {
lock_.AssertAcquired();
- if (!IsEnabled())
+ if (!(enabled_modes_ & modes_to_disable))
return;
if (dispatching_to_observer_list_) {
+ // TODO(ssid): Change to NOTREACHED after fixing crbug.com/625170.
DLOG(ERROR)
<< "Cannot manipulate TraceLog::Enabled state from an observer.";
return;
}
- mode_ = DISABLED;
+ bool is_recording_mode_disabled =
+ (enabled_modes_ & RECORDING_MODE) && (modes_to_disable & RECORDING_MODE);
+ enabled_modes_ &= ~modes_to_disable;
- if (sampling_thread_) {
- // Stop the sampling thread.
- sampling_thread_->Stop();
- lock_.Release();
- PlatformThread::Join(sampling_thread_handle_);
- lock_.Acquire();
- sampling_thread_handle_ = PlatformThreadHandle();
- sampling_thread_.reset();
+ if (modes_to_disable & FILTERING_MODE)
+ enabled_event_filters_.clear();
+
+ if (modes_to_disable & RECORDING_MODE) {
+ trace_config_.Clear();
}
- trace_config_.Clear();
- subtle::NoBarrier_Store(&watch_category_, 0);
- watch_event_name_.clear();
- UpdateCategoryGroupEnabledFlags();
+ UpdateCategoryRegistry();
+
+  // Add metadata events and notify observers only if recording mode was
+  // just disabled.
+ if (!is_recording_mode_disabled)
+ return;
+
AddMetadataEventsWhileLocked();
// Remove metadata events so they will not get added to a subsequent trace.
@@ -990,25 +894,10 @@ void TraceLog::CheckIfBufferIsFullWhileLocked() {
if (buffer_limit_reached_timestamp_.is_null()) {
buffer_limit_reached_timestamp_ = OffsetNow();
}
- SetDisabledWhileLocked();
+ SetDisabledWhileLocked(RECORDING_MODE);
}
}
-void TraceLog::SetEventCallbackEnabled(const TraceConfig& trace_config,
- EventCallback cb) {
- AutoLock lock(lock_);
- subtle::NoBarrier_Store(&event_callback_,
- reinterpret_cast<subtle::AtomicWord>(cb));
- event_callback_trace_config_ = trace_config;
- UpdateCategoryGroupEnabledFlags();
-}
-
-void TraceLog::SetEventCallbackDisabled() {
- AutoLock lock(lock_);
- subtle::NoBarrier_Store(&event_callback_, 0);
- UpdateCategoryGroupEnabledFlags();
-}
-
// Flush() works as the following:
// 1. Flush() is called in thread A whose task runner is saved in
// flush_task_runner_;
@@ -1377,10 +1266,13 @@ TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
TimeTicks offset_event_timestamp = OffsetTimestamp(timestamp);
ThreadTicks thread_now = ThreadNow();
- // |thread_local_event_buffer_| can be null if the current thread doesn't have
- // a message loop or the message loop is blocked.
- InitializeThreadLocalEventBufferIfSupported();
- auto* thread_local_event_buffer = thread_local_event_buffer_.Get();
+ ThreadLocalEventBuffer* thread_local_event_buffer = nullptr;
+ if (*category_group_enabled & RECORDING_MODE) {
+ // |thread_local_event_buffer_| can be null if the current thread doesn't
+ // have a message loop or the message loop is blocked.
+ InitializeThreadLocalEventBufferIfSupported();
+ thread_local_event_buffer = thread_local_event_buffer_.Get();
+ }
// Check and update the current thread name only if the event is for the
// current thread to avoid locks in most cases.
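[The gate added above works because the category state byte doubles as a bitmask of TraceCategory::ENABLED_FOR_* flags, so a single unsynchronized load decides whether the recording-only buffer setup is needed. A sketch under that assumption; the header providing TraceCategory is assumed visible:]

    // |category_group_enabled| points at the category's state byte.
    bool NeedsRecordingSetup(const unsigned char* category_group_enabled) {
      return (*category_group_enabled &
              base::trace_event::TraceCategory::ENABLED_FOR_RECORDING) != 0;
    }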
@@ -1422,7 +1314,7 @@ TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
#if defined(OS_WIN)
// This is done sooner rather than later, to avoid creating the event and
// acquiring the lock, which is not needed for ETW as it's already threadsafe.
- if (*category_group_enabled & ENABLED_FOR_ETW_EXPORT)
+ if (*category_group_enabled & TraceCategory::ENABLED_FOR_ETW_EXPORT)
TraceEventETWExport::AddEvent(phase, category_group_enabled, name, id,
num_args, arg_names, arg_types, arg_values,
convertable_values);
@@ -1431,7 +1323,7 @@ TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
std::string console_message;
std::unique_ptr<TraceEvent> filtered_trace_event;
bool disabled_by_filters = false;
- if (*category_group_enabled & ENABLED_FOR_FILTERING) {
+ if (*category_group_enabled & TraceCategory::ENABLED_FOR_FILTERING) {
std::unique_ptr<TraceEvent> new_trace_event(new TraceEvent);
new_trace_event->Initialize(thread_id, offset_event_timestamp, thread_now,
phase, category_group_enabled, name, scope, id,
@@ -1451,7 +1343,7 @@ TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
// If enabled for recording, the event should be added only if one of the
// filters indicates or category is not enabled for filtering.
- if ((*category_group_enabled & ENABLED_FOR_RECORDING) &&
+ if ((*category_group_enabled & TraceCategory::ENABLED_FOR_RECORDING) &&
!disabled_by_filters) {
OptionalAutoLock lock(&lock_);
@@ -1488,33 +1380,6 @@ TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
if (!console_message.empty())
LOG(ERROR) << console_message;
- if (reinterpret_cast<const unsigned char*>(
- subtle::NoBarrier_Load(&watch_category_)) == category_group_enabled) {
- bool event_name_matches;
- WatchEventCallback watch_event_callback_copy;
- {
- AutoLock lock(lock_);
- event_name_matches = watch_event_name_ == name;
- watch_event_callback_copy = watch_event_callback_;
- }
- if (event_name_matches) {
- if (!watch_event_callback_copy.is_null())
- watch_event_callback_copy.Run();
- }
- }
-
- if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
- EventCallback event_callback = reinterpret_cast<EventCallback>(
- subtle::NoBarrier_Load(&event_callback_));
- if (event_callback) {
- event_callback(
- offset_event_timestamp,
- phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
- category_group_enabled, name, scope, id, num_args, arg_names,
- arg_types, arg_values, flags);
- }
- }
-
return handle;
}
@@ -1624,12 +1489,12 @@ void TraceLog::UpdateTraceEventDuration(
#if defined(OS_WIN)
// Generate an ETW event that marks the end of a complete event.
- if (category_group_enabled_local & ENABLED_FOR_ETW_EXPORT)
+ if (category_group_enabled_local & TraceCategory::ENABLED_FOR_ETW_EXPORT)
TraceEventETWExport::AddCompleteEndEvent(name);
#endif // OS_WIN
std::string console_message;
- if (category_group_enabled_local & ENABLED_FOR_RECORDING) {
+ if (category_group_enabled_local & TraceCategory::ENABLED_FOR_RECORDING) {
OptionalAutoLock lock(&lock_);
TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock);
@@ -1662,40 +1527,10 @@ void TraceLog::UpdateTraceEventDuration(
if (!console_message.empty())
LOG(ERROR) << console_message;
- if (category_group_enabled_local & ENABLED_FOR_EVENT_CALLBACK) {
- EventCallback event_callback = reinterpret_cast<EventCallback>(
- subtle::NoBarrier_Load(&event_callback_));
- if (event_callback) {
- event_callback(
- now, TRACE_EVENT_PHASE_END, category_group_enabled, name,
- trace_event_internal::kGlobalScope, trace_event_internal::kNoId, 0,
- nullptr, nullptr, nullptr, TRACE_EVENT_FLAG_NONE);
- }
- }
-
- if (category_group_enabled_local & ENABLED_FOR_FILTERING)
+ if (category_group_enabled_local & TraceCategory::ENABLED_FOR_FILTERING)
EndFilteredEvent(category_group_enabled, name, handle);
}
-void TraceLog::SetWatchEvent(const std::string& category_name,
- const std::string& event_name,
- const WatchEventCallback& callback) {
- const unsigned char* category =
- GetCategoryGroupEnabled(category_name.c_str());
- AutoLock lock(lock_);
- subtle::NoBarrier_Store(&watch_category_,
- reinterpret_cast<subtle::AtomicWord>(category));
- watch_event_name_ = event_name;
- watch_event_callback_ = callback;
-}
-
-void TraceLog::CancelWatchEvent() {
- AutoLock lock(lock_);
- subtle::NoBarrier_Store(&watch_category_, 0);
- watch_event_name_.clear();
- watch_event_callback_.Reset();
-}
-
uint64_t TraceLog::MangleEventId(uint64_t id) {
return id ^ process_id_hash_;
}
@@ -1765,14 +1600,9 @@ void TraceLog::AddMetadataEventsWhileLocked() {
}
}
-void TraceLog::WaitSamplingEventForTesting() {
- if (!sampling_thread_)
- return;
- sampling_thread_->WaitSamplingEventForTesting();
-}
-
void TraceLog::DeleteForTesting() {
internal::DeleteTraceLogForTesting::Delete();
+ CategoryRegistry::ResetForTesting();
}
void TraceLog::SetTraceEventFilterConstructorForTesting(
@@ -1889,18 +1719,14 @@ TraceBuffer* TraceLog::CreateTraceBuffer() {
#if defined(OS_WIN)
void TraceLog::UpdateETWCategoryGroupEnabledFlags() {
- AutoLock lock(lock_);
- size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
// Go through each category and set/clear the ETW bit depending on whether the
// category is enabled.
- for (size_t i = 0; i < category_index; i++) {
- const char* category_group = g_category_groups[i];
- DCHECK(category_group);
+ for (TraceCategory& category : CategoryRegistry::GetAllCategories()) {
if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled(
- category_group)) {
- g_category_group_enabled[i] |= ENABLED_FOR_ETW_EXPORT;
+ category.name())) {
+ category.set_state_flag(TraceCategory::ENABLED_FOR_ETW_EXPORT);
} else {
- g_category_group_enabled[i] &= ~ENABLED_FOR_ETW_EXPORT;
+ category.clear_state_flag(TraceCategory::ENABLED_FOR_ETW_EXPORT);
}
}
}
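[The set_state_flag()/clear_state_flag() pair used in the loop above suggests plain single-bit updates on the category state byte. A sketch of the assumed semantics, modeling the state as an atomic byte:]

    #include <atomic>
    #include <cstdint>

    // OR one flag bit in, or mask it out, leaving the other bits intact.
    void SetStateFlag(std::atomic<uint8_t>* state, uint8_t flag) {
      state->fetch_or(flag, std::memory_order_relaxed);
    }

    void ClearStateFlag(std::atomic<uint8_t>* state, uint8_t flag) {
      state->fetch_and(static_cast<uint8_t>(~flag), std::memory_order_relaxed);
    }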
diff --git a/chromium/base/trace_event/trace_log.h b/chromium/base/trace_event/trace_log.h
index 3eb79b819f7..68a7fbbcb17 100644
--- a/chromium/base/trace_event/trace_log.h
+++ b/chromium/base/trace_event/trace_log.h
@@ -26,15 +26,16 @@ namespace base {
template <typename Type>
struct DefaultSingletonTraits;
+class MessageLoop;
class RefCountedString;
namespace trace_event {
+struct TraceCategory;
class TraceBuffer;
class TraceBufferChunk;
class TraceEvent;
class TraceEventMemoryOverhead;
-class TraceSamplingThread;
struct BASE_EXPORT TraceLogStatus {
TraceLogStatus();
@@ -45,24 +46,14 @@ struct BASE_EXPORT TraceLogStatus {
class BASE_EXPORT TraceLog : public MemoryDumpProvider {
public:
- enum Mode {
- DISABLED = 0,
- RECORDING_MODE
- };
-
- // The pointer returned from GetCategoryGroupEnabledInternal() points to a
- // value with zero or more of the following bits. Used in this class only.
- // The TRACE_EVENT macros should only use the value as a bool.
- // These values must be in sync with macro values in TraceEvent.h in Blink.
- enum CategoryGroupEnabledFlags {
- // Category group enabled for the recording mode.
- ENABLED_FOR_RECORDING = 1 << 0,
- // Category group enabled by SetEventCallbackEnabled().
- ENABLED_FOR_EVENT_CALLBACK = 1 << 2,
- // Category group enabled to export events to ETW.
- ENABLED_FOR_ETW_EXPORT = 1 << 3,
- // Category group being filtered before logged.
- ENABLED_FOR_FILTERING = 1 << 4
+ // Argument passed to TraceLog::SetEnabled.
+ enum Mode : uint8_t {
+ // Enables normal tracing (recording trace events in the trace buffer).
+ RECORDING_MODE = 1 << 0,
+
+    // Trace events are enabled just for filtering, not for recording. Only
+    // the event filter configuration of the |trace_config| argument is used.
+ FILTERING_MODE = 1 << 1
};
static TraceLog* GetInstance();
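[Under this contract a caller requests modes as a bitmask and tears each mode down separately. A usage sketch; the category string is illustrative, and FILTERING_MODE additionally assumes |config| defines event filters:]

    using base::trace_event::TraceConfig;
    using base::trace_event::TraceLog;

    TraceConfig config("cat1,cat2", "");  // would also need event filters set
    TraceLog* log = TraceLog::GetInstance();
    log->SetEnabled(config,
                    TraceLog::RECORDING_MODE | TraceLog::FILTERING_MODE);
    // ... events are recorded and filtered ...
    log->SetDisabled(TraceLog::FILTERING_MODE);  // recording continues
    log->SetDisabled(TraceLog::RECORDING_MODE);  // metadata added, observers notified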
@@ -78,16 +69,30 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
// if the current thread supports that (has a message loop).
void InitializeThreadLocalEventBufferIfSupported();
- // Enables normal tracing (recording trace events in the trace buffer).
- // See TraceConfig comments for details on how to control what categories
- // will be traced. If tracing has already been enabled, |category_filter| will
- // be merged into the current category filter.
- void SetEnabled(const TraceConfig& trace_config, Mode mode);
-
- // Disables normal tracing for all categories.
+ // See TraceConfig comments for details on how to control which categories
+  // will be traced. SetDisabled must be called separately for each mode that
+  // was enabled. If tracing has already been enabled for recording, the given
+  // category filter (enabled and disabled categories) will be merged into the
+  // current category filter. Enabling RECORDING_MODE does not enable filters;
+  // trace event filters are used only if FILTERING_MODE is set in
+  // |modes_to_enable|. Unlike RECORDING_MODE, FILTERING_MODE does not support
+  // upgrading, i.e. filters can only be enabled if not previously enabled.
+ void SetEnabled(const TraceConfig& trace_config, uint8_t modes_to_enable);
+
+ // TODO(ssid): Remove the default SetEnabled and IsEnabled. They should take
+  // Mode as an argument.
+
+  // Disables tracing for all categories, for the specified |modes_to_disable|
+  // only. The no-argument overload disables RECORDING_MODE only.
void SetDisabled();
+ void SetDisabled(uint8_t modes_to_disable);
- bool IsEnabled() { return mode_ != DISABLED; }
+  // Returns true if TraceLog is enabled in recording mode.
+ // Note: Returns false even if FILTERING_MODE is enabled.
+ bool IsEnabled() { return enabled_modes_ & RECORDING_MODE; }
+
+ // Returns a bitmap of enabled modes from TraceLog::Mode.
+ uint8_t enabled_modes() { return enabled_modes_; }
// The number of times we have begun recording traces. If tracing is off,
// returns -1. If tracing is on, then it returns the number of times we have
@@ -150,31 +155,6 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
// objects.
void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead);
- // Not using base::Callback because of its limited by 7 parameters.
- // Also, using primitive type allows directly passing callback from WebCore.
- // WARNING: It is possible for the previously set callback to be called
- // after a call to SetEventCallbackEnabled() that replaces or a call to
- // SetEventCallbackDisabled() that disables the callback.
- // This callback may be invoked on any thread.
- // For TRACE_EVENT_PHASE_COMPLETE events, the client will still receive pairs
- // of TRACE_EVENT_PHASE_BEGIN and TRACE_EVENT_PHASE_END events to keep the
- // interface simple.
- typedef void (*EventCallback)(TimeTicks timestamp,
- char phase,
- const unsigned char* category_group_enabled,
- const char* name,
- const char* scope,
- unsigned long long id,
- int num_args,
- const char* const arg_names[],
- const unsigned char arg_types[],
- const unsigned long long arg_values[],
- unsigned int flags);
-
- // Enable tracing for EventCallback.
- void SetEventCallbackEnabled(const TraceConfig& trace_config,
- EventCallback cb);
- void SetEventCallbackDisabled();
void SetArgumentFilterPredicate(
const ArgumentFilterPredicate& argument_filter_predicate);
@@ -292,23 +272,12 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
const char* name,
TraceEventHandle handle);
- // For every matching event, the callback will be called.
- typedef base::Callback<void()> WatchEventCallback;
- void SetWatchEvent(const std::string& category_name,
- const std::string& event_name,
- const WatchEventCallback& callback);
- // Cancel the watch event. If tracing is enabled, this may race with the
- // watch event notification firing.
- void CancelWatchEvent();
-
int process_id() const { return process_id_; }
uint64_t MangleEventId(uint64_t id);
// Exposed for unittesting:
- void WaitSamplingEventForTesting();
-
// Allows deleting our singleton instance.
static void DeleteForTesting();
@@ -396,12 +365,12 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
ProcessMemoryDump* pmd) override;
// Enable/disable each category group based on the current mode_,
- // category_filter_, event_callback_ and event_callback_category_filter_.
- // Enable the category group in the enabled mode if category_filter_ matches
- // the category group, or event_callback_ is not null and
- // event_callback_category_filter_ matches the category group.
- void UpdateCategoryGroupEnabledFlags();
- void UpdateCategoryGroupEnabledFlag(size_t category_index);
+ // category_filter_ and event_filters_enabled_.
+  // Enable the category group for recording if category_filter_ matches
+  // the category group. Enable the category group for filtering if any
+  // filter in event_filters_enabled_ enables it.
+ void UpdateCategoryRegistry();
+ void UpdateCategoryState(TraceCategory* category);
void CreateFiltersForTraceConfig();
@@ -418,7 +387,6 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
TraceLog();
~TraceLog() override;
- const unsigned char* GetCategoryGroupEnabledInternal(const char* name);
void AddMetadataEventsWhileLocked();
InternalTraceOptions trace_options() const {
@@ -436,7 +404,7 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
TraceEvent* AddEventToThreadSharedChunkWhileLocked(TraceEventHandle* handle,
bool check_buffer_is_full);
void CheckIfBufferIsFullWhileLocked();
- void SetDisabledWhileLocked();
+ void SetDisabledWhileLocked(uint8_t modes);
TraceEvent* GetEventByHandleInternal(TraceEventHandle handle,
OptionalAutoLock* lock);
@@ -475,7 +443,6 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
static const InternalTraceOptions kInternalRecordUntilFull;
static const InternalTraceOptions kInternalRecordContinuously;
static const InternalTraceOptions kInternalEchoToConsole;
- static const InternalTraceOptions kInternalEnableSampling;
static const InternalTraceOptions kInternalRecordAsMuchAsPossible;
static const InternalTraceOptions kInternalEnableArgumentFilter;
@@ -485,11 +452,10 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
// This lock protects accesses to thread_names_, thread_event_start_times_
// and thread_colors_.
Lock thread_info_lock_;
- Mode mode_;
+ uint8_t enabled_modes_; // See TraceLog::Mode.
int num_traces_recorded_;
std::unique_ptr<TraceBuffer> logged_events_;
std::vector<std::unique_ptr<TraceEvent>> metadata_events_;
- subtle::AtomicWord /* EventCallback */ event_callback_;
bool dispatching_to_observer_list_;
std::vector<EnabledStateObserver*> enabled_state_observer_list_;
std::map<AsyncEnabledStateObserver*, RegisteredAsyncObserver>
@@ -514,19 +480,10 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
TimeDelta time_offset_;
- // Allow tests to wake up when certain events occur.
- WatchEventCallback watch_event_callback_;
- subtle::AtomicWord /* const unsigned char* */ watch_category_;
- std::string watch_event_name_;
-
subtle::AtomicWord /* Options */ trace_options_;
- // Sampling thread handles.
- std::unique_ptr<TraceSamplingThread> sampling_thread_;
- PlatformThreadHandle sampling_thread_handle_;
-
TraceConfig trace_config_;
- TraceConfig event_callback_trace_config_;
+ TraceConfig::EventFilters enabled_event_filters_;
ThreadLocalPointer<ThreadLocalEventBuffer> thread_local_event_buffer_;
ThreadLocalBoolean thread_blocks_message_loop_;
diff --git a/chromium/base/trace_event/trace_log_constants.cc b/chromium/base/trace_event/trace_log_constants.cc
index 9d4fe07f0d6..b72ca1b3b4a 100644
--- a/chromium/base/trace_event/trace_log_constants.cc
+++ b/chromium/base/trace_event/trace_log_constants.cc
@@ -14,8 +14,7 @@ const TraceLog::InternalTraceOptions
TraceLog::kInternalRecordUntilFull = 1 << 0;
const TraceLog::InternalTraceOptions
TraceLog::kInternalRecordContinuously = 1 << 1;
-const TraceLog::InternalTraceOptions
- TraceLog::kInternalEnableSampling = 1 << 2;
+// 1 << 2 is reserved for the DEPRECATED kInternalEnableSampling. DO NOT USE.
const TraceLog::InternalTraceOptions
TraceLog::kInternalEchoToConsole = 1 << 3;
const TraceLog::InternalTraceOptions
diff --git a/chromium/base/trace_event/trace_sampling_thread.cc b/chromium/base/trace_event/trace_sampling_thread.cc
deleted file mode 100644
index 5a0d2f8a024..00000000000
--- a/chromium/base/trace_event/trace_sampling_thread.cc
+++ /dev/null
@@ -1,107 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include <stddef.h>
-
-#include "base/trace_event/trace_event.h"
-#include "base/trace_event/trace_event_impl.h"
-#include "base/trace_event/trace_log.h"
-#include "base/trace_event/trace_sampling_thread.h"
-
-namespace base {
-namespace trace_event {
-
-class TraceBucketData {
- public:
- TraceBucketData(base::subtle::AtomicWord* bucket,
- const char* name,
- TraceSampleCallback callback);
- ~TraceBucketData();
-
- TRACE_EVENT_API_ATOMIC_WORD* bucket;
- const char* bucket_name;
- TraceSampleCallback callback;
-};
-
-TraceSamplingThread::TraceSamplingThread()
- : thread_running_(false),
- waitable_event_for_testing_(WaitableEvent::ResetPolicy::AUTOMATIC,
- WaitableEvent::InitialState::NOT_SIGNALED) {}
-
-TraceSamplingThread::~TraceSamplingThread() {}
-
-void TraceSamplingThread::ThreadMain() {
- PlatformThread::SetName("Sampling Thread");
- thread_running_ = true;
- const int kSamplingFrequencyMicroseconds = 1000;
- while (!cancellation_flag_.IsSet()) {
- PlatformThread::Sleep(
- TimeDelta::FromMicroseconds(kSamplingFrequencyMicroseconds));
- GetSamples();
- waitable_event_for_testing_.Signal();
- }
-}
-
-// static
-void TraceSamplingThread::DefaultSamplingCallback(
- TraceBucketData* bucket_data) {
- TRACE_EVENT_API_ATOMIC_WORD category_and_name =
- TRACE_EVENT_API_ATOMIC_LOAD(*bucket_data->bucket);
- if (!category_and_name)
- return;
- const char* const combined =
- reinterpret_cast<const char* const>(category_and_name);
- const char* category_group;
- const char* name;
- ExtractCategoryAndName(combined, &category_group, &name);
- TRACE_EVENT_API_ADD_TRACE_EVENT(
- TRACE_EVENT_PHASE_SAMPLE,
- TraceLog::GetCategoryGroupEnabled(category_group), name,
- trace_event_internal::kGlobalScope, trace_event_internal::kNoId, 0,
- NULL, NULL, NULL, NULL, 0);
-}
-
-void TraceSamplingThread::GetSamples() {
- for (size_t i = 0; i < sample_buckets_.size(); ++i) {
- TraceBucketData* bucket_data = &sample_buckets_[i];
- bucket_data->callback.Run(bucket_data);
- }
-}
-
-void TraceSamplingThread::RegisterSampleBucket(
- TRACE_EVENT_API_ATOMIC_WORD* bucket,
- const char* const name,
- TraceSampleCallback callback) {
- // Access to sample_buckets_ doesn't cause races with the sampling thread
- // that uses the sample_buckets_, because it is guaranteed that
- // RegisterSampleBucket is called before the sampling thread is created.
- DCHECK(!thread_running_);
- sample_buckets_.push_back(TraceBucketData(bucket, name, callback));
-}
-
-// static
-void TraceSamplingThread::ExtractCategoryAndName(const char* combined,
- const char** category,
- const char** name) {
- *category = combined;
- *name = &combined[strlen(combined) + 1];
-}
-
-void TraceSamplingThread::Stop() {
- cancellation_flag_.Set();
-}
-
-void TraceSamplingThread::WaitSamplingEventForTesting() {
- waitable_event_for_testing_.Wait();
-}
-
-TraceBucketData::TraceBucketData(base::subtle::AtomicWord* bucket,
- const char* name,
- TraceSampleCallback callback)
- : bucket(bucket), bucket_name(name), callback(callback) {}
-
-TraceBucketData::~TraceBucketData() {}
-
-} // namespace trace_event
-} // namespace base
diff --git a/chromium/base/trace_event/trace_sampling_thread.h b/chromium/base/trace_event/trace_sampling_thread.h
deleted file mode 100644
index f976a80e07c..00000000000
--- a/chromium/base/trace_event/trace_sampling_thread.h
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TRACE_EVENT_TRACE_SAMPLING_THREAD_H_
-#define BASE_TRACE_EVENT_TRACE_SAMPLING_THREAD_H_
-
-#include "base/synchronization/cancellation_flag.h"
-#include "base/synchronization/waitable_event.h"
-#include "base/trace_event/trace_event.h"
-
-namespace base {
-namespace trace_event {
-
-class TraceBucketData;
-typedef base::Callback<void(TraceBucketData*)> TraceSampleCallback;
-
-// This object must be created on the IO thread.
-class TraceSamplingThread : public PlatformThread::Delegate {
- public:
- TraceSamplingThread();
- ~TraceSamplingThread() override;
-
- // Implementation of PlatformThread::Delegate:
- void ThreadMain() override;
-
- static void DefaultSamplingCallback(TraceBucketData* bucket_data);
-
- void Stop();
- void WaitSamplingEventForTesting();
-
- private:
- friend class TraceLog;
-
- void GetSamples();
- // Not thread-safe. Once the ThreadMain has been called, this can no longer
- // be called.
- void RegisterSampleBucket(TRACE_EVENT_API_ATOMIC_WORD* bucket,
- const char* const name,
- TraceSampleCallback callback);
- // Splits a combined "category\0name" into the two component parts.
- static void ExtractCategoryAndName(const char* combined,
- const char** category,
- const char** name);
- std::vector<TraceBucketData> sample_buckets_;
- bool thread_running_;
- CancellationFlag cancellation_flag_;
- WaitableEvent waitable_event_for_testing_;
-};
-
-} // namespace trace_event
-} // namespace base
-
-#endif // BASE_TRACE_EVENT_TRACE_SAMPLING_THREAD_H_
diff --git a/chromium/base/unguessable_token.h b/chromium/base/unguessable_token.h
index b1b38707233..9f38783a3ca 100644
--- a/chromium/base/unguessable_token.h
+++ b/chromium/base/unguessable_token.h
@@ -45,7 +45,7 @@ class BASE_EXPORT UnguessableToken {
// Creates an empty UnguessableToken.
// Assign to it with Create() before using it.
- UnguessableToken() = default;
+ constexpr UnguessableToken() = default;
// NOTE: Serializing an empty UnguessableToken is an illegal operation.
uint64_t GetHighForSerialization() const {
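[Making the default constructor constexpr lets empty tokens live in constant-initialized storage with no dynamic initializer; Create() still has to assign a real value before use. A usage sketch:]

    #include "base/unguessable_token.h"

    struct Session {
      base::UnguessableToken token;  // empty; no static initializer emitted
    };

    void Start(Session* session) {
      session->token = base::UnguessableToken::Create();
    }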
diff --git a/chromium/base/values.cc b/chromium/base/values.cc
index 7c14e81beec..f00a03f8d30 100644
--- a/chromium/base/values.cc
+++ b/chromium/base/values.cc
@@ -397,10 +397,11 @@ void DictionaryValue::Set(StringPiece path, std::unique_ptr<Value> in_value) {
delimiter_position = current_path.find('.')) {
// Assume that we're indexing into a dictionary.
StringPiece key = current_path.substr(0, delimiter_position);
- DictionaryValue* child_dictionary = NULL;
+ DictionaryValue* child_dictionary = nullptr;
if (!current_dictionary->GetDictionary(key, &child_dictionary)) {
child_dictionary = new DictionaryValue;
- current_dictionary->SetWithoutPathExpansion(key, child_dictionary);
+ current_dictionary->SetWithoutPathExpansion(
+ key, base::WrapUnique(child_dictionary));
}
current_dictionary = child_dictionary;
@@ -447,27 +448,30 @@ void DictionaryValue::SetWithoutPathExpansion(StringPiece key,
void DictionaryValue::SetBooleanWithoutPathExpansion(StringPiece path,
bool in_value) {
- SetWithoutPathExpansion(path, new FundamentalValue(in_value));
+ SetWithoutPathExpansion(path,
+ base::MakeUnique<base::FundamentalValue>(in_value));
}
void DictionaryValue::SetIntegerWithoutPathExpansion(StringPiece path,
int in_value) {
- SetWithoutPathExpansion(path, new FundamentalValue(in_value));
+ SetWithoutPathExpansion(path,
+ base::MakeUnique<base::FundamentalValue>(in_value));
}
void DictionaryValue::SetDoubleWithoutPathExpansion(StringPiece path,
double in_value) {
- SetWithoutPathExpansion(path, new FundamentalValue(in_value));
+ SetWithoutPathExpansion(path,
+ base::MakeUnique<base::FundamentalValue>(in_value));
}
void DictionaryValue::SetStringWithoutPathExpansion(StringPiece path,
StringPiece in_value) {
- SetWithoutPathExpansion(path, new StringValue(in_value));
+ SetWithoutPathExpansion(path, base::MakeUnique<base::StringValue>(in_value));
}
void DictionaryValue::SetStringWithoutPathExpansion(StringPiece path,
const string16& in_value) {
- SetWithoutPathExpansion(path, new StringValue(in_value));
+ SetWithoutPathExpansion(path, base::MakeUnique<base::StringValue>(in_value));
}
bool DictionaryValue::Get(StringPiece path,
@@ -795,7 +799,8 @@ void DictionaryValue::MergeDictionary(const DictionaryValue* dictionary) {
}
}
// All other cases: Make a copy and hook it up.
- SetWithoutPathExpansion(it.key(), merge_value->DeepCopy());
+ SetWithoutPathExpansion(it.key(),
+ base::WrapUnique(merge_value->DeepCopy()));
}
}
@@ -1040,7 +1045,7 @@ void ListValue::Append(std::unique_ptr<Value> in_value) {
list_.push_back(std::move(in_value));
}
-#if !defined(OS_LINUX) || defined(OS_CHROMEOS)
+#if !defined(OS_LINUX)
void ListValue::Append(Value* in_value) {
DCHECK(in_value);
Append(WrapUnique(in_value));
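[For orientation, these call sites all move from raw new'd pointers to the ownership-explicit std::unique_ptr overloads. A usage sketch of the resulting style:]

    #include "base/memory/ptr_util.h"
    #include "base/values.h"

    void BuildDict() {
      base::DictionaryValue dict;
      dict.SetBooleanWithoutPathExpansion("enabled", true);
      dict.SetWithoutPathExpansion(
          "mode", base::MakeUnique<base::StringValue>("recording"));
      dict.Set("nested.count", base::MakeUnique<base::FundamentalValue>(3));
    }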
diff --git a/chromium/base/values.h b/chromium/base/values.h
index 43d400c6599..258a9603a61 100644
--- a/chromium/base/values.h
+++ b/chromium/base/values.h
@@ -89,8 +89,10 @@ class BASE_EXPORT Value {
virtual bool GetAsString(string16* out_value) const;
virtual bool GetAsString(const StringValue** out_value) const;
virtual bool GetAsBinary(const BinaryValue** out_value) const;
+ // ListValue::From is the equivalent for std::unique_ptr conversions.
virtual bool GetAsList(ListValue** out_value);
virtual bool GetAsList(const ListValue** out_value) const;
+ // DictionaryValue::From is the equivalent for std::unique_ptr conversions.
virtual bool GetAsDictionary(DictionaryValue** out_value);
virtual bool GetAsDictionary(const DictionaryValue** out_value) const;
// Note: Do not add more types. See the file-level comment above for why.
@@ -457,7 +459,7 @@ class BASE_EXPORT ListValue : public Value {
// Appends a Value to the end of the list.
void Append(std::unique_ptr<Value> in_value);
-#if !defined(OS_LINUX) || defined(OS_CHROMEOS)
+#if !defined(OS_LINUX)
// Deprecated version of the above. TODO(estade): remove.
void Append(Value* in_value);
#endif
diff --git a/chromium/base/win/BUILD.gn b/chromium/base/win/BUILD.gn
index 01092735b3a..ff2a754ae2f 100644
--- a/chromium/base/win/BUILD.gn
+++ b/chromium/base/win/BUILD.gn
@@ -3,6 +3,7 @@
# found in the LICENSE file.
import("//build/buildflag_header.gni")
+import("//build/win/message_compiler.gni")
declare_args() {
# Indicates if the handle verifier should operate in a single module mode. By
@@ -21,3 +22,29 @@ buildflag_header("base_win_features") {
header_dir = "base/win"
flags = [ "SINGLE_MODULE_MODE_HANDLE_VERIFIER=$single_module_mode_handle_verifier" ]
}
+
+message_compiler("eventlog_messages") {
+ visibility = [
+ "//base:base",
+ ":eventlog_provider",
+ ]
+
+ sources = [
+ "eventlog_messages.mc",
+ ]
+
+ user_mode_logging = false
+ compile_generated_code = false
+}
+shared_library("eventlog_provider") {
+ sources = [
+ "$root_gen_dir/base/win/eventlog_messages.rc",
+ "eventlog_provider.cc",
+ ]
+
+ ldflags = [ "/NOENTRY" ]
+
+ deps = [
+ "//base/win:eventlog_messages",
+ ]
+}
diff --git a/chromium/base/win/eventlog_messages.mc b/chromium/base/win/eventlog_messages.mc
new file mode 100644
index 00000000000..bfc16319bb1
--- /dev/null
+++ b/chromium/base/win/eventlog_messages.mc
@@ -0,0 +1,32 @@
+;// Copyright 2016 The Chromium Authors. All rights reserved.
+;// Use of this source code is governed by a BSD-style license that can be
+;// found in the LICENSE file.
+;//
+;// Defines the names and types of messages that are logged with the SYSLOG
+;// macro.
+SeverityNames=(Informational=0x0:STATUS_SEVERITY_INFORMATIONAL
+ Warning=0x1:STATUS_SEVERITY_WARNING
+ Error=0x2:STATUS_SEVERITY_ERROR
+ Fatal=0x3:STATUS_SEVERITY_FATAL
+ )
+FacilityNames=(Browser=0x0:FACILITY_SYSTEM)
+LanguageNames=(English=0x409:MSG00409)
+
+;// TODO(pastarmovj): Subdivide into more categories if needed.
+MessageIdTypedef=WORD
+
+MessageId=0x1
+SymbolicName=BROWSER_CATEGORY
+Language=English
+Browser Events
+.
+
+MessageIdTypedef=DWORD
+
+MessageId=0x100
+Severity=Error
+Facility=Browser
+SymbolicName=MSG_LOG_MESSAGE
+Language=English
+%1!S!
+.
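[For context: the message compiler turns this .mc source into a generated header (defining BROWSER_CATEGORY and MSG_LOG_MESSAGE) plus a resource script for the resource-only provider DLL below. A hedged sketch of how a logger could emit MSG_LOG_MESSAGE; the event-source name and the generated header path are assumptions:]

    #include <windows.h>

    #include "base/win/eventlog_messages.h"  // assumed generated header

    void WriteToEventLog(const char* message) {
      // "Chrome" as the registered event source is illustrative only.
      HANDLE event_log = RegisterEventSourceA(nullptr, "Chrome");
      if (!event_log)
        return;
      const char* strings[1] = {message};
      ReportEventA(event_log, EVENTLOG_ERROR_TYPE, BROWSER_CATEGORY,
                   MSG_LOG_MESSAGE, nullptr, 1, 0, strings, nullptr);
      DeregisterEventSource(event_log);
    }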
diff --git a/chromium/base/win/eventlog_provider.cc b/chromium/base/win/eventlog_provider.cc
new file mode 100644
index 00000000000..d4116c26584
--- /dev/null
+++ b/chromium/base/win/eventlog_provider.cc
@@ -0,0 +1,9 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Force the generation of a .lib file for the .dll since Ninja expects shared
+// libraries to generate a .dll and a .lib file.
+__declspec(dllexport) bool fn() {
+ return true;
+}
diff --git a/chromium/base/win/scoped_hdc.h b/chromium/base/win/scoped_hdc.h
index fa686dd0500..890e34a82c5 100644
--- a/chromium/base/win/scoped_hdc.h
+++ b/chromium/base/win/scoped_hdc.h
@@ -7,6 +7,7 @@
#include <windows.h>
+#include "base/debug/gdi_debug_util_win.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/win/scoped_handle.h"
@@ -28,7 +29,8 @@ class ScopedGetDC {
// If GetDC(NULL) returns NULL, something really bad has happened, like
// GDI handle exhaustion. In this case Chrome is going to behave badly no
// matter what, so we may as well just force a crash now.
- CHECK(hdc_);
+ if (!hdc_)
+ base::debug::CollectGDIUsageAndDie();
}
}
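[Call sites are unchanged by this swap; construction still terminates the process when GetDC fails, but now through a path that records GDI handle statistics first. A usage sketch:]

    #include "base/win/scoped_hdc.h"

    void Paint() {
      // HWND == nullptr requests the screen DC; on failure the process
      // dies via base::debug::CollectGDIUsageAndDie().
      base::win::ScopedGetDC screen_dc(nullptr);
      // ... use screen_dc wherever an HDC is expected ...
    }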
diff --git a/chromium/base/win/windows_version.cc b/chromium/base/win/windows_version.cc
index 3cf22d8ccda..4b7f8baaca9 100644
--- a/chromium/base/win/windows_version.cc
+++ b/chromium/base/win/windows_version.cc
@@ -14,8 +14,8 @@
#include "base/strings/utf_string_conversions.h"
#include "base/win/registry.h"
-#if !defined(__clang__) && _MSC_FULL_VER < 190023918
-#error VS 2015 Update 2 or higher is required
+#if !defined(__clang__) && _MSC_FULL_VER < 190024213
+#error VS 2015 Update 3 with Cumulative Servicing Release or higher is required
#endif
namespace {
@@ -83,6 +83,27 @@ Version GetVersionFromKernel32() {
return VERSION_WIN_LAST;
}
+// Returns the "UBR" value from the registry. Introduced in Windows 10,
+// this undocumented value appears to be similar to a patch number.
+// Returns 0 if the value does not exist or it could not be read.
+int GetUBR() {
+ // The values under the CurrentVersion registry hive are mirrored under
+ // the corresponding Wow6432 hive.
+ static constexpr wchar_t kRegKeyWindowsNTCurrentVersion[] =
+ L"SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion";
+
+ base::win::RegKey key;
+ if (key.Open(HKEY_LOCAL_MACHINE, kRegKeyWindowsNTCurrentVersion,
+ KEY_QUERY_VALUE) != ERROR_SUCCESS) {
+ return 0;
+ }
+
+ DWORD ubr = 0;
+ key.ReadValueDW(L"UBR", &ubr);
+
+ return static_cast<int>(ubr);
+}
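[Downstream, the new value surfaces as the "patch" component of OSInfo::version_number(). A usage sketch; the helper name and threshold check are illustrative:]

    #include "base/win/windows_version.h"

    // Returns true when the Windows 10 UBR ("patch") level is at least
    // |minimum_patch|; the patch component is 0 where the value is absent.
    bool HasAtLeastPatchLevel(int minimum_patch) {
      return base::win::OSInfo::GetInstance()->version_number().patch >=
             minimum_patch;
    }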
+
} // namespace
// static
@@ -112,6 +133,7 @@ OSInfo::OSInfo()
version_number_.major = version_info.dwMajorVersion;
version_number_.minor = version_info.dwMinorVersion;
version_number_.build = version_info.dwBuildNumber;
+ version_number_.patch = GetUBR();
version_ = MajorMinorBuildToVersion(
version_number_.major, version_number_.minor, version_number_.build);
service_pack_.major = version_info.wServicePackMajor;
diff --git a/chromium/base/win/windows_version.h b/chromium/base/win/windows_version.h
index 6eaf2df4516..9969597471b 100644
--- a/chromium/base/win/windows_version.h
+++ b/chromium/base/win/windows_version.h
@@ -56,6 +56,7 @@ class BASE_EXPORT OSInfo {
int major;
int minor;
int build;
+ int patch;
};
struct ServicePack {