author    Allan Sandfeld Jensen <allan.jensen@qt.io>    2017-04-05 14:08:31 +0200
committer Allan Sandfeld Jensen <allan.jensen@qt.io>    2017-04-11 07:46:53 +0000
commit    6a4cabb866f66d4128a97cdc6d9d08ce074f1247 (patch)
tree      ab00f70a5e89278d6a0d16ff0c42578dc4d84a2d /chromium/base
parent    e733310db58160074f574c429d48f8308c0afe17 (diff)
download  qtwebengine-chromium-6a4cabb866f66d4128a97cdc6d9d08ce074f1247.tar.gz
BASELINE: Update Chromium to 57.0.2987.144
Change-Id: I29db402ff696c71a04c4dbaec822c2e53efe0267
Reviewed-by: Peter Varga <pvarga@inf.u-szeged.hu>
Diffstat (limited to 'chromium/base')
-rw-r--r-- chromium/base/BUILD.gn | 110
-rw-r--r-- chromium/base/OWNERS | 5
-rw-r--r-- chromium/base/allocator/BUILD.gn | 12
-rw-r--r-- chromium/base/allocator/allocator_shim.cc | 52
-rw-r--r-- chromium/base/allocator/allocator_shim.h | 6
-rw-r--r-- chromium/base/allocator/allocator_shim_override_ucrt_symbols_win.h | 24
-rw-r--r-- chromium/base/allocator/allocator_shim_unittest.cc | 11
-rw-r--r-- chromium/base/allocator/oom.h | 26
-rw-r--r-- chromium/base/allocator/partition_allocator/OWNERS | 5
-rw-r--r-- chromium/base/allocator/partition_allocator/PartitionAlloc.md | 97
-rw-r--r-- chromium/base/allocator/partition_allocator/address_space_randomization.cc | 130
-rw-r--r-- chromium/base/allocator/partition_allocator/address_space_randomization.h | 16
-rw-r--r-- chromium/base/allocator/partition_allocator/page_allocator.cc | 278
-rw-r--r-- chromium/base/allocator/partition_allocator/page_allocator.h | 124
-rw-r--r-- chromium/base/allocator/partition_allocator/partition_alloc.cc | 1415
-rw-r--r-- chromium/base/allocator/partition_allocator/partition_alloc.h | 905
-rw-r--r-- chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc | 2080
-rw-r--r-- chromium/base/at_exit.cc | 10
-rw-r--r-- chromium/base/at_exit.h | 4
-rw-r--r-- chromium/base/base_paths_posix.cc | 29
-rw-r--r-- chromium/base/bind_helpers.h | 47
-rw-r--r-- chromium/base/bind_internal.h | 69
-rw-r--r-- chromium/base/bind_unittest.cc | 2
-rw-r--r-- chromium/base/bind_unittest.nc | 23
-rw-r--r-- chromium/base/bit_cast.h | 33
-rw-r--r-- chromium/base/bit_cast_unittest.cc | 1
-rw-r--r-- chromium/base/bits.h | 59
-rw-r--r-- chromium/base/bits_unittest.cc | 20
-rw-r--r-- chromium/base/callback.h | 39
-rw-r--r-- chromium/base/callback_list_unittest.nc | 2
-rw-r--r-- chromium/base/callback_unittest.cc | 11
-rw-r--r-- chromium/base/check_example.cc | 14
-rw-r--r-- chromium/base/compiler_specific.h | 12
-rw-r--r-- chromium/base/containers/mru_cache.h | 8
-rw-r--r-- chromium/base/containers/scoped_ptr_hash_map.h | 175
-rw-r--r-- chromium/base/containers/scoped_ptr_hash_map_unittest.cc | 108
-rw-r--r-- chromium/base/cpu.cc | 23
-rw-r--r-- chromium/base/debug/activity_analyzer.cc | 89
-rw-r--r-- chromium/base/debug/activity_analyzer.h | 27
-rw-r--r-- chromium/base/debug/activity_analyzer_unittest.cc | 162
-rw-r--r-- chromium/base/debug/activity_tracker.cc | 370
-rw-r--r-- chromium/base/debug/activity_tracker.h | 237
-rw-r--r-- chromium/base/debug/activity_tracker_unittest.cc | 45
-rw-r--r-- chromium/base/debug/dump_without_crashing.cc | 7
-rw-r--r-- chromium/base/debug/dump_without_crashing.h | 3
-rw-r--r-- chromium/base/debug/leak_tracker_unittest.cc | 2
-rw-r--r-- chromium/base/debug/proc_maps_linux_unittest.cc | 4
-rw-r--r-- chromium/base/debug/stack_trace_posix.cc | 5
-rw-r--r-- chromium/base/debug/thread_heap_usage_tracker.cc | 16
-rw-r--r-- chromium/base/feature_list.cc | 81
-rw-r--r-- chromium/base/feature_list.h | 15
-rw-r--r-- chromium/base/feature_list_unittest.cc | 65
-rw-r--r-- chromium/base/files/file.h | 65
-rw-r--r-- chromium/base/files/file_locking_unittest.cc | 6
-rw-r--r-- chromium/base/files/file_path.cc | 3
-rw-r--r-- chromium/base/files/file_path.h | 7
-rw-r--r-- chromium/base/files/file_path_watcher.h | 35
-rw-r--r-- chromium/base/files/file_path_watcher_fsevents.cc | 94
-rw-r--r-- chromium/base/files/file_path_watcher_fsevents.h | 9
-rw-r--r-- chromium/base/files/file_path_watcher_kqueue.cc | 58
-rw-r--r-- chromium/base/files/file_path_watcher_kqueue.h | 15
-rw-r--r-- chromium/base/files/file_path_watcher_linux.cc | 95
-rw-r--r-- chromium/base/files/file_path_watcher_mac.cc | 19
-rw-r--r-- chromium/base/files/file_path_watcher_stub.cc | 12
-rw-r--r-- chromium/base/files/file_path_watcher_win.cc | 71
-rw-r--r-- chromium/base/files/file_posix.cc | 2
-rw-r--r-- chromium/base/files/file_unittest.cc | 155
-rw-r--r-- chromium/base/files/file_util_posix.cc | 5
-rw-r--r-- chromium/base/files/file_util_proxy.cc | 24
-rw-r--r-- chromium/base/files/file_util_proxy.h | 8
-rw-r--r-- chromium/base/files/file_util_win.cc | 5
-rw-r--r-- chromium/base/files/file_win.cc | 8
-rw-r--r-- chromium/base/files/memory_mapped_file_posix.cc | 9
-rw-r--r-- chromium/base/i18n/character_encoding.cc | 42
-rw-r--r-- chromium/base/i18n/character_encoding.h | 20
-rw-r--r-- chromium/base/i18n/character_encoding_unittest.cc | 23
-rw-r--r-- chromium/base/i18n/rtl.h | 2
-rw-r--r-- chromium/base/i18n/time_formatting.cc | 57
-rw-r--r-- chromium/base/i18n/time_formatting.h | 29
-rw-r--r-- chromium/base/i18n/time_formatting_unittest.cc | 101
-rw-r--r-- chromium/base/id_map.h | 49
-rw-r--r-- chromium/base/id_map_unittest.cc | 70
-rw-r--r-- chromium/base/json/json_parser.cc | 24
-rw-r--r-- chromium/base/json/json_parser_unittest.cc | 6
-rw-r--r-- chromium/base/json/json_reader_unittest.cc | 58
-rw-r--r-- chromium/base/json/json_value_converter.h | 144
-rw-r--r-- chromium/base/json/json_value_converter_unittest.cc | 11
-rw-r--r-- chromium/base/json/json_value_serializer_unittest.cc | 4
-rw-r--r-- chromium/base/json/json_writer.cc | 20
-rw-r--r-- chromium/base/logging.cc | 13
-rw-r--r-- chromium/base/logging.h | 105
-rw-r--r-- chromium/base/logging_unittest.cc | 91
-rw-r--r-- chromium/base/mac/scoped_authorizationref.h | 2
-rw-r--r-- chromium/base/memory/discardable_memory_allocator.cc | 7
-rw-r--r-- chromium/base/memory/linked_ptr.h | 2
-rw-r--r-- chromium/base/memory/memory_pressure_monitor.h | 2
-rw-r--r-- chromium/base/memory/memory_pressure_monitor_chromeos.cc | 2
-rw-r--r-- chromium/base/memory/memory_pressure_monitor_chromeos.h | 2
-rw-r--r-- chromium/base/memory/memory_pressure_monitor_mac.cc | 79
-rw-r--r-- chromium/base/memory/memory_pressure_monitor_mac.h | 5
-rw-r--r-- chromium/base/memory/memory_pressure_monitor_win.cc | 2
-rw-r--r-- chromium/base/memory/memory_pressure_monitor_win.h | 2
-rw-r--r-- chromium/base/memory/ref_counted.h | 3
-rw-r--r-- chromium/base/memory/ref_counted_delete_on_message_loop.h | 73
-rw-r--r-- chromium/base/memory/ref_counted_delete_on_sequence.h | 70
-rw-r--r-- chromium/base/memory/scoped_vector_unittest.cc | 2
-rw-r--r-- chromium/base/memory/shared_memory.h | 22
-rw-r--r-- chromium/base/memory/shared_memory_handle.h | 55
-rw-r--r-- chromium/base/memory/shared_memory_handle_mac.cc | 145
-rw-r--r-- chromium/base/memory/shared_memory_helper.cc | 98
-rw-r--r-- chromium/base/memory/shared_memory_helper.h | 33
-rw-r--r-- chromium/base/memory/shared_memory_mac.cc | 165
-rw-r--r-- chromium/base/memory/shared_memory_posix.cc | 123
-rw-r--r-- chromium/base/memory/shared_memory_unittest.cc | 23
-rw-r--r-- chromium/base/message_loop/message_loop.cc | 8
-rw-r--r-- chromium/base/message_loop/message_loop.h | 4
-rw-r--r-- chromium/base/message_loop/message_loop_unittest.cc | 3
-rw-r--r-- chromium/base/message_loop/message_pump_default.cc | 39
-rw-r--r-- chromium/base/message_loop/message_pump_glib.h | 2
-rw-r--r-- chromium/base/message_loop/message_pump_win.cc | 137
-rw-r--r-- chromium/base/message_loop/message_pump_win.h | 33
-rw-r--r-- chromium/base/metrics/field_trial.cc | 620
-rw-r--r-- chromium/base/metrics/field_trial.h | 159
-rw-r--r-- chromium/base/metrics/field_trial_param_associator.cc | 27
-rw-r--r-- chromium/base/metrics/field_trial_param_associator.h | 18
-rw-r--r-- chromium/base/metrics/field_trial_unittest.cc | 189
-rw-r--r-- chromium/base/metrics/histogram.h | 3
-rw-r--r-- chromium/base/metrics/histogram_base.cc | 6
-rw-r--r-- chromium/base/metrics/histogram_macros.h | 33
-rw-r--r-- chromium/base/metrics/histogram_samples.h | 21
-rw-r--r-- chromium/base/metrics/histogram_snapshot_manager.cc | 2
-rw-r--r-- chromium/base/metrics/persistent_histogram_allocator.cc | 107
-rw-r--r-- chromium/base/metrics/persistent_histogram_allocator.h | 8
-rw-r--r-- chromium/base/metrics/persistent_memory_allocator.cc | 107
-rw-r--r-- chromium/base/metrics/persistent_memory_allocator.h | 280
-rw-r--r-- chromium/base/metrics/persistent_memory_allocator_unittest.cc | 75
-rw-r--r-- chromium/base/metrics/persistent_sample_map.cc | 62
-rw-r--r-- chromium/base/metrics/persistent_sample_map_unittest.cc | 18
-rw-r--r-- chromium/base/metrics/sample_vector_unittest.cc | 26
-rw-r--r-- chromium/base/metrics/statistics_recorder.cc | 146
-rw-r--r-- chromium/base/metrics/statistics_recorder.h | 9
-rw-r--r-- chromium/base/numerics/safe_conversions.h | 249
-rw-r--r-- chromium/base/numerics/safe_conversions_impl.h | 682
-rw-r--r-- chromium/base/numerics/safe_math.h | 531
-rw-r--r-- chromium/base/numerics/safe_math_impl.h | 1022
-rw-r--r-- chromium/base/numerics/safe_numerics_unittest.cc | 639
-rw-r--r-- chromium/base/numerics/saturated_arithmetic.h | 101
-rw-r--r-- chromium/base/numerics/saturated_arithmetic_arm.h | 102
-rw-r--r-- chromium/base/numerics/saturated_arithmetic_unittest.cc | 141
-rw-r--r-- chromium/base/observer_list_threadsafe.h | 28
-rw-r--r-- chromium/base/pickle.cc | 1
-rw-r--r-- chromium/base/posix/global_descriptors.h | 11
-rw-r--r-- chromium/base/post_task_and_reply_with_result_internal.h | 35
-rw-r--r-- chromium/base/power_monitor/power_monitor_device_source.cc | 4
-rw-r--r-- chromium/base/power_monitor/power_monitor_device_source.h | 2
-rw-r--r-- chromium/base/process/kill_win.cc | 79
-rw-r--r-- chromium/base/process/process.h | 4
-rw-r--r-- chromium/base/process/process_metrics.h | 3
-rw-r--r-- chromium/base/process/process_metrics_win.cc | 116
-rw-r--r-- chromium/base/process/process_posix.cc | 4
-rw-r--r-- chromium/base/process/process_util_unittest.cc | 21
-rw-r--r-- chromium/base/process/process_win.cc | 4
-rw-r--r-- chromium/base/profiler/native_stack_sampler.h | 10
-rw-r--r-- chromium/base/profiler/native_stack_sampler_posix.cc | 1
-rw-r--r-- chromium/base/profiler/native_stack_sampler_win.cc | 43
-rw-r--r-- chromium/base/profiler/stack_sampling_profiler.cc | 77
-rw-r--r-- chromium/base/profiler/stack_sampling_profiler.h | 49
-rw-r--r-- chromium/base/profiler/stack_sampling_profiler_unittest.cc | 118
-rw-r--r-- chromium/base/run_loop.cc | 4
-rw-r--r-- chromium/base/run_loop.h | 3
-rw-r--r-- chromium/base/scoped_generic.h | 8
-rw-r--r-- chromium/base/stl_util.h | 40
-rw-r--r-- chromium/base/strings/string_number_conversions.cc | 4
-rw-r--r-- chromium/base/strings/utf_string_conversions.cc | 4
-rw-r--r-- chromium/base/supports_user_data.cc | 17
-rw-r--r-- chromium/base/supports_user_data.h | 16
-rw-r--r-- chromium/base/synchronization/spin_lock.cc | 82
-rw-r--r-- chromium/base/synchronization/spin_lock.h | 52
-rw-r--r-- chromium/base/synchronization/waitable_event.h | 14
-rw-r--r-- chromium/base/synchronization/waitable_event_posix.cc | 15
-rw-r--r-- chromium/base/synchronization/waitable_event_unittest.cc | 57
-rw-r--r-- chromium/base/synchronization/waitable_event_win.cc | 91
-rw-r--r-- chromium/base/sys_byteorder.h | 16
-rw-r--r-- chromium/base/sys_byteorder_unittest.cc | 20
-rw-r--r-- chromium/base/sys_info.h | 12
-rw-r--r-- chromium/base/sys_info_chromeos.cc | 10
-rw-r--r-- chromium/base/sys_info_unittest.cc | 10
-rw-r--r-- chromium/base/syslog_logging.cc | 27
-rw-r--r-- chromium/base/syslog_logging.h | 5
-rw-r--r-- chromium/base/task/cancelable_task_tracker.cc | 41
-rw-r--r-- chromium/base/task/cancelable_task_tracker.h | 28
-rw-r--r-- chromium/base/task/cancelable_task_tracker_unittest.cc | 7
-rw-r--r-- chromium/base/task_runner_util.h | 27
-rw-r--r-- chromium/base/task_scheduler/post_task.cc | 18
-rw-r--r-- chromium/base/task_scheduler/post_task.h | 68
-rw-r--r-- chromium/base/task_scheduler/scheduler_worker.cc | 45
-rw-r--r-- chromium/base/task_scheduler/scheduler_worker.h | 24
-rw-r--r-- chromium/base/task_scheduler/scheduler_worker_params.h | 24
-rw-r--r-- chromium/base/task_scheduler/scheduler_worker_pool_impl.cc | 93
-rw-r--r-- chromium/base/task_scheduler/scheduler_worker_pool_impl.h | 29
-rw-r--r-- chromium/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc | 95
-rw-r--r-- chromium/base/task_scheduler/scheduler_worker_pool_params.cc | 10
-rw-r--r-- chromium/base/task_scheduler/scheduler_worker_pool_params.h | 40
-rw-r--r-- chromium/base/task_scheduler/scheduler_worker_stack_unittest.cc | 5
-rw-r--r-- chromium/base/task_scheduler/scheduler_worker_unittest.cc | 143
-rw-r--r-- chromium/base/task_scheduler/scoped_set_task_priority_for_current_thread.cc | 41
-rw-r--r-- chromium/base/task_scheduler/scoped_set_task_priority_for_current_thread.h | 36
-rw-r--r-- chromium/base/task_scheduler/scoped_set_task_priority_for_current_thread_unittest.cc | 26
-rw-r--r-- chromium/base/task_scheduler/sequence_unittest.cc | 2
-rw-r--r-- chromium/base/task_scheduler/task.cc | 2
-rw-r--r-- chromium/base/task_scheduler/task.h | 2
-rw-r--r-- chromium/base/task_scheduler/task_scheduler.cc | 16
-rw-r--r-- chromium/base/task_scheduler/task_scheduler.h | 36
-rw-r--r-- chromium/base/task_scheduler/task_scheduler_impl.cc | 16
-rw-r--r-- chromium/base/task_scheduler/task_scheduler_impl.h | 11
-rw-r--r-- chromium/base/task_scheduler/task_scheduler_impl_unittest.cc | 89
-rw-r--r-- chromium/base/task_scheduler/task_tracker.cc | 67
-rw-r--r-- chromium/base/task_scheduler/task_tracker.h | 16
-rw-r--r-- chromium/base/task_scheduler/task_tracker_unittest.cc | 134
-rw-r--r-- chromium/base/task_scheduler/task_traits.cc | 15
-rw-r--r-- chromium/base/task_scheduler/task_traits.h | 59
-rw-r--r-- chromium/base/task_scheduler/task_traits_unittest.cc | 32
-rw-r--r-- chromium/base/template_util.h | 67
-rw-r--r-- chromium/base/test/BUILD.gn | 53
-rw-r--r-- chromium/base/third_party/libevent/BUILD.gn | 31
-rw-r--r-- chromium/base/third_party/symbolize/symbolize.cc | 7
-rw-r--r-- chromium/base/threading/sequenced_task_runner_handle.cc | 43
-rw-r--r-- chromium/base/threading/sequenced_worker_pool.cc | 230
-rw-r--r-- chromium/base/threading/sequenced_worker_pool.h | 42
-rw-r--r-- chromium/base/threading/sequenced_worker_pool_unittest.cc | 16
-rw-r--r-- chromium/base/threading/thread.cc | 7
-rw-r--r-- chromium/base/threading/thread_restrictions.h | 22
-rw-r--r-- chromium/base/threading/worker_pool_posix.cc | 26
-rw-r--r-- chromium/base/threading/worker_pool_posix.h | 5
-rw-r--r-- chromium/base/threading/worker_pool_posix_unittest.cc | 6
-rw-r--r-- chromium/base/time/time.cc | 14
-rw-r--r-- chromium/base/time/time.h | 38
-rw-r--r-- chromium/base/time/time_posix.cc | 32
-rw-r--r-- chromium/base/time/time_unittest.cc | 11
-rw-r--r-- chromium/base/timer/timer.cc | 4
-rw-r--r-- chromium/base/timer/timer.h | 4
-rw-r--r-- chromium/base/timer/timer_unittest.cc | 511
-rw-r--r-- chromium/base/trace_event/category_registry.cc | 50
-rw-r--r-- chromium/base/trace_event/category_registry.h | 38
-rw-r--r-- chromium/base/trace_event/event_name_filter.cc | 26
-rw-r--r-- chromium/base/trace_event/event_name_filter.h | 46
-rw-r--r-- chromium/base/trace_event/event_name_filter_unittest.cc | 41
-rw-r--r-- chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc | 27
-rw-r--r-- chromium/base/trace_event/heap_profiler_allocation_context_tracker.h | 9
-rw-r--r-- chromium/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc | 64
-rw-r--r-- chromium/base/trace_event/heap_profiler_allocation_register.cc | 8
-rw-r--r-- chromium/base/trace_event/heap_profiler_allocation_register.h | 15
-rw-r--r-- chromium/base/trace_event/heap_profiler_event_filter.cc | 67
-rw-r--r-- chromium/base/trace_event/heap_profiler_event_filter.h | 40
-rw-r--r-- chromium/base/trace_event/heap_profiler_stack_frame_deduplicator.cc | 20
-rw-r--r-- chromium/base/trace_event/heap_profiler_stack_frame_deduplicator.h | 2
-rw-r--r-- chromium/base/trace_event/heap_profiler_type_name_deduplicator.cc | 8
-rw-r--r-- chromium/base/trace_event/malloc_dump_provider.cc | 114
-rw-r--r-- chromium/base/trace_event/memory_allocator_dump.h | 5
-rw-r--r-- chromium/base/trace_event/memory_dump_manager.cc | 131
-rw-r--r-- chromium/base/trace_event/memory_dump_manager.h | 26
-rw-r--r-- chromium/base/trace_event/memory_dump_manager_unittest.cc | 135
-rw-r--r-- chromium/base/trace_event/memory_dump_provider.h | 20
-rw-r--r-- chromium/base/trace_event/memory_dump_request_args.cc | 17
-rw-r--r-- chromium/base/trace_event/memory_dump_request_args.h | 18
-rw-r--r-- chromium/base/trace_event/memory_dump_session_state.cc | 6
-rw-r--r-- chromium/base/trace_event/memory_dump_session_state.h | 5
-rw-r--r-- chromium/base/trace_event/memory_infra_background_whitelist.cc | 9
-rw-r--r-- chromium/base/trace_event/memory_usage_estimator.h | 27
-rw-r--r-- chromium/base/trace_event/trace_category_unittest.cc | 23
-rw-r--r-- chromium/base/trace_event/trace_config.cc | 59
-rw-r--r-- chromium/base/trace_event/trace_config.h | 8
-rw-r--r-- chromium/base/trace_event/trace_config_memory_test_util.h | 177
-rw-r--r-- chromium/base/trace_event/trace_config_unittest.cc | 55
-rw-r--r-- chromium/base/trace_event/trace_event.h | 236
-rw-r--r-- chromium/base/trace_event/trace_event_argument.cc | 32
-rw-r--r-- chromium/base/trace_event/trace_event_filter.cc | 17
-rw-r--r-- chromium/base/trace_event/trace_event_filter.h | 51
-rw-r--r-- chromium/base/trace_event/trace_event_filter_test_utils.cc | 61
-rw-r--r-- chromium/base/trace_event/trace_event_filter_test_utils.h | 53
-rw-r--r-- chromium/base/trace_event/trace_event_impl.cc | 46
-rw-r--r-- chromium/base/trace_event/trace_event_memory_overhead.cc | 16
-rw-r--r-- chromium/base/trace_event/trace_event_unittest.cc | 107
-rw-r--r-- chromium/base/trace_event/trace_log.cc | 144
-rw-r--r-- chromium/base/trace_event/trace_log.h | 28
-rw-r--r-- chromium/base/trace_event/trace_log_constants.cc | 6
-rw-r--r-- chromium/base/tracked_objects.cc | 338
-rw-r--r-- chromium/base/tracked_objects.h | 286
-rw-r--r-- chromium/base/tracked_objects_unittest.cc | 262
-rw-r--r-- chromium/base/tuple.h | 53
-rw-r--r-- chromium/base/values.cc | 108
-rw-r--r-- chromium/base/values.h | 34
-rw-r--r-- chromium/base/values_unittest.cc | 32
-rw-r--r-- chromium/base/win/BUILD.gn | 5
-rw-r--r-- chromium/base/win/object_watcher.cc | 12
-rw-r--r-- chromium/base/win/object_watcher.h | 23
-rw-r--r-- chromium/base/win/scoped_bstr.h | 2
-rw-r--r-- chromium/base/win/scoped_comptr.h | 2
298 files changed, 16593 insertions, 5453 deletions
diff --git a/chromium/base/BUILD.gn b/chromium/base/BUILD.gn
index 3754be5f383..bdecf4a2575 100644
--- a/chromium/base/BUILD.gn
+++ b/chromium/base/BUILD.gn
@@ -19,6 +19,7 @@
import("//build/buildflag_header.gni")
import("//build/config/allocator.gni")
+import("//build/config/arm.gni")
import("//build/config/chromecast_build.gni")
import("//build/config/clang/clang.gni")
import("//build/config/compiler/compiler.gni")
@@ -35,6 +36,14 @@ declare_args() {
# See //base/build_time.cc and //build/write_build_date_header.py for more
# details and the expected format.
override_build_date = "N/A"
+
+ # Turn on memory profiling in the task profiler when the heap shim is
+ # available, except for official builds for now.
+ enable_memory_task_profiler =
+ use_experimental_allocator_shim && (!is_official_build || is_syzyasan)
+
+ # Partition alloc is included by default except iOS.
+ use_partition_alloc = !is_ios
}
if (is_android) {
@@ -140,6 +149,8 @@ component("base") {
"allocator/allocator_check.h",
"allocator/allocator_extension.cc",
"allocator/allocator_extension.h",
+ "allocator/allocator_shim.h",
+ "allocator/oom.h",
"android/animation_frame_time_histogram.cc",
"android/animation_frame_time_histogram.h",
"android/apk_assets.cc",
@@ -161,6 +172,7 @@ component("base") {
"android/context_utils.cc",
"android/context_utils.h",
"android/cpu_features.cc",
+ "android/cpu_features.h",
"android/cxa_demangle_stub.cc",
"android/early_trace_event_binding.cc",
"android/early_trace_event_binding.h",
@@ -168,8 +180,6 @@ component("base") {
"android/event_log.h",
"android/field_trial_list.cc",
"android/field_trial_list.h",
- "android/fifo_utils.cc",
- "android/fifo_utils.h",
"android/important_file_writer_android.cc",
"android/important_file_writer_android.h",
"android/java_handler_thread.cc",
@@ -181,6 +191,8 @@ component("base") {
"android/jni_android.h",
"android/jni_array.cc",
"android/jni_array.h",
+ "android/jni_generator/jni_generator_helper.h",
+ "android/jni_int_wrapper.h",
"android/jni_registrar.cc",
"android/jni_registrar.h",
"android/jni_string.cc",
@@ -212,6 +224,8 @@ component("base") {
"android/statistics_recorder_android.h",
"android/sys_utils.cc",
"android/sys_utils.h",
+ "android/time_utils.cc",
+ "android/time_utils.h",
"android/trace_event_binding.cc",
"android/trace_event_binding.h",
"android/unguessable_token_android.cc",
@@ -221,6 +235,7 @@ component("base") {
"atomic_ref_count.h",
"atomic_sequence_num.h",
"atomicops.h",
+ "atomicops_internals_atomicword_compat.h",
"atomicops_internals_portable.h",
"atomicops_internals_x86_msvc.h",
"auto_reset.h",
@@ -243,10 +258,12 @@ component("base") {
"build_time.cc",
"build_time.h",
"callback.h",
+ "callback_forward.h",
"callback_helpers.cc",
"callback_helpers.h",
"callback_internal.cc",
"callback_internal.h",
+ "callback_list.h",
"cancelable_callback.h",
"command_line.cc",
"command_line.h",
@@ -255,7 +272,6 @@ component("base") {
"containers/hash_tables.h",
"containers/linked_list.h",
"containers/mru_cache.h",
- "containers/scoped_ptr_hash_map.h",
"containers/small_map.h",
"containers/stack_container.h",
"cpu.cc",
@@ -304,6 +320,7 @@ component("base") {
"deferred_sequenced_task_runner.h",
"environment.cc",
"environment.h",
+ "event_types.h",
"feature_list.cc",
"feature_list.h",
"file_descriptor_posix.h",
@@ -316,6 +333,7 @@ component("base") {
"files/dir_reader_linux.h",
"files/dir_reader_posix.h",
"files/file.cc",
+ "files/file.h",
"files/file_descriptor_watcher_posix.cc",
"files/file_descriptor_watcher_posix.h",
"files/file_enumerator.cc",
@@ -492,7 +510,7 @@ component("base") {
"memory/raw_scoped_refptr_mismatch_checker.h",
"memory/ref_counted.cc",
"memory/ref_counted.h",
- "memory/ref_counted_delete_on_message_loop.h",
+ "memory/ref_counted_delete_on_sequence.h",
"memory/ref_counted_memory.cc",
"memory/ref_counted_memory.h",
"memory/scoped_policy.h",
@@ -502,6 +520,8 @@ component("base") {
"memory/shared_memory_handle.h",
"memory/shared_memory_handle_mac.cc",
"memory/shared_memory_handle_win.cc",
+ "memory/shared_memory_helper.cc",
+ "memory/shared_memory_helper.h",
"memory/shared_memory_mac.cc",
"memory/shared_memory_nacl.cc",
"memory/shared_memory_posix.cc",
@@ -532,6 +552,7 @@ component("base") {
"message_loop/message_pump_mac.mm",
"message_loop/message_pump_win.cc",
"message_loop/message_pump_win.h",
+ "message_loop/timer_slack.h",
"metrics/bucket_ranges.cc",
"metrics/bucket_ranges.h",
"metrics/field_trial.cc",
@@ -548,6 +569,8 @@ component("base") {
"metrics/histogram_functions.cc",
"metrics/histogram_functions.h",
"metrics/histogram_macros.h",
+ "metrics/histogram_macros_internal.h",
+ "metrics/histogram_macros_local.h",
"metrics/histogram_samples.cc",
"metrics/histogram_samples.h",
"metrics/histogram_snapshot_manager.cc",
@@ -585,6 +608,8 @@ component("base") {
"numerics/safe_conversions_impl.h",
"numerics/safe_math.h",
"numerics/safe_math_impl.h",
+ "numerics/saturated_arithmetic.h",
+ "numerics/saturated_arithmetic_arm.h",
"observer_list.h",
"observer_list_threadsafe.h",
"optional.h",
@@ -600,12 +625,14 @@ component("base") {
"pickle.h",
"posix/eintr_wrapper.h",
"posix/file_descriptor_shuffle.cc",
+ "posix/file_descriptor_shuffle.h",
"posix/global_descriptors.cc",
"posix/global_descriptors.h",
"posix/safe_strerror.cc",
"posix/safe_strerror.h",
"posix/unix_domain_socket_linux.cc",
"posix/unix_domain_socket_linux.h",
+ "post_task_and_reply_with_result_internal.h",
"power_monitor/power_monitor.cc",
"power_monitor/power_monitor.h",
"power_monitor/power_monitor_device_source.cc",
@@ -635,6 +662,7 @@ component("base") {
"process/port_provider_mac.h",
"process/process.h",
"process/process_handle.cc",
+ "process/process_handle.h",
#"process/process_handle_freebsd.cc", # Unused in Chromium build.
"process/process_handle_linux.cc",
@@ -690,6 +718,7 @@ component("base") {
"rand_util_win.cc",
"run_loop.cc",
"run_loop.h",
+ "scoped_clear_errno.h",
"scoped_generic.h",
"scoped_native_library.cc",
"scoped_native_library.h",
@@ -773,6 +802,7 @@ component("base") {
"sys_info.h",
"sys_info_android.cc",
"sys_info_chromeos.cc",
+ "sys_info_internal.h",
"syslog_logging.cc",
"syslog_logging.h",
@@ -811,6 +841,8 @@ component("base") {
"task_scheduler/scheduler_worker_pool_params.h",
"task_scheduler/scheduler_worker_stack.cc",
"task_scheduler/scheduler_worker_stack.h",
+ "task_scheduler/scoped_set_task_priority_for_current_thread.cc",
+ "task_scheduler/scoped_set_task_priority_for_current_thread.h",
"task_scheduler/sequence.cc",
"task_scheduler/sequence.h",
"task_scheduler/sequence_sort_key.cc",
@@ -828,6 +860,7 @@ component("base") {
"task_scheduler/task_traits.cc",
"task_scheduler/task_traits.h",
"template_util.h",
+ "test/malloc_wrapper.h",
"third_party/dmg_fp/dmg_fp.h",
"third_party/dmg_fp/dtoa_wrapper.cc",
"third_party/dmg_fp/g_fmt.cc",
@@ -836,6 +869,7 @@ component("base") {
"third_party/nspr/prtime.cc",
"third_party/nspr/prtime.h",
"third_party/superfasthash/superfasthash.c",
+ "third_party/valgrind/memcheck.h",
"threading/non_thread_safe.h",
"threading/non_thread_safe_impl.cc",
"threading/non_thread_safe_impl.h",
@@ -909,6 +943,8 @@ component("base") {
"trace_event/category_registry.cc",
"trace_event/category_registry.h",
"trace_event/common/trace_event_common.h",
+ "trace_event/event_name_filter.cc",
+ "trace_event/event_name_filter.h",
"trace_event/heap_profiler.h",
"trace_event/heap_profiler_allocation_context.cc",
"trace_event/heap_profiler_allocation_context.h",
@@ -918,6 +954,8 @@ component("base") {
"trace_event/heap_profiler_allocation_register.h",
"trace_event/heap_profiler_allocation_register_posix.cc",
"trace_event/heap_profiler_allocation_register_win.cc",
+ "trace_event/heap_profiler_event_filter.cc",
+ "trace_event/heap_profiler_event_filter.h",
"trace_event/heap_profiler_heap_dump_writer.cc",
"trace_event/heap_profiler_heap_dump_writer.h",
"trace_event/heap_profiler_stack_frame_deduplicator.cc",
@@ -960,6 +998,8 @@ component("base") {
"trace_event/trace_event_argument.h",
"trace_event/trace_event_etw_export_win.cc",
"trace_event/trace_event_etw_export_win.h",
+ "trace_event/trace_event_filter.cc",
+ "trace_event/trace_event_filter.h",
"trace_event/trace_event_impl.cc",
"trace_event/trace_event_impl.h",
"trace_event/trace_event_memory_overhead.cc",
@@ -1066,7 +1106,7 @@ component("base") {
]
# Needed for <atomic> if using newer C++ library than sysroot
- if (!use_sysroot && (is_android || is_linux)) {
+ if (!use_sysroot && (is_android || (is_linux && !is_chromecast))) {
libs = [ "atomic" ]
}
@@ -1150,6 +1190,8 @@ component("base") {
"memory/discardable_memory_allocator.h",
"memory/discardable_shared_memory.cc",
"memory/discardable_shared_memory.h",
+ "memory/shared_memory_helper.cc",
+ "memory/shared_memory_helper.h",
"memory/shared_memory_posix.cc",
"native_library.cc",
"native_library_posix.cc",
@@ -1208,6 +1250,28 @@ component("base") {
"rand_util_nacl.cc",
"synchronization/read_write_lock_nacl.cc",
]
+
+ if (use_partition_alloc) {
+ # Add stuff that doesn't work in NaCl.
+ sources += [
+ # PartitionAlloc uses SpinLock, which doesn't work in NaCl (see below).
+ "allocator/partition_allocator/address_space_randomization.cc",
+ "allocator/partition_allocator/address_space_randomization.h",
+ "allocator/partition_allocator/page_allocator.cc",
+ "allocator/partition_allocator/page_allocator.h",
+ "allocator/partition_allocator/partition_alloc.cc",
+ "allocator/partition_allocator/partition_alloc.h",
+ ]
+ }
+ }
+
+ # SpinLock uses inline assembly that doesn't work on NaCl, and for which there
+ # is no code for ARMv6.
+ if (!is_nacl && (current_cpu != "arm" || arm_version >= 7)) {
+ sources += [
+ "synchronization/spin_lock.cc",
+ "synchronization/spin_lock.h",
+ ]
}
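For readers unfamiliar with the primitive gated by the block above: the sketch below is a minimal test-and-set spin lock built on std::atomic_flag. It is not Chromium's synchronization/spin_lock implementation; the real class additionally issues architecture-specific pause/yield instructions in its spin loop, which is exactly why it is excluded on NaCl and pre-ARMv7 ARM here.

// Minimal spin lock sketch (illustrative only, not the Chromium class).
#include <atomic>

class SpinLock {
 public:
  void Acquire() {
    // Spin until test_and_set() observes 'false', i.e. we took the lock.
    while (lock_.test_and_set(std::memory_order_acquire)) {
      // A real implementation would emit a CPU pause/yield hint here.
    }
  }
  void Release() { lock_.clear(std::memory_order_release); }

 private:
  std::atomic_flag lock_ = ATOMIC_FLAG_INIT;
};

int main() {
  SpinLock lock;
  lock.Acquire();
  lock.Release();
  return 0;
}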
# Windows.
@@ -1219,6 +1283,8 @@ component("base") {
]
sources -= [
+ "memory/shared_memory_helper.cc",
+ "memory/shared_memory_helper.h",
"message_loop/message_pump_libevent.cc",
"strings/string16.cc",
]
@@ -1316,7 +1382,10 @@ component("base") {
# Desktop Mac.
if (is_mac) {
- sources += [ "power_monitor/power_monitor_device_source_mac.mm" ]
+ sources += [
+ "mac/scoped_typeref.h",
+ "power_monitor/power_monitor_device_source_mac.mm",
+ ]
libs = [
"ApplicationServices.framework",
@@ -1494,7 +1563,10 @@ component("base") {
buildflag_header("debugging_flags") {
header = "debugging_flags.h"
header_dir = "base/debug"
- flags = [ "ENABLE_PROFILING=$enable_profiling" ]
+ flags = [
+ "ENABLE_PROFILING=$enable_profiling",
+ "ENABLE_MEMORY_TASK_PROFILER=$enable_memory_task_profiler",
+ ]
}
# This is the subset of files from base that should not be used with a dynamic
@@ -1538,6 +1610,8 @@ component("i18n") {
"i18n/case_conversion.h",
"i18n/char_iterator.cc",
"i18n/char_iterator.h",
+ "i18n/character_encoding.cc",
+ "i18n/character_encoding.h",
"i18n/encoding_detection.cc",
"i18n/encoding_detection.h",
"i18n/file_util_icu.cc",
@@ -1787,7 +1861,6 @@ test("base_unittests") {
"containers/hash_tables_unittest.cc",
"containers/linked_list_unittest.cc",
"containers/mru_cache_unittest.cc",
- "containers/scoped_ptr_hash_map_unittest.cc",
"containers/small_map_unittest.cc",
"containers/stack_container_unittest.cc",
"cpu_unittest.cc",
@@ -1822,6 +1895,7 @@ test("base_unittests") {
"i18n/break_iterator_unittest.cc",
"i18n/case_conversion_unittest.cc",
"i18n/char_iterator_unittest.cc",
+ "i18n/character_encoding_unittest.cc",
"i18n/file_util_icu_unittest.cc",
"i18n/icu_string_conversions_unittest.cc",
"i18n/message_formatter_unittest.cc",
@@ -1892,6 +1966,7 @@ test("base_unittests") {
"metrics/statistics_recorder_unittest.cc",
"native_library_unittest.cc",
"numerics/safe_numerics_unittest.cc",
+ "numerics/saturated_arithmetic_unittest.cc",
"observer_list_unittest.cc",
"optional_unittest.cc",
"os_compat_android_unittest.cc",
@@ -1954,18 +2029,22 @@ test("base_unittests") {
"task_scheduler/scheduler_worker_pool_impl_unittest.cc",
"task_scheduler/scheduler_worker_stack_unittest.cc",
"task_scheduler/scheduler_worker_unittest.cc",
+ "task_scheduler/scoped_set_task_priority_for_current_thread_unittest.cc",
"task_scheduler/sequence_sort_key_unittest.cc",
"task_scheduler/sequence_unittest.cc",
"task_scheduler/task_scheduler_impl_unittest.cc",
"task_scheduler/task_tracker_posix_unittest.cc",
"task_scheduler/task_tracker_unittest.cc",
+ "task_scheduler/task_traits_unittest.cc",
"task_scheduler/task_unittest.cc",
"task_scheduler/test_task_factory.cc",
"task_scheduler/test_task_factory.h",
"task_scheduler/test_utils.h",
"template_util_unittest.cc",
"test/histogram_tester_unittest.cc",
+ "test/mock_callback_unittest.cc",
"test/scoped_mock_time_message_loop_task_runner_unittest.cc",
+ "test/scoped_task_scheduler_unittest.cc",
"test/test_pending_task_unittest.cc",
"test/test_reg_util_win_unittest.cc",
"test/trace_event_analyzer_unittest.cc",
@@ -1993,6 +2072,7 @@ test("base_unittests") {
"timer/timer_unittest.cc",
"tools_sanity_unittest.cc",
"trace_event/blame_context_unittest.cc",
+ "trace_event/event_name_filter_unittest.cc",
"trace_event/heap_profiler_allocation_context_tracker_unittest.cc",
"trace_event/heap_profiler_allocation_register_unittest.cc",
"trace_event/heap_profiler_heap_dump_writer_unittest.cc",
@@ -2006,6 +2086,8 @@ test("base_unittests") {
"trace_event/trace_category_unittest.cc",
"trace_event/trace_config_unittest.cc",
"trace_event/trace_event_argument_unittest.cc",
+ "trace_event/trace_event_filter_test_utils.cc",
+ "trace_event/trace_event_filter_test_utils.h",
"trace_event/trace_event_synthetic_delay_unittest.cc",
"trace_event/trace_event_system_stats_monitor_unittest.cc",
"trace_event/trace_event_unittest.cc",
@@ -2081,10 +2163,15 @@ test("base_unittests") {
}
if (is_android) {
+ sources -= [
+ "process/process_unittest.cc",
+ "process/process_util_unittest.cc",
+ ]
deps += [
":base_java",
":base_java_unittest_support",
"//base/android/jni_generator:jni_generator_tests",
+ "//base/test:test_support_java",
]
}
@@ -2115,6 +2202,10 @@ test("base_unittests") {
# TODO(GYP): dep on copy_test_data_ios action.
}
+ if (use_partition_alloc) {
+ sources += [ "allocator/partition_allocator/partition_alloc_unittest.cc" ]
+ }
+
if (is_mac) {
libs = [
"CoreFoundation.framework",
@@ -2254,6 +2345,7 @@ if (is_android) {
"android/java/src/org/chromium/base/SysUtils.java",
"android/java/src/org/chromium/base/SystemMessageHandler.java",
"android/java/src/org/chromium/base/ThreadUtils.java",
+ "android/java/src/org/chromium/base/TimeUtils.java",
"android/java/src/org/chromium/base/TraceEvent.java",
"android/java/src/org/chromium/base/UnguessableToken.java",
"android/java/src/org/chromium/base/library_loader/LibraryLoader.java",
@@ -2328,6 +2420,7 @@ if (is_android) {
"android/java/src/org/chromium/base/SysUtils.java",
"android/java/src/org/chromium/base/SystemMessageHandler.java",
"android/java/src/org/chromium/base/ThreadUtils.java",
+ "android/java/src/org/chromium/base/TimeUtils.java",
"android/java/src/org/chromium/base/TraceEvent.java",
"android/java/src/org/chromium/base/UnguessableToken.java",
"android/java/src/org/chromium/base/VisibleForTesting.java",
@@ -2370,6 +2463,7 @@ if (is_android) {
deps = [
":base_java",
":base_java_test_support",
+ "//third_party/android_support_test_runner:runner_java",
]
java_files = [
"android/javatests/src/org/chromium/base/AdvancedMockContextTest.java",
diff --git a/chromium/base/OWNERS b/chromium/base/OWNERS
index b58c9bb15d4..06f165f68c6 100644
--- a/chromium/base/OWNERS
+++ b/chromium/base/OWNERS
@@ -17,11 +17,12 @@
# multiple consumers across the codebase, consider placing it in a new directory
# under components/ instead.
+danakj@chromium.org
+dcheng@chromium.org
+gab@chromium.org
mark@chromium.org
thakis@chromium.org
-danakj@chromium.org
thestig@chromium.org
-dcheng@chromium.org
# For Bind/Callback:
per-file bind*=tzik@chromium.org
diff --git a/chromium/base/allocator/BUILD.gn b/chromium/base/allocator/BUILD.gn
index 1f69f9538c7..401519662f3 100644
--- a/chromium/base/allocator/BUILD.gn
+++ b/chromium/base/allocator/BUILD.gn
@@ -216,17 +216,17 @@ if (use_allocator == "tcmalloc") {
"$tcmalloc_dir/src",
]
- configs -= [
- "//build/config/compiler:chromium_code",
-
- # The asm code in tcmalloc is not currently thumb compatible
- "//build/config/compiler:compiler_arm_thumb",
- ]
+ configs -= [ "//build/config/compiler:chromium_code" ]
configs += [
"//build/config/compiler:no_chromium_code",
":tcmalloc_flags",
]
+ # Thumb mode disabled due to bug in clang integrated assembler
+ # TODO(https://llvm.org/bugs/show_bug.cgi?id=31058)
+ configs -= [ "//build/config/compiler:compiler_arm_thumb" ]
+ configs += [ "//build/config/compiler:compiler_arm" ]
+
# TODO(crbug.com/633719) Make tcmalloc work with AFDO if possible.
configs -= [ "//build/config/compiler:afdo" ]
diff --git a/chromium/base/allocator/allocator_shim.cc b/chromium/base/allocator/allocator_shim.cc
index 95480ea4b6b..a9fc095b905 100644
--- a/chromium/base/allocator/allocator_shim.cc
+++ b/chromium/base/allocator/allocator_shim.cc
@@ -39,18 +39,6 @@ bool g_call_new_handler_on_malloc_failure = false;
subtle::Atomic32 g_new_handler_lock = 0;
#endif
-// In theory this should be just base::ThreadChecker. But we can't afford
-// the luxury of a LazyInstance<ThreadChecker> here as it would cause a new().
-bool CalledOnValidThread() {
- using subtle::Atomic32;
- const Atomic32 kInvalidTID = static_cast<Atomic32>(kInvalidThreadId);
- static Atomic32 g_tid = kInvalidTID;
- Atomic32 cur_tid = static_cast<Atomic32>(PlatformThread::CurrentId());
- Atomic32 prev_tid =
- subtle::NoBarrier_CompareAndSwap(&g_tid, kInvalidTID, cur_tid);
- return prev_tid == kInvalidTID || prev_tid == cur_tid;
-}
-
inline size_t GetCachedPageSize() {
static size_t pagesize = 0;
if (!pagesize)
@@ -112,25 +100,35 @@ void* UncheckedAlloc(size_t size) {
}
void InsertAllocatorDispatch(AllocatorDispatch* dispatch) {
- // Ensure this is always called on the same thread.
- DCHECK(CalledOnValidThread());
-
- dispatch->next = GetChainHead();
-
- // This function does not guarantee to be thread-safe w.r.t. concurrent
- // insertions, but still has to guarantee that all the threads always
- // see a consistent chain, hence the MemoryBarrier() below.
- // InsertAllocatorDispatch() is NOT a fastpath, as opposite to malloc(), so
- // we don't really want this to be a release-store with a corresponding
- // acquire-load during malloc().
- subtle::MemoryBarrier();
+ // Loop in case of (an unlikely) race on setting the list head.
+ size_t kMaxRetries = 7;
+ for (size_t i = 0; i < kMaxRetries; ++i) {
+ const AllocatorDispatch* chain_head = GetChainHead();
+ dispatch->next = chain_head;
+
+ // This function guarantees to be thread-safe w.r.t. concurrent
+ // insertions. It also has to guarantee that all the threads always
+ // see a consistent chain, hence the MemoryBarrier() below.
+ // InsertAllocatorDispatch() is NOT a fastpath, as opposed to malloc(), so
+ // we don't really want this to be a release-store with a corresponding
+ // acquire-load during malloc().
+ subtle::MemoryBarrier();
+ subtle::AtomicWord old_value =
+ reinterpret_cast<subtle::AtomicWord>(chain_head);
+ // Set the chain head to the new dispatch atomically. If we lose the race,
+ // the comparison will fail, and the new head of chain will be returned.
+ if (subtle::NoBarrier_CompareAndSwap(
+ &g_chain_head, old_value,
+ reinterpret_cast<subtle::AtomicWord>(dispatch)) == old_value) {
+ // Success.
+ return;
+ }
+ }
- subtle::NoBarrier_Store(&g_chain_head,
- reinterpret_cast<subtle::AtomicWord>(dispatch));
+ CHECK(false); // Too many retries, this shouldn't happen.
}
void RemoveAllocatorDispatchForTesting(AllocatorDispatch* dispatch) {
- DCHECK(CalledOnValidThread());
DCHECK_EQ(GetChainHead(), dispatch);
subtle::NoBarrier_Store(&g_chain_head,
reinterpret_cast<subtle::AtomicWord>(dispatch->next));
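The new InsertAllocatorDispatch() above is a bounded-retry, lock-free push onto the head of a singly linked dispatch chain. The sketch below shows the same pattern using std::atomic instead of base/atomicops; the names (Node, g_head, PushFront) are illustrative, and, unlike the shim, it retries until it succeeds rather than CHECK-failing after a fixed number of attempts.

// Lock-free head insertion sketch (illustrative, not the Chromium API).
#include <atomic>
#include <cassert>

struct Node {
  int value;
  const Node* next;
};

std::atomic<const Node*> g_head{nullptr};

// Inserts |node| at the head of the chain. If another thread wins the race,
// compare_exchange_weak reloads the current head into |expected| and we retry.
void PushFront(Node* node) {
  const Node* expected = g_head.load(std::memory_order_acquire);
  do {
    node->next = expected;
  } while (!g_head.compare_exchange_weak(expected, node,
                                         std::memory_order_release,
                                         std::memory_order_acquire));
}

int main() {
  Node a{1, nullptr}, b{2, nullptr};
  PushFront(&a);
  PushFront(&b);
  assert(g_head.load() == &b && g_head.load()->next == &a);
  return 0;
}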
diff --git a/chromium/base/allocator/allocator_shim.h b/chromium/base/allocator/allocator_shim.h
index aca13d2bcbc..8fd060fca6a 100644
--- a/chromium/base/allocator/allocator_shim.h
+++ b/chromium/base/allocator/allocator_shim.h
@@ -86,10 +86,10 @@ BASE_EXPORT void SetCallNewHandlerOnMallocFailure(bool value);
// regardless of SetCallNewHandlerOnMallocFailure().
BASE_EXPORT void* UncheckedAlloc(size_t size);
-// Inserts |dispatch| in front of the allocator chain. This method is NOT
+// Inserts |dispatch| in front of the allocator chain. This method is
// thread-safe w.r.t concurrent invocations of InsertAllocatorDispatch().
-// The callers have the responsibility of linearizing the changes to the chain
-// (or more likely call these always on the same thread).
+// The callers have responsibility for inserting a single dispatch no more
+// than once.
BASE_EXPORT void InsertAllocatorDispatch(AllocatorDispatch* dispatch);
// Test-only. Rationale: (1) lack of use cases; (2) dealing safely with a
diff --git a/chromium/base/allocator/allocator_shim_override_ucrt_symbols_win.h b/chromium/base/allocator/allocator_shim_override_ucrt_symbols_win.h
index 2aa872f1b3f..b8d0d91ff57 100644
--- a/chromium/base/allocator/allocator_shim_override_ucrt_symbols_win.h
+++ b/chromium/base/allocator/allocator_shim_override_ucrt_symbols_win.h
@@ -63,6 +63,30 @@ __declspec(restrict) void* calloc(size_t n, size_t size) {
return ShimCalloc(n, size);
}
+// The symbols
+// * __acrt_heap
+// * __acrt_initialize_heap
+// * __acrt_uninitialize_heap
+// * _get_heap_handle
+// must be overridden all or none, as they are otherwise supplied
+// by heap_handle.obj in the ucrt.lib file.
+HANDLE __acrt_heap = nullptr;
+
+bool __acrt_initialize_heap() {
+ __acrt_heap = ::HeapCreate(0, 0, 0);
+ return true;
+}
+
+bool __acrt_uninitialize_heap() {
+ ::HeapDestroy(__acrt_heap);
+ __acrt_heap = nullptr;
+ return true;
+}
+
+intptr_t _get_heap_handle(void) {
+ return reinterpret_cast<intptr_t>(__acrt_heap);
+}
+
// The default dispatch translation unit has to define also the following
// symbols (unless they are ultimately routed to the system symbols):
// void malloc_stats(void);
diff --git a/chromium/base/allocator/allocator_shim_unittest.cc b/chromium/base/allocator/allocator_shim_unittest.cc
index d67183ea943..e45b03dc539 100644
--- a/chromium/base/allocator/allocator_shim_unittest.cc
+++ b/chromium/base/allocator/allocator_shim_unittest.cc
@@ -12,6 +12,7 @@
#include <new>
#include <vector>
+#include "base/allocator/features.h"
#include "base/atomicops.h"
#include "base/process/process_metrics.h"
#include "base/synchronization/waitable_event.h"
@@ -20,7 +21,9 @@
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
-#if !defined(OS_WIN)
+#if defined(OS_WIN)
+#include <windows.h>
+#else
#include <unistd.h>
#endif
@@ -329,6 +332,12 @@ TEST_F(AllocatorShimTest, NewHandlerConcurrency) {
ASSERT_EQ(kNumThreads, GetNumberOfNewHandlerCalls());
}
+#if defined(OS_WIN) && BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
+TEST_F(AllocatorShimTest, ShimReplacesCRTHeapWhenEnabled) {
+ ASSERT_NE(::GetProcessHeap(), reinterpret_cast<HANDLE>(_get_heap_handle()));
+}
+#endif // defined(OS_WIN) && BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
+
} // namespace
} // namespace allocator
} // namespace base
diff --git a/chromium/base/allocator/oom.h b/chromium/base/allocator/oom.h
new file mode 100644
index 00000000000..68dfae76b88
--- /dev/null
+++ b/chromium/base/allocator/oom.h
@@ -0,0 +1,26 @@
+// Copyright (c) 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_OOM_H
+#define BASE_ALLOCATOR_OOM_H
+
+#include "base/logging.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#endif
+
+// OOM_CRASH() - Specialization of IMMEDIATE_CRASH which will raise a custom
+// exception on Windows to signal this is OOM and not a normal assert.
+#if defined(OS_WIN)
+#define OOM_CRASH() \
+ do { \
+ ::RaiseException(0xE0000008, EXCEPTION_NONCONTINUABLE, 0, nullptr); \
+ IMMEDIATE_CRASH(); \
+ } while (0)
+#else
+#define OOM_CRASH() IMMEDIATE_CRASH()
+#endif
+
+#endif // BASE_ALLOCATOR_OOM_H
diff --git a/chromium/base/allocator/partition_allocator/OWNERS b/chromium/base/allocator/partition_allocator/OWNERS
new file mode 100644
index 00000000000..374d1aed926
--- /dev/null
+++ b/chromium/base/allocator/partition_allocator/OWNERS
@@ -0,0 +1,5 @@
+haraken@chromium.org
+palmer@chromium.org
+
+# TEAM: platform-architecture-dev@chromium.org
+# COMPONENT: Blink>MemoryAllocator
diff --git a/chromium/base/allocator/partition_allocator/PartitionAlloc.md b/chromium/base/allocator/partition_allocator/PartitionAlloc.md
new file mode 100644
index 00000000000..dbcc0061c2b
--- /dev/null
+++ b/chromium/base/allocator/partition_allocator/PartitionAlloc.md
@@ -0,0 +1,97 @@
+# PartitionAlloc Design
+
+This document explains a high-level design of PartitionAlloc.
+If you're interested in its in-depth implementation, see comments
+in PartitionAlloc.h.
+
+[TOC]
+
+## Overview
+
+PartitionAlloc is a memory allocator optimized for performance and security
+in Blink. All objects in Blink are expected to be allocated with
+PartitionAlloc or Oilpan (but not yet done).
+
+## Partitions and buckets
+
+PartitionAlloc has three partitions. A partition is a heap that contains
+certain types of objects. Specifically, PartitionAlloc allocates objects
+on either of the following three partitions depending on their types:
+
+* LayoutObject partition: A partition to allocate LayoutObjects.
+
+* Buffer partition: A partition to allocate objects that have a strong risk
+that the length and/or the contents are exploited by user scripts.
+Specifically, Vectors, HashTables, ArrayBufferContents and Strings are
+allocated on the Buffer partition.
+
+* FastMalloc partition: A partition to allocate all other objects.
+Objects marked with USING_FAST_MALLOC are allocated on the FastMalloc partition.
+
+Each partition holds multiple buckets. A bucket is a region in a partition
+that contains similar-sized objects. Each object allocation must be aligned
+with the closest bucket size. For example, if a partition has three buckets
+for 64 bytes, 256 bytes and 1024 bytes, then an object of 128 bytes is
+rounded up to 256 bytes and allocated on the second bucket.
+
+The LayoutObject partition has buckets for all N * sizeof(void*) (N = 1, 2, ..., N_max).
+This means that no extra padding is needed to allocate a LayoutObject object.
+Different sizes of LayoutObjects are allocated in different buckets.
+
+The Buffer partition and the FastMalloc partition have many buckets.
+They support any arbitrary size of allocations but padding may be added
+to align the allocation with the closest bucket size. The bucket sizes are
+chosen to keep the worst-case memory overhead less than 10%.
+
+Large allocations (> 1 MB) are realized by direct memory mmapping.
+
+## Performance
+
+PartitionAlloc doesn't acquire a lock when allocating on the LayoutObject
+partition, because it's guaranteed that LayoutObjects are allocated
+only by the main thread.
+
+PartitionAlloc acquires a lock when allocating on the Buffer partition and
+the FastMalloc partition. PartitionAlloc uses a spin lock because thread contention
+would be rare in Blink.
+
+PartitionAlloc is designed to be extremely fast in fast paths. Just two
+(reasonably predictable) branches are required for the fast paths of an
+allocation and deallocation. The number of operations in the fast paths
+is minimized, leading to the possibility of inlining.
+
+Having a dedicated partition for LayoutObjects is helpful to improve cache
+locality and thus help improve performance.
+
+## Security
+
+Security is one of the most important goals of PartitionAlloc.
+
+Different partitions are guaranteed to exist in separate address spaces.
+When objects contained in a page in a partition are all freed,
+the physical memory is returned to the system but the address space
+remains reserved. The address space may be reused later only for the partition.
+Remember that PartitionAlloc puts LayoutObjects into a dedicated partition.
+This is because LayoutObjects are likely to be a source of use-after-free.
+Similarly, PartitionAlloc puts Strings, Vectors etc into the Buffer partition
+because the length and/or contents may be exploited by user scripts.
+This means that PartitionAlloc greedily uses virtual address spaces in favor of
+security hardening.
+
+Also the following security properties are provided:
+
+* Linear overflows cannot corrupt into the partition.
+
+* Linear overflows cannot corrupt out of the partition.
+
+* Metadata is recorded in a dedicated region (not next to each object).
+Linear overflow or underflow cannot corrupt the metadata.
+
+* Buckets are helpful to allocate different-sized objects on different addresses.
+One page can contain only similar-sized objects.
+
+* Dereference of a freelist pointer should fault.
+
+* Partial pointer overwrite of freelist pointer should fault.
+
+* Large allocations are guard-paged at the beginning and end.
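To make the bucket rounding described in this document concrete, here is a minimal sketch with an illustrative three-bucket table; the real bucket layout lives in partition_alloc.h and is much denser (keeping worst-case overhead below 10%). It reproduces the 128-byte-to-256-byte example from the text.

// Bucket rounding sketch (toy bucket table, not PartitionAlloc's real one).
#include <cassert>
#include <cstddef>

const size_t kBuckets[] = {64, 256, 1024};

// Returns the bucket size an allocation of |size| would be rounded up to,
// or 0 if it exceeds every bucket in this toy table.
size_t RoundUpToBucket(size_t size) {
  for (size_t bucket : kBuckets) {
    if (size <= bucket)
      return bucket;
  }
  return 0;
}

int main() {
  assert(RoundUpToBucket(128) == 256);  // The example from the text above.
  assert(RoundUpToBucket(64) == 64);    // Exact fit, no padding needed.
  assert(RoundUpToBucket(4096) == 0);   // Larger than this toy table covers.
  return 0;
}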
diff --git a/chromium/base/allocator/partition_allocator/address_space_randomization.cc b/chromium/base/allocator/partition_allocator/address_space_randomization.cc
new file mode 100644
index 00000000000..1be5baf189c
--- /dev/null
+++ b/chromium/base/allocator/partition_allocator/address_space_randomization.cc
@@ -0,0 +1,130 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/address_space_randomization.h"
+
+#include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/synchronization/spin_lock.h"
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#else
+#include <sys/time.h>
+#include <unistd.h>
+#endif
+
+namespace base {
+
+namespace {
+
+// This is the same PRNG as used by tcmalloc for mapping address randomness;
+// see http://burtleburtle.net/bob/rand/smallprng.html
+struct ranctx {
+ subtle::SpinLock lock;
+ bool initialized;
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+ uint32_t d;
+};
+
+#define rot(x, k) (((x) << (k)) | ((x) >> (32 - (k))))
+
+uint32_t ranvalInternal(ranctx* x) {
+ uint32_t e = x->a - rot(x->b, 27);
+ x->a = x->b ^ rot(x->c, 17);
+ x->b = x->c + x->d;
+ x->c = x->d + e;
+ x->d = e + x->a;
+ return x->d;
+}
+
+#undef rot
+
+uint32_t ranval(ranctx* x) {
+ subtle::SpinLock::Guard guard(x->lock);
+ if (UNLIKELY(!x->initialized)) {
+ x->initialized = true;
+ char c;
+ uint32_t seed = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(&c));
+ uint32_t pid;
+ uint32_t usec;
+#if defined(OS_WIN)
+ pid = GetCurrentProcessId();
+ SYSTEMTIME st;
+ GetSystemTime(&st);
+ usec = static_cast<uint32_t>(st.wMilliseconds * 1000);
+#else
+ pid = static_cast<uint32_t>(getpid());
+ struct timeval tv;
+ gettimeofday(&tv, 0);
+ usec = static_cast<uint32_t>(tv.tv_usec);
+#endif
+ seed ^= pid;
+ seed ^= usec;
+ x->a = 0xf1ea5eed;
+ x->b = x->c = x->d = seed;
+ for (int i = 0; i < 20; ++i) {
+ (void)ranvalInternal(x);
+ }
+ }
+ uint32_t ret = ranvalInternal(x);
+ return ret;
+}
+
+static struct ranctx s_ranctx;
+
+} // namespace
+
+// Calculates a random preferred mapping address. In calculating an address, we
+// balance good ASLR against not fragmenting the address space too badly.
+void* GetRandomPageBase() {
+ uintptr_t random;
+ random = static_cast<uintptr_t>(ranval(&s_ranctx));
+#if defined(ARCH_CPU_X86_64)
+ random <<= 32UL;
+ random |= static_cast<uintptr_t>(ranval(&s_ranctx));
+// This address mask gives a low likelihood of address space collisions. We
+// handle the situation gracefully if there is a collision.
+#if defined(OS_WIN)
+ // 64-bit Windows has a bizarrely small 8TB user address space. Allocates in
+ // the 1-5TB region. TODO(palmer): See if Windows >= 8.1 has the full 47 bits,
+ // and use it if so. crbug.com/672219
+ random &= 0x3ffffffffffUL;
+ random += 0x10000000000UL;
+#elif defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+ // This range is copied from the TSan source, but works for all tools.
+ random &= 0x007fffffffffUL;
+ random += 0x7e8000000000UL;
+#else
+ // Linux and OS X support the full 47-bit user space of x64 processors.
+ random &= 0x3fffffffffffUL;
+#endif
+#elif defined(ARCH_CPU_ARM64)
+ // ARM64 on Linux has 39-bit user space.
+ random &= 0x3fffffffffUL;
+ random += 0x1000000000UL;
+#else // !defined(ARCH_CPU_X86_64) && !defined(ARCH_CPU_ARM64)
+#if defined(OS_WIN)
+ // On win32 host systems the randomization plus huge alignment causes
+ // excessive fragmentation. Plus most of these systems lack ASLR, so the
+ // randomization isn't buying anything. In that case we just skip it.
+ // TODO(jschuh): Just dump the randomization when HE-ASLR is present.
+ static BOOL isWow64 = -1;
+ if (isWow64 == -1 && !IsWow64Process(GetCurrentProcess(), &isWow64))
+ isWow64 = FALSE;
+ if (!isWow64)
+ return nullptr;
+#endif // defined(OS_WIN)
+ // This is a good range on Windows, Linux and Mac.
+ // Allocates in the 0.5-1.5GB region.
+ random &= 0x3fffffff;
+ random += 0x20000000;
+#endif // defined(ARCH_CPU_X86_64)
+ random &= kPageAllocationGranularityBaseMask;
+ return reinterpret_cast<void*>(random);
+}
+
+} // namespace base
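As a standalone sanity check of the 64-bit Windows constants above (not part of the patch): masking with 0x3ffffffffff keeps the low 42 bits, a 4 TiB span, and adding 0x10000000000 (1 TiB) shifts the hint into the 1-5 TiB region mentioned in the comment; the final kPageAllocationGranularityBaseMask step in GetRandomPageBase() then only aligns the address down.

// Compile-time check of the hint range implied by the mask and offset.
#include <cstdint>

constexpr uint64_t kMask = 0x3ffffffffffULL;    // 2^42 - 1, a 4 TiB span.
constexpr uint64_t kOffset = 0x10000000000ULL;  // 2^40 = 1 TiB.

static_assert((uint64_t{0} & kMask) + kOffset == (uint64_t{1} << 40),
              "lowest possible hint is 1 TiB");
static_assert((~uint64_t{0} & kMask) + kOffset == (uint64_t{5} << 40) - 1,
              "highest possible hint is just below 5 TiB");

int main() { return 0; }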
diff --git a/chromium/base/allocator/partition_allocator/address_space_randomization.h b/chromium/base/allocator/partition_allocator/address_space_randomization.h
new file mode 100644
index 00000000000..19069b42b0c
--- /dev/null
+++ b/chromium/base/allocator/partition_allocator/address_space_randomization.h
@@ -0,0 +1,16 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION
+
+namespace base {
+
+// Calculates a random preferred mapping address. In calculating an address, we
+// balance good ASLR against not fragmenting the address space too badly.
+void* GetRandomPageBase();
+
+} // namespace base
+
+#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_SPACE_RANDOMIZATION
diff --git a/chromium/base/allocator/partition_allocator/page_allocator.cc b/chromium/base/allocator/partition_allocator/page_allocator.cc
new file mode 100644
index 00000000000..1884c4690ed
--- /dev/null
+++ b/chromium/base/allocator/partition_allocator/page_allocator.cc
@@ -0,0 +1,278 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/page_allocator.h"
+
+#include <limits.h>
+
+#include "base/allocator/partition_allocator/address_space_randomization.h"
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/logging.h"
+#include "build/build_config.h"
+
+#if defined(OS_POSIX)
+
+#include <errno.h>
+#include <sys/mman.h>
+
+#ifndef MADV_FREE
+#define MADV_FREE MADV_DONTNEED
+#endif
+
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+
+// On POSIX |mmap| uses a nearby address if the hint address is blocked.
+static const bool kHintIsAdvisory = true;
+static volatile base::subtle::Atomic32 s_allocPageErrorCode = 0;
+
+#elif defined(OS_WIN)
+
+#include <windows.h>
+
+// |VirtualAlloc| will fail if allocation at the hint address is blocked.
+static const bool kHintIsAdvisory = false;
+static base::subtle::Atomic32 s_allocPageErrorCode = ERROR_SUCCESS;
+
+#else
+#error Unknown OS
+#endif // defined(OS_POSIX)
+
+namespace base {
+
+// This internal function wraps the OS-specific page allocation call:
+// |VirtualAlloc| on Windows, and |mmap| on POSIX.
+static void* SystemAllocPages(
+ void* hint,
+ size_t length,
+ PageAccessibilityConfiguration page_accessibility) {
+ DCHECK(!(length & kPageAllocationGranularityOffsetMask));
+ DCHECK(!(reinterpret_cast<uintptr_t>(hint) &
+ kPageAllocationGranularityOffsetMask));
+ void* ret;
+#if defined(OS_WIN)
+ DWORD access_flag =
+ page_accessibility == PageAccessible ? PAGE_READWRITE : PAGE_NOACCESS;
+ ret = VirtualAlloc(hint, length, MEM_RESERVE | MEM_COMMIT, access_flag);
+ if (!ret)
+ base::subtle::Release_Store(&s_allocPageErrorCode, GetLastError());
+#else
+ int access_flag = page_accessibility == PageAccessible
+ ? (PROT_READ | PROT_WRITE)
+ : PROT_NONE;
+ ret = mmap(hint, length, access_flag, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+ if (ret == MAP_FAILED) {
+ base::subtle::Release_Store(&s_allocPageErrorCode, errno);
+ ret = 0;
+ }
+#endif
+ return ret;
+}
+
+// Trims base to given length and alignment. Windows returns null on failure and
+// frees base.
+static void* TrimMapping(void* base,
+ size_t base_length,
+ size_t trim_length,
+ uintptr_t align,
+ PageAccessibilityConfiguration page_accessibility) {
+ size_t pre_slack = reinterpret_cast<uintptr_t>(base) & (align - 1);
+ if (pre_slack)
+ pre_slack = align - pre_slack;
+ size_t post_slack = base_length - pre_slack - trim_length;
+ DCHECK(base_length >= trim_length || pre_slack || post_slack);
+ DCHECK(pre_slack < base_length);
+ DCHECK(post_slack < base_length);
+ void* ret = base;
+
+#if defined(OS_POSIX) // On POSIX we can resize the allocation run.
+ (void)page_accessibility;
+ if (pre_slack) {
+ int res = munmap(base, pre_slack);
+ CHECK(!res);
+ ret = reinterpret_cast<char*>(base) + pre_slack;
+ }
+ if (post_slack) {
+ int res = munmap(reinterpret_cast<char*>(ret) + trim_length, post_slack);
+ CHECK(!res);
+ }
+#else // On Windows we can't resize the allocation run.
+ if (pre_slack || post_slack) {
+ ret = reinterpret_cast<char*>(base) + pre_slack;
+ FreePages(base, base_length);
+ ret = SystemAllocPages(ret, trim_length, page_accessibility);
+ }
+#endif
+
+ return ret;
+}
+
+void* AllocPages(void* address,
+ size_t length,
+ size_t align,
+ PageAccessibilityConfiguration page_accessibility) {
+ DCHECK(length >= kPageAllocationGranularity);
+ DCHECK(!(length & kPageAllocationGranularityOffsetMask));
+ DCHECK(align >= kPageAllocationGranularity);
+ DCHECK(!(align & kPageAllocationGranularityOffsetMask));
+ DCHECK(!(reinterpret_cast<uintptr_t>(address) &
+ kPageAllocationGranularityOffsetMask));
+ uintptr_t align_offset_mask = align - 1;
+ uintptr_t align_base_mask = ~align_offset_mask;
+ DCHECK(!(reinterpret_cast<uintptr_t>(address) & align_offset_mask));
+
+ // If the client passed null as the address, choose a good one.
+ if (!address) {
+ address = GetRandomPageBase();
+ address = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(address) &
+ align_base_mask);
+ }
+
+ // First try to force an exact-size, aligned allocation from our random base.
+ for (int count = 0; count < 3; ++count) {
+ void* ret = SystemAllocPages(address, length, page_accessibility);
+ if (kHintIsAdvisory || ret) {
+ // If the alignment is to our liking, we're done.
+ if (!(reinterpret_cast<uintptr_t>(ret) & align_offset_mask))
+ return ret;
+ FreePages(ret, length);
+#if defined(ARCH_CPU_32_BITS)
+ address = reinterpret_cast<void*>(
+ (reinterpret_cast<uintptr_t>(ret) + align) & align_base_mask);
+#endif
+ } else if (!address) { // We know we're OOM when an unhinted allocation
+ // fails.
+ return nullptr;
+ } else {
+#if defined(ARCH_CPU_32_BITS)
+ address = reinterpret_cast<char*>(address) + align;
+#endif
+ }
+
+#if !defined(ARCH_CPU_32_BITS)
+ // Keep trying random addresses on systems that have a large address space.
+ address = GetRandomPageBase();
+ address = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(address) &
+ align_base_mask);
+#endif
+ }
+
+ // Map a larger allocation so we can force alignment, but continue randomizing
+ // only on 64-bit POSIX.
+ size_t try_length = length + (align - kPageAllocationGranularity);
+ CHECK(try_length >= length);
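+  // try_length is just large enough to guarantee that an align-aligned window
+  // of |length| bytes fits inside the mapping: the mapping itself starts on a
+  // kPageAllocationGranularity boundary, so at most
+  // align - kPageAllocationGranularity bytes are wasted reaching the next
+  // align boundary.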
+ void* ret;
+
+ do {
+ // Don't continue to burn cycles on mandatory hints (Windows).
+ address = kHintIsAdvisory ? GetRandomPageBase() : nullptr;
+ ret = SystemAllocPages(address, try_length, page_accessibility);
+ // The retries are for Windows, where a race can steal our mapping on
+ // resize.
+ } while (ret &&
+ (ret = TrimMapping(ret, try_length, length, align,
+ page_accessibility)) == nullptr);
+
+ return ret;
+}
+
+void FreePages(void* address, size_t length) {
+ DCHECK(!(reinterpret_cast<uintptr_t>(address) &
+ kPageAllocationGranularityOffsetMask));
+ DCHECK(!(length & kPageAllocationGranularityOffsetMask));
+#if defined(OS_POSIX)
+ int ret = munmap(address, length);
+ CHECK(!ret);
+#else
+ BOOL ret = VirtualFree(address, 0, MEM_RELEASE);
+ CHECK(ret);
+#endif
+}
+
+void SetSystemPagesInaccessible(void* address, size_t length) {
+ DCHECK(!(length & kSystemPageOffsetMask));
+#if defined(OS_POSIX)
+ int ret = mprotect(address, length, PROT_NONE);
+ CHECK(!ret);
+#else
+ BOOL ret = VirtualFree(address, length, MEM_DECOMMIT);
+ CHECK(ret);
+#endif
+}
+
+bool SetSystemPagesAccessible(void* address, size_t length) {
+ DCHECK(!(length & kSystemPageOffsetMask));
+#if defined(OS_POSIX)
+ return !mprotect(address, length, PROT_READ | PROT_WRITE);
+#else
+ return !!VirtualAlloc(address, length, MEM_COMMIT, PAGE_READWRITE);
+#endif
+}
+
+void DecommitSystemPages(void* address, size_t length) {
+ DCHECK(!(length & kSystemPageOffsetMask));
+#if defined(OS_POSIX)
+ int ret = madvise(address, length, MADV_FREE);
+ if (ret != 0 && errno == EINVAL) {
+    // MADV_FREE only works on Linux 4.5 and later. If the request failed,
+    // retry with the older MADV_DONTNEED. Note that MADV_FREE being defined
+    // at compile time does not imply runtime support.
+ ret = madvise(address, length, MADV_DONTNEED);
+ }
+ CHECK(!ret);
+#else
+ SetSystemPagesInaccessible(address, length);
+#endif
+}
+
+void RecommitSystemPages(void* address, size_t length) {
+ DCHECK(!(length & kSystemPageOffsetMask));
+#if defined(OS_POSIX)
+ (void)address;
+#else
+ CHECK(SetSystemPagesAccessible(address, length));
+#endif
+}
+
+void DiscardSystemPages(void* address, size_t length) {
+ DCHECK(!(length & kSystemPageOffsetMask));
+#if defined(OS_POSIX)
+ // On POSIX, the implementation detail is that discard and decommit are the
+ // same, and lead to pages that are returned to the system immediately and
+ // get replaced with zeroed pages when touched. So we just call
+ // DecommitSystemPages() here to avoid code duplication.
+ DecommitSystemPages(address, length);
+#else
+  // On Windows, discarded pages are not returned to the system immediately
+  // and are not guaranteed to be zeroed when returned to the application.
+ using DiscardVirtualMemoryFunction =
+ DWORD(WINAPI*)(PVOID virtualAddress, SIZE_T size);
+ static DiscardVirtualMemoryFunction discard_virtual_memory =
+ reinterpret_cast<DiscardVirtualMemoryFunction>(-1);
+ if (discard_virtual_memory ==
+ reinterpret_cast<DiscardVirtualMemoryFunction>(-1))
+ discard_virtual_memory =
+ reinterpret_cast<DiscardVirtualMemoryFunction>(GetProcAddress(
+ GetModuleHandle(L"Kernel32.dll"), "DiscardVirtualMemory"));
+ // Use DiscardVirtualMemory when available because it releases faster than
+ // MEM_RESET.
+ DWORD ret = 1;
+ if (discard_virtual_memory)
+ ret = discard_virtual_memory(address, length);
+ // DiscardVirtualMemory is buggy in Win10 SP0, so fall back to MEM_RESET on
+ // failure.
+ if (ret) {
+ void* ret = VirtualAlloc(address, length, MEM_RESET, PAGE_READWRITE);
+ CHECK(ret);
+ }
+#endif
+}
+
+uint32_t GetAllocPageErrorCode() {
+ return base::subtle::Acquire_Load(&s_allocPageErrorCode);
+}
+
+} // namespace base
diff --git a/chromium/base/allocator/partition_allocator/page_allocator.h b/chromium/base/allocator/partition_allocator/page_allocator.h
new file mode 100644
index 00000000000..f57beb7b37f
--- /dev/null
+++ b/chromium/base/allocator/partition_allocator/page_allocator.h
@@ -0,0 +1,124 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H
+
+#include <stdint.h>
+
+#include <cstddef>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "build/build_config.h"
+
+namespace base {
+
+#if defined(OS_WIN)
+static const size_t kPageAllocationGranularityShift = 16; // 64KB
+#else
+static const size_t kPageAllocationGranularityShift = 12; // 4KB
+#endif
+static const size_t kPageAllocationGranularity =
+ 1 << kPageAllocationGranularityShift;
+static const size_t kPageAllocationGranularityOffsetMask =
+ kPageAllocationGranularity - 1;
+static const size_t kPageAllocationGranularityBaseMask =
+ ~kPageAllocationGranularityOffsetMask;
+
+// All Blink-supported systems have 4096-byte system pages and can handle
+// permissions and commit / decommit at this granularity.
+static const size_t kSystemPageSize = 4096;
+static const size_t kSystemPageOffsetMask = kSystemPageSize - 1;
+static const size_t kSystemPageBaseMask = ~kSystemPageOffsetMask;
+
+enum PageAccessibilityConfiguration {
+ PageAccessible,
+ PageInaccessible,
+};
+
+// Allocate one or more pages.
+// The requested address is just a hint; the actual address returned may
+// differ. The returned address will be aligned at least to align bytes.
+// len is in bytes, and must be a multiple of kPageAllocationGranularity.
+// align is in bytes, and must be a power-of-two multiple of
+// kPageAllocationGranularity.
+// If address is null, then a suitable and randomized address will be chosen
+// automatically.
+// PageAccessibilityConfiguration controls the permission of the
+// allocated pages.
+// This call will return null if the allocation cannot be satisfied.
+BASE_EXPORT void* AllocPages(void* address,
+ size_t len,
+ size_t align,
+ PageAccessibilityConfiguration);
+
+// Free one or more pages.
+// |address| and |length| must match a previous call to AllocPages().
+BASE_EXPORT void FreePages(void* address, size_t length);
+
+// Mark one or more system pages as being inaccessible.
+// Subsequently accessing any address in the range will fault, and the
+// addresses will not be re-used by future allocations.
+// len must be a multiple of kSystemPageSize bytes.
+BASE_EXPORT void SetSystemPagesInaccessible(void* address, size_t length);
+
+// Mark one or more system pages as being accessible.
+// The pages will be readable and writable.
+// len must be a multiple of kSystemPageSize bytes.
+// The returned bool indicates whether the permission change succeeded.
+// Callers must check the result (in most cases with a CHECK that it is true).
+BASE_EXPORT WARN_UNUSED_RESULT bool SetSystemPagesAccessible(void* address,
+ size_t length);
+
+// Decommit one or more system pages. Decommitted means that the physical memory
+// is released to the system, but the virtual address space remains reserved.
+// System pages are re-committed by calling RecommitSystemPages(). Touching
+// a decommitted page _may_ fault.
+// Clients should not make any assumptions about the contents of decommitted
+// system pages, before or after they write to the page. The only guarantee
+// provided is that the contents of the system page will be deterministic again
+// after recommitting and writing to it. In particular, note that system pages
+// are not guaranteed to be zero-filled upon re-commit. len must be a multiple
+// of kSystemPageSize bytes.
+BASE_EXPORT void DecommitSystemPages(void* address, size_t length);
+
+// Recommit one or more system pages. Decommitted system pages must be
+// recommitted before they are read or written again.
+// Note that this operation may be a no-op on some platforms.
+// len must be a multiple of kSystemPageSize bytes.
+BASE_EXPORT void RecommitSystemPages(void* address, size_t length);
+
+// Discard one or more system pages. Discarding is a hint to the system that
+// the page is no longer required. The hint may:
+// - Do nothing.
+// - Discard the page immediately, freeing up physical pages.
+// - Discard the page at some time in the future in response to memory pressure.
+// Only committed pages should be discarded. Discarding a page does not
+// decommit it, and it is valid to discard an already-discarded page.
+// A read or write to a discarded page will not fault.
+// Reading from a discarded page may return the original page content, or a
+// page full of zeroes.
+// Writing to a discarded page is the only guaranteed way to tell the system
+// that the page is required again. Once written to, the content of the page is
+// guaranteed stable once more. After being written to, the page content may be
+// based on the original page content, or a page of zeroes.
+// len must be a multiple of kSystemPageSize bytes.
+BASE_EXPORT void DiscardSystemPages(void* address, size_t length);
+
+ALWAYS_INLINE uintptr_t RoundUpToSystemPage(uintptr_t address) {
+ return (address + kSystemPageOffsetMask) & kSystemPageBaseMask;
+}
+
+ALWAYS_INLINE uintptr_t RoundDownToSystemPage(uintptr_t address) {
+ return address & kSystemPageBaseMask;
+}
+
+// Returns errno (or GetLastError code) when mmap (or VirtualAlloc) fails.
+BASE_EXPORT uint32_t GetAllocPageErrorCode();
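+
+// Illustrative usage sketch of this API (a hypothetical caller, shown only to
+// illustrate the contracts documented above):
+//
+//   void* region =
+//       base::AllocPages(nullptr, 2 * base::kPageAllocationGranularity,
+//                        base::kPageAllocationGranularity,
+//                        base::PageAccessible);
+//   CHECK(region) << base::GetAllocPageErrorCode();
+//   base::SetSystemPagesInaccessible(region, base::kSystemPageSize);
+//   CHECK(base::SetSystemPagesAccessible(region, base::kSystemPageSize));
+//   base::FreePages(region, 2 * base::kPageAllocationGranularity);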
+
+} // namespace base
+
+#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PAGE_ALLOCATOR_H
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc.cc b/chromium/base/allocator/partition_allocator/partition_alloc.cc
new file mode 100644
index 00000000000..29cde931fef
--- /dev/null
+++ b/chromium/base/allocator/partition_allocator/partition_alloc.cc
@@ -0,0 +1,1415 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/partition_alloc.h"
+
+#include <string.h>
+
+#include "base/allocator/oom.h"
+#include "base/compiler_specific.h"
+#include "base/synchronization/spin_lock.h"
+
+// Two partition pages are used as guard / metadata page so make sure the super
+// page size is bigger.
+static_assert(base::kPartitionPageSize * 4 <= base::kSuperPageSize,
+ "ok super page size");
+static_assert(!(base::kSuperPageSize % base::kPartitionPageSize),
+ "ok super page multiple");
+// Four system pages gives us room to hack out a still-guard-paged piece
+// of metadata in the middle of a guard partition page.
+static_assert(base::kSystemPageSize * 4 <= base::kPartitionPageSize,
+ "ok partition page size");
+static_assert(!(base::kPartitionPageSize % base::kSystemPageSize),
+ "ok partition page multiple");
+static_assert(sizeof(base::PartitionPage) <= base::kPageMetadataSize,
+ "PartitionPage should not be too big");
+static_assert(sizeof(base::PartitionBucket) <= base::kPageMetadataSize,
+ "PartitionBucket should not be too big");
+static_assert(sizeof(base::PartitionSuperPageExtentEntry) <=
+ base::kPageMetadataSize,
+ "PartitionSuperPageExtentEntry should not be too big");
+static_assert(base::kPageMetadataSize * base::kNumPartitionPagesPerSuperPage <=
+ base::kSystemPageSize,
+ "page metadata fits in hole");
+// Check that some of our zanier calculations worked out as expected.
+static_assert(base::kGenericSmallestBucket == 8, "generic smallest bucket");
+static_assert(base::kGenericMaxBucketed == 983040, "generic max bucketed");
+static_assert(base::kMaxSystemPagesPerSlotSpan < (1 << 8),
+              "System pages per slot span must be less than 256.");
+
+namespace base {
+
+subtle::SpinLock PartitionRootBase::gInitializedLock;
+bool PartitionRootBase::gInitialized = false;
+PartitionPage PartitionRootBase::gSeedPage;
+PartitionBucket PartitionRootBase::gPagedBucket;
+void (*PartitionRootBase::gOomHandlingFunction)() = nullptr;
+PartitionAllocHooks::AllocationHook* PartitionAllocHooks::allocation_hook_ =
+ nullptr;
+PartitionAllocHooks::FreeHook* PartitionAllocHooks::free_hook_ = nullptr;
+
+static uint8_t PartitionBucketNumSystemPages(size_t size) {
+ // This works out reasonably for the current bucket sizes of the generic
+ // allocator, and the current values of partition page size and constants.
+ // Specifically, we have enough room to always pack the slots perfectly into
+ // some number of system pages. The only waste is the waste associated with
+ // unfaulted pages (i.e. wasted address space).
+ // TODO: we end up using a lot of system pages for very small sizes. For
+ // example, we'll use 12 system pages for slot size 24. The slot size is
+ // so small that the waste would be tiny with just 4, or 1, system pages.
+ // Later, we can investigate whether there are anti-fragmentation benefits
+ // to using fewer system pages.
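+  // (Illustrative trace, assuming 4KB system pages and 4 system pages per
+  // partition page: for slot size 24 the loop below settles on 12 system
+  // pages, because 49152 / 24 == 2048 slots leaves zero waste and no
+  // unfaulted remainder pages, while every smaller span carries some
+  // penalty.)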
+  double best_waste_ratio = 1.0;
+ uint16_t best_pages = 0;
+ if (size > kMaxSystemPagesPerSlotSpan * kSystemPageSize) {
+ DCHECK(!(size % kSystemPageSize));
+ best_pages = static_cast<uint16_t>(size / kSystemPageSize);
+ CHECK(best_pages < (1 << 8));
+ return static_cast<uint8_t>(best_pages);
+ }
+ DCHECK(size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize);
+ for (uint16_t i = kNumSystemPagesPerPartitionPage - 1;
+ i <= kMaxSystemPagesPerSlotSpan; ++i) {
+ size_t page_size = kSystemPageSize * i;
+ size_t num_slots = page_size / size;
+ size_t waste = page_size - (num_slots * size);
+ // Leaving a page unfaulted is not free; the page will occupy an empty page
+ // table entry. Make a simple attempt to account for that.
+ size_t num_remainder_pages = i & (kNumSystemPagesPerPartitionPage - 1);
+ size_t num_unfaulted_pages =
+ num_remainder_pages
+ ? (kNumSystemPagesPerPartitionPage - num_remainder_pages)
+ : 0;
+ waste += sizeof(void*) * num_unfaulted_pages;
+    double waste_ratio =
+        static_cast<double>(waste) / static_cast<double>(page_size);
+ if (waste_ratio < best_waste_ratio) {
+ best_waste_ratio = waste_ratio;
+ best_pages = i;
+ }
+ }
+ DCHECK(best_pages > 0);
+ CHECK(best_pages <= kMaxSystemPagesPerSlotSpan);
+ return static_cast<uint8_t>(best_pages);
+}
+
+static void PartitionAllocBaseInit(PartitionRootBase* root) {
+ DCHECK(!root->initialized);
+ {
+ subtle::SpinLock::Guard guard(PartitionRootBase::gInitializedLock);
+ if (!PartitionRootBase::gInitialized) {
+ PartitionRootBase::gInitialized = true;
+ // We mark the seed page as free to make sure it is skipped by our
+ // logic to find a new active page.
+ PartitionRootBase::gPagedBucket.active_pages_head =
+ &PartitionRootGeneric::gSeedPage;
+ }
+ }
+
+ root->initialized = true;
+ root->total_size_of_committed_pages = 0;
+ root->total_size_of_super_pages = 0;
+ root->total_size_of_direct_mapped_pages = 0;
+ root->next_super_page = 0;
+ root->next_partition_page = 0;
+ root->next_partition_page_end = 0;
+ root->first_extent = 0;
+ root->current_extent = 0;
+ root->direct_map_list = 0;
+
+ memset(&root->global_empty_page_ring, '\0',
+ sizeof(root->global_empty_page_ring));
+ root->global_empty_page_ring_index = 0;
+
+ // This is a "magic" value so we can test if a root pointer is valid.
+ root->inverted_self = ~reinterpret_cast<uintptr_t>(root);
+}
+
+static void PartitionBucketInitBase(PartitionBucket* bucket,
+ PartitionRootBase* root) {
+ bucket->active_pages_head = &PartitionRootGeneric::gSeedPage;
+ bucket->empty_pages_head = 0;
+ bucket->decommitted_pages_head = 0;
+ bucket->num_full_pages = 0;
+ bucket->num_system_pages_per_slot_span =
+ PartitionBucketNumSystemPages(bucket->slot_size);
+}
+
+void PartitionAllocGlobalInit(void (*oom_handling_function)()) {
+ DCHECK(oom_handling_function);
+ PartitionRootBase::gOomHandlingFunction = oom_handling_function;
+}
+
+void PartitionAllocInit(PartitionRoot* root,
+ size_t num_buckets,
+ size_t max_allocation) {
+ PartitionAllocBaseInit(root);
+
+ root->num_buckets = num_buckets;
+ root->max_allocation = max_allocation;
+ size_t i;
+ for (i = 0; i < root->num_buckets; ++i) {
+ PartitionBucket* bucket = &root->buckets()[i];
+ if (!i)
+ bucket->slot_size = kAllocationGranularity;
+ else
+ bucket->slot_size = i << kBucketShift;
+ PartitionBucketInitBase(bucket, root);
+ }
+}
+
+void PartitionAllocGenericInit(PartitionRootGeneric* root) {
+ subtle::SpinLock::Guard guard(root->lock);
+
+ PartitionAllocBaseInit(root);
+
+ // Precalculate some shift and mask constants used in the hot path.
+ // Example: malloc(41) == 101001 binary.
+  // The order is 6: (1 << (6 - 1)) == 32 is the highest bit set.
+  // order_index is the next three MSB == 010 == 2.
+  // sub_order_index_mask is a mask for the remaining bits == 11 (masking to
+  // 01 for the sub_order_index).
+ size_t order;
+ for (order = 0; order <= kBitsPerSizeT; ++order) {
+ size_t order_index_shift;
+ if (order < kGenericNumBucketsPerOrderBits + 1)
+ order_index_shift = 0;
+ else
+ order_index_shift = order - (kGenericNumBucketsPerOrderBits + 1);
+ root->order_index_shifts[order] = order_index_shift;
+ size_t sub_order_index_mask;
+ if (order == kBitsPerSizeT) {
+ // This avoids invoking undefined behavior for an excessive shift.
+ sub_order_index_mask =
+ static_cast<size_t>(-1) >> (kGenericNumBucketsPerOrderBits + 1);
+ } else {
+ sub_order_index_mask = ((static_cast<size_t>(1) << order) - 1) >>
+ (kGenericNumBucketsPerOrderBits + 1);
+ }
+ root->order_sub_index_masks[order] = sub_order_index_mask;
+ }
+
+ // Set up the actual usable buckets first.
+ // Note that typical values (i.e. min allocation size of 8) will result in
+ // pseudo buckets (size==9 etc. or more generally, size is not a multiple
+ // of the smallest allocation granularity).
+ // We avoid them in the bucket lookup map, but we tolerate them to keep the
+ // code simpler and the structures more generic.
+ size_t i, j;
+ size_t current_size = kGenericSmallestBucket;
+ size_t currentIncrement =
+ kGenericSmallestBucket >> kGenericNumBucketsPerOrderBits;
+ PartitionBucket* bucket = &root->buckets[0];
+ for (i = 0; i < kGenericNumBucketedOrders; ++i) {
+ for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
+ bucket->slot_size = current_size;
+ PartitionBucketInitBase(bucket, root);
+      // Disable pseudo buckets so that touching them faults.
+ if (current_size % kGenericSmallestBucket)
+ bucket->active_pages_head = 0;
+ current_size += currentIncrement;
+ ++bucket;
+ }
+ currentIncrement <<= 1;
+ }
+ DCHECK(current_size == 1 << kGenericMaxBucketedOrder);
+ DCHECK(bucket == &root->buckets[0] + kGenericNumBuckets);
+
+ // Then set up the fast size -> bucket lookup table.
+ bucket = &root->buckets[0];
+ PartitionBucket** bucketPtr = &root->bucket_lookups[0];
+ for (order = 0; order <= kBitsPerSizeT; ++order) {
+ for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
+ if (order < kGenericMinBucketedOrder) {
+ // Use the bucket of the finest granularity for malloc(0) etc.
+ *bucketPtr++ = &root->buckets[0];
+ } else if (order > kGenericMaxBucketedOrder) {
+ *bucketPtr++ = &PartitionRootGeneric::gPagedBucket;
+ } else {
+ PartitionBucket* validBucket = bucket;
+ // Skip over invalid buckets.
+ while (validBucket->slot_size % kGenericSmallestBucket)
+ validBucket++;
+ *bucketPtr++ = validBucket;
+ bucket++;
+ }
+ }
+ }
+ DCHECK(bucket == &root->buckets[0] + kGenericNumBuckets);
+ DCHECK(bucketPtr ==
+ &root->bucket_lookups[0] +
+ ((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder));
+ // And there's one last bucket lookup that will be hit for e.g. malloc(-1),
+  // which tries to overflow to a non-existent order.
+ *bucketPtr = &PartitionRootGeneric::gPagedBucket;
+}
+
+#if !defined(ARCH_CPU_64_BITS)
+static NOINLINE void partitionOutOfMemoryWithLotsOfUncommitedPages() {
+ OOM_CRASH();
+}
+#endif
+
+static NOINLINE void partitionOutOfMemory(const PartitionRootBase* root) {
+#if !defined(ARCH_CPU_64_BITS)
+ // Check whether this OOM is due to a lot of super pages that are allocated
+ // but not committed, probably due to http://crbug.com/421387.
+ if (root->total_size_of_super_pages +
+ root->total_size_of_direct_mapped_pages -
+ root->total_size_of_committed_pages >
+ kReasonableSizeOfUnusedPages) {
+ partitionOutOfMemoryWithLotsOfUncommitedPages();
+ }
+#endif
+ if (PartitionRootBase::gOomHandlingFunction)
+ (*PartitionRootBase::gOomHandlingFunction)();
+ OOM_CRASH();
+}
+
+static NOINLINE void partitionExcessiveAllocationSize() {
+ OOM_CRASH();
+}
+
+static NOINLINE void partitionBucketFull() {
+ OOM_CRASH();
+}
+
+// partitionPageStateIs*
+// Note that it's only valid to call these functions on pages found on one of
+// the page lists. Specifically, you can't call these functions on full pages
+// that were detached from the active list.
+static bool ALWAYS_INLINE
+PartitionPageStateIsActive(const PartitionPage* page) {
+ DCHECK(page != &PartitionRootGeneric::gSeedPage);
+ DCHECK(!page->page_offset);
+ return (page->num_allocated_slots > 0 &&
+ (page->freelist_head || page->num_unprovisioned_slots));
+}
+
+static bool ALWAYS_INLINE PartitionPageStateIsFull(const PartitionPage* page) {
+ DCHECK(page != &PartitionRootGeneric::gSeedPage);
+ DCHECK(!page->page_offset);
+ bool ret = (page->num_allocated_slots == PartitionBucketSlots(page->bucket));
+ if (ret) {
+ DCHECK(!page->freelist_head);
+ DCHECK(!page->num_unprovisioned_slots);
+ }
+ return ret;
+}
+
+static bool ALWAYS_INLINE PartitionPageStateIsEmpty(const PartitionPage* page) {
+ DCHECK(page != &PartitionRootGeneric::gSeedPage);
+ DCHECK(!page->page_offset);
+ return (!page->num_allocated_slots && page->freelist_head);
+}
+
+static bool ALWAYS_INLINE
+PartitionPageStateIsDecommitted(const PartitionPage* page) {
+ DCHECK(page != &PartitionRootGeneric::gSeedPage);
+ DCHECK(!page->page_offset);
+ bool ret = (!page->num_allocated_slots && !page->freelist_head);
+ if (ret) {
+ DCHECK(!page->num_unprovisioned_slots);
+ DCHECK(page->empty_cache_index == -1);
+ }
+ return ret;
+}
+
+static void partitionIncreaseCommittedPages(PartitionRootBase* root,
+ size_t len) {
+ root->total_size_of_committed_pages += len;
+ DCHECK(root->total_size_of_committed_pages <=
+ root->total_size_of_super_pages +
+ root->total_size_of_direct_mapped_pages);
+}
+
+static void partitionDecreaseCommittedPages(PartitionRootBase* root,
+ size_t len) {
+ root->total_size_of_committed_pages -= len;
+ DCHECK(root->total_size_of_committed_pages <=
+ root->total_size_of_super_pages +
+ root->total_size_of_direct_mapped_pages);
+}
+
+static ALWAYS_INLINE void partitionDecommitSystemPages(PartitionRootBase* root,
+ void* address,
+ size_t length) {
+ DecommitSystemPages(address, length);
+ partitionDecreaseCommittedPages(root, length);
+}
+
+static ALWAYS_INLINE void partitionRecommitSystemPages(PartitionRootBase* root,
+ void* address,
+ size_t length) {
+ RecommitSystemPages(address, length);
+ partitionIncreaseCommittedPages(root, length);
+}
+
+static ALWAYS_INLINE void* PartitionAllocPartitionPages(
+ PartitionRootBase* root,
+ int flags,
+ uint16_t num_partition_pages) {
+ DCHECK(!(reinterpret_cast<uintptr_t>(root->next_partition_page) %
+ kPartitionPageSize));
+ DCHECK(!(reinterpret_cast<uintptr_t>(root->next_partition_page_end) %
+ kPartitionPageSize));
+ DCHECK(num_partition_pages <= kNumPartitionPagesPerSuperPage);
+ size_t total_size = kPartitionPageSize * num_partition_pages;
+ size_t num_partition_pages_left =
+ (root->next_partition_page_end - root->next_partition_page) >>
+ kPartitionPageShift;
+ if (LIKELY(num_partition_pages_left >= num_partition_pages)) {
+ // In this case, we can still hand out pages from the current super page
+ // allocation.
+ char* ret = root->next_partition_page;
+ root->next_partition_page += total_size;
+ partitionIncreaseCommittedPages(root, total_size);
+ return ret;
+ }
+
+  // Need a new super page. We want to allocate super pages in a contiguous
+  // address region as much as possible. This is important for not causing
+  // page table bloat and not fragmenting address space on 32-bit
+  // architectures.
+ char* requestedAddress = root->next_super_page;
+ char* super_page = reinterpret_cast<char*>(AllocPages(
+ requestedAddress, kSuperPageSize, kSuperPageSize, PageAccessible));
+ if (UNLIKELY(!super_page))
+ return 0;
+
+ root->total_size_of_super_pages += kSuperPageSize;
+ partitionIncreaseCommittedPages(root, total_size);
+
+ root->next_super_page = super_page + kSuperPageSize;
+ char* ret = super_page + kPartitionPageSize;
+ root->next_partition_page = ret + total_size;
+ root->next_partition_page_end = root->next_super_page - kPartitionPageSize;
+ // Make the first partition page in the super page a guard page, but leave a
+ // hole in the middle.
+ // This is where we put page metadata and also a tiny amount of extent
+ // metadata.
+ SetSystemPagesInaccessible(super_page, kSystemPageSize);
+ SetSystemPagesInaccessible(super_page + (kSystemPageSize * 2),
+ kPartitionPageSize - (kSystemPageSize * 2));
+ // Also make the last partition page a guard page.
+ SetSystemPagesInaccessible(super_page + (kSuperPageSize - kPartitionPageSize),
+ kPartitionPageSize);
+
+ // If we were after a specific address, but didn't get it, assume that
+  // the system chose a lousy address. Here most OSes have a default
+ // algorithm that isn't randomized. For example, most Linux
+ // distributions will allocate the mapping directly before the last
+ // successful mapping, which is far from random. So we just get fresh
+ // randomness for the next mapping attempt.
+ if (requestedAddress && requestedAddress != super_page)
+ root->next_super_page = 0;
+
+ // We allocated a new super page so update super page metadata.
+ // First check if this is a new extent or not.
+ PartitionSuperPageExtentEntry* latest_extent =
+ reinterpret_cast<PartitionSuperPageExtentEntry*>(
+ PartitionSuperPageToMetadataArea(super_page));
+ // By storing the root in every extent metadata object, we have a fast way
+ // to go from a pointer within the partition to the root object.
+ latest_extent->root = root;
+ // Most new extents will be part of a larger extent, and these three fields
+ // are unused, but we initialize them to 0 so that we get a clear signal
+ // in case they are accidentally used.
+ latest_extent->super_page_base = 0;
+ latest_extent->super_pages_end = 0;
+ latest_extent->next = 0;
+
+ PartitionSuperPageExtentEntry* current_extent = root->current_extent;
+ bool isNewExtent = (super_page != requestedAddress);
+ if (UNLIKELY(isNewExtent)) {
+ if (UNLIKELY(!current_extent)) {
+ DCHECK(!root->first_extent);
+ root->first_extent = latest_extent;
+ } else {
+ DCHECK(current_extent->super_page_base);
+ current_extent->next = latest_extent;
+ }
+ root->current_extent = latest_extent;
+ latest_extent->super_page_base = super_page;
+ latest_extent->super_pages_end = super_page + kSuperPageSize;
+ } else {
+ // We allocated next to an existing extent so just nudge the size up a
+ // little.
+ DCHECK(current_extent->super_pages_end);
+ current_extent->super_pages_end += kSuperPageSize;
+ DCHECK(ret >= current_extent->super_page_base &&
+ ret < current_extent->super_pages_end);
+ }
+ return ret;
+}
+
+static ALWAYS_INLINE uint16_t
+partitionBucketPartitionPages(const PartitionBucket* bucket) {
+ return (bucket->num_system_pages_per_slot_span +
+ (kNumSystemPagesPerPartitionPage - 1)) /
+ kNumSystemPagesPerPartitionPage;
+}
+
+static ALWAYS_INLINE void partitionPageReset(PartitionPage* page) {
+ DCHECK(PartitionPageStateIsDecommitted(page));
+
+ page->num_unprovisioned_slots = PartitionBucketSlots(page->bucket);
+ DCHECK(page->num_unprovisioned_slots);
+
+ page->next_page = nullptr;
+}
+
+static ALWAYS_INLINE void partitionPageSetup(PartitionPage* page,
+ PartitionBucket* bucket) {
+ // The bucket never changes. We set it up once.
+ page->bucket = bucket;
+ page->empty_cache_index = -1;
+
+ partitionPageReset(page);
+
+ // If this page has just a single slot, do not set up page offsets for any
+ // page metadata other than the first one. This ensures that attempts to
+ // touch invalid page metadata fail.
+ if (page->num_unprovisioned_slots == 1)
+ return;
+
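+  // Each trailing metadata entry below records how many entries it sits after
+  // the first one; the pointer-to-page lookup (in partition_alloc.h) uses
+  // page_offset to step back to the primary PartitionPage set up above.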
+ uint16_t num_partition_pages = partitionBucketPartitionPages(bucket);
+ char* pageCharPtr = reinterpret_cast<char*>(page);
+ for (uint16_t i = 1; i < num_partition_pages; ++i) {
+ pageCharPtr += kPageMetadataSize;
+ PartitionPage* secondaryPage =
+ reinterpret_cast<PartitionPage*>(pageCharPtr);
+ secondaryPage->page_offset = i;
+ }
+}
+
+static ALWAYS_INLINE char* partitionPageAllocAndFillFreelist(
+ PartitionPage* page) {
+ DCHECK(page != &PartitionRootGeneric::gSeedPage);
+ uint16_t num_slots = page->num_unprovisioned_slots;
+ DCHECK(num_slots);
+ PartitionBucket* bucket = page->bucket;
+ // We should only get here when _every_ slot is either used or unprovisioned.
+ // (The third state is "on the freelist". If we have a non-empty freelist, we
+ // should not get here.)
+ DCHECK(num_slots + page->num_allocated_slots == PartitionBucketSlots(bucket));
+ // Similarly, make explicitly sure that the freelist is empty.
+ DCHECK(!page->freelist_head);
+ DCHECK(page->num_allocated_slots >= 0);
+
+ size_t size = bucket->slot_size;
+ char* base = reinterpret_cast<char*>(PartitionPageToPointer(page));
+ char* return_object = base + (size * page->num_allocated_slots);
+ char* firstFreelistPointer = return_object + size;
+ char* firstFreelistPointerExtent =
+ firstFreelistPointer + sizeof(PartitionFreelistEntry*);
+ // Our goal is to fault as few system pages as possible. We calculate the
+ // page containing the "end" of the returned slot, and then allow freelist
+ // pointers to be written up to the end of that page.
+ char* sub_page_limit = reinterpret_cast<char*>(
+ RoundUpToSystemPage(reinterpret_cast<size_t>(firstFreelistPointer)));
+ char* slots_limit = return_object + (size * num_slots);
+ char* freelist_limit = sub_page_limit;
+ if (UNLIKELY(slots_limit < freelist_limit))
+ freelist_limit = slots_limit;
+
+ uint16_t num_new_freelist_entries = 0;
+ if (LIKELY(firstFreelistPointerExtent <= freelist_limit)) {
+ // Only consider used space in the slot span. If we consider wasted
+ // space, we may get an off-by-one when a freelist pointer fits in the
+ // wasted space, but a slot does not.
+ // We know we can fit at least one freelist pointer.
+ num_new_freelist_entries = 1;
+ // Any further entries require space for the whole slot span.
+ num_new_freelist_entries += static_cast<uint16_t>(
+ (freelist_limit - firstFreelistPointerExtent) / size);
+ }
+
+ // We always return an object slot -- that's the +1 below.
+  // We do not necessarily create any new freelist entries, because we cross
+  // sub-page boundaries frequently for large bucket sizes.
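+  // (For example, with a 16KB slot on 4KB system pages the freelist pointer
+  // for the next slot would already cross the sub-page limit, so
+  // num_new_freelist_entries stays 0 and only the returned slot is
+  // provisioned.)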
+ DCHECK(num_new_freelist_entries + 1 <= num_slots);
+ num_slots -= (num_new_freelist_entries + 1);
+ page->num_unprovisioned_slots = num_slots;
+ page->num_allocated_slots++;
+
+ if (LIKELY(num_new_freelist_entries)) {
+ char* freelist_pointer = firstFreelistPointer;
+ PartitionFreelistEntry* entry =
+ reinterpret_cast<PartitionFreelistEntry*>(freelist_pointer);
+ page->freelist_head = entry;
+ while (--num_new_freelist_entries) {
+ freelist_pointer += size;
+ PartitionFreelistEntry* nextEntry =
+ reinterpret_cast<PartitionFreelistEntry*>(freelist_pointer);
+ entry->next = PartitionFreelistMask(nextEntry);
+ entry = nextEntry;
+ }
+ entry->next = PartitionFreelistMask(0);
+ } else {
+ page->freelist_head = 0;
+ }
+ return return_object;
+}
+
+// This helper function scans a bucket's active page list for a suitable new
+// active page.
+// When it finds a suitable new active page (one that has free slots and is not
+// empty), it is set as the new active page. If there is no suitable new
+// active page, the current active page is set to the seed page.
+// As potential pages are scanned, they are tidied up according to their state.
+// Empty pages are swept onto the empty page list, decommitted pages onto the
+// decommitted page list, and full pages are unlinked from any list.
+static bool partitionSetNewActivePage(PartitionBucket* bucket) {
+ PartitionPage* page = bucket->active_pages_head;
+ if (page == &PartitionRootBase::gSeedPage)
+ return false;
+
+ PartitionPage* next_page;
+
+ for (; page; page = next_page) {
+ next_page = page->next_page;
+ DCHECK(page->bucket == bucket);
+ DCHECK(page != bucket->empty_pages_head);
+ DCHECK(page != bucket->decommitted_pages_head);
+
+ // Deal with empty and decommitted pages.
+ if (LIKELY(PartitionPageStateIsActive(page))) {
+ // This page is usable because it has freelist entries, or has
+ // unprovisioned slots we can create freelist entries from.
+ bucket->active_pages_head = page;
+ return true;
+ }
+ if (LIKELY(PartitionPageStateIsEmpty(page))) {
+ page->next_page = bucket->empty_pages_head;
+ bucket->empty_pages_head = page;
+ } else if (LIKELY(PartitionPageStateIsDecommitted(page))) {
+ page->next_page = bucket->decommitted_pages_head;
+ bucket->decommitted_pages_head = page;
+ } else {
+ DCHECK(PartitionPageStateIsFull(page));
+ // If we get here, we found a full page. Skip over it too, and also
+      // tag it as full (via a negative value). We need it tagged so that the
+      // free path can tell, and move it back onto the active page list.
+ page->num_allocated_slots = -page->num_allocated_slots;
+ ++bucket->num_full_pages;
+ // num_full_pages is a uint16_t for efficient packing so guard against
+ // overflow to be safe.
+ if (UNLIKELY(!bucket->num_full_pages))
+ partitionBucketFull();
+ // Not necessary but might help stop accidents.
+ page->next_page = 0;
+ }
+ }
+
+ bucket->active_pages_head = &PartitionRootGeneric::gSeedPage;
+ return false;
+}
+
+static ALWAYS_INLINE PartitionDirectMapExtent* partitionPageToDirectMapExtent(
+ PartitionPage* page) {
+ DCHECK(PartitionBucketIsDirectMapped(page->bucket));
+ return reinterpret_cast<PartitionDirectMapExtent*>(
+ reinterpret_cast<char*>(page) + 3 * kPageMetadataSize);
+}
+
+static ALWAYS_INLINE void partitionPageSetRawSize(PartitionPage* page,
+ size_t size) {
+ size_t* raw_sizePtr = PartitionPageGetRawSizePtr(page);
+ if (UNLIKELY(raw_sizePtr != nullptr))
+ *raw_sizePtr = size;
+}
+
+static ALWAYS_INLINE PartitionPage* partitionDirectMap(PartitionRootBase* root,
+ int flags,
+ size_t raw_size) {
+ size_t size = PartitionDirectMapSize(raw_size);
+
+ // Because we need to fake looking like a super page, we need to allocate
+ // a bunch of system pages more than "size":
+ // - The first few system pages are the partition page in which the super
+ // page metadata is stored. We fault just one system page out of a partition
+ // page sized clump.
+ // - We add a trailing guard page on 32-bit (on 64-bit we rely on the
+ // massive address space plus randomization instead).
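+  // Roughly, the resulting mapping looks like (illustrative sketch):
+  //   [ metadata partition page, mostly guard ][ slot of |size| bytes ]
+  //   [ trailing guard system page, 32-bit only ]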
+ size_t map_size = size + kPartitionPageSize;
+#if !defined(ARCH_CPU_64_BITS)
+ map_size += kSystemPageSize;
+#endif
+ // Round up to the allocation granularity.
+ map_size += kPageAllocationGranularityOffsetMask;
+ map_size &= kPageAllocationGranularityBaseMask;
+
+ // TODO: these pages will be zero-filled. Consider internalizing an
+ // allocZeroed() API so we can avoid a memset() entirely in this case.
+ char* ptr = reinterpret_cast<char*>(
+ AllocPages(0, map_size, kSuperPageSize, PageAccessible));
+ if (UNLIKELY(!ptr))
+ return nullptr;
+
+ size_t committedPageSize = size + kSystemPageSize;
+ root->total_size_of_direct_mapped_pages += committedPageSize;
+ partitionIncreaseCommittedPages(root, committedPageSize);
+
+ char* slot = ptr + kPartitionPageSize;
+ SetSystemPagesInaccessible(ptr + (kSystemPageSize * 2),
+ kPartitionPageSize - (kSystemPageSize * 2));
+#if !defined(ARCH_CPU_64_BITS)
+ SetSystemPagesInaccessible(ptr, kSystemPageSize);
+ SetSystemPagesInaccessible(slot + size, kSystemPageSize);
+#endif
+
+ PartitionSuperPageExtentEntry* extent =
+ reinterpret_cast<PartitionSuperPageExtentEntry*>(
+ PartitionSuperPageToMetadataArea(ptr));
+ extent->root = root;
+ // The new structures are all located inside a fresh system page so they
+ // will all be zeroed out. These DCHECKs are for documentation.
+ DCHECK(!extent->super_page_base);
+ DCHECK(!extent->super_pages_end);
+ DCHECK(!extent->next);
+ PartitionPage* page = PartitionPointerToPageNoAlignmentCheck(slot);
+ PartitionBucket* bucket = reinterpret_cast<PartitionBucket*>(
+ reinterpret_cast<char*>(page) + (kPageMetadataSize * 2));
+ DCHECK(!page->next_page);
+ DCHECK(!page->num_allocated_slots);
+ DCHECK(!page->num_unprovisioned_slots);
+ DCHECK(!page->page_offset);
+ DCHECK(!page->empty_cache_index);
+ page->bucket = bucket;
+ page->freelist_head = reinterpret_cast<PartitionFreelistEntry*>(slot);
+ PartitionFreelistEntry* nextEntry =
+ reinterpret_cast<PartitionFreelistEntry*>(slot);
+ nextEntry->next = PartitionFreelistMask(0);
+
+ DCHECK(!bucket->active_pages_head);
+ DCHECK(!bucket->empty_pages_head);
+ DCHECK(!bucket->decommitted_pages_head);
+ DCHECK(!bucket->num_system_pages_per_slot_span);
+ DCHECK(!bucket->num_full_pages);
+ bucket->slot_size = size;
+
+ PartitionDirectMapExtent* mapExtent = partitionPageToDirectMapExtent(page);
+ mapExtent->map_size = map_size - kPartitionPageSize - kSystemPageSize;
+ mapExtent->bucket = bucket;
+
+ // Maintain the doubly-linked list of all direct mappings.
+ mapExtent->next_extent = root->direct_map_list;
+ if (mapExtent->next_extent)
+ mapExtent->next_extent->prev_extent = mapExtent;
+ mapExtent->prev_extent = nullptr;
+ root->direct_map_list = mapExtent;
+
+ return page;
+}
+
+static ALWAYS_INLINE void partitionDirectUnmap(PartitionPage* page) {
+ PartitionRootBase* root = PartitionPageToRoot(page);
+ const PartitionDirectMapExtent* extent = partitionPageToDirectMapExtent(page);
+ size_t unmap_size = extent->map_size;
+
+ // Maintain the doubly-linked list of all direct mappings.
+ if (extent->prev_extent) {
+ DCHECK(extent->prev_extent->next_extent == extent);
+ extent->prev_extent->next_extent = extent->next_extent;
+ } else {
+ root->direct_map_list = extent->next_extent;
+ }
+ if (extent->next_extent) {
+ DCHECK(extent->next_extent->prev_extent == extent);
+ extent->next_extent->prev_extent = extent->prev_extent;
+ }
+
+  // Add on the size of the trailing guard page and preceding partition
+ // page.
+ unmap_size += kPartitionPageSize + kSystemPageSize;
+
+ size_t uncommittedPageSize = page->bucket->slot_size + kSystemPageSize;
+ partitionDecreaseCommittedPages(root, uncommittedPageSize);
+ DCHECK(root->total_size_of_direct_mapped_pages >= uncommittedPageSize);
+ root->total_size_of_direct_mapped_pages -= uncommittedPageSize;
+
+ DCHECK(!(unmap_size & kPageAllocationGranularityOffsetMask));
+
+ char* ptr = reinterpret_cast<char*>(PartitionPageToPointer(page));
+ // Account for the mapping starting a partition page before the actual
+ // allocation address.
+ ptr -= kPartitionPageSize;
+
+ FreePages(ptr, unmap_size);
+}
+
+void* PartitionAllocSlowPath(PartitionRootBase* root,
+ int flags,
+ size_t size,
+ PartitionBucket* bucket) {
+ // The slow path is called when the freelist is empty.
+ DCHECK(!bucket->active_pages_head->freelist_head);
+
+ PartitionPage* newPage = nullptr;
+
+ // For the PartitionAllocGeneric API, we have a bunch of buckets marked
+ // as special cases. We bounce them through to the slow path so that we
+ // can still have a blazing fast hot path due to lack of corner-case
+ // branches.
+ bool returnNull = flags & PartitionAllocReturnNull;
+ if (UNLIKELY(PartitionBucketIsDirectMapped(bucket))) {
+ DCHECK(size > kGenericMaxBucketed);
+ DCHECK(bucket == &PartitionRootBase::gPagedBucket);
+ DCHECK(bucket->active_pages_head == &PartitionRootGeneric::gSeedPage);
+ if (size > kGenericMaxDirectMapped) {
+ if (returnNull)
+ return nullptr;
+ partitionExcessiveAllocationSize();
+ }
+ newPage = partitionDirectMap(root, flags, size);
+ } else if (LIKELY(partitionSetNewActivePage(bucket))) {
+ // First, did we find an active page in the active pages list?
+ newPage = bucket->active_pages_head;
+ DCHECK(PartitionPageStateIsActive(newPage));
+ } else if (LIKELY(bucket->empty_pages_head != nullptr) ||
+ LIKELY(bucket->decommitted_pages_head != nullptr)) {
+ // Second, look in our lists of empty and decommitted pages.
+ // Check empty pages first, which are preferred, but beware that an
+ // empty page might have been decommitted.
+ while (LIKELY((newPage = bucket->empty_pages_head) != nullptr)) {
+ DCHECK(newPage->bucket == bucket);
+ DCHECK(PartitionPageStateIsEmpty(newPage) ||
+ PartitionPageStateIsDecommitted(newPage));
+ bucket->empty_pages_head = newPage->next_page;
+ // Accept the empty page unless it got decommitted.
+ if (newPage->freelist_head) {
+ newPage->next_page = nullptr;
+ break;
+ }
+ DCHECK(PartitionPageStateIsDecommitted(newPage));
+ newPage->next_page = bucket->decommitted_pages_head;
+ bucket->decommitted_pages_head = newPage;
+ }
+ if (UNLIKELY(!newPage) &&
+ LIKELY(bucket->decommitted_pages_head != nullptr)) {
+ newPage = bucket->decommitted_pages_head;
+ DCHECK(newPage->bucket == bucket);
+ DCHECK(PartitionPageStateIsDecommitted(newPage));
+ bucket->decommitted_pages_head = newPage->next_page;
+ void* addr = PartitionPageToPointer(newPage);
+ partitionRecommitSystemPages(root, addr,
+ PartitionBucketBytes(newPage->bucket));
+ partitionPageReset(newPage);
+ }
+ DCHECK(newPage);
+ } else {
+ // Third. If we get here, we need a brand new page.
+ uint16_t num_partition_pages = partitionBucketPartitionPages(bucket);
+ void* rawPages =
+ PartitionAllocPartitionPages(root, flags, num_partition_pages);
+ if (LIKELY(rawPages != nullptr)) {
+ newPage = PartitionPointerToPageNoAlignmentCheck(rawPages);
+ partitionPageSetup(newPage, bucket);
+ }
+ }
+
+ // Bail if we had a memory allocation failure.
+ if (UNLIKELY(!newPage)) {
+ DCHECK(bucket->active_pages_head == &PartitionRootGeneric::gSeedPage);
+ if (returnNull)
+ return nullptr;
+ partitionOutOfMemory(root);
+ }
+
+ bucket = newPage->bucket;
+ DCHECK(bucket != &PartitionRootBase::gPagedBucket);
+ bucket->active_pages_head = newPage;
+ partitionPageSetRawSize(newPage, size);
+
+ // If we found an active page with free slots, or an empty page, we have a
+ // usable freelist head.
+ if (LIKELY(newPage->freelist_head != nullptr)) {
+ PartitionFreelistEntry* entry = newPage->freelist_head;
+ PartitionFreelistEntry* newHead = PartitionFreelistMask(entry->next);
+ newPage->freelist_head = newHead;
+ newPage->num_allocated_slots++;
+ return entry;
+ }
+ // Otherwise, we need to build the freelist.
+ DCHECK(newPage->num_unprovisioned_slots);
+ return partitionPageAllocAndFillFreelist(newPage);
+}
+
+static ALWAYS_INLINE void partitionDecommitPage(PartitionRootBase* root,
+ PartitionPage* page) {
+ DCHECK(PartitionPageStateIsEmpty(page));
+ DCHECK(!PartitionBucketIsDirectMapped(page->bucket));
+ void* addr = PartitionPageToPointer(page);
+ partitionDecommitSystemPages(root, addr, PartitionBucketBytes(page->bucket));
+
+ // We actually leave the decommitted page in the active list. We'll sweep
+ // it on to the decommitted page list when we next walk the active page
+ // list.
+ // Pulling this trick enables us to use a singly-linked page list for all
+ // cases, which is critical in keeping the page metadata structure down to
+ // 32 bytes in size.
+ page->freelist_head = 0;
+ page->num_unprovisioned_slots = 0;
+ DCHECK(PartitionPageStateIsDecommitted(page));
+}
+
+static void partitionDecommitPageIfPossible(PartitionRootBase* root,
+ PartitionPage* page) {
+ DCHECK(page->empty_cache_index >= 0);
+ DCHECK(static_cast<unsigned>(page->empty_cache_index) < kMaxFreeableSpans);
+ DCHECK(page == root->global_empty_page_ring[page->empty_cache_index]);
+ page->empty_cache_index = -1;
+ if (PartitionPageStateIsEmpty(page))
+ partitionDecommitPage(root, page);
+}
+
+static ALWAYS_INLINE void partitionRegisterEmptyPage(PartitionPage* page) {
+ DCHECK(PartitionPageStateIsEmpty(page));
+ PartitionRootBase* root = PartitionPageToRoot(page);
+
+ // If the page is already registered as empty, give it another life.
+ if (page->empty_cache_index != -1) {
+ DCHECK(page->empty_cache_index >= 0);
+ DCHECK(static_cast<unsigned>(page->empty_cache_index) < kMaxFreeableSpans);
+ DCHECK(root->global_empty_page_ring[page->empty_cache_index] == page);
+ root->global_empty_page_ring[page->empty_cache_index] = 0;
+ }
+
+ int16_t currentIndex = root->global_empty_page_ring_index;
+ PartitionPage* pageToDecommit = root->global_empty_page_ring[currentIndex];
+ // The page might well have been re-activated, filled up, etc. before we get
+ // around to looking at it here.
+ if (pageToDecommit)
+ partitionDecommitPageIfPossible(root, pageToDecommit);
+
+ // We put the empty slot span on our global list of "pages that were once
+ // empty". thus providing it a bit of breathing room to get re-used before
+ // we really free it. This improves performance, particularly on Mac OS X
+ // which has subpar memory management performance.
+ root->global_empty_page_ring[currentIndex] = page;
+ page->empty_cache_index = currentIndex;
+ ++currentIndex;
+ if (currentIndex == kMaxFreeableSpans)
+ currentIndex = 0;
+ root->global_empty_page_ring_index = currentIndex;
+}
+
+static void partitionDecommitEmptyPages(PartitionRootBase* root) {
+ for (size_t i = 0; i < kMaxFreeableSpans; ++i) {
+ PartitionPage* page = root->global_empty_page_ring[i];
+ if (page)
+ partitionDecommitPageIfPossible(root, page);
+ root->global_empty_page_ring[i] = nullptr;
+ }
+}
+
+void PartitionFreeSlowPath(PartitionPage* page) {
+ PartitionBucket* bucket = page->bucket;
+ DCHECK(page != &PartitionRootGeneric::gSeedPage);
+ if (LIKELY(page->num_allocated_slots == 0)) {
+ // Page became fully unused.
+ if (UNLIKELY(PartitionBucketIsDirectMapped(bucket))) {
+ partitionDirectUnmap(page);
+ return;
+ }
+ // If it's the current active page, change it. We bounce the page to
+ // the empty list as a force towards defragmentation.
+ if (LIKELY(page == bucket->active_pages_head))
+ (void)partitionSetNewActivePage(bucket);
+ DCHECK(bucket->active_pages_head != page);
+
+ partitionPageSetRawSize(page, 0);
+ DCHECK(!PartitionPageGetRawSize(page));
+
+ partitionRegisterEmptyPage(page);
+ } else {
+ DCHECK(!PartitionBucketIsDirectMapped(bucket));
+ // Ensure that the page is full. That's the only valid case if we
+ // arrive here.
+ DCHECK(page->num_allocated_slots < 0);
+ // A transition of num_allocated_slots from 0 to -1 is not legal, and
+ // likely indicates a double-free.
+ CHECK(page->num_allocated_slots != -1);
+ page->num_allocated_slots = -page->num_allocated_slots - 2;
+ DCHECK(page->num_allocated_slots == PartitionBucketSlots(bucket) - 1);
+ // Fully used page became partially used. It must be put back on the
+ // non-full page list. Also make it the current page to increase the
+ // chances of it being filled up again. The old current page will be
+ // the next page.
+ DCHECK(!page->next_page);
+ if (LIKELY(bucket->active_pages_head != &PartitionRootGeneric::gSeedPage))
+ page->next_page = bucket->active_pages_head;
+ bucket->active_pages_head = page;
+ --bucket->num_full_pages;
+ // Special case: for a partition page with just a single slot, it may
+ // now be empty and we want to run it through the empty logic.
+ if (UNLIKELY(page->num_allocated_slots == 0))
+ PartitionFreeSlowPath(page);
+ }
+}
+
+bool partitionReallocDirectMappedInPlace(PartitionRootGeneric* root,
+ PartitionPage* page,
+ size_t raw_size) {
+ DCHECK(PartitionBucketIsDirectMapped(page->bucket));
+
+ raw_size = PartitionCookieSizeAdjustAdd(raw_size);
+
+ // Note that the new size might be a bucketed size; this function is called
+ // whenever we're reallocating a direct mapped allocation.
+ size_t new_size = PartitionDirectMapSize(raw_size);
+ if (new_size < kGenericMinDirectMappedDownsize)
+ return false;
+
+ // bucket->slot_size is the current size of the allocation.
+ size_t current_size = page->bucket->slot_size;
+ if (new_size == current_size)
+ return true;
+
+ char* char_ptr = static_cast<char*>(PartitionPageToPointer(page));
+
+ if (new_size < current_size) {
+ size_t map_size = partitionPageToDirectMapExtent(page)->map_size;
+
+    // Don't reallocate in-place if the new size is less than 80% of the full
+ // map size, to avoid holding on to too much unused address space.
+ if ((new_size / kSystemPageSize) * 5 < (map_size / kSystemPageSize) * 4)
+ return false;
+
+ // Shrink by decommitting unneeded pages and making them inaccessible.
+ size_t decommitSize = current_size - new_size;
+ partitionDecommitSystemPages(root, char_ptr + new_size, decommitSize);
+ SetSystemPagesInaccessible(char_ptr + new_size, decommitSize);
+ } else if (new_size <= partitionPageToDirectMapExtent(page)->map_size) {
+ // Grow within the actually allocated memory. Just need to make the
+ // pages accessible again.
+ size_t recommit_size = new_size - current_size;
+ bool ret = SetSystemPagesAccessible(char_ptr + current_size, recommit_size);
+ CHECK(ret);
+ partitionRecommitSystemPages(root, char_ptr + current_size, recommit_size);
+
+#if DCHECK_IS_ON()
+ memset(char_ptr + current_size, kUninitializedByte, recommit_size);
+#endif
+ } else {
+ // We can't perform the realloc in-place.
+ // TODO: support this too when possible.
+ return false;
+ }
+
+#if DCHECK_IS_ON()
+ // Write a new trailing cookie.
+ PartitionCookieWriteValue(char_ptr + raw_size - kCookieSize);
+#endif
+
+ partitionPageSetRawSize(page, raw_size);
+ DCHECK(PartitionPageGetRawSize(page) == raw_size);
+
+ page->bucket->slot_size = new_size;
+ return true;
+}
+
+void* PartitionReallocGeneric(PartitionRootGeneric* root,
+ void* ptr,
+ size_t new_size,
+ const char* type_name) {
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+ return realloc(ptr, new_size);
+#else
+ if (UNLIKELY(!ptr))
+ return PartitionAllocGeneric(root, new_size, type_name);
+ if (UNLIKELY(!new_size)) {
+ PartitionFreeGeneric(root, ptr);
+ return 0;
+ }
+
+ if (new_size > kGenericMaxDirectMapped)
+ partitionExcessiveAllocationSize();
+
+ DCHECK(PartitionPointerIsValid(PartitionCookieFreePointerAdjust(ptr)));
+
+ PartitionPage* page =
+ PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr));
+
+ if (UNLIKELY(PartitionBucketIsDirectMapped(page->bucket))) {
+ // We may be able to perform the realloc in place by changing the
+ // accessibility of memory pages and, if reducing the size, decommitting
+ // them.
+ if (partitionReallocDirectMappedInPlace(root, page, new_size)) {
+ PartitionAllocHooks::ReallocHookIfEnabled(ptr, ptr, new_size, type_name);
+ return ptr;
+ }
+ }
+
+ size_t actualNewSize = PartitionAllocActualSize(root, new_size);
+ size_t actualOldSize = PartitionAllocGetSize(ptr);
+
+ // TODO: note that tcmalloc will "ignore" a downsizing realloc() unless the
+ // new size is a significant percentage smaller. We could do the same if we
+ // determine it is a win.
+ if (actualNewSize == actualOldSize) {
+ // Trying to allocate a block of size new_size would give us a block of
+ // the same size as the one we've already got, so no point in doing
+ // anything here.
+ return ptr;
+ }
+
+ // This realloc cannot be resized in-place. Sadness.
+ void* ret = PartitionAllocGeneric(root, new_size, type_name);
+ size_t copy_size = actualOldSize;
+ if (new_size < copy_size)
+ copy_size = new_size;
+
+ memcpy(ret, ptr, copy_size);
+ PartitionFreeGeneric(root, ptr);
+ return ret;
+#endif
+}
+
+static size_t PartitionPurgePage(PartitionPage* page, bool discard) {
+ const PartitionBucket* bucket = page->bucket;
+ size_t slot_size = bucket->slot_size;
+ if (slot_size < kSystemPageSize || !page->num_allocated_slots)
+ return 0;
+
+ size_t bucket_num_slots = PartitionBucketSlots(bucket);
+ size_t discardable_bytes = 0;
+
+ size_t raw_size = PartitionPageGetRawSize(const_cast<PartitionPage*>(page));
+ if (raw_size) {
+ uint32_t usedBytes = static_cast<uint32_t>(RoundUpToSystemPage(raw_size));
+ discardable_bytes = bucket->slot_size - usedBytes;
+ if (discardable_bytes && discard) {
+ char* ptr = reinterpret_cast<char*>(PartitionPageToPointer(page));
+ ptr += usedBytes;
+ DiscardSystemPages(ptr, discardable_bytes);
+ }
+ return discardable_bytes;
+ }
+
+ const size_t maxSlotCount =
+ (kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) / kSystemPageSize;
+ DCHECK(bucket_num_slots <= maxSlotCount);
+ DCHECK(page->num_unprovisioned_slots < bucket_num_slots);
+ size_t num_slots = bucket_num_slots - page->num_unprovisioned_slots;
+ char slotUsage[maxSlotCount];
+ size_t lastSlot = static_cast<size_t>(-1);
+ memset(slotUsage, 1, num_slots);
+ char* ptr = reinterpret_cast<char*>(PartitionPageToPointer(page));
+ PartitionFreelistEntry* entry = page->freelist_head;
+ // First, walk the freelist for this page and make a bitmap of which slots
+ // are not in use.
+ while (entry) {
+ size_t slotIndex = (reinterpret_cast<char*>(entry) - ptr) / slot_size;
+ DCHECK(slotIndex < num_slots);
+ slotUsage[slotIndex] = 0;
+ entry = PartitionFreelistMask(entry->next);
+ // If we have a slot where the masked freelist entry is 0, we can
+ // actually discard that freelist entry because touching a discarded
+ // page is guaranteed to return original content or 0.
+ // (Note that this optimization won't fire on big endian machines
+ // because the masking function is negation.)
+ if (!PartitionFreelistMask(entry))
+ lastSlot = slotIndex;
+ }
+
+  // If the slot(s) at the end of the slot span are not in use, we can
+ // truncate them entirely and rewrite the freelist.
+ size_t truncatedSlots = 0;
+ while (!slotUsage[num_slots - 1]) {
+ truncatedSlots++;
+ num_slots--;
+ DCHECK(num_slots);
+ }
+ // First, do the work of calculating the discardable bytes. Don't actually
+ // discard anything unless the discard flag was passed in.
+ char* beginPtr = nullptr;
+ char* endPtr = nullptr;
+ size_t unprovisionedBytes = 0;
+ if (truncatedSlots) {
+ beginPtr = ptr + (num_slots * slot_size);
+ endPtr = beginPtr + (slot_size * truncatedSlots);
+ beginPtr = reinterpret_cast<char*>(
+ RoundUpToSystemPage(reinterpret_cast<size_t>(beginPtr)));
+ // We round the end pointer here up and not down because we're at the
+    // end of a slot span, so we "own" all the way up to the page boundary.
+ endPtr = reinterpret_cast<char*>(
+ RoundUpToSystemPage(reinterpret_cast<size_t>(endPtr)));
+ DCHECK(endPtr <= ptr + PartitionBucketBytes(bucket));
+ if (beginPtr < endPtr) {
+ unprovisionedBytes = endPtr - beginPtr;
+ discardable_bytes += unprovisionedBytes;
+ }
+ }
+ if (unprovisionedBytes && discard) {
+ DCHECK(truncatedSlots > 0);
+ size_t numNewEntries = 0;
+ page->num_unprovisioned_slots += static_cast<uint16_t>(truncatedSlots);
+ // Rewrite the freelist.
+ PartitionFreelistEntry** entryPtr = &page->freelist_head;
+ for (size_t slotIndex = 0; slotIndex < num_slots; ++slotIndex) {
+ if (slotUsage[slotIndex])
+ continue;
+ PartitionFreelistEntry* entry = reinterpret_cast<PartitionFreelistEntry*>(
+ ptr + (slot_size * slotIndex));
+ *entryPtr = PartitionFreelistMask(entry);
+ entryPtr = reinterpret_cast<PartitionFreelistEntry**>(entry);
+ numNewEntries++;
+ }
+ // Terminate the freelist chain.
+ *entryPtr = nullptr;
+ // The freelist head is stored unmasked.
+ page->freelist_head = PartitionFreelistMask(page->freelist_head);
+ DCHECK(numNewEntries == num_slots - page->num_allocated_slots);
+ // Discard the memory.
+ DiscardSystemPages(beginPtr, unprovisionedBytes);
+ }
+
+ // Next, walk the slots and for any not in use, consider where the system
+ // page boundaries occur. We can release any system pages back to the
+ // system as long as we don't interfere with a freelist pointer or an
+ // adjacent slot.
+ for (size_t i = 0; i < num_slots; ++i) {
+ if (slotUsage[i])
+ continue;
+ // The first address we can safely discard is just after the freelist
+    // pointer. There's one quirk: if the freelist pointer is actually
+    // null, we can discard that pointer value too.
+ char* beginPtr = ptr + (i * slot_size);
+ char* endPtr = beginPtr + slot_size;
+ if (i != lastSlot)
+ beginPtr += sizeof(PartitionFreelistEntry);
+ beginPtr = reinterpret_cast<char*>(
+ RoundUpToSystemPage(reinterpret_cast<size_t>(beginPtr)));
+ endPtr = reinterpret_cast<char*>(
+ RoundDownToSystemPage(reinterpret_cast<size_t>(endPtr)));
+ if (beginPtr < endPtr) {
+ size_t partialSlotBytes = endPtr - beginPtr;
+ discardable_bytes += partialSlotBytes;
+ if (discard)
+ DiscardSystemPages(beginPtr, partialSlotBytes);
+ }
+ }
+ return discardable_bytes;
+}
+
+static void partitionPurgeBucket(PartitionBucket* bucket) {
+ if (bucket->active_pages_head != &PartitionRootGeneric::gSeedPage) {
+ for (PartitionPage* page = bucket->active_pages_head; page;
+ page = page->next_page) {
+ DCHECK(page != &PartitionRootGeneric::gSeedPage);
+ (void)PartitionPurgePage(page, true);
+ }
+ }
+}
+
+void PartitionPurgeMemory(PartitionRoot* root, int flags) {
+ if (flags & PartitionPurgeDecommitEmptyPages)
+ partitionDecommitEmptyPages(root);
+ // We don't currently do anything for PartitionPurgeDiscardUnusedSystemPages
+ // here because that flag is only useful for allocations >= system page
+ // size. We only have allocations that large inside generic partitions
+ // at the moment.
+}
+
+void PartitionPurgeMemoryGeneric(PartitionRootGeneric* root, int flags) {
+ subtle::SpinLock::Guard guard(root->lock);
+ if (flags & PartitionPurgeDecommitEmptyPages)
+ partitionDecommitEmptyPages(root);
+ if (flags & PartitionPurgeDiscardUnusedSystemPages) {
+ for (size_t i = 0; i < kGenericNumBuckets; ++i) {
+ PartitionBucket* bucket = &root->buckets[i];
+ if (bucket->slot_size >= kSystemPageSize)
+ partitionPurgeBucket(bucket);
+ }
+ }
+}
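+
+// Illustrative usage sketch: a caller that wants both purge behaviors combines
+// the flags. |my_root| is a placeholder for an already-initialized
+// PartitionRootGeneric (e.g. obtained from PartitionAllocatorGeneric::root()).
+//
+//   PartitionPurgeMemoryGeneric(my_root,
+//                               PartitionPurgeDecommitEmptyPages |
+//                                   PartitionPurgeDiscardUnusedSystemPages);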
+
+static void PartitionDumpPageStats(PartitionBucketMemoryStats* stats_out,
+ const PartitionPage* page) {
+ uint16_t bucket_num_slots = PartitionBucketSlots(page->bucket);
+
+ if (PartitionPageStateIsDecommitted(page)) {
+ ++stats_out->num_decommitted_pages;
+ return;
+ }
+
+ stats_out->discardable_bytes +=
+ PartitionPurgePage(const_cast<PartitionPage*>(page), false);
+
+ size_t raw_size = PartitionPageGetRawSize(const_cast<PartitionPage*>(page));
+ if (raw_size)
+ stats_out->active_bytes += static_cast<uint32_t>(raw_size);
+ else
+ stats_out->active_bytes +=
+ (page->num_allocated_slots * stats_out->bucket_slot_size);
+
+ size_t page_bytes_resident =
+ RoundUpToSystemPage((bucket_num_slots - page->num_unprovisioned_slots) *
+ stats_out->bucket_slot_size);
+ stats_out->resident_bytes += page_bytes_resident;
+ if (PartitionPageStateIsEmpty(page)) {
+ stats_out->decommittable_bytes += page_bytes_resident;
+ ++stats_out->num_empty_pages;
+ } else if (PartitionPageStateIsFull(page)) {
+ ++stats_out->num_full_pages;
+ } else {
+ DCHECK(PartitionPageStateIsActive(page));
+ ++stats_out->num_active_pages;
+ }
+}
+
+static void PartitionDumpBucketStats(PartitionBucketMemoryStats* stats_out,
+ const PartitionBucket* bucket) {
+ DCHECK(!PartitionBucketIsDirectMapped(bucket));
+ stats_out->is_valid = false;
+ // If the active page list is empty (== &PartitionRootGeneric::gSeedPage),
+ // the bucket might still need to be reported if it has a list of empty,
+ // decommitted or full pages.
+ if (bucket->active_pages_head == &PartitionRootGeneric::gSeedPage &&
+ !bucket->empty_pages_head && !bucket->decommitted_pages_head &&
+ !bucket->num_full_pages)
+ return;
+
+ memset(stats_out, '\0', sizeof(*stats_out));
+ stats_out->is_valid = true;
+ stats_out->is_direct_map = false;
+ stats_out->num_full_pages = static_cast<size_t>(bucket->num_full_pages);
+ stats_out->bucket_slot_size = bucket->slot_size;
+ uint16_t bucket_num_slots = PartitionBucketSlots(bucket);
+ size_t bucketUsefulStorage = stats_out->bucket_slot_size * bucket_num_slots;
+ stats_out->allocated_page_size = PartitionBucketBytes(bucket);
+ stats_out->active_bytes = bucket->num_full_pages * bucketUsefulStorage;
+ stats_out->resident_bytes =
+ bucket->num_full_pages * stats_out->allocated_page_size;
+
+ for (const PartitionPage* page = bucket->empty_pages_head; page;
+ page = page->next_page) {
+ DCHECK(PartitionPageStateIsEmpty(page) ||
+ PartitionPageStateIsDecommitted(page));
+ PartitionDumpPageStats(stats_out, page);
+ }
+ for (const PartitionPage* page = bucket->decommitted_pages_head; page;
+ page = page->next_page) {
+ DCHECK(PartitionPageStateIsDecommitted(page));
+ PartitionDumpPageStats(stats_out, page);
+ }
+
+ if (bucket->active_pages_head != &PartitionRootGeneric::gSeedPage) {
+ for (const PartitionPage* page = bucket->active_pages_head; page;
+ page = page->next_page) {
+ DCHECK(page != &PartitionRootGeneric::gSeedPage);
+ PartitionDumpPageStats(stats_out, page);
+ }
+ }
+}
+
+void PartitionDumpStatsGeneric(PartitionRootGeneric* partition,
+ const char* partition_name,
+ bool is_light_dump,
+ PartitionStatsDumper* dumper) {
+ PartitionBucketMemoryStats bucket_stats[kGenericNumBuckets];
+ static const size_t kMaxReportableDirectMaps = 4096;
+ uint32_t direct_map_lengths[kMaxReportableDirectMaps];
+ size_t num_direct_mapped_allocations = 0;
+
+ {
+ subtle::SpinLock::Guard guard(partition->lock);
+
+ for (size_t i = 0; i < kGenericNumBuckets; ++i) {
+ const PartitionBucket* bucket = &partition->buckets[i];
+ // Don't report the pseudo buckets that the generic allocator sets up in
+ // order to preserve a fast size->bucket map (see
+ // PartitionAllocGenericInit for details).
+ if (!bucket->active_pages_head)
+ bucket_stats[i].is_valid = false;
+ else
+ PartitionDumpBucketStats(&bucket_stats[i], bucket);
+ }
+
+ for (PartitionDirectMapExtent* extent = partition->direct_map_list; extent;
+ extent = extent->next_extent) {
+ DCHECK(!extent->next_extent ||
+ extent->next_extent->prev_extent == extent);
+ direct_map_lengths[num_direct_mapped_allocations] =
+ extent->bucket->slot_size;
+ ++num_direct_mapped_allocations;
+ if (num_direct_mapped_allocations == kMaxReportableDirectMaps)
+ break;
+ }
+ }
+
+  // Call |PartitionsDumpBucketStats| after collecting stats (and after
+  // releasing the lock) because it may try to allocate using
+  // |PartitionAllocGeneric|, which cannot run while the partition lock is held.
+ PartitionMemoryStats stats = {0};
+ stats.total_mmapped_bytes = partition->total_size_of_super_pages +
+ partition->total_size_of_direct_mapped_pages;
+ stats.total_committed_bytes = partition->total_size_of_committed_pages;
+ for (size_t i = 0; i < kGenericNumBuckets; ++i) {
+ if (bucket_stats[i].is_valid) {
+ stats.total_resident_bytes += bucket_stats[i].resident_bytes;
+ stats.total_active_bytes += bucket_stats[i].active_bytes;
+ stats.total_decommittable_bytes += bucket_stats[i].decommittable_bytes;
+ stats.total_discardable_bytes += bucket_stats[i].discardable_bytes;
+ if (!is_light_dump)
+ dumper->PartitionsDumpBucketStats(partition_name, &bucket_stats[i]);
+ }
+ }
+
+ size_t direct_mapped_allocations_total_size = 0;
+ for (size_t i = 0; i < num_direct_mapped_allocations; ++i) {
+ uint32_t size = direct_map_lengths[i];
+ direct_mapped_allocations_total_size += size;
+ if (is_light_dump)
+ continue;
+
+ PartitionBucketMemoryStats stats;
+ memset(&stats, '\0', sizeof(stats));
+ stats.is_valid = true;
+ stats.is_direct_map = true;
+ stats.num_full_pages = 1;
+ stats.allocated_page_size = size;
+ stats.bucket_slot_size = size;
+ stats.active_bytes = size;
+ stats.resident_bytes = size;
+ dumper->PartitionsDumpBucketStats(partition_name, &stats);
+ }
+ stats.total_resident_bytes += direct_mapped_allocations_total_size;
+ stats.total_active_bytes += direct_mapped_allocations_total_size;
+ dumper->PartitionDumpTotals(partition_name, &stats);
+}
+
+void PartitionDumpStats(PartitionRoot* partition,
+ const char* partition_name,
+ bool is_light_dump,
+ PartitionStatsDumper* dumper) {
+ static const size_t kMaxReportableBuckets = 4096 / sizeof(void*);
+ PartitionBucketMemoryStats memory_stats[kMaxReportableBuckets];
+ const size_t partitionNumBuckets = partition->num_buckets;
+ DCHECK(partitionNumBuckets <= kMaxReportableBuckets);
+
+ for (size_t i = 0; i < partitionNumBuckets; ++i)
+ PartitionDumpBucketStats(&memory_stats[i], &partition->buckets()[i]);
+
+ // PartitionsDumpBucketStats is called after collecting stats because it
+ // can use PartitionAlloc to allocate and this can affect the statistics.
+ PartitionMemoryStats stats = {0};
+ stats.total_mmapped_bytes = partition->total_size_of_super_pages;
+ stats.total_committed_bytes = partition->total_size_of_committed_pages;
+ DCHECK(!partition->total_size_of_direct_mapped_pages);
+ for (size_t i = 0; i < partitionNumBuckets; ++i) {
+ if (memory_stats[i].is_valid) {
+ stats.total_resident_bytes += memory_stats[i].resident_bytes;
+ stats.total_active_bytes += memory_stats[i].active_bytes;
+ stats.total_decommittable_bytes += memory_stats[i].decommittable_bytes;
+ stats.total_discardable_bytes += memory_stats[i].discardable_bytes;
+ if (!is_light_dump)
+ dumper->PartitionsDumpBucketStats(partition_name, &memory_stats[i]);
+ }
+ }
+ dumper->PartitionDumpTotals(partition_name, &stats);
+}
+
+} // namespace base
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc.h b/chromium/base/allocator/partition_allocator/partition_alloc.h
new file mode 100644
index 00000000000..15a501ae6f6
--- /dev/null
+++ b/chromium/base/allocator/partition_allocator/partition_alloc.h
@@ -0,0 +1,905 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H
+
+// DESCRIPTION
+// partitionAlloc() / PartitionAllocGeneric() and PartitionFree() /
+// PartitionFreeGeneric() are approximately analogous to malloc() and free().
+//
+// The main difference is that a PartitionRoot / PartitionRootGeneric object
+// must be supplied to these functions, representing a specific "heap partition"
+// that will be used to satisfy the allocation. Different partitions are
+// guaranteed to exist in separate address spaces, including being separate from
+// the main system heap. If the contained objects are all freed, physical memory
+// is returned to the system but the address space remains reserved.
+// See PartitionAlloc.md for other security properties PartitionAlloc provides.
+//
+// THE ONLY LEGITIMATE WAY TO OBTAIN A PartitionRoot IS THROUGH THE
+// SizeSpecificPartitionAllocator / PartitionAllocatorGeneric classes. To
+// minimize the instruction count to the fullest extent possible, the
+// PartitionRoot is really just a header adjacent to other data areas provided
+// by the allocator class.
+//
+// The partitionAlloc() variant of the API has the following caveats:
+// - Allocations and frees against a single partition must be single threaded.
+// - Allocations must not exceed a max size, chosen at compile-time via a
+//   templated parameter to SizeSpecificPartitionAllocator.
+// - Allocation sizes must be aligned to the system pointer size.
+// - Allocations are bucketed exactly according to size.
+//
+// And for PartitionAllocGeneric():
+// - Multi-threaded use against a single partition is ok; locking is handled.
+// - Allocations of any arbitrary size can be handled (subject to a limit of
+// INT_MAX bytes for security reasons).
+// - Bucketing is by approximate size; for example, an allocation of 4000 bytes
+//   might be placed into a 4096-byte bucket. Bucket sizes are chosen to try to
+//   keep worst-case waste to ~10%.
+//
+// The allocators are designed to be extremely fast, thanks to the following
+// properties and design:
+// - Just two (reasonably predictable) branches in the hot / fast path
+// for both allocating and (significantly) freeing.
+// - A minimal number of operations in the hot / fast path, with the slow paths
+// in separate functions, leading to the possibility of inlining.
+// - Each partition page (which is usually multiple physical pages) has a
+// metadata structure which allows fast mapping of free() address to an
+// underlying bucket.
+// - Supports a lock-free API for fast performance in single-threaded cases.
+// - The freelist for a given bucket is split across a number of partition
+// pages, enabling various simple tricks to try and minimize fragmentation.
+// - Fine-grained bucket sizes leading to less waste and better packing.
+//
+// The following security properties could be investigated in the future:
+// - Per-object bucketing (instead of per-size) is mostly available at the API,
+// but not used yet.
+// - No randomness of freelist entries or bucket position.
+// - Better checking for wild pointers in free().
+// - Better freelist masking function to guarantee fault on 32-bit.
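+//
+// Example (sketch): typical use of the generic API, mirroring the allocator
+// classes declared at the bottom of this header. The variable name and the
+// 64-byte request below are illustrative only.
+//
+//   PartitionAllocatorGeneric my_allocator;
+//   my_allocator.init();
+//   void* p = PartitionAllocGeneric(my_allocator.root(), 64, "MyType");
+//   PartitionFreeGeneric(my_allocator.root(), p);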
+
+#include <limits.h>
+
+#include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/bits.h"
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/synchronization/spin_lock.h"
+#include "base/sys_byteorder.h"
+#include "build/build_config.h"
+
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+#include <stdlib.h>
+#endif
+
+namespace base {
+
+// Allocation granularity of sizeof(void*) bytes.
+static const size_t kAllocationGranularity = sizeof(void*);
+static const size_t kAllocationGranularityMask = kAllocationGranularity - 1;
+static const size_t kBucketShift = (kAllocationGranularity == 8) ? 3 : 2;
+
+// Underlying partition storage pages are a power-of-two size. It is typical
+// for a partition page to be based on multiple system pages. Most references to
+// "page" refer to partition pages.
+// We also have the concept of "super pages" -- these are the underlying system
+// allocations we make. Super pages contain multiple partition pages inside them
+// and include space for a small amount of metadata per partition page.
+// Inside super pages, we store "slot spans". A slot span is a contiguous range
+// of one or more partition pages that stores allocations of the same size.
+// Slot span sizes are adjusted depending on the allocation size, to make sure
+// the packing does not lead to unused (wasted) space at the end of the last
+// system page of the span. For our current max slot span size of 64k and other
+// constant values, we pack _all_ PartitionAllocGeneric() sizes perfectly up
+// against the end of a system page.
+static const size_t kPartitionPageShift = 14; // 16KB
+static const size_t kPartitionPageSize = 1 << kPartitionPageShift;
+static const size_t kPartitionPageOffsetMask = kPartitionPageSize - 1;
+static const size_t kPartitionPageBaseMask = ~kPartitionPageOffsetMask;
+static const size_t kMaxPartitionPagesPerSlotSpan = 4;
+
+// To avoid fragmentation via never-used freelist entries, we hand out partition
+// freelist sections gradually, in units of the dominant system page size.
+// What we're actually doing is avoiding filling the full partition page (16 KB)
+// with freelist pointers right away. Writing freelist pointers will fault and
+// dirty a private page, which is very wasteful if we never actually store
+// objects there.
+static const size_t kNumSystemPagesPerPartitionPage =
+ kPartitionPageSize / kSystemPageSize;
+static const size_t kMaxSystemPagesPerSlotSpan =
+ kNumSystemPagesPerPartitionPage * kMaxPartitionPagesPerSlotSpan;
+
+// We reserve virtual address space in 2MB chunks (aligned to 2MB as well).
+// These chunks are called "super pages". We do this so that we can store
+// metadata in the first few pages of each 2MB aligned section. This leads to
+// a very fast free(). We specifically choose 2MB because this virtual address
+// block represents a full but single PTE allocation on ARM, ia32 and x64.
+//
+// The layout of the super page is as follows. The sizes below are the same
+// for 32 bit and 64 bit.
+//
+// | Guard page (4KB) |
+// | Metadata page (4KB) |
+// | Guard pages (8KB) |
+// | Slot span |
+// | Slot span |
+// | ... |
+// | Slot span |
+// | Guard page (4KB) |
+//
+// - Each slot span is a contiguous range of one or more PartitionPages.
+// - The metadata page has the following format. Note that the PartitionPage
+// that is not at the head of a slot span is "unused". In other words,
+// the metadata for the slot span is stored only in the first PartitionPage
+// of the slot span. Metadata accesses to other PartitionPages are
+// redirected to the first PartitionPage.
+//
+// | SuperPageExtentEntry (32B) |
+// | PartitionPage of slot span 1 (32B, used) |
+// | PartitionPage of slot span 1 (32B, unused) |
+// | PartitionPage of slot span 1 (32B, unused) |
+// | PartitionPage of slot span 2 (32B, used) |
+// | PartitionPage of slot span 3 (32B, used) |
+// | ... |
+// | PartitionPage of slot span N (32B, unused) |
+//
+// A direct mapped page has a similar layout to fake it looking like a super
+// page:
+//
+// | Guard page (4KB) |
+// | Metadata page (4KB) |
+// | Guard pages (8KB) |
+// | Direct mapped object |
+// | Guard page (4KB) |
+//
+// - The metadata page has the following layout:
+//
+// | SuperPageExtentEntry (32B) |
+// | PartitionPage (32B) |
+// | PartitionBucket (32B) |
+// | PartitionDirectMapExtent (8B) |
+static const size_t kSuperPageShift = 21; // 2MB
+static const size_t kSuperPageSize = 1 << kSuperPageShift;
+static const size_t kSuperPageOffsetMask = kSuperPageSize - 1;
+static const size_t kSuperPageBaseMask = ~kSuperPageOffsetMask;
+static const size_t kNumPartitionPagesPerSuperPage =
+ kSuperPageSize / kPartitionPageSize;
+
+static const size_t kPageMetadataShift = 5; // 32 bytes per partition page.
+static const size_t kPageMetadataSize = 1 << kPageMetadataShift;
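+
+// As a worked example (assuming the usual 4KB system page implied by the
+// layout diagram above): kNumSystemPagesPerPartitionPage = 16KB / 4KB = 4,
+// kNumPartitionPagesPerSuperPage = 2MB / 16KB = 128, and the per-super-page
+// metadata is 128 * 32B = 4KB, i.e. it fits exactly in the single metadata
+// system page shown in the diagram.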
+
+// The following kGeneric* constants apply to the generic variants of the API.
+// The "order" of an allocation is closely related to the power-of-two size of
+// the allocation. More precisely, the order is the bit index of the
+// most-significant-bit in the allocation size, where the bit numbering starts
+// at index 1 for the least-significant-bit.
+// In terms of allocation sizes, order 0 covers 0, order 1 covers 1, order 2
+// covers 2->3, order 3 covers 4->7, order 4 covers 8->15.
+static const size_t kGenericMinBucketedOrder = 4; // 8 bytes.
+static const size_t kGenericMaxBucketedOrder =
+ 20; // Largest bucketed order is 1<<(20-1) (storing 512KB -> almost 1MB)
+static const size_t kGenericNumBucketedOrders =
+ (kGenericMaxBucketedOrder - kGenericMinBucketedOrder) + 1;
+// Eight buckets per order (for the higher orders), e.g. order 8 is 128, 144,
+// 160, ..., 240:
+static const size_t kGenericNumBucketsPerOrderBits = 3;
+static const size_t kGenericNumBucketsPerOrder =
+ 1 << kGenericNumBucketsPerOrderBits;
+static const size_t kGenericNumBuckets =
+ kGenericNumBucketedOrders * kGenericNumBucketsPerOrder;
+static const size_t kGenericSmallestBucket = 1
+ << (kGenericMinBucketedOrder - 1);
+static const size_t kGenericMaxBucketSpacing =
+ 1 << ((kGenericMaxBucketedOrder - 1) - kGenericNumBucketsPerOrderBits);
+static const size_t kGenericMaxBucketed =
+ (1 << (kGenericMaxBucketedOrder - 1)) +
+ ((kGenericNumBucketsPerOrder - 1) * kGenericMaxBucketSpacing);
+static const size_t kGenericMinDirectMappedDownsize =
+ kGenericMaxBucketed +
+ 1; // Limit when downsizing a direct mapping using realloc().
+static const size_t kGenericMaxDirectMapped = INT_MAX - kSystemPageSize;
+static const size_t kBitsPerSizeT = sizeof(void*) * CHAR_BIT;
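+
+// Plugging in the constants above as a sanity check: kGenericNumBucketedOrders
+// = (20 - 4) + 1 = 17, kGenericNumBuckets = 17 * 8 = 136,
+// kGenericSmallestBucket = 1 << 3 = 8 bytes, kGenericMaxBucketSpacing =
+// 1 << (19 - 3) = 64KB, and kGenericMaxBucketed = 512KB + 7 * 64KB = 960KB --
+// the "almost 1MB" noted above. Requests larger than that are served by direct
+// mapping.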
+
+// Constants for the memory reclaim logic.
+static const size_t kMaxFreeableSpans = 16;
+
+// If the total size in bytes of allocated but not committed pages exceeds this
+// value (which probably indicates an "out of virtual address space" crash),
+// a special crash stack trace is generated at |partitionOutOfMemory|.
+// This is to distinguish "out of virtual address space" from
+// "out of physical memory" in crash reports.
+static const size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024; // 1GiB
+
+#if DCHECK_IS_ON()
+// These two byte values match tcmalloc.
+static const unsigned char kUninitializedByte = 0xAB;
+static const unsigned char kFreedByte = 0xCD;
+static const size_t kCookieSize =
+ 16; // Handles alignment up to XMM instructions on Intel.
+static const unsigned char kCookieValue[kCookieSize] = {
+ 0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xD0, 0x0D,
+ 0x13, 0x37, 0xF0, 0x05, 0xBA, 0x11, 0xAB, 0x1E};
+#endif
+
+struct PartitionBucket;
+struct PartitionRootBase;
+
+struct PartitionFreelistEntry {
+ PartitionFreelistEntry* next;
+};
+
+// Some notes on page states. A page can be in one of four major states:
+// 1) Active.
+// 2) Full.
+// 3) Empty.
+// 4) Decommitted.
+// An active page has available free slots. A full page has no free slots. An
+// empty page has no free slots, and a decommitted page is an empty page that
+// had its backing memory released back to the system.
+// There are two linked lists tracking the pages. The "active page" list is an
+// approximation of a list of active pages. It is an approximation because
+// full, empty and decommitted pages may briefly be present in the list until
+// we next do a scan over it.
+// The "empty page" list is an accurate list of pages which are either empty
+// or decommitted.
+//
+// The significant page transitions are:
+// - free() will detect when a full page has a slot free()'d and immediately
+// return the page to the head of the active list.
+// - free() will detect when a page is fully emptied. It _may_ add it to the
+// empty list or it _may_ leave it on the active list until a future list scan.
+// - malloc() _may_ scan the active page list in order to fulfil the request.
+// If it does this, full, empty and decommitted pages encountered will be
+// booted out of the active list. If there are no suitable active pages found,
+// an empty or decommitted page (if one exists) will be pulled from the empty
+// list on to the active list.
+struct PartitionPage {
+ PartitionFreelistEntry* freelist_head;
+ PartitionPage* next_page;
+ PartitionBucket* bucket;
+ // Deliberately signed, 0 for empty or decommitted page, -n for full pages:
+ int16_t num_allocated_slots;
+ uint16_t num_unprovisioned_slots;
+ uint16_t page_offset;
+ int16_t empty_cache_index; // -1 if not in the empty cache.
+};
+
+struct PartitionBucket {
+ PartitionPage* active_pages_head; // Accessed most in hot path => goes first.
+ PartitionPage* empty_pages_head;
+ PartitionPage* decommitted_pages_head;
+ uint32_t slot_size;
+ unsigned num_system_pages_per_slot_span : 8;
+ unsigned num_full_pages : 24;
+};
+
+// An "extent" is a span of consecutive superpages. We link to the partition's
+// next extent (if there is one) at the very start of a superpage's metadata
+// area.
+struct PartitionSuperPageExtentEntry {
+ PartitionRootBase* root;
+ char* super_page_base;
+ char* super_pages_end;
+ PartitionSuperPageExtentEntry* next;
+};
+
+struct PartitionDirectMapExtent {
+ PartitionDirectMapExtent* next_extent;
+ PartitionDirectMapExtent* prev_extent;
+ PartitionBucket* bucket;
+ size_t map_size; // Mapped size, not including guard pages and meta-data.
+};
+
+struct BASE_EXPORT PartitionRootBase {
+ size_t total_size_of_committed_pages;
+ size_t total_size_of_super_pages;
+ size_t total_size_of_direct_mapped_pages;
+ // Invariant: total_size_of_committed_pages <=
+ // total_size_of_super_pages +
+ // total_size_of_direct_mapped_pages.
+ unsigned num_buckets;
+ unsigned max_allocation;
+ bool initialized;
+ char* next_super_page;
+ char* next_partition_page;
+ char* next_partition_page_end;
+ PartitionSuperPageExtentEntry* current_extent;
+ PartitionSuperPageExtentEntry* first_extent;
+ PartitionDirectMapExtent* direct_map_list;
+ PartitionPage* global_empty_page_ring[kMaxFreeableSpans];
+ int16_t global_empty_page_ring_index;
+ uintptr_t inverted_self;
+
+ static subtle::SpinLock gInitializedLock;
+ static bool gInitialized;
+  // gSeedPage is used as a sentinel to indicate that there is no page
+  // in the active page list. We could use nullptr, but then we would need
+  // to add a null-check branch to the hot allocation path, which we want
+  // to avoid.
+ static PartitionPage gSeedPage;
+ static PartitionBucket gPagedBucket;
+  // gOomHandlingFunction is invoked when PartitionAlloc hits OutOfMemory.
+ static void (*gOomHandlingFunction)();
+};
+
+// Never instantiate a PartitionRoot directly, instead use PartitionAlloc.
+struct PartitionRoot : public PartitionRootBase {
+ // The PartitionAlloc templated class ensures the following is correct.
+ ALWAYS_INLINE PartitionBucket* buckets() {
+ return reinterpret_cast<PartitionBucket*>(this + 1);
+ }
+ ALWAYS_INLINE const PartitionBucket* buckets() const {
+ return reinterpret_cast<const PartitionBucket*>(this + 1);
+ }
+};
+
+// Never instantiate a PartitionRootGeneric directly, instead use
+// PartitionAllocatorGeneric.
+struct PartitionRootGeneric : public PartitionRootBase {
+ subtle::SpinLock lock;
+ // Some pre-computed constants.
+ size_t order_index_shifts[kBitsPerSizeT + 1];
+ size_t order_sub_index_masks[kBitsPerSizeT + 1];
+ // The bucket lookup table lets us map a size_t to a bucket quickly.
+ // The trailing +1 caters for the overflow case for very large allocation
+ // sizes. It is one flat array instead of a 2D array because in the 2D
+ // world, we'd need to index array[blah][max+1] which risks undefined
+ // behavior.
+ PartitionBucket*
+ bucket_lookups[((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder) + 1];
+ PartitionBucket buckets[kGenericNumBuckets];
+};
+
+// Flags for PartitionAllocGenericFlags.
+enum PartitionAllocFlags {
+ PartitionAllocReturnNull = 1 << 0,
+};
+
+// Struct used to retrieve total memory usage of a partition. Used by
+// PartitionStatsDumper implementation.
+struct PartitionMemoryStats {
+  size_t total_mmapped_bytes;    // Total bytes mmapped from the system.
+  size_t total_committed_bytes;  // Total size of committed pages.
+ size_t total_resident_bytes; // Total bytes provisioned by the partition.
+ size_t total_active_bytes; // Total active bytes in the partition.
+ size_t total_decommittable_bytes; // Total bytes that could be decommitted.
+ size_t total_discardable_bytes; // Total bytes that could be discarded.
+};
+
+// Struct used to retrieve memory statistics about a partition bucket. Used by
+// PartitionStatsDumper implementation.
+struct PartitionBucketMemoryStats {
+  bool is_valid;  // Used to check if the stats are valid.
+ bool is_direct_map; // True if this is a direct mapping; size will not be
+ // unique.
+ uint32_t bucket_slot_size; // The size of the slot in bytes.
+ uint32_t allocated_page_size; // Total size the partition page allocated from
+ // the system.
+ uint32_t active_bytes; // Total active bytes used in the bucket.
+ uint32_t resident_bytes; // Total bytes provisioned in the bucket.
+ uint32_t decommittable_bytes; // Total bytes that could be decommitted.
+ uint32_t discardable_bytes; // Total bytes that could be discarded.
+ uint32_t num_full_pages; // Number of pages with all slots allocated.
+ uint32_t num_active_pages; // Number of pages that have at least one
+ // provisioned slot.
+ uint32_t num_empty_pages; // Number of pages that are empty
+ // but not decommitted.
+ uint32_t num_decommitted_pages; // Number of pages that are empty
+ // and decommitted.
+};
+
+// Interface that is passed to PartitionDumpStats and
+// PartitionDumpStatsGeneric for using the memory statistics.
+class BASE_EXPORT PartitionStatsDumper {
+ public:
+ // Called to dump total memory used by partition, once per partition.
+ virtual void PartitionDumpTotals(const char* partition_name,
+ const PartitionMemoryStats*) = 0;
+
+ // Called to dump stats about buckets, for each bucket.
+ virtual void PartitionsDumpBucketStats(const char* partition_name,
+ const PartitionBucketMemoryStats*) = 0;
+};
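+
+// A minimal sketch of an implementation (names are illustrative, not part of
+// the API): a dumper that only records the resident total could look like
+//
+//   class SimpleDumper : public PartitionStatsDumper {
+//    public:
+//     void PartitionDumpTotals(const char*,
+//                              const PartitionMemoryStats* stats) override {
+//       resident_bytes_ = stats->total_resident_bytes;
+//     }
+//     void PartitionsDumpBucketStats(
+//         const char*, const PartitionBucketMemoryStats*) override {}
+//     size_t resident_bytes_ = 0;
+//   };
+//
+// and would then be passed to PartitionDumpStats() or
+// PartitionDumpStatsGeneric() below.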
+
+BASE_EXPORT void PartitionAllocGlobalInit(void (*oom_handling_function)());
+BASE_EXPORT void PartitionAllocInit(PartitionRoot*,
+ size_t num_buckets,
+ size_t max_allocation);
+BASE_EXPORT void PartitionAllocGenericInit(PartitionRootGeneric*);
+
+enum PartitionPurgeFlags {
+ // Decommitting the ring list of empty pages is reasonably fast.
+ PartitionPurgeDecommitEmptyPages = 1 << 0,
+ // Discarding unused system pages is slower, because it involves walking all
+ // freelists in all active partition pages of all buckets >= system page
+ // size. It often frees a similar amount of memory to decommitting the empty
+ // pages, though.
+ PartitionPurgeDiscardUnusedSystemPages = 1 << 1,
+};
+
+BASE_EXPORT void PartitionPurgeMemory(PartitionRoot*, int);
+BASE_EXPORT void PartitionPurgeMemoryGeneric(PartitionRootGeneric*, int);
+
+BASE_EXPORT NOINLINE void* PartitionAllocSlowPath(PartitionRootBase*,
+ int,
+ size_t,
+ PartitionBucket*);
+BASE_EXPORT NOINLINE void PartitionFreeSlowPath(PartitionPage*);
+BASE_EXPORT NOINLINE void* PartitionReallocGeneric(PartitionRootGeneric*,
+ void*,
+ size_t,
+ const char* type_name);
+
+BASE_EXPORT void PartitionDumpStats(PartitionRoot*,
+ const char* partition_name,
+ bool is_light_dump,
+ PartitionStatsDumper*);
+BASE_EXPORT void PartitionDumpStatsGeneric(PartitionRootGeneric*,
+ const char* partition_name,
+ bool is_light_dump,
+ PartitionStatsDumper*);
+
+class BASE_EXPORT PartitionAllocHooks {
+ public:
+ typedef void AllocationHook(void* address, size_t, const char* type_name);
+ typedef void FreeHook(void* address);
+
+ static void SetAllocationHook(AllocationHook* hook) {
+ allocation_hook_ = hook;
+ }
+ static void SetFreeHook(FreeHook* hook) { free_hook_ = hook; }
+
+ static void AllocationHookIfEnabled(void* address,
+ size_t size,
+ const char* type_name) {
+ AllocationHook* hook = allocation_hook_;
+ if (UNLIKELY(hook != nullptr))
+ hook(address, size, type_name);
+ }
+
+ static void FreeHookIfEnabled(void* address) {
+ FreeHook* hook = free_hook_;
+ if (UNLIKELY(hook != nullptr))
+ hook(address);
+ }
+
+ static void ReallocHookIfEnabled(void* old_address,
+ void* new_address,
+ size_t size,
+ const char* type_name) {
+ // Report a reallocation as a free followed by an allocation.
+ AllocationHook* allocation_hook = allocation_hook_;
+ FreeHook* free_hook = free_hook_;
+ if (UNLIKELY(allocation_hook && free_hook)) {
+ free_hook(old_address);
+ allocation_hook(new_address, size, type_name);
+ }
+ }
+
+ private:
+ // Pointers to hook functions that PartitionAlloc will call on allocation and
+ // free if the pointers are non-null.
+ static AllocationHook* allocation_hook_;
+ static FreeHook* free_hook_;
+};
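+
+// Sketch of hook installation (the function below is illustrative, not part of
+// the API):
+//
+//   void MyAllocationHook(void* address, size_t size, const char* type_name) {
+//     // e.g. record |size| against |type_name| somewhere.
+//   }
+//   ...
+//   PartitionAllocHooks::SetAllocationHook(&MyAllocationHook);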
+
+ALWAYS_INLINE PartitionFreelistEntry* PartitionFreelistMask(
+ PartitionFreelistEntry* ptr) {
+// We use bswap on little endian as a fast mask for two reasons:
+// 1) If an object is freed and its vtable used where the attacker doesn't
+// get the chance to run allocations between the free and use, the vtable
+// dereference is likely to fault.
+// 2) If the attacker has a linear buffer overflow and elects to try and
+// corrupt a freelist pointer, partial pointer overwrite attacks are
+// thwarted.
+// For big endian, similar guarantees are arrived at with a negation.
+#if defined(ARCH_CPU_BIG_ENDIAN)
+ uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr);
+#else
+ uintptr_t masked = ByteSwapUintPtrT(reinterpret_cast<uintptr_t>(ptr));
+#endif
+ return reinterpret_cast<PartitionFreelistEntry*>(masked);
+}
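+
+// Worked example (the pointer value is illustrative): on 64-bit little endian,
+// masking 0x00007ffd12345678 byte-swaps it to 0x78563412fd7f0000, which is a
+// non-canonical / unmapped address on typical systems, so an accidental
+// dereference of a masked freelist entry is very likely to fault immediately.
+// Because both bswap and negation are their own inverse, applying the mask
+// twice restores the original pointer.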
+
+ALWAYS_INLINE size_t PartitionCookieSizeAdjustAdd(size_t size) {
+#if DCHECK_IS_ON()
+ // Add space for cookies, checking for integer overflow. TODO(palmer):
+ // Investigate the performance and code size implications of using
+ // CheckedNumeric throughout PA.
+ DCHECK(size + (2 * kCookieSize) > size);
+ size += 2 * kCookieSize;
+#endif
+ return size;
+}
+
+ALWAYS_INLINE size_t PartitionCookieSizeAdjustSubtract(size_t size) {
+#if DCHECK_IS_ON()
+ // Remove space for cookies.
+ DCHECK(size >= 2 * kCookieSize);
+ size -= 2 * kCookieSize;
+#endif
+ return size;
+}
+
+ALWAYS_INLINE void* PartitionCookieFreePointerAdjust(void* ptr) {
+#if DCHECK_IS_ON()
+ // The value given to the application is actually just after the cookie.
+ ptr = static_cast<char*>(ptr) - kCookieSize;
+#endif
+ return ptr;
+}
+
+ALWAYS_INLINE void PartitionCookieWriteValue(void* ptr) {
+#if DCHECK_IS_ON()
+ unsigned char* cookie_ptr = reinterpret_cast<unsigned char*>(ptr);
+ for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr)
+ *cookie_ptr = kCookieValue[i];
+#endif
+}
+
+ALWAYS_INLINE void PartitionCookieCheckValue(void* ptr) {
+#if DCHECK_IS_ON()
+ unsigned char* cookie_ptr = reinterpret_cast<unsigned char*>(ptr);
+ for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr)
+ DCHECK(*cookie_ptr == kCookieValue[i]);
+#endif
+}
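+
+// Taken together, in DCHECK builds the slot layout is
+//   | cookie (16B) | user data | cookie (16B) |
+// and the pointer handed out to the caller points just past the leading
+// cookie. For example, a 16-byte request consumes 16 + 2 * kCookieSize = 48
+// bytes of slot space in such builds; in release builds the four helpers above
+// are no-ops and the request stays at 16 bytes.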
+
+ALWAYS_INLINE char* PartitionSuperPageToMetadataArea(char* ptr) {
+ uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(ptr);
+ DCHECK(!(pointer_as_uint & kSuperPageOffsetMask));
+ // The metadata area is exactly one system page (the guard page) into the
+ // super page.
+ return reinterpret_cast<char*>(pointer_as_uint + kSystemPageSize);
+}
+
+ALWAYS_INLINE PartitionPage* PartitionPointerToPageNoAlignmentCheck(void* ptr) {
+ uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(ptr);
+ char* super_page_ptr =
+ reinterpret_cast<char*>(pointer_as_uint & kSuperPageBaseMask);
+ uintptr_t partition_page_index =
+ (pointer_as_uint & kSuperPageOffsetMask) >> kPartitionPageShift;
+ // Index 0 is invalid because it is the metadata and guard area and
+ // the last index is invalid because it is a guard page.
+ DCHECK(partition_page_index);
+ DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1);
+ PartitionPage* page = reinterpret_cast<PartitionPage*>(
+ PartitionSuperPageToMetadataArea(super_page_ptr) +
+ (partition_page_index << kPageMetadataShift));
+ // Partition pages in the same slot span can share the same page object.
+ // Adjust for that.
+ size_t delta = page->page_offset << kPageMetadataShift;
+ page =
+ reinterpret_cast<PartitionPage*>(reinterpret_cast<char*>(page) - delta);
+ return page;
+}
+
+ALWAYS_INLINE void* PartitionPageToPointer(const PartitionPage* page) {
+ uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(page);
+ uintptr_t super_page_offset = (pointer_as_uint & kSuperPageOffsetMask);
+ DCHECK(super_page_offset > kSystemPageSize);
+ DCHECK(super_page_offset < kSystemPageSize + (kNumPartitionPagesPerSuperPage *
+ kPageMetadataSize));
+ uintptr_t partition_page_index =
+ (super_page_offset - kSystemPageSize) >> kPageMetadataShift;
+ // Index 0 is invalid because it is the metadata area and the last index is
+ // invalid because it is a guard page.
+ DCHECK(partition_page_index);
+ DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1);
+ uintptr_t super_page_base = (pointer_as_uint & kSuperPageBaseMask);
+ void* ret = reinterpret_cast<void*>(
+ super_page_base + (partition_page_index << kPartitionPageShift));
+ return ret;
+}
+
+ALWAYS_INLINE PartitionPage* PartitionPointerToPage(void* ptr) {
+ PartitionPage* page = PartitionPointerToPageNoAlignmentCheck(ptr);
+ // Checks that the pointer is a multiple of bucket size.
+ DCHECK(!((reinterpret_cast<uintptr_t>(ptr) -
+ reinterpret_cast<uintptr_t>(PartitionPageToPointer(page))) %
+ page->bucket->slot_size));
+ return page;
+}
+
+ALWAYS_INLINE bool PartitionBucketIsDirectMapped(
+ const PartitionBucket* bucket) {
+ return !bucket->num_system_pages_per_slot_span;
+}
+
+ALWAYS_INLINE size_t PartitionBucketBytes(const PartitionBucket* bucket) {
+ return bucket->num_system_pages_per_slot_span * kSystemPageSize;
+}
+
+ALWAYS_INLINE uint16_t PartitionBucketSlots(const PartitionBucket* bucket) {
+ return static_cast<uint16_t>(PartitionBucketBytes(bucket) /
+ bucket->slot_size);
+}
+
+ALWAYS_INLINE size_t* PartitionPageGetRawSizePtr(PartitionPage* page) {
+ // For single-slot buckets which span more than one partition page, we
+ // have some spare metadata space to store the raw allocation size. We
+ // can use this to report better statistics.
+ PartitionBucket* bucket = page->bucket;
+ if (bucket->slot_size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize)
+ return nullptr;
+
+ DCHECK((bucket->slot_size % kSystemPageSize) == 0);
+ DCHECK(PartitionBucketIsDirectMapped(bucket) ||
+ PartitionBucketSlots(bucket) == 1);
+ page++;
+ return reinterpret_cast<size_t*>(&page->freelist_head);
+}
+
+ALWAYS_INLINE size_t PartitionPageGetRawSize(PartitionPage* page) {
+ size_t* raw_size_ptr = PartitionPageGetRawSizePtr(page);
+ if (UNLIKELY(raw_size_ptr != nullptr))
+ return *raw_size_ptr;
+ return 0;
+}
+
+ALWAYS_INLINE PartitionRootBase* PartitionPageToRoot(PartitionPage* page) {
+ PartitionSuperPageExtentEntry* extent_entry =
+ reinterpret_cast<PartitionSuperPageExtentEntry*>(
+ reinterpret_cast<uintptr_t>(page) & kSystemPageBaseMask);
+ return extent_entry->root;
+}
+
+ALWAYS_INLINE bool PartitionPointerIsValid(void* ptr) {
+ PartitionPage* page = PartitionPointerToPage(ptr);
+ PartitionRootBase* root = PartitionPageToRoot(page);
+ return root->inverted_self == ~reinterpret_cast<uintptr_t>(root);
+}
+
+ALWAYS_INLINE void* PartitionBucketAlloc(PartitionRootBase* root,
+ int flags,
+ size_t size,
+ PartitionBucket* bucket) {
+ PartitionPage* page = bucket->active_pages_head;
+ // Check that this page is neither full nor freed.
+ DCHECK(page->num_allocated_slots >= 0);
+ void* ret = page->freelist_head;
+ if (LIKELY(ret != 0)) {
+ // If these asserts fire, you probably corrupted memory.
+ DCHECK(PartitionPointerIsValid(ret));
+ // All large allocations must go through the slow path to correctly
+ // update the size metadata.
+ DCHECK(PartitionPageGetRawSize(page) == 0);
+ PartitionFreelistEntry* new_head =
+ PartitionFreelistMask(static_cast<PartitionFreelistEntry*>(ret)->next);
+ page->freelist_head = new_head;
+ page->num_allocated_slots++;
+ } else {
+ ret = PartitionAllocSlowPath(root, flags, size, bucket);
+ DCHECK(!ret || PartitionPointerIsValid(ret));
+ }
+#if DCHECK_IS_ON()
+ if (!ret)
+ return 0;
+ // Fill the uninitialized pattern, and write the cookies.
+ page = PartitionPointerToPage(ret);
+ size_t slot_size = page->bucket->slot_size;
+ size_t raw_size = PartitionPageGetRawSize(page);
+ if (raw_size) {
+ DCHECK(raw_size == size);
+ slot_size = raw_size;
+ }
+ size_t no_cookie_size = PartitionCookieSizeAdjustSubtract(slot_size);
+ char* char_ret = static_cast<char*>(ret);
+ // The value given to the application is actually just after the cookie.
+ ret = char_ret + kCookieSize;
+ memset(ret, kUninitializedByte, no_cookie_size);
+ PartitionCookieWriteValue(char_ret);
+ PartitionCookieWriteValue(char_ret + kCookieSize + no_cookie_size);
+#endif
+ return ret;
+}
+
+ALWAYS_INLINE void* PartitionAlloc(PartitionRoot* root,
+ size_t size,
+ const char* type_name) {
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+ void* result = malloc(size);
+ CHECK(result);
+ return result;
+#else
+ size_t requested_size = size;
+ size = PartitionCookieSizeAdjustAdd(size);
+ DCHECK(root->initialized);
+ size_t index = size >> kBucketShift;
+ DCHECK(index < root->num_buckets);
+ DCHECK(size == index << kBucketShift);
+ PartitionBucket* bucket = &root->buckets()[index];
+ void* result = PartitionBucketAlloc(root, 0, size, bucket);
+ PartitionAllocHooks::AllocationHookIfEnabled(result, requested_size,
+ type_name);
+ return result;
+#endif // defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+}
+
+ALWAYS_INLINE void PartitionFreeWithPage(void* ptr, PartitionPage* page) {
+// If these asserts fire, you probably corrupted memory.
+#if DCHECK_IS_ON()
+ size_t slot_size = page->bucket->slot_size;
+ size_t raw_size = PartitionPageGetRawSize(page);
+ if (raw_size)
+ slot_size = raw_size;
+ PartitionCookieCheckValue(ptr);
+ PartitionCookieCheckValue(reinterpret_cast<char*>(ptr) + slot_size -
+ kCookieSize);
+ memset(ptr, kFreedByte, slot_size);
+#endif
+ DCHECK(page->num_allocated_slots);
+ PartitionFreelistEntry* freelist_head = page->freelist_head;
+ DCHECK(!freelist_head || PartitionPointerIsValid(freelist_head));
+ CHECK(ptr != freelist_head); // Catches an immediate double free.
+ // Look for double free one level deeper in debug.
+ DCHECK(!freelist_head || ptr != PartitionFreelistMask(freelist_head->next));
+ PartitionFreelistEntry* entry = static_cast<PartitionFreelistEntry*>(ptr);
+ entry->next = PartitionFreelistMask(freelist_head);
+ page->freelist_head = entry;
+ --page->num_allocated_slots;
+ if (UNLIKELY(page->num_allocated_slots <= 0)) {
+ PartitionFreeSlowPath(page);
+ } else {
+ // All single-slot allocations must go through the slow path to
+ // correctly update the size metadata.
+ DCHECK(PartitionPageGetRawSize(page) == 0);
+ }
+}
+
+ALWAYS_INLINE void PartitionFree(void* ptr) {
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+ free(ptr);
+#else
+ PartitionAllocHooks::FreeHookIfEnabled(ptr);
+ ptr = PartitionCookieFreePointerAdjust(ptr);
+ DCHECK(PartitionPointerIsValid(ptr));
+ PartitionPage* page = PartitionPointerToPage(ptr);
+ PartitionFreeWithPage(ptr, page);
+#endif
+}
+
+ALWAYS_INLINE PartitionBucket* PartitionGenericSizeToBucket(
+ PartitionRootGeneric* root,
+ size_t size) {
+ size_t order = kBitsPerSizeT - bits::CountLeadingZeroBitsSizeT(size);
+ // The order index is simply the next few bits after the most significant bit.
+ size_t order_index = (size >> root->order_index_shifts[order]) &
+ (kGenericNumBucketsPerOrder - 1);
+ // And if the remaining bits are non-zero we must bump the bucket up.
+ size_t sub_order_index = size & root->order_sub_index_masks[order];
+ PartitionBucket* bucket =
+ root->bucket_lookups[(order << kGenericNumBucketsPerOrderBits) +
+ order_index + !!sub_order_index];
+ DCHECK(!bucket->slot_size || bucket->slot_size >= size);
+ DCHECK(!(bucket->slot_size % kGenericSmallestBucket));
+ return bucket;
+}
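+
+// Worked example of the lookup above: a 4000-byte request has its most
+// significant bit at order 12 (2048 <= 4000 < 4096); the next three bits (111)
+// select the 3840-byte bucket within that order, and because the remaining low
+// bits are non-zero the request is bumped to the next bucket, 4096 bytes --
+// matching the "4000 -> 4096" example in the header comment.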
+
+ALWAYS_INLINE void* PartitionAllocGenericFlags(PartitionRootGeneric* root,
+ int flags,
+ size_t size,
+ const char* type_name) {
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+ void* result = malloc(size);
+ CHECK(result || flags & PartitionAllocReturnNull);
+ return result;
+#else
+ DCHECK(root->initialized);
+ size_t requested_size = size;
+ size = PartitionCookieSizeAdjustAdd(size);
+ PartitionBucket* bucket = PartitionGenericSizeToBucket(root, size);
+ void* ret = nullptr;
+ {
+ subtle::SpinLock::Guard guard(root->lock);
+ ret = PartitionBucketAlloc(root, flags, size, bucket);
+ }
+ PartitionAllocHooks::AllocationHookIfEnabled(ret, requested_size, type_name);
+ return ret;
+#endif
+}
+
+ALWAYS_INLINE void* PartitionAllocGeneric(PartitionRootGeneric* root,
+ size_t size,
+ const char* type_name) {
+ return PartitionAllocGenericFlags(root, 0, size, type_name);
+}
+
+ALWAYS_INLINE void PartitionFreeGeneric(PartitionRootGeneric* root, void* ptr) {
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+ free(ptr);
+#else
+ DCHECK(root->initialized);
+
+ if (UNLIKELY(!ptr))
+ return;
+
+ PartitionAllocHooks::FreeHookIfEnabled(ptr);
+ ptr = PartitionCookieFreePointerAdjust(ptr);
+ DCHECK(PartitionPointerIsValid(ptr));
+ PartitionPage* page = PartitionPointerToPage(ptr);
+ {
+ subtle::SpinLock::Guard guard(root->lock);
+ PartitionFreeWithPage(ptr, page);
+ }
+#endif
+}
+
+ALWAYS_INLINE size_t PartitionDirectMapSize(size_t size) {
+ // Caller must check that the size is not above the kGenericMaxDirectMapped
+ // limit before calling. This also guards against integer overflow in the
+ // calculation here.
+ DCHECK(size <= kGenericMaxDirectMapped);
+ return (size + kSystemPageOffsetMask) & kSystemPageBaseMask;
+}
+
+ALWAYS_INLINE size_t PartitionAllocActualSize(PartitionRootGeneric* root,
+ size_t size) {
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+ return size;
+#else
+ DCHECK(root->initialized);
+ size = PartitionCookieSizeAdjustAdd(size);
+ PartitionBucket* bucket = PartitionGenericSizeToBucket(root, size);
+ if (LIKELY(!PartitionBucketIsDirectMapped(bucket))) {
+ size = bucket->slot_size;
+ } else if (size > kGenericMaxDirectMapped) {
+ // Too large to allocate => return the size unchanged.
+ } else {
+ DCHECK(bucket == &PartitionRootBase::gPagedBucket);
+ size = PartitionDirectMapSize(size);
+ }
+ return PartitionCookieSizeAdjustSubtract(size);
+#endif
+}
+
+ALWAYS_INLINE bool PartitionAllocSupportsGetSize() {
+#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+ return false;
+#else
+ return true;
+#endif
+}
+
+ALWAYS_INLINE size_t PartitionAllocGetSize(void* ptr) {
+ // No need to lock here. Only |ptr| being freed by another thread could
+ // cause trouble, and the caller is responsible for that not happening.
+ DCHECK(PartitionAllocSupportsGetSize());
+ ptr = PartitionCookieFreePointerAdjust(ptr);
+ DCHECK(PartitionPointerIsValid(ptr));
+ PartitionPage* page = PartitionPointerToPage(ptr);
+ size_t size = page->bucket->slot_size;
+ return PartitionCookieSizeAdjustSubtract(size);
+}
+
+// N (or more accurately, N - sizeof(void*)) represents the largest size in
+// bytes that will be handled by a SizeSpecificPartitionAllocator.
+// Attempts to partitionAlloc() more than this amount will fail.
+template <size_t N>
+class SizeSpecificPartitionAllocator {
+ public:
+ static const size_t kMaxAllocation = N - kAllocationGranularity;
+ static const size_t kNumBuckets = N / kAllocationGranularity;
+ void init() {
+ PartitionAllocInit(&partition_root_, kNumBuckets, kMaxAllocation);
+ }
+ ALWAYS_INLINE PartitionRoot* root() { return &partition_root_; }
+
+ private:
+ PartitionRoot partition_root_;
+ PartitionBucket actual_buckets_[kNumBuckets];
+};
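+
+// Sketch of typical use (the sizes are illustrative and mirror the setup used
+// in the unit tests):
+//
+//   SizeSpecificPartitionAllocator<4096> my_allocator;
+//   my_allocator.init();
+//   void* p = PartitionAlloc(my_allocator.root(), 16, "MyType");
+//   PartitionFree(p);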
+
+class PartitionAllocatorGeneric {
+ public:
+ void init() { PartitionAllocGenericInit(&partition_root_); }
+ ALWAYS_INLINE PartitionRootGeneric* root() { return &partition_root_; }
+
+ private:
+ PartitionRootGeneric partition_root_;
+};
+
+} // namespace base
+
+#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc b/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc
new file mode 100644
index 00000000000..2ddde105bb4
--- /dev/null
+++ b/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc
@@ -0,0 +1,2080 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/partition_alloc.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include <memory>
+#include <vector>
+
+#include "base/bits.h"
+#include "base/sys_info.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if defined(OS_POSIX)
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/time.h>
+
+#ifndef MAP_ANONYMOUS
+#define MAP_ANONYMOUS MAP_ANON
+#endif
+#endif // defined(OS_POSIX)
+
+namespace {
+template <typename T>
+std::unique_ptr<T[]> WrapArrayUnique(T* ptr) {
+ return std::unique_ptr<T[]>(ptr);
+}
+} // namespace
+
+#if !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+
+namespace base {
+
+namespace {
+
+const size_t kTestMaxAllocation = 4096;
+SizeSpecificPartitionAllocator<kTestMaxAllocation> allocator;
+PartitionAllocatorGeneric generic_allocator;
+
+const size_t kTestAllocSize = 16;
+#if !DCHECK_IS_ON()
+const size_t kPointerOffset = 0;
+const size_t kExtraAllocSize = 0;
+#else
+const size_t kPointerOffset = kCookieSize;
+const size_t kExtraAllocSize = kCookieSize * 2;
+#endif
+const size_t kRealAllocSize = kTestAllocSize + kExtraAllocSize;
+const size_t kTestBucketIndex = kRealAllocSize >> kBucketShift;
+
+const char* type_name = nullptr;
+
+void TestSetup() {
+ allocator.init();
+ generic_allocator.init();
+}
+
+#if !defined(ARCH_CPU_64_BITS) || defined(OS_POSIX)
+bool SetAddressSpaceLimit() {
+#if !defined(ARCH_CPU_64_BITS)
+ // 32 bits => address space is limited already.
+ return true;
+#elif defined(OS_POSIX) && !defined(OS_MACOSX)
+ // Mac will accept RLIMIT_AS changes but it is not enforced.
+ // See https://crbug.com/435269 and rdar://17576114.
+ // Note: this number must be not less than 6 GB, because with
+ // sanitizer_coverage_flags=edge, it reserves > 5 GB of address
+ // space, see https://crbug.com/674665.
+ const size_t kAddressSpaceLimit = static_cast<size_t>(6144) * 1024 * 1024;
+ struct rlimit limit;
+ if (getrlimit(RLIMIT_AS, &limit) != 0)
+ return false;
+ if (limit.rlim_cur == RLIM_INFINITY || limit.rlim_cur > kAddressSpaceLimit) {
+ limit.rlim_cur = kAddressSpaceLimit;
+ if (setrlimit(RLIMIT_AS, &limit) != 0)
+ return false;
+ }
+ return true;
+#else
+ return false;
+#endif
+}
+
+bool ClearAddressSpaceLimit() {
+#if !defined(ARCH_CPU_64_BITS)
+ return true;
+#elif defined(OS_POSIX)
+ struct rlimit limit;
+ if (getrlimit(RLIMIT_AS, &limit) != 0)
+ return false;
+ limit.rlim_cur = limit.rlim_max;
+ if (setrlimit(RLIMIT_AS, &limit) != 0)
+ return false;
+ return true;
+#else
+ return false;
+#endif
+}
+#endif
+
+PartitionPage* GetFullPage(size_t size) {
+ size_t real_size = size + kExtraAllocSize;
+ size_t bucket_index = real_size >> kBucketShift;
+ PartitionBucket* bucket = &allocator.root()->buckets()[bucket_index];
+ size_t num_slots =
+ (bucket->num_system_pages_per_slot_span * kSystemPageSize) / real_size;
+ void* first = 0;
+ void* last = 0;
+ size_t i;
+ for (i = 0; i < num_slots; ++i) {
+ void* ptr = PartitionAlloc(allocator.root(), size, type_name);
+ EXPECT_TRUE(ptr);
+ if (!i)
+ first = PartitionCookieFreePointerAdjust(ptr);
+ else if (i == num_slots - 1)
+ last = PartitionCookieFreePointerAdjust(ptr);
+ }
+ EXPECT_EQ(PartitionPointerToPage(first), PartitionPointerToPage(last));
+ if (bucket->num_system_pages_per_slot_span == kNumSystemPagesPerPartitionPage)
+ EXPECT_EQ(reinterpret_cast<size_t>(first) & kPartitionPageBaseMask,
+ reinterpret_cast<size_t>(last) & kPartitionPageBaseMask);
+ EXPECT_EQ(num_slots, static_cast<size_t>(
+ bucket->active_pages_head->num_allocated_slots));
+ EXPECT_EQ(0, bucket->active_pages_head->freelist_head);
+ EXPECT_TRUE(bucket->active_pages_head);
+ EXPECT_TRUE(bucket->active_pages_head != &PartitionRootGeneric::gSeedPage);
+ return bucket->active_pages_head;
+}
+
+void FreeFullPage(PartitionPage* page) {
+ size_t size = page->bucket->slot_size;
+ size_t num_slots =
+ (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) / size;
+ EXPECT_EQ(num_slots, static_cast<size_t>(abs(page->num_allocated_slots)));
+ char* ptr = reinterpret_cast<char*>(PartitionPageToPointer(page));
+ size_t i;
+ for (i = 0; i < num_slots; ++i) {
+ PartitionFree(ptr + kPointerOffset);
+ ptr += size;
+ }
+}
+
+void CycleFreeCache(size_t size) {
+ size_t real_size = size + kExtraAllocSize;
+ size_t bucket_index = real_size >> kBucketShift;
+ PartitionBucket* bucket = &allocator.root()->buckets()[bucket_index];
+ DCHECK(!bucket->active_pages_head->num_allocated_slots);
+
+ for (size_t i = 0; i < kMaxFreeableSpans; ++i) {
+ void* ptr = PartitionAlloc(allocator.root(), size, type_name);
+ EXPECT_EQ(1, bucket->active_pages_head->num_allocated_slots);
+ PartitionFree(ptr);
+ EXPECT_EQ(0, bucket->active_pages_head->num_allocated_slots);
+ EXPECT_NE(-1, bucket->active_pages_head->empty_cache_index);
+ }
+}
+
+void CycleGenericFreeCache(size_t size) {
+ for (size_t i = 0; i < kMaxFreeableSpans; ++i) {
+ void* ptr =
+ PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ PartitionPage* page =
+ PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr));
+ PartitionBucket* bucket = page->bucket;
+ EXPECT_EQ(1, bucket->active_pages_head->num_allocated_slots);
+ PartitionFreeGeneric(generic_allocator.root(), ptr);
+ EXPECT_EQ(0, bucket->active_pages_head->num_allocated_slots);
+ EXPECT_NE(-1, bucket->active_pages_head->empty_cache_index);
+ }
+}
+
+void CheckPageInCore(void* ptr, bool inCore) {
+#if defined(OS_LINUX)
+ unsigned char ret;
+ EXPECT_EQ(0, mincore(ptr, kSystemPageSize, &ret));
+ EXPECT_EQ(inCore, ret);
+#endif
+}
+
+bool IsLargeMemoryDevice() {
+ return base::SysInfo::AmountOfPhysicalMemory() >= 2LL * 1024 * 1024 * 1024;
+}
+
+class MockPartitionStatsDumper : public PartitionStatsDumper {
+ public:
+ MockPartitionStatsDumper()
+ : total_resident_bytes(0),
+ total_active_bytes(0),
+ total_decommittable_bytes(0),
+ total_discardable_bytes(0) {}
+
+ void PartitionDumpTotals(const char* partition_name,
+ const PartitionMemoryStats* stats) override {
+ EXPECT_GE(stats->total_mmapped_bytes, stats->total_resident_bytes);
+ EXPECT_EQ(total_resident_bytes, stats->total_resident_bytes);
+ EXPECT_EQ(total_active_bytes, stats->total_active_bytes);
+ EXPECT_EQ(total_decommittable_bytes, stats->total_decommittable_bytes);
+ EXPECT_EQ(total_discardable_bytes, stats->total_discardable_bytes);
+ }
+
+ void PartitionsDumpBucketStats(
+ const char* partition_name,
+ const PartitionBucketMemoryStats* stats) override {
+ (void)partition_name;
+ EXPECT_TRUE(stats->is_valid);
+ EXPECT_EQ(0u, stats->bucket_slot_size & kAllocationGranularityMask);
+ bucket_stats.push_back(*stats);
+ total_resident_bytes += stats->resident_bytes;
+ total_active_bytes += stats->active_bytes;
+ total_decommittable_bytes += stats->decommittable_bytes;
+ total_discardable_bytes += stats->discardable_bytes;
+ }
+
+ bool IsMemoryAllocationRecorded() {
+ return total_resident_bytes != 0 && total_active_bytes != 0;
+ }
+
+ const PartitionBucketMemoryStats* GetBucketStats(size_t bucket_size) {
+ for (size_t i = 0; i < bucket_stats.size(); ++i) {
+ if (bucket_stats[i].bucket_slot_size == bucket_size)
+ return &bucket_stats[i];
+ }
+ return 0;
+ }
+
+ private:
+ size_t total_resident_bytes;
+ size_t total_active_bytes;
+ size_t total_decommittable_bytes;
+ size_t total_discardable_bytes;
+
+ std::vector<PartitionBucketMemoryStats> bucket_stats;
+};
+
+} // anonymous namespace
+
+// Check that the most basic of allocate / free pairs work.
+TEST(PartitionAllocTest, Basic) {
+ TestSetup();
+ PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
+ PartitionPage* seedPage = &PartitionRootGeneric::gSeedPage;
+
+ EXPECT_FALSE(bucket->empty_pages_head);
+ EXPECT_FALSE(bucket->decommitted_pages_head);
+ EXPECT_EQ(seedPage, bucket->active_pages_head);
+ EXPECT_EQ(0, bucket->active_pages_head->next_page);
+
+ void* ptr = PartitionAlloc(allocator.root(), kTestAllocSize, type_name);
+ EXPECT_TRUE(ptr);
+ EXPECT_EQ(kPointerOffset,
+ reinterpret_cast<size_t>(ptr) & kPartitionPageOffsetMask);
+ // Check that the offset appears to include a guard page.
+ EXPECT_EQ(kPartitionPageSize + kPointerOffset,
+ reinterpret_cast<size_t>(ptr) & kSuperPageOffsetMask);
+
+ PartitionFree(ptr);
+ // Expect that the last active page gets noticed as empty but doesn't get
+ // decommitted.
+ EXPECT_TRUE(bucket->empty_pages_head);
+ EXPECT_FALSE(bucket->decommitted_pages_head);
+}
+
+// Test multiple allocations, and freelist handling.
+TEST(PartitionAllocTest, MultiAlloc) {
+ TestSetup();
+
+ char* ptr1 = reinterpret_cast<char*>(
+ PartitionAlloc(allocator.root(), kTestAllocSize, type_name));
+ char* ptr2 = reinterpret_cast<char*>(
+ PartitionAlloc(allocator.root(), kTestAllocSize, type_name));
+ EXPECT_TRUE(ptr1);
+ EXPECT_TRUE(ptr2);
+ ptrdiff_t diff = ptr2 - ptr1;
+ EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff);
+
+ // Check that we re-use the just-freed slot.
+ PartitionFree(ptr2);
+ ptr2 = reinterpret_cast<char*>(
+ PartitionAlloc(allocator.root(), kTestAllocSize, type_name));
+ EXPECT_TRUE(ptr2);
+ diff = ptr2 - ptr1;
+ EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff);
+ PartitionFree(ptr1);
+ ptr1 = reinterpret_cast<char*>(
+ PartitionAlloc(allocator.root(), kTestAllocSize, type_name));
+ EXPECT_TRUE(ptr1);
+ diff = ptr2 - ptr1;
+ EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff);
+
+ char* ptr3 = reinterpret_cast<char*>(
+ PartitionAlloc(allocator.root(), kTestAllocSize, type_name));
+ EXPECT_TRUE(ptr3);
+ diff = ptr3 - ptr1;
+ EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize * 2), diff);
+
+ PartitionFree(ptr1);
+ PartitionFree(ptr2);
+ PartitionFree(ptr3);
+}
+
+// Test a bucket with multiple pages.
+TEST(PartitionAllocTest, MultiPages) {
+ TestSetup();
+ PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
+
+ PartitionPage* page = GetFullPage(kTestAllocSize);
+ FreeFullPage(page);
+ EXPECT_TRUE(bucket->empty_pages_head);
+ EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->active_pages_head);
+ EXPECT_EQ(0, page->next_page);
+ EXPECT_EQ(0, page->num_allocated_slots);
+
+ page = GetFullPage(kTestAllocSize);
+ PartitionPage* page2 = GetFullPage(kTestAllocSize);
+
+ EXPECT_EQ(page2, bucket->active_pages_head);
+ EXPECT_EQ(0, page2->next_page);
+ EXPECT_EQ(reinterpret_cast<uintptr_t>(PartitionPageToPointer(page)) &
+ kSuperPageBaseMask,
+ reinterpret_cast<uintptr_t>(PartitionPageToPointer(page2)) &
+ kSuperPageBaseMask);
+
+ // Fully free the non-current page. This will leave us with no current
+ // active page because one is empty and the other is full.
+ FreeFullPage(page);
+ EXPECT_EQ(0, page->num_allocated_slots);
+ EXPECT_TRUE(bucket->empty_pages_head);
+ EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->active_pages_head);
+
+ // Allocate a new page, it should pull from the freelist.
+ page = GetFullPage(kTestAllocSize);
+ EXPECT_FALSE(bucket->empty_pages_head);
+ EXPECT_EQ(page, bucket->active_pages_head);
+
+ FreeFullPage(page);
+ FreeFullPage(page2);
+ EXPECT_EQ(0, page->num_allocated_slots);
+ EXPECT_EQ(0, page2->num_allocated_slots);
+ EXPECT_EQ(0, page2->num_unprovisioned_slots);
+ EXPECT_NE(-1, page2->empty_cache_index);
+}
+
+// Test some finer aspects of internal page transitions.
+TEST(PartitionAllocTest, PageTransitions) {
+ TestSetup();
+ PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
+
+ PartitionPage* page1 = GetFullPage(kTestAllocSize);
+ EXPECT_EQ(page1, bucket->active_pages_head);
+ EXPECT_EQ(0, page1->next_page);
+ PartitionPage* page2 = GetFullPage(kTestAllocSize);
+ EXPECT_EQ(page2, bucket->active_pages_head);
+ EXPECT_EQ(0, page2->next_page);
+
+ // Bounce page1 back into the non-full list then fill it up again.
+ char* ptr =
+ reinterpret_cast<char*>(PartitionPageToPointer(page1)) + kPointerOffset;
+ PartitionFree(ptr);
+ EXPECT_EQ(page1, bucket->active_pages_head);
+ (void)PartitionAlloc(allocator.root(), kTestAllocSize, type_name);
+ EXPECT_EQ(page1, bucket->active_pages_head);
+ EXPECT_EQ(page2, bucket->active_pages_head->next_page);
+
+ // Allocating another page at this point should cause us to scan over page1
+ // (which is both full and NOT our current page), and evict it from the
+ // freelist. Older code had an O(n^2) condition due to failure to do this.
+ PartitionPage* page3 = GetFullPage(kTestAllocSize);
+ EXPECT_EQ(page3, bucket->active_pages_head);
+ EXPECT_EQ(0, page3->next_page);
+
+ // Work out a pointer into page2 and free it.
+ ptr = reinterpret_cast<char*>(PartitionPageToPointer(page2)) + kPointerOffset;
+ PartitionFree(ptr);
+ // Trying to allocate at this time should cause us to cycle around to page2
+ // and find the recently freed slot.
+ char* newPtr = reinterpret_cast<char*>(
+ PartitionAlloc(allocator.root(), kTestAllocSize, type_name));
+ EXPECT_EQ(ptr, newPtr);
+ EXPECT_EQ(page2, bucket->active_pages_head);
+ EXPECT_EQ(page3, page2->next_page);
+
+ // Work out a pointer into page1 and free it. This should pull the page
+ // back into the list of available pages.
+ ptr = reinterpret_cast<char*>(PartitionPageToPointer(page1)) + kPointerOffset;
+ PartitionFree(ptr);
+ // This allocation should be satisfied by page1.
+ newPtr = reinterpret_cast<char*>(
+ PartitionAlloc(allocator.root(), kTestAllocSize, type_name));
+ EXPECT_EQ(ptr, newPtr);
+ EXPECT_EQ(page1, bucket->active_pages_head);
+ EXPECT_EQ(page2, page1->next_page);
+
+ FreeFullPage(page3);
+ FreeFullPage(page2);
+ FreeFullPage(page1);
+
+ // Allocating whilst in this state exposed a bug, so keep the test.
+ ptr = reinterpret_cast<char*>(
+ PartitionAlloc(allocator.root(), kTestAllocSize, type_name));
+ PartitionFree(ptr);
+}
+
+// Test some corner cases relating to page transitions in the internal
+// free page list metadata bucket.
+TEST(PartitionAllocTest, FreePageListPageTransitions) {
+ TestSetup();
+ PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
+
+ size_t numToFillFreeListPage =
+ kPartitionPageSize / (sizeof(PartitionPage) + kExtraAllocSize);
+ // The +1 is because we need to account for the fact that the current page
+ // never gets thrown on the freelist.
+ ++numToFillFreeListPage;
+ std::unique_ptr<PartitionPage* []> pages =
+ WrapArrayUnique(new PartitionPage*[numToFillFreeListPage]);
+
+ size_t i;
+ for (i = 0; i < numToFillFreeListPage; ++i) {
+ pages[i] = GetFullPage(kTestAllocSize);
+ }
+ EXPECT_EQ(pages[numToFillFreeListPage - 1], bucket->active_pages_head);
+ for (i = 0; i < numToFillFreeListPage; ++i)
+ FreeFullPage(pages[i]);
+ EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->active_pages_head);
+ EXPECT_TRUE(bucket->empty_pages_head);
+
+ // Allocate / free in a different bucket size so we get control of a
+ // different free page list. We need two pages because one will be the last
+ // active page and not get freed.
+ PartitionPage* page1 = GetFullPage(kTestAllocSize * 2);
+ PartitionPage* page2 = GetFullPage(kTestAllocSize * 2);
+ FreeFullPage(page1);
+ FreeFullPage(page2);
+
+ for (i = 0; i < numToFillFreeListPage; ++i) {
+ pages[i] = GetFullPage(kTestAllocSize);
+ }
+ EXPECT_EQ(pages[numToFillFreeListPage - 1], bucket->active_pages_head);
+
+ for (i = 0; i < numToFillFreeListPage; ++i)
+ FreeFullPage(pages[i]);
+ EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->active_pages_head);
+ EXPECT_TRUE(bucket->empty_pages_head);
+}
+
+// Test a large series of allocations that cross more than one underlying
+// super page allocation.
+TEST(PartitionAllocTest, MultiPageAllocs) {
+ TestSetup();
+ // This is guaranteed to cross a super page boundary because the first
+ // partition page "slot" will be taken up by a guard page.
+ size_t numPagesNeeded = kNumPartitionPagesPerSuperPage;
+ // The super page should begin and end in a guard page, so we need one less
+ // page in order to allocate a single page in the new super page.
+ --numPagesNeeded;
+
+ EXPECT_GT(numPagesNeeded, 1u);
+ std::unique_ptr<PartitionPage* []> pages;
+ pages = WrapArrayUnique(new PartitionPage*[numPagesNeeded]);
+ uintptr_t firstSuperPageBase = 0;
+ size_t i;
+ for (i = 0; i < numPagesNeeded; ++i) {
+ pages[i] = GetFullPage(kTestAllocSize);
+ void* storagePtr = PartitionPageToPointer(pages[i]);
+ if (!i)
+ firstSuperPageBase =
+ reinterpret_cast<uintptr_t>(storagePtr) & kSuperPageBaseMask;
+ if (i == numPagesNeeded - 1) {
+ uintptr_t secondSuperPageBase =
+ reinterpret_cast<uintptr_t>(storagePtr) & kSuperPageBaseMask;
+ uintptr_t secondSuperPageOffset =
+ reinterpret_cast<uintptr_t>(storagePtr) & kSuperPageOffsetMask;
+ EXPECT_FALSE(secondSuperPageBase == firstSuperPageBase);
+ // Check that we allocated a guard page for the second super page.
+ EXPECT_EQ(kPartitionPageSize, secondSuperPageOffset);
+ }
+ }
+ for (i = 0; i < numPagesNeeded; ++i)
+ FreeFullPage(pages[i]);
+}
+
+// Test the generic allocation functions, which can handle arbitrary sizes,
+// reallocation, etc.
+TEST(PartitionAllocTest, GenericAlloc) {
+ TestSetup();
+
+ void* ptr = PartitionAllocGeneric(generic_allocator.root(), 1, type_name);
+ EXPECT_TRUE(ptr);
+ PartitionFreeGeneric(generic_allocator.root(), ptr);
+ ptr = PartitionAllocGeneric(generic_allocator.root(), kGenericMaxBucketed + 1,
+ type_name);
+ EXPECT_TRUE(ptr);
+ PartitionFreeGeneric(generic_allocator.root(), ptr);
+
+ ptr = PartitionAllocGeneric(generic_allocator.root(), 1, type_name);
+ EXPECT_TRUE(ptr);
+ void* origPtr = ptr;
+ char* charPtr = static_cast<char*>(ptr);
+ *charPtr = 'A';
+
+ // Change the size of the realloc, remaining inside the same bucket.
+ void* newPtr =
+ PartitionReallocGeneric(generic_allocator.root(), ptr, 2, type_name);
+ EXPECT_EQ(ptr, newPtr);
+ newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr, 1, type_name);
+ EXPECT_EQ(ptr, newPtr);
+ newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr,
+ kGenericSmallestBucket, type_name);
+ EXPECT_EQ(ptr, newPtr);
+
+ // Change the size of the realloc, switching buckets.
+ newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr,
+ kGenericSmallestBucket + 1, type_name);
+ EXPECT_NE(newPtr, ptr);
+ // Check that the realloc copied correctly.
+ char* newCharPtr = static_cast<char*>(newPtr);
+ EXPECT_EQ(*newCharPtr, 'A');
+#if DCHECK_IS_ON()
+ // Subtle: this checks for an old bug where we copied too much from the
+ // source of the realloc. That bug would show up as trashing of the
+ // uninitialized byte in the extra space of the upsized allocation.
+ EXPECT_EQ(kUninitializedByte,
+ static_cast<unsigned char>(*(newCharPtr + kGenericSmallestBucket)));
+#endif
+ *newCharPtr = 'B';
+ // The realloc moved. To check that the old allocation was freed, we can
+ // do an alloc of the old allocation size and check that the old allocation
+ // address is at the head of the freelist and reused.
+ void* reusedPtr =
+ PartitionAllocGeneric(generic_allocator.root(), 1, type_name);
+ EXPECT_EQ(reusedPtr, origPtr);
+ PartitionFreeGeneric(generic_allocator.root(), reusedPtr);
+
+ // Downsize the realloc.
+ ptr = newPtr;
+ newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr, 1, type_name);
+ EXPECT_EQ(newPtr, origPtr);
+ newCharPtr = static_cast<char*>(newPtr);
+ EXPECT_EQ(*newCharPtr, 'B');
+ *newCharPtr = 'C';
+
+ // Upsize the realloc to outside the partition.
+ ptr = newPtr;
+ newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr,
+ kGenericMaxBucketed + 1, type_name);
+ EXPECT_NE(newPtr, ptr);
+ newCharPtr = static_cast<char*>(newPtr);
+ EXPECT_EQ(*newCharPtr, 'C');
+ *newCharPtr = 'D';
+
+ // Upsize and downsize the realloc, remaining outside the partition.
+ ptr = newPtr;
+ newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr,
+ kGenericMaxBucketed * 10, type_name);
+ newCharPtr = static_cast<char*>(newPtr);
+ EXPECT_EQ(*newCharPtr, 'D');
+ *newCharPtr = 'E';
+ ptr = newPtr;
+ newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr,
+ kGenericMaxBucketed * 2, type_name);
+ newCharPtr = static_cast<char*>(newPtr);
+ EXPECT_EQ(*newCharPtr, 'E');
+ *newCharPtr = 'F';
+
+ // Downsize the realloc to inside the partition.
+ ptr = newPtr;
+ newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr, 1, type_name);
+ EXPECT_NE(newPtr, ptr);
+ EXPECT_EQ(newPtr, origPtr);
+ newCharPtr = static_cast<char*>(newPtr);
+ EXPECT_EQ(*newCharPtr, 'F');
+
+ PartitionFreeGeneric(generic_allocator.root(), newPtr);
+}
+
+// Test that the generic allocation functions can handle some specific sizes
+// of interest.
+TEST(PartitionAllocTest, GenericAllocSizes) {
+ TestSetup();
+
+ void* ptr = PartitionAllocGeneric(generic_allocator.root(), 0, type_name);
+ EXPECT_TRUE(ptr);
+ PartitionFreeGeneric(generic_allocator.root(), ptr);
+
+ // kPartitionPageSize is interesting because it results in just one
+ // allocation per page, which tripped up some corner cases.
+ size_t size = kPartitionPageSize - kExtraAllocSize;
+ ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ EXPECT_TRUE(ptr);
+ void* ptr2 = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ EXPECT_TRUE(ptr2);
+ PartitionFreeGeneric(generic_allocator.root(), ptr);
+ // Should be freeable at this point.
+ PartitionPage* page =
+ PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr));
+ EXPECT_NE(-1, page->empty_cache_index);
+ PartitionFreeGeneric(generic_allocator.root(), ptr2);
+
+ size = (((kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) -
+ kSystemPageSize) /
+ 2) -
+ kExtraAllocSize;
+ ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ EXPECT_TRUE(ptr);
+ memset(ptr, 'A', size);
+ ptr2 = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ EXPECT_TRUE(ptr2);
+ void* ptr3 = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ EXPECT_TRUE(ptr3);
+ void* ptr4 = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ EXPECT_TRUE(ptr4);
+
+ page = PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr));
+ PartitionPage* page2 =
+ PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr3));
+ EXPECT_NE(page, page2);
+
+ PartitionFreeGeneric(generic_allocator.root(), ptr);
+ PartitionFreeGeneric(generic_allocator.root(), ptr3);
+ PartitionFreeGeneric(generic_allocator.root(), ptr2);
+ // Should be freeable at this point.
+ EXPECT_NE(-1, page->empty_cache_index);
+ EXPECT_EQ(0, page->num_allocated_slots);
+ EXPECT_EQ(0, page->num_unprovisioned_slots);
+ void* newPtr =
+ PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ EXPECT_EQ(ptr3, newPtr);
+ newPtr = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ EXPECT_EQ(ptr2, newPtr);
+#if defined(OS_LINUX) && !DCHECK_IS_ON()
+ // On Linux, we have a guarantee that freelisting a page should cause its
+ // contents to be nulled out. We check for null here to detect a bug we
+ // had where a large slot size was causing us to not properly free all
+ // resources back to the system.
+ // We only run the check when asserts are disabled because when they are
+ // enabled, the allocated area is overwritten with an "uninitialized"
+ // byte pattern.
+ EXPECT_EQ(0, *(reinterpret_cast<char*>(newPtr) + (size - 1)));
+#endif
+ PartitionFreeGeneric(generic_allocator.root(), newPtr);
+ PartitionFreeGeneric(generic_allocator.root(), ptr3);
+ PartitionFreeGeneric(generic_allocator.root(), ptr4);
+
+ // Can we allocate a massive (512MB) size?
+ // Allocate 512MB plus one byte, to test for cookie-writing alignment issues.
+ // Test this only if the device has enough memory or it might fail due
+ // to OOM.
+ if (IsLargeMemoryDevice()) {
+ ptr = PartitionAllocGeneric(generic_allocator.root(), 512 * 1024 * 1024 + 1,
+ type_name);
+ PartitionFreeGeneric(generic_allocator.root(), ptr);
+ }
+
+ // Check a more reasonable, but still direct mapped, size.
+ // Chop a system page and a byte off to test for rounding errors.
+ size = 20 * 1024 * 1024;
+ size -= kSystemPageSize;
+ size -= 1;
+ ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ char* charPtr = reinterpret_cast<char*>(ptr);
+ *(charPtr + (size - 1)) = 'A';
+ PartitionFreeGeneric(generic_allocator.root(), ptr);
+
+ // Can we free null?
+ PartitionFreeGeneric(generic_allocator.root(), 0);
+
+ // Do we correctly get a null for a failed allocation?
+ EXPECT_EQ(0, PartitionAllocGenericFlags(generic_allocator.root(),
+ PartitionAllocReturnNull,
+ 3u * 1024 * 1024 * 1024, type_name));
+}
+
+// Test that we can fetch the real allocated size after an allocation.
+TEST(PartitionAllocTest, GenericAllocGetSize) {
+ TestSetup();
+
+ void* ptr;
+ size_t requestedSize, actualSize, predictedSize;
+
+ EXPECT_TRUE(PartitionAllocSupportsGetSize());
+
+ // Allocate something small.
+ requestedSize = 511 - kExtraAllocSize;
+ predictedSize =
+ PartitionAllocActualSize(generic_allocator.root(), requestedSize);
+ ptr =
+ PartitionAllocGeneric(generic_allocator.root(), requestedSize, type_name);
+ EXPECT_TRUE(ptr);
+ actualSize = PartitionAllocGetSize(ptr);
+ EXPECT_EQ(predictedSize, actualSize);
+ EXPECT_LT(requestedSize, actualSize);
+ PartitionFreeGeneric(generic_allocator.root(), ptr);
+
+ // Allocate a size that should be a perfect match for a bucket, because it
+ // is an exact power of 2.
+ requestedSize = (256 * 1024) - kExtraAllocSize;
+ predictedSize =
+ PartitionAllocActualSize(generic_allocator.root(), requestedSize);
+ ptr =
+ PartitionAllocGeneric(generic_allocator.root(), requestedSize, type_name);
+ EXPECT_TRUE(ptr);
+ actualSize = PartitionAllocGetSize(ptr);
+ EXPECT_EQ(predictedSize, actualSize);
+ EXPECT_EQ(requestedSize, actualSize);
+ PartitionFreeGeneric(generic_allocator.root(), ptr);
+
+ // Allocate a size that is a system page smaller than a bucket. GetSize()
+ // should now return a larger size than we asked for.
+ requestedSize = (256 * 1024) - kSystemPageSize - kExtraAllocSize;
+ predictedSize =
+ PartitionAllocActualSize(generic_allocator.root(), requestedSize);
+ ptr =
+ PartitionAllocGeneric(generic_allocator.root(), requestedSize, type_name);
+ EXPECT_TRUE(ptr);
+ actualSize = PartitionAllocGetSize(ptr);
+ EXPECT_EQ(predictedSize, actualSize);
+ EXPECT_EQ(requestedSize + kSystemPageSize, actualSize);
+ // Check that we can write at the end of the reported size too.
+ char* charPtr = reinterpret_cast<char*>(ptr);
+ *(charPtr + (actualSize - 1)) = 'A';
+ PartitionFreeGeneric(generic_allocator.root(), ptr);
+
+ // Allocate something very large, and uneven.
+ if (IsLargeMemoryDevice()) {
+ requestedSize = 512 * 1024 * 1024 - 1;
+ predictedSize =
+ PartitionAllocActualSize(generic_allocator.root(), requestedSize);
+ ptr = PartitionAllocGeneric(generic_allocator.root(), requestedSize,
+ type_name);
+ EXPECT_TRUE(ptr);
+ actualSize = PartitionAllocGetSize(ptr);
+ EXPECT_EQ(predictedSize, actualSize);
+ EXPECT_LT(requestedSize, actualSize);
+ PartitionFreeGeneric(generic_allocator.root(), ptr);
+ }
+
+ // Too large allocation.
+ requestedSize = INT_MAX;
+ predictedSize =
+ PartitionAllocActualSize(generic_allocator.root(), requestedSize);
+ EXPECT_EQ(requestedSize, predictedSize);
+}
+
+// Test the realloc() contract.
+TEST(PartitionAllocTest, Realloc) {
+ TestSetup();
+
+ // realloc(0, size) should be equivalent to malloc().
+ void* ptr = PartitionReallocGeneric(generic_allocator.root(), 0,
+ kTestAllocSize, type_name);
+ memset(ptr, 'A', kTestAllocSize);
+ PartitionPage* page =
+ PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr));
+ // realloc(ptr, 0) should be equivalent to free().
+ void* ptr2 =
+ PartitionReallocGeneric(generic_allocator.root(), ptr, 0, type_name);
+ EXPECT_EQ(0, ptr2);
+ EXPECT_EQ(PartitionCookieFreePointerAdjust(ptr), page->freelist_head);
+
+ // Test that growing an allocation with realloc() copies everything from the
+ // old allocation.
+ size_t size = kSystemPageSize - kExtraAllocSize;
+ EXPECT_EQ(size, PartitionAllocActualSize(generic_allocator.root(), size));
+ ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ memset(ptr, 'A', size);
+ ptr2 = PartitionReallocGeneric(generic_allocator.root(), ptr, size + 1,
+ type_name);
+ EXPECT_NE(ptr, ptr2);
+ char* charPtr2 = static_cast<char*>(ptr2);
+ EXPECT_EQ('A', charPtr2[0]);
+ EXPECT_EQ('A', charPtr2[size - 1]);
+#if DCHECK_IS_ON()
+ EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(charPtr2[size]));
+#endif
+
+ // Test that shrinking an allocation with realloc() also copies everything
+ // from the old allocation.
+ ptr = PartitionReallocGeneric(generic_allocator.root(), ptr2, size - 1,
+ type_name);
+ EXPECT_NE(ptr2, ptr);
+ char* charPtr = static_cast<char*>(ptr);
+ EXPECT_EQ('A', charPtr[0]);
+ EXPECT_EQ('A', charPtr[size - 2]);
+#if DCHECK_IS_ON()
+ EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(charPtr[size - 1]));
+#endif
+
+ PartitionFreeGeneric(generic_allocator.root(), ptr);
+
+ // Test that shrinking a direct mapped allocation happens in-place.
+ size = kGenericMaxBucketed + 16 * kSystemPageSize;
+ ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ size_t actualSize = PartitionAllocGetSize(ptr);
+ ptr2 = PartitionReallocGeneric(generic_allocator.root(), ptr,
+ kGenericMaxBucketed + 8 * kSystemPageSize,
+ type_name);
+ EXPECT_EQ(ptr, ptr2);
+ EXPECT_EQ(actualSize - 8 * kSystemPageSize, PartitionAllocGetSize(ptr2));
+
+ // Test that a previously in-place shrunk direct mapped allocation can be
+ // expanded up again within its original size.
+ ptr = PartitionReallocGeneric(generic_allocator.root(), ptr2,
+ size - kSystemPageSize, type_name);
+ EXPECT_EQ(ptr2, ptr);
+ EXPECT_EQ(actualSize - kSystemPageSize, PartitionAllocGetSize(ptr));
+
+ // Test that a direct mapped allocation is not performed in-place when the
+ // new size is small enough.
+ ptr2 = PartitionReallocGeneric(generic_allocator.root(), ptr, kSystemPageSize,
+ type_name);
+ EXPECT_NE(ptr, ptr2);
+
+ PartitionFreeGeneric(generic_allocator.root(), ptr2);
+}
+
+// Tests the handing out of freelists for partial pages.
+TEST(PartitionAllocTest, PartialPageFreelists) {
+ TestSetup();
+
+ size_t big_size = allocator.root()->max_allocation - kExtraAllocSize;
+ EXPECT_EQ(kSystemPageSize - kAllocationGranularity,
+ big_size + kExtraAllocSize);
+ size_t bucket_index = (big_size + kExtraAllocSize) >> kBucketShift;
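+ // (The index is the requested size plus kExtraAllocSize, divided by the
+ // bucket granularity of 1 << kBucketShift.)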
+ PartitionBucket* bucket = &allocator.root()->buckets()[bucket_index];
+ EXPECT_EQ(0, bucket->empty_pages_head);
+
+ void* ptr = PartitionAlloc(allocator.root(), big_size, type_name);
+ EXPECT_TRUE(ptr);
+
+ PartitionPage* page =
+ PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr));
+ size_t totalSlots =
+ (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) /
+ (big_size + kExtraAllocSize);
+ EXPECT_EQ(4u, totalSlots);
+ // The freelist should have one entry, because we were able to exactly fit
+ // one object slot and one freelist pointer (the null that the head points
+ // to) into a system page.
+ EXPECT_TRUE(page->freelist_head);
+ EXPECT_EQ(1, page->num_allocated_slots);
+ EXPECT_EQ(2, page->num_unprovisioned_slots);
+
+ void* ptr2 = PartitionAlloc(allocator.root(), big_size, type_name);
+ EXPECT_TRUE(ptr2);
+ EXPECT_FALSE(page->freelist_head);
+ EXPECT_EQ(2, page->num_allocated_slots);
+ EXPECT_EQ(2, page->num_unprovisioned_slots);
+
+ void* ptr3 = PartitionAlloc(allocator.root(), big_size, type_name);
+ EXPECT_TRUE(ptr3);
+ EXPECT_TRUE(page->freelist_head);
+ EXPECT_EQ(3, page->num_allocated_slots);
+ EXPECT_EQ(0, page->num_unprovisioned_slots);
+
+ void* ptr4 = PartitionAlloc(allocator.root(), big_size, type_name);
+ EXPECT_TRUE(ptr4);
+ EXPECT_FALSE(page->freelist_head);
+ EXPECT_EQ(4, page->num_allocated_slots);
+ EXPECT_EQ(0, page->num_unprovisioned_slots);
+
+ void* ptr5 = PartitionAlloc(allocator.root(), big_size, type_name);
+ EXPECT_TRUE(ptr5);
+
+ PartitionPage* page2 =
+ PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr5));
+ EXPECT_EQ(1, page2->num_allocated_slots);
+
+ // Churn things a little whilst there's a partial page freelist.
+ PartitionFree(ptr);
+ ptr = PartitionAlloc(allocator.root(), big_size, type_name);
+ void* ptr6 = PartitionAlloc(allocator.root(), big_size, type_name);
+
+ PartitionFree(ptr);
+ PartitionFree(ptr2);
+ PartitionFree(ptr3);
+ PartitionFree(ptr4);
+ PartitionFree(ptr5);
+ PartitionFree(ptr6);
+ EXPECT_NE(-1, page->empty_cache_index);
+ EXPECT_NE(-1, page2->empty_cache_index);
+ EXPECT_TRUE(page2->freelist_head);
+ EXPECT_EQ(0, page2->num_allocated_slots);
+
+ // And test a couple of sizes that do not cross kSystemPageSize with a single
+ // allocation.
+ size_t mediumSize = (kSystemPageSize / 2) - kExtraAllocSize;
+ bucket_index = (mediumSize + kExtraAllocSize) >> kBucketShift;
+ bucket = &allocator.root()->buckets()[bucket_index];
+ EXPECT_EQ(0, bucket->empty_pages_head);
+
+ ptr = PartitionAlloc(allocator.root(), mediumSize, type_name);
+ EXPECT_TRUE(ptr);
+ page = PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr));
+ EXPECT_EQ(1, page->num_allocated_slots);
+ totalSlots =
+ (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) /
+ (mediumSize + kExtraAllocSize);
+ size_t firstPageSlots = kSystemPageSize / (mediumSize + kExtraAllocSize);
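+ // Only the slots that fit in the first system page get provisioned up
+ // front; the check below expects the rest to remain unprovisioned.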
+ EXPECT_EQ(2u, firstPageSlots);
+ EXPECT_EQ(totalSlots - firstPageSlots, page->num_unprovisioned_slots);
+
+ PartitionFree(ptr);
+
+ size_t smallSize = (kSystemPageSize / 4) - kExtraAllocSize;
+ bucket_index = (smallSize + kExtraAllocSize) >> kBucketShift;
+ bucket = &allocator.root()->buckets()[bucket_index];
+ EXPECT_EQ(0, bucket->empty_pages_head);
+
+ ptr = PartitionAlloc(allocator.root(), smallSize, type_name);
+ EXPECT_TRUE(ptr);
+ page = PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr));
+ EXPECT_EQ(1, page->num_allocated_slots);
+ totalSlots =
+ (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) /
+ (smallSize + kExtraAllocSize);
+ firstPageSlots = kSystemPageSize / (smallSize + kExtraAllocSize);
+ EXPECT_EQ(totalSlots - firstPageSlots, page->num_unprovisioned_slots);
+
+ PartitionFree(ptr);
+ EXPECT_TRUE(page->freelist_head);
+ EXPECT_EQ(0, page->num_allocated_slots);
+
+ size_t verySmallSize = 32 - kExtraAllocSize;
+ bucket_index = (verySmallSize + kExtraAllocSize) >> kBucketShift;
+ bucket = &allocator.root()->buckets()[bucket_index];
+ EXPECT_EQ(0, bucket->empty_pages_head);
+
+ ptr = PartitionAlloc(allocator.root(), verySmallSize, type_name);
+ EXPECT_TRUE(ptr);
+ page = PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr));
+ EXPECT_EQ(1, page->num_allocated_slots);
+ totalSlots =
+ (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) /
+ (verySmallSize + kExtraAllocSize);
+ firstPageSlots = kSystemPageSize / (verySmallSize + kExtraAllocSize);
+ EXPECT_EQ(totalSlots - firstPageSlots, page->num_unprovisioned_slots);
+
+ PartitionFree(ptr);
+ EXPECT_TRUE(page->freelist_head);
+ EXPECT_EQ(0, page->num_allocated_slots);
+
+ // And try an allocation size (against the generic allocator) that is
+ // larger than a system page.
+ size_t pageAndAHalfSize =
+ (kSystemPageSize + (kSystemPageSize / 2)) - kExtraAllocSize;
+ ptr = PartitionAllocGeneric(generic_allocator.root(), pageAndAHalfSize,
+ type_name);
+ EXPECT_TRUE(ptr);
+ page = PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr));
+ EXPECT_EQ(1, page->num_allocated_slots);
+ EXPECT_TRUE(page->freelist_head);
+ totalSlots =
+ (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) /
+ (pageAndAHalfSize + kExtraAllocSize);
+ EXPECT_EQ(totalSlots - 2, page->num_unprovisioned_slots);
+ PartitionFreeGeneric(generic_allocator.root(), ptr);
+
+ // And then make sure that exactly the page size only faults one page.
+ size_t pageSize = kSystemPageSize - kExtraAllocSize;
+ ptr = PartitionAllocGeneric(generic_allocator.root(), pageSize, type_name);
+ EXPECT_TRUE(ptr);
+ page = PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr));
+ EXPECT_EQ(1, page->num_allocated_slots);
+ EXPECT_FALSE(page->freelist_head);
+ totalSlots =
+ (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) /
+ (pageSize + kExtraAllocSize);
+ EXPECT_EQ(totalSlots - 1, page->num_unprovisioned_slots);
+ PartitionFreeGeneric(generic_allocator.root(), ptr);
+}
+
+// Test some of the fragmentation-resistant properties of the allocator.
+TEST(PartitionAllocTest, PageRefilling) {
+ TestSetup();
+ PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
+
+ // Grab two full pages and a non-full page.
+ PartitionPage* page1 = GetFullPage(kTestAllocSize);
+ PartitionPage* page2 = GetFullPage(kTestAllocSize);
+ void* ptr = PartitionAlloc(allocator.root(), kTestAllocSize, type_name);
+ EXPECT_TRUE(ptr);
+ EXPECT_NE(page1, bucket->active_pages_head);
+ EXPECT_NE(page2, bucket->active_pages_head);
+ PartitionPage* page =
+ PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr));
+ EXPECT_EQ(1, page->num_allocated_slots);
+
+ // Work out a pointer into page1 and free it; then do the same for page2.
+ char* ptr2 =
+ reinterpret_cast<char*>(PartitionPageToPointer(page1)) + kPointerOffset;
+ PartitionFree(ptr2);
+ ptr2 =
+ reinterpret_cast<char*>(PartitionPageToPointer(page2)) + kPointerOffset;
+ PartitionFree(ptr2);
+
+ // If we perform two allocations from the same bucket now, we expect to
+ // refill both the nearly full pages.
+ (void)PartitionAlloc(allocator.root(), kTestAllocSize, type_name);
+ (void)PartitionAlloc(allocator.root(), kTestAllocSize, type_name);
+ EXPECT_EQ(1, page->num_allocated_slots);
+
+ FreeFullPage(page2);
+ FreeFullPage(page1);
+ PartitionFree(ptr);
+}
+
+// Basic tests to ensure that allocations work for partial page buckets.
+TEST(PartitionAllocTest, PartialPages) {
+ TestSetup();
+
+ // Find a size that is backed by a partial partition page.
+ size_t size = sizeof(void*);
+ PartitionBucket* bucket = 0;
+ while (size < kTestMaxAllocation) {
+ bucket = &allocator.root()->buckets()[size >> kBucketShift];
+ if (bucket->num_system_pages_per_slot_span %
+ kNumSystemPagesPerPartitionPage)
+ break;
+ size += sizeof(void*);
+ }
+ EXPECT_LT(size, kTestMaxAllocation);
+
+ PartitionPage* page1 = GetFullPage(size);
+ PartitionPage* page2 = GetFullPage(size);
+ FreeFullPage(page2);
+ FreeFullPage(page1);
+}
+
+// Test correct handling if our mapping collides with another.
+TEST(PartitionAllocTest, MappingCollision) {
+ TestSetup();
+ // The -2 is because the first and last partition pages in a super page are
+ // guard pages.
+ size_t numPartitionPagesNeeded = kNumPartitionPagesPerSuperPage - 2;
+ std::unique_ptr<PartitionPage* []> firstSuperPagePages =
+ WrapArrayUnique(new PartitionPage*[numPartitionPagesNeeded]);
+ std::unique_ptr<PartitionPage* []> secondSuperPagePages =
+ WrapArrayUnique(new PartitionPage*[numPartitionPagesNeeded]);
+
+ size_t i;
+ for (i = 0; i < numPartitionPagesNeeded; ++i)
+ firstSuperPagePages[i] = GetFullPage(kTestAllocSize);
+
+ char* pageBase =
+ reinterpret_cast<char*>(PartitionPageToPointer(firstSuperPagePages[0]));
+ EXPECT_EQ(kPartitionPageSize,
+ reinterpret_cast<uintptr_t>(pageBase) & kSuperPageOffsetMask);
+ pageBase -= kPartitionPageSize;
+ // Map a single system page either side of the mapping for our allocations,
+ // with the goal of tripping up alignment of the next mapping.
+ void* map1 = AllocPages(pageBase - kPageAllocationGranularity,
+ kPageAllocationGranularity,
+ kPageAllocationGranularity, PageInaccessible);
+ EXPECT_TRUE(map1);
+ void* map2 = AllocPages(pageBase + kSuperPageSize, kPageAllocationGranularity,
+ kPageAllocationGranularity, PageInaccessible);
+ EXPECT_TRUE(map2);
+
+ for (i = 0; i < numPartitionPagesNeeded; ++i)
+ secondSuperPagePages[i] = GetFullPage(kTestAllocSize);
+
+ FreePages(map1, kPageAllocationGranularity);
+ FreePages(map2, kPageAllocationGranularity);
+
+ pageBase =
+ reinterpret_cast<char*>(PartitionPageToPointer(secondSuperPagePages[0]));
+ EXPECT_EQ(kPartitionPageSize,
+ reinterpret_cast<uintptr_t>(pageBase) & kSuperPageOffsetMask);
+ pageBase -= kPartitionPageSize;
+ // Map a single system page either side of the mapping for our allocations,
+ // with the goal of tripping up alignment of the next mapping.
+ map1 = AllocPages(pageBase - kPageAllocationGranularity,
+ kPageAllocationGranularity, kPageAllocationGranularity,
+ PageAccessible);
+ EXPECT_TRUE(map1);
+ map2 = AllocPages(pageBase + kSuperPageSize, kPageAllocationGranularity,
+ kPageAllocationGranularity, PageAccessible);
+ EXPECT_TRUE(map2);
+ SetSystemPagesInaccessible(map1, kPageAllocationGranularity);
+ SetSystemPagesInaccessible(map2, kPageAllocationGranularity);
+
+ PartitionPage* pageInThirdSuperPage = GetFullPage(kTestAllocSize);
+ FreePages(map1, kPageAllocationGranularity);
+ FreePages(map2, kPageAllocationGranularity);
+
+ EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(
+ PartitionPageToPointer(pageInThirdSuperPage)) &
+ kPartitionPageOffsetMask);
+
+ // And make sure we really did get a page in a new superpage.
+ EXPECT_NE(reinterpret_cast<uintptr_t>(
+ PartitionPageToPointer(firstSuperPagePages[0])) &
+ kSuperPageBaseMask,
+ reinterpret_cast<uintptr_t>(
+ PartitionPageToPointer(pageInThirdSuperPage)) &
+ kSuperPageBaseMask);
+ EXPECT_NE(reinterpret_cast<uintptr_t>(
+ PartitionPageToPointer(secondSuperPagePages[0])) &
+ kSuperPageBaseMask,
+ reinterpret_cast<uintptr_t>(
+ PartitionPageToPointer(pageInThirdSuperPage)) &
+ kSuperPageBaseMask);
+
+ FreeFullPage(pageInThirdSuperPage);
+ for (i = 0; i < numPartitionPagesNeeded; ++i) {
+ FreeFullPage(firstSuperPagePages[i]);
+ FreeFullPage(secondSuperPagePages[i]);
+ }
+}
+
+// Tests that pages in the free page cache do get freed as appropriate.
+TEST(PartitionAllocTest, FreeCache) {
+ TestSetup();
+
+ EXPECT_EQ(0U, allocator.root()->total_size_of_committed_pages);
+
+ size_t big_size = allocator.root()->max_allocation - kExtraAllocSize;
+ size_t bucket_index = (big_size + kExtraAllocSize) >> kBucketShift;
+ PartitionBucket* bucket = &allocator.root()->buckets()[bucket_index];
+
+ void* ptr = PartitionAlloc(allocator.root(), big_size, type_name);
+ EXPECT_TRUE(ptr);
+ PartitionPage* page =
+ PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr));
+ EXPECT_EQ(0, bucket->empty_pages_head);
+ EXPECT_EQ(1, page->num_allocated_slots);
+ EXPECT_EQ(kPartitionPageSize,
+ allocator.root()->total_size_of_committed_pages);
+ PartitionFree(ptr);
+ EXPECT_EQ(0, page->num_allocated_slots);
+ EXPECT_NE(-1, page->empty_cache_index);
+ EXPECT_TRUE(page->freelist_head);
+
+ CycleFreeCache(kTestAllocSize);
+
+ // Flushing the cache should have really freed the unused page.
+ EXPECT_FALSE(page->freelist_head);
+ EXPECT_EQ(-1, page->empty_cache_index);
+ EXPECT_EQ(0, page->num_allocated_slots);
+ PartitionBucket* cycle_free_cache_bucket =
+ &allocator.root()->buckets()[kTestBucketIndex];
+ EXPECT_EQ(
+ cycle_free_cache_bucket->num_system_pages_per_slot_span * kSystemPageSize,
+ allocator.root()->total_size_of_committed_pages);
+
+ // Check that an allocation works OK whilst in this state (a freed page
+ // as the active pages head).
+ ptr = PartitionAlloc(allocator.root(), big_size, type_name);
+ EXPECT_FALSE(bucket->empty_pages_head);
+ PartitionFree(ptr);
+
+ // Also check that a page that is bouncing immediately between empty and
+ // used does not get freed.
+ for (size_t i = 0; i < kMaxFreeableSpans * 2; ++i) {
+ ptr = PartitionAlloc(allocator.root(), big_size, type_name);
+ EXPECT_TRUE(page->freelist_head);
+ PartitionFree(ptr);
+ EXPECT_TRUE(page->freelist_head);
+ }
+ EXPECT_EQ(kPartitionPageSize,
+ allocator.root()->total_size_of_committed_pages);
+}
+
+// Tests for a bug we had with losing references to free pages.
+TEST(PartitionAllocTest, LostFreePagesBug) {
+ TestSetup();
+
+ size_t size = kPartitionPageSize - kExtraAllocSize;
+
+ void* ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ EXPECT_TRUE(ptr);
+ void* ptr2 = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ EXPECT_TRUE(ptr2);
+
+ PartitionPage* page =
+ PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr));
+ PartitionPage* page2 =
+ PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr2));
+ PartitionBucket* bucket = page->bucket;
+
+ EXPECT_EQ(0, bucket->empty_pages_head);
+ EXPECT_EQ(-1, page->num_allocated_slots);
+ EXPECT_EQ(1, page2->num_allocated_slots);
+
+ PartitionFreeGeneric(generic_allocator.root(), ptr);
+ PartitionFreeGeneric(generic_allocator.root(), ptr2);
+
+ EXPECT_TRUE(bucket->empty_pages_head);
+ EXPECT_TRUE(bucket->empty_pages_head->next_page);
+ EXPECT_EQ(0, page->num_allocated_slots);
+ EXPECT_EQ(0, page2->num_allocated_slots);
+ EXPECT_TRUE(page->freelist_head);
+ EXPECT_TRUE(page2->freelist_head);
+
+ CycleGenericFreeCache(kTestAllocSize);
+
+ EXPECT_FALSE(page->freelist_head);
+ EXPECT_FALSE(page2->freelist_head);
+
+ EXPECT_TRUE(bucket->empty_pages_head);
+ EXPECT_TRUE(bucket->empty_pages_head->next_page);
+ EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->active_pages_head);
+
+ // At this moment, we have two decommitted pages on the empty list.
+ ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ EXPECT_TRUE(ptr);
+ PartitionFreeGeneric(generic_allocator.root(), ptr);
+
+ EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->active_pages_head);
+ EXPECT_TRUE(bucket->empty_pages_head);
+ EXPECT_TRUE(bucket->decommitted_pages_head);
+
+ CycleGenericFreeCache(kTestAllocSize);
+
+ // We're now set up to trigger a historical bug by scanning over the active
+ // pages list. The current code gets into a different state, but we'll keep
+ // the test as an interesting corner case.
+ ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ EXPECT_TRUE(ptr);
+ PartitionFreeGeneric(generic_allocator.root(), ptr);
+
+ EXPECT_TRUE(bucket->active_pages_head);
+ EXPECT_TRUE(bucket->empty_pages_head);
+ EXPECT_TRUE(bucket->decommitted_pages_head);
+}
+
+#if !defined(ARCH_CPU_64_BITS) || defined(OS_POSIX)
+
+static void DoReturnNullTest(size_t allocSize) {
+ // TODO(crbug.com/678782): Where necessary and possible, disable the
+ // platform's OOM-killing behavior. OOM-killing makes this test flaky on
+ // low-memory devices.
+ if (!IsLargeMemoryDevice()) {
+ LOG(WARNING) << "Skipping test on this device because of crbug.com/678782";
+ return;
+ }
+
+ TestSetup();
+
+ EXPECT_TRUE(SetAddressSpaceLimit());
+
+ // Work out the number of allocations for 6 GB of memory.
+ const int numAllocations = (6 * 1024 * 1024) / (allocSize / 1024);
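+ // (6 * 1024 * 1024 is 6 GiB expressed in KiB and allocSize / 1024 is the
+ // allocation size in KiB, so this works out to 6 GiB / allocSize.)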
+
+ void** ptrs = reinterpret_cast<void**>(PartitionAllocGeneric(
+ generic_allocator.root(), numAllocations * sizeof(void*), type_name));
+ int i;
+
+ for (i = 0; i < numAllocations; ++i) {
+ ptrs[i] = PartitionAllocGenericFlags(generic_allocator.root(),
+ PartitionAllocReturnNull, allocSize,
+ type_name);
+ if (!i)
+ EXPECT_TRUE(ptrs[0]);
+ if (!ptrs[i]) {
+ ptrs[i] = PartitionAllocGenericFlags(generic_allocator.root(),
+ PartitionAllocReturnNull, allocSize,
+ type_name);
+ EXPECT_FALSE(ptrs[i]);
+ break;
+ }
+ }
+
+ // We shouldn't succeed in allocating all 6 GB of memory. If we do, then
+ // we're not actually testing anything here.
+ EXPECT_LT(i, numAllocations);
+
+ // Free, reallocate and free again each block we allocated. We do this to
+ // check that freeing memory also works correctly after a failed allocation.
+ for (--i; i >= 0; --i) {
+ PartitionFreeGeneric(generic_allocator.root(), ptrs[i]);
+ ptrs[i] = PartitionAllocGenericFlags(generic_allocator.root(),
+ PartitionAllocReturnNull, allocSize,
+ type_name);
+ EXPECT_TRUE(ptrs[i]);
+ PartitionFreeGeneric(generic_allocator.root(), ptrs[i]);
+ }
+
+ PartitionFreeGeneric(generic_allocator.root(), ptrs);
+
+ EXPECT_TRUE(ClearAddressSpaceLimit());
+}
+
+// Tests that if an allocation fails in "return null" mode, repeating it doesn't
+// crash, and still returns null. The test tries to allocate 6 GB of memory in
+// 512 kB blocks. On 64-bit POSIX systems, the address space is limited to 6 GB
+// using setrlimit() first.
+//
+// Disable this test on Android because, due to its allocation-heavy behavior,
+// it tends to get OOM-killed rather than pass. (The #if below also disables
+// it on macOS.)
+#if defined(OS_MACOSX) || defined(OS_ANDROID)
+#define MAYBE_RepeatedReturnNull DISABLED_RepeatedReturnNull
+#else
+#define MAYBE_RepeatedReturnNull RepeatedReturnNull
+#endif
+TEST(PartitionAllocTest, MAYBE_RepeatedReturnNull) {
+ // A single-slot but non-direct-mapped allocation size.
+ DoReturnNullTest(512 * 1024);
+}
+
+// Another "return null" test but for larger, direct-mapped allocations.
+//
+// Disable this test on Android because, due to its allocation-heavy behavior,
+// it tends to get OOM-killed rather than pass. (The #if below also disables
+// it on macOS.)
+#if defined(OS_MACOSX) || defined(OS_ANDROID)
+#define MAYBE_RepeatedReturnNullDirect DISABLED_RepeatedReturnNullDirect
+#else
+#define MAYBE_RepeatedReturnNullDirect RepeatedReturnNullDirect
+#endif
+TEST(PartitionAllocTest, MAYBE_RepeatedReturnNullDirect) {
+ // A direct-mapped allocation size.
+ DoReturnNullTest(32 * 1024 * 1024);
+}
+
+#endif // !defined(ARCH_CPU_64_BITS) || defined(OS_POSIX)
+
+// Death tests misbehave on Android, http://crbug.com/643760.
+#if defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
+
+// Make sure that malloc(-1) dies.
+// In the past, we had an integer overflow that would alias malloc(-1) to
+// malloc(0), which is not good.
+TEST(PartitionAllocDeathTest, LargeAllocs) {
+ TestSetup();
+ // Largest alloc.
+ EXPECT_DEATH(PartitionAllocGeneric(generic_allocator.root(),
+ static_cast<size_t>(-1), type_name),
+ "");
+ // And the smallest allocation we expect to die.
+ EXPECT_DEATH(
+ PartitionAllocGeneric(generic_allocator.root(),
+ static_cast<size_t>(INT_MAX) + 1, type_name),
+ "");
+}
+
+// Check that our immediate double-free detection works.
+TEST(PartitionAllocDeathTest, ImmediateDoubleFree) {
+ TestSetup();
+
+ void* ptr = PartitionAllocGeneric(generic_allocator.root(), kTestAllocSize,
+ type_name);
+ EXPECT_TRUE(ptr);
+ PartitionFreeGeneric(generic_allocator.root(), ptr);
+
+ EXPECT_DEATH(PartitionFreeGeneric(generic_allocator.root(), ptr), "");
+}
+
+// Check that our refcount-based double-free detection works.
+TEST(PartitionAllocDeathTest, RefcountDoubleFree) {
+ TestSetup();
+
+ void* ptr = PartitionAllocGeneric(generic_allocator.root(), kTestAllocSize,
+ type_name);
+ EXPECT_TRUE(ptr);
+ void* ptr2 = PartitionAllocGeneric(generic_allocator.root(), kTestAllocSize,
+ type_name);
+ EXPECT_TRUE(ptr2);
+ PartitionFreeGeneric(generic_allocator.root(), ptr);
+ PartitionFreeGeneric(generic_allocator.root(), ptr2);
+ // This is not an immediate double-free so our immediate detection won't
+ // fire. However, it does take the "refcount" of the partition page to -1,
+ // which is illegal and should be trapped.
+ EXPECT_DEATH(PartitionFreeGeneric(generic_allocator.root(), ptr), "");
+}
+
+// Check that guard pages are present where expected.
+TEST(PartitionAllocDeathTest, GuardPages) {
+ TestSetup();
+
+// PartitionAlloc adds kPartitionPageSize to the requested size
+// (for metadata), and then rounds that size to kPageAllocationGranularity.
+// To be able to reliably write one past a direct allocation, choose a size
+// that is:
+// a) larger than kGenericMaxBucketed (to make the allocation direct), and
+// b) aligned at kPageAllocationGranularity boundaries after
+//    kPartitionPageSize has been added to it.
+// (On 32-bit, PartitionAlloc adds another kSystemPageSize to the
+// allocation size before rounding, but there it marks the memory right
+// after size as inaccessible, so it's fine to write 1 past the size we
+// hand to PartitionAlloc and we don't need to worry about allocation
+// granularities.)
+#define ALIGN(N, A) (((N) + (A)-1) / (A) * (A))
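+// ALIGN(N, A) rounds N up to the next multiple of A.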
+ const int kSize = ALIGN(kGenericMaxBucketed + 1 + kPartitionPageSize,
+ kPageAllocationGranularity) -
+ kPartitionPageSize;
+#undef ALIGN
+ static_assert(kSize > kGenericMaxBucketed,
+ "allocation not large enough for direct allocation");
+ size_t size = kSize - kExtraAllocSize;
+ void* ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+
+ EXPECT_TRUE(ptr);
+ char* charPtr = reinterpret_cast<char*>(ptr) - kPointerOffset;
+
+ EXPECT_DEATH(*(charPtr - 1) = 'A', "");
+ EXPECT_DEATH(*(charPtr + size + kExtraAllocSize) = 'A', "");
+
+ PartitionFreeGeneric(generic_allocator.root(), ptr);
+}
+
+// Check that a bad free() is caught where the free() refers to an unused
+// partition page of a large allocation.
+TEST(PartitionAllocDeathTest, FreeWrongPartitionPage) {
+ TestSetup();
+
+ // This large size will result in a direct mapped allocation with guard
+ // pages at either end.
+ void* ptr = PartitionAllocGeneric(generic_allocator.root(),
+ kPartitionPageSize * 2, type_name);
+ EXPECT_TRUE(ptr);
+ char* badPtr = reinterpret_cast<char*>(ptr) + kPartitionPageSize;
+
+ EXPECT_DEATH(PartitionFreeGeneric(generic_allocator.root(), badPtr), "");
+
+ PartitionFreeGeneric(generic_allocator.root(), ptr);
+}
+
+#endif // defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
+
+// Tests that PartitionDumpStatsGeneric and PartitionDumpStats run without
+// crashing and return non-zero values when memory is allocated.
+TEST(PartitionAllocTest, DumpMemoryStats) {
+ TestSetup();
+ {
+ void* ptr = PartitionAlloc(allocator.root(), kTestAllocSize, type_name);
+ MockPartitionStatsDumper mockStatsDumper;
+ PartitionDumpStats(allocator.root(), "mock_allocator",
+ false /* detailed dump */, &mockStatsDumper);
+ EXPECT_TRUE(mockStatsDumper.IsMemoryAllocationRecorded());
+
+ PartitionFree(ptr);
+ }
+
+ // This series of tests checks the active -> empty -> decommitted states.
+ {
+ void* genericPtr = PartitionAllocGeneric(generic_allocator.root(),
+ 2048 - kExtraAllocSize, type_name);
+ {
+ MockPartitionStatsDumper dumper;
+ PartitionDumpStatsGeneric(generic_allocator.root(),
+ "mock_generic_allocator",
+ false /* detailed dump */, &dumper);
+ EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
+
+ const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
+ EXPECT_TRUE(stats);
+ EXPECT_TRUE(stats->is_valid);
+ EXPECT_EQ(2048u, stats->bucket_slot_size);
+ EXPECT_EQ(2048u, stats->active_bytes);
+ EXPECT_EQ(kSystemPageSize, stats->resident_bytes);
+ EXPECT_EQ(0u, stats->decommittable_bytes);
+ EXPECT_EQ(0u, stats->discardable_bytes);
+ EXPECT_EQ(0u, stats->num_full_pages);
+ EXPECT_EQ(1u, stats->num_active_pages);
+ EXPECT_EQ(0u, stats->num_empty_pages);
+ EXPECT_EQ(0u, stats->num_decommitted_pages);
+ }
+
+ PartitionFreeGeneric(generic_allocator.root(), genericPtr);
+
+ {
+ MockPartitionStatsDumper dumper;
+ PartitionDumpStatsGeneric(generic_allocator.root(),
+ "mock_generic_allocator",
+ false /* detailed dump */, &dumper);
+ EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
+
+ const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
+ EXPECT_TRUE(stats);
+ EXPECT_TRUE(stats->is_valid);
+ EXPECT_EQ(2048u, stats->bucket_slot_size);
+ EXPECT_EQ(0u, stats->active_bytes);
+ EXPECT_EQ(kSystemPageSize, stats->resident_bytes);
+ EXPECT_EQ(kSystemPageSize, stats->decommittable_bytes);
+ EXPECT_EQ(0u, stats->discardable_bytes);
+ EXPECT_EQ(0u, stats->num_full_pages);
+ EXPECT_EQ(0u, stats->num_active_pages);
+ EXPECT_EQ(1u, stats->num_empty_pages);
+ EXPECT_EQ(0u, stats->num_decommitted_pages);
+ }
+
+ CycleGenericFreeCache(kTestAllocSize);
+
+ {
+ MockPartitionStatsDumper dumper;
+ PartitionDumpStatsGeneric(generic_allocator.root(),
+ "mock_generic_allocator",
+ false /* detailed dump */, &dumper);
+ EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
+
+ const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
+ EXPECT_TRUE(stats);
+ EXPECT_TRUE(stats->is_valid);
+ EXPECT_EQ(2048u, stats->bucket_slot_size);
+ EXPECT_EQ(0u, stats->active_bytes);
+ EXPECT_EQ(0u, stats->resident_bytes);
+ EXPECT_EQ(0u, stats->decommittable_bytes);
+ EXPECT_EQ(0u, stats->discardable_bytes);
+ EXPECT_EQ(0u, stats->num_full_pages);
+ EXPECT_EQ(0u, stats->num_active_pages);
+ EXPECT_EQ(0u, stats->num_empty_pages);
+ EXPECT_EQ(1u, stats->num_decommitted_pages);
+ }
+ }
+
+ // This test checks for correct empty page list accounting.
+ {
+ size_t size = kPartitionPageSize - kExtraAllocSize;
+ void* ptr1 =
+ PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ void* ptr2 =
+ PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ PartitionFreeGeneric(generic_allocator.root(), ptr1);
+ PartitionFreeGeneric(generic_allocator.root(), ptr2);
+
+ CycleGenericFreeCache(kTestAllocSize);
+
+ ptr1 = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+
+ {
+ MockPartitionStatsDumper dumper;
+ PartitionDumpStatsGeneric(generic_allocator.root(),
+ "mock_generic_allocator",
+ false /* detailed dump */, &dumper);
+ EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
+
+ const PartitionBucketMemoryStats* stats =
+ dumper.GetBucketStats(kPartitionPageSize);
+ EXPECT_TRUE(stats);
+ EXPECT_TRUE(stats->is_valid);
+ EXPECT_EQ(kPartitionPageSize, stats->bucket_slot_size);
+ EXPECT_EQ(kPartitionPageSize, stats->active_bytes);
+ EXPECT_EQ(kPartitionPageSize, stats->resident_bytes);
+ EXPECT_EQ(0u, stats->decommittable_bytes);
+ EXPECT_EQ(0u, stats->discardable_bytes);
+ EXPECT_EQ(1u, stats->num_full_pages);
+ EXPECT_EQ(0u, stats->num_active_pages);
+ EXPECT_EQ(0u, stats->num_empty_pages);
+ EXPECT_EQ(1u, stats->num_decommitted_pages);
+ }
+ PartitionFreeGeneric(generic_allocator.root(), ptr1);
+ }
+
+ // This test checks for correct direct mapped accounting.
+ {
+ size_t size_smaller = kGenericMaxBucketed + 1;
+ size_t size_bigger = (kGenericMaxBucketed * 2) + 1;
+ size_t real_size_smaller =
+ (size_smaller + kSystemPageOffsetMask) & kSystemPageBaseMask;
+ size_t real_size_bigger =
+ (size_bigger + kSystemPageOffsetMask) & kSystemPageBaseMask;
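+ // Adding kSystemPageOffsetMask and masking with kSystemPageBaseMask rounds
+ // each requested size up to the next system-page boundary; that rounded
+ // size is what the stats below are expected to report.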
+ void* ptr = PartitionAllocGeneric(generic_allocator.root(), size_smaller,
+ type_name);
+ void* ptr2 =
+ PartitionAllocGeneric(generic_allocator.root(), size_bigger, type_name);
+
+ {
+ MockPartitionStatsDumper dumper;
+ PartitionDumpStatsGeneric(generic_allocator.root(),
+ "mock_generic_allocator",
+ false /* detailed dump */, &dumper);
+ EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
+
+ const PartitionBucketMemoryStats* stats =
+ dumper.GetBucketStats(real_size_smaller);
+ EXPECT_TRUE(stats);
+ EXPECT_TRUE(stats->is_valid);
+ EXPECT_TRUE(stats->is_direct_map);
+ EXPECT_EQ(real_size_smaller, stats->bucket_slot_size);
+ EXPECT_EQ(real_size_smaller, stats->active_bytes);
+ EXPECT_EQ(real_size_smaller, stats->resident_bytes);
+ EXPECT_EQ(0u, stats->decommittable_bytes);
+ EXPECT_EQ(0u, stats->discardable_bytes);
+ EXPECT_EQ(1u, stats->num_full_pages);
+ EXPECT_EQ(0u, stats->num_active_pages);
+ EXPECT_EQ(0u, stats->num_empty_pages);
+ EXPECT_EQ(0u, stats->num_decommitted_pages);
+
+ stats = dumper.GetBucketStats(real_size_bigger);
+ EXPECT_TRUE(stats);
+ EXPECT_TRUE(stats->is_valid);
+ EXPECT_TRUE(stats->is_direct_map);
+ EXPECT_EQ(real_size_bigger, stats->bucket_slot_size);
+ EXPECT_EQ(real_size_bigger, stats->active_bytes);
+ EXPECT_EQ(real_size_bigger, stats->resident_bytes);
+ EXPECT_EQ(0u, stats->decommittable_bytes);
+ EXPECT_EQ(0u, stats->discardable_bytes);
+ EXPECT_EQ(1u, stats->num_full_pages);
+ EXPECT_EQ(0u, stats->num_active_pages);
+ EXPECT_EQ(0u, stats->num_empty_pages);
+ EXPECT_EQ(0u, stats->num_decommitted_pages);
+ }
+
+ PartitionFreeGeneric(generic_allocator.root(), ptr2);
+ PartitionFreeGeneric(generic_allocator.root(), ptr);
+
+ // Whilst we're here, allocate again and free with different ordering
+ // to give a workout to our linked list code.
+ ptr = PartitionAllocGeneric(generic_allocator.root(), size_smaller,
+ type_name);
+ ptr2 =
+ PartitionAllocGeneric(generic_allocator.root(), size_bigger, type_name);
+ PartitionFreeGeneric(generic_allocator.root(), ptr);
+ PartitionFreeGeneric(generic_allocator.root(), ptr2);
+ }
+
+ // This test checks large-but-not-quite-direct allocations.
+ {
+ void* ptr =
+ PartitionAllocGeneric(generic_allocator.root(), 65536 + 1, type_name);
+
+ {
+ MockPartitionStatsDumper dumper;
+ PartitionDumpStatsGeneric(generic_allocator.root(),
+ "mock_generic_allocator",
+ false /* detailed dump */, &dumper);
+ EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
+
+ size_t slot_size = 65536 + (65536 / kGenericNumBucketsPerOrder);
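+ // Generic buckets subdivide each power-of-two order into
+ // kGenericNumBucketsPerOrder steps, so the smallest bucket that can hold
+ // 65536 + 1 bytes should be 64 KiB plus one such step.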
+ const PartitionBucketMemoryStats* stats =
+ dumper.GetBucketStats(slot_size);
+ EXPECT_TRUE(stats);
+ EXPECT_TRUE(stats->is_valid);
+ EXPECT_FALSE(stats->is_direct_map);
+ EXPECT_EQ(slot_size, stats->bucket_slot_size);
+ EXPECT_EQ(65536u + 1 + kExtraAllocSize, stats->active_bytes);
+ EXPECT_EQ(slot_size, stats->resident_bytes);
+ EXPECT_EQ(0u, stats->decommittable_bytes);
+ EXPECT_EQ(kSystemPageSize, stats->discardable_bytes);
+ EXPECT_EQ(1u, stats->num_full_pages);
+ EXPECT_EQ(0u, stats->num_active_pages);
+ EXPECT_EQ(0u, stats->num_empty_pages);
+ EXPECT_EQ(0u, stats->num_decommitted_pages);
+ }
+
+ PartitionFreeGeneric(generic_allocator.root(), ptr);
+
+ {
+ MockPartitionStatsDumper dumper;
+ PartitionDumpStatsGeneric(generic_allocator.root(),
+ "mock_generic_allocator",
+ false /* detailed dump */, &dumper);
+ EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
+
+ size_t slot_size = 65536 + (65536 / kGenericNumBucketsPerOrder);
+ const PartitionBucketMemoryStats* stats =
+ dumper.GetBucketStats(slot_size);
+ EXPECT_TRUE(stats);
+ EXPECT_TRUE(stats->is_valid);
+ EXPECT_FALSE(stats->is_direct_map);
+ EXPECT_EQ(slot_size, stats->bucket_slot_size);
+ EXPECT_EQ(0u, stats->active_bytes);
+ EXPECT_EQ(slot_size, stats->resident_bytes);
+ EXPECT_EQ(slot_size, stats->decommittable_bytes);
+ EXPECT_EQ(0u, stats->num_full_pages);
+ EXPECT_EQ(0u, stats->num_active_pages);
+ EXPECT_EQ(1u, stats->num_empty_pages);
+ EXPECT_EQ(0u, stats->num_decommitted_pages);
+ }
+
+ void* ptr2 = PartitionAllocGeneric(generic_allocator.root(),
+ 65536 + kSystemPageSize + 1, type_name);
+ EXPECT_EQ(ptr, ptr2);
+
+ {
+ MockPartitionStatsDumper dumper;
+ PartitionDumpStatsGeneric(generic_allocator.root(),
+ "mock_generic_allocator",
+ false /* detailed dump */, &dumper);
+ EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
+
+ size_t slot_size = 65536 + (65536 / kGenericNumBucketsPerOrder);
+ const PartitionBucketMemoryStats* stats =
+ dumper.GetBucketStats(slot_size);
+ EXPECT_TRUE(stats);
+ EXPECT_TRUE(stats->is_valid);
+ EXPECT_FALSE(stats->is_direct_map);
+ EXPECT_EQ(slot_size, stats->bucket_slot_size);
+ EXPECT_EQ(65536u + kSystemPageSize + 1 + kExtraAllocSize,
+ stats->active_bytes);
+ EXPECT_EQ(slot_size, stats->resident_bytes);
+ EXPECT_EQ(0u, stats->decommittable_bytes);
+ EXPECT_EQ(0u, stats->discardable_bytes);
+ EXPECT_EQ(1u, stats->num_full_pages);
+ EXPECT_EQ(0u, stats->num_active_pages);
+ EXPECT_EQ(0u, stats->num_empty_pages);
+ EXPECT_EQ(0u, stats->num_decommitted_pages);
+ }
+
+ PartitionFreeGeneric(generic_allocator.root(), ptr2);
+ }
+}
+
+// Tests the API to purge freeable memory.
+TEST(PartitionAllocTest, Purge) {
+ TestSetup();
+
+ char* ptr = reinterpret_cast<char*>(PartitionAllocGeneric(
+ generic_allocator.root(), 2048 - kExtraAllocSize, type_name));
+ PartitionFreeGeneric(generic_allocator.root(), ptr);
+ {
+ MockPartitionStatsDumper dumper;
+ PartitionDumpStatsGeneric(generic_allocator.root(),
+ "mock_generic_allocator",
+ false /* detailed dump */, &dumper);
+ EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
+
+ const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
+ EXPECT_TRUE(stats);
+ EXPECT_TRUE(stats->is_valid);
+ EXPECT_EQ(kSystemPageSize, stats->decommittable_bytes);
+ EXPECT_EQ(kSystemPageSize, stats->resident_bytes);
+ }
+ PartitionPurgeMemoryGeneric(generic_allocator.root(),
+ PartitionPurgeDecommitEmptyPages);
+ {
+ MockPartitionStatsDumper dumper;
+ PartitionDumpStatsGeneric(generic_allocator.root(),
+ "mock_generic_allocator",
+ false /* detailed dump */, &dumper);
+ EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
+
+ const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
+ EXPECT_TRUE(stats);
+ EXPECT_TRUE(stats->is_valid);
+ EXPECT_EQ(0u, stats->decommittable_bytes);
+ EXPECT_EQ(0u, stats->resident_bytes);
+ }
+ // Calling purge again here is a good way of testing we didn't mess up the
+ // state of the free cache ring.
+ PartitionPurgeMemoryGeneric(generic_allocator.root(),
+ PartitionPurgeDecommitEmptyPages);
+
+ char* bigPtr = reinterpret_cast<char*>(
+ PartitionAllocGeneric(generic_allocator.root(), 256 * 1024, type_name));
+ PartitionFreeGeneric(generic_allocator.root(), bigPtr);
+ PartitionPurgeMemoryGeneric(generic_allocator.root(),
+ PartitionPurgeDecommitEmptyPages);
+
+ CheckPageInCore(ptr - kPointerOffset, false);
+ CheckPageInCore(bigPtr - kPointerOffset, false);
+}
+
+// Tests that we prefer to allocate into a non-empty partition page over an
+// empty one. This is an important aspect of minimizing memory usage for some
+// allocation sizes, particularly larger ones.
+TEST(PartitionAllocTest, PreferActiveOverEmpty) {
+ TestSetup();
+
+ size_t size = (kSystemPageSize * 2) - kExtraAllocSize;
+ // Allocate 3 full slot spans worth of 8192-byte allocations.
+ // Each slot span for this size is 16384 bytes (1 partition page) and holds
+ // 2 slots.
+ void* ptr1 = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ void* ptr2 = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ void* ptr3 = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ void* ptr4 = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ void* ptr5 = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ void* ptr6 = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+
+ PartitionPage* page1 =
+ PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr1));
+ PartitionPage* page2 =
+ PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr3));
+ PartitionPage* page3 =
+ PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr6));
+ EXPECT_NE(page1, page2);
+ EXPECT_NE(page2, page3);
+ PartitionBucket* bucket = page1->bucket;
+ EXPECT_EQ(page3, bucket->active_pages_head);
+
+ // Free up the 2nd slot in each slot span.
+ // This leaves the active list containing 3 pages, each with 1 used and 1
+ // free slot. The active page will be the one containing ptr1.
+ PartitionFreeGeneric(generic_allocator.root(), ptr6);
+ PartitionFreeGeneric(generic_allocator.root(), ptr4);
+ PartitionFreeGeneric(generic_allocator.root(), ptr2);
+ EXPECT_EQ(page1, bucket->active_pages_head);
+
+ // Empty the middle page in the active list.
+ PartitionFreeGeneric(generic_allocator.root(), ptr3);
+ EXPECT_EQ(page1, bucket->active_pages_head);
+
+ // Empty the first page in the active list -- also the current page.
+ PartitionFreeGeneric(generic_allocator.root(), ptr1);
+
+ // A good choice here is to re-fill the third page since the first two are
+ // empty. We used to fail to do that.
+ void* ptr7 = PartitionAllocGeneric(generic_allocator.root(), size, type_name);
+ EXPECT_EQ(ptr6, ptr7);
+ EXPECT_EQ(page3, bucket->active_pages_head);
+
+ PartitionFreeGeneric(generic_allocator.root(), ptr5);
+ PartitionFreeGeneric(generic_allocator.root(), ptr7);
+}
+
+// Tests the API to purge discardable memory.
+TEST(PartitionAllocTest, PurgeDiscardable) {
+ TestSetup();
+
+ // Free the second of two 4096 byte allocations and then purge.
+ {
+ void* ptr1 = PartitionAllocGeneric(
+ generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name);
+ char* ptr2 = reinterpret_cast<char*>(
+ PartitionAllocGeneric(generic_allocator.root(),
+ kSystemPageSize - kExtraAllocSize, type_name));
+ PartitionFreeGeneric(generic_allocator.root(), ptr2);
+ PartitionPage* page =
+ PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr1));
+ EXPECT_EQ(2u, page->num_unprovisioned_slots);
+ {
+ MockPartitionStatsDumper dumper;
+ PartitionDumpStatsGeneric(generic_allocator.root(),
+ "mock_generic_allocator",
+ false /* detailed dump */, &dumper);
+ EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
+
+ const PartitionBucketMemoryStats* stats =
+ dumper.GetBucketStats(kSystemPageSize);
+ EXPECT_TRUE(stats);
+ EXPECT_TRUE(stats->is_valid);
+ EXPECT_EQ(0u, stats->decommittable_bytes);
+ EXPECT_EQ(kSystemPageSize, stats->discardable_bytes);
+ EXPECT_EQ(kSystemPageSize, stats->active_bytes);
+ EXPECT_EQ(2 * kSystemPageSize, stats->resident_bytes);
+ }
+ CheckPageInCore(ptr2 - kPointerOffset, true);
+ PartitionPurgeMemoryGeneric(generic_allocator.root(),
+ PartitionPurgeDiscardUnusedSystemPages);
+ CheckPageInCore(ptr2 - kPointerOffset, false);
+ EXPECT_EQ(3u, page->num_unprovisioned_slots);
+
+ PartitionFreeGeneric(generic_allocator.root(), ptr1);
+ }
+  // Free the first of two 4096-byte allocations and then purge.
+ {
+ char* ptr1 = reinterpret_cast<char*>(
+ PartitionAllocGeneric(generic_allocator.root(),
+ kSystemPageSize - kExtraAllocSize, type_name));
+ void* ptr2 = PartitionAllocGeneric(
+ generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name);
+ PartitionFreeGeneric(generic_allocator.root(), ptr1);
+ {
+ MockPartitionStatsDumper dumper;
+ PartitionDumpStatsGeneric(generic_allocator.root(),
+ "mock_generic_allocator",
+ false /* detailed dump */, &dumper);
+ EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
+
+ const PartitionBucketMemoryStats* stats =
+ dumper.GetBucketStats(kSystemPageSize);
+ EXPECT_TRUE(stats);
+ EXPECT_TRUE(stats->is_valid);
+ EXPECT_EQ(0u, stats->decommittable_bytes);
+ EXPECT_EQ(kSystemPageSize, stats->discardable_bytes);
+ EXPECT_EQ(kSystemPageSize, stats->active_bytes);
+ EXPECT_EQ(2 * kSystemPageSize, stats->resident_bytes);
+ }
+ CheckPageInCore(ptr1 - kPointerOffset, true);
+ PartitionPurgeMemoryGeneric(generic_allocator.root(),
+ PartitionPurgeDiscardUnusedSystemPages);
+ CheckPageInCore(ptr1 - kPointerOffset, false);
+
+ PartitionFreeGeneric(generic_allocator.root(), ptr2);
+ }
+ {
+ char* ptr1 = reinterpret_cast<char*>(PartitionAllocGeneric(
+ generic_allocator.root(), 9216 - kExtraAllocSize, type_name));
+ void* ptr2 = PartitionAllocGeneric(generic_allocator.root(),
+ 9216 - kExtraAllocSize, type_name);
+ void* ptr3 = PartitionAllocGeneric(generic_allocator.root(),
+ 9216 - kExtraAllocSize, type_name);
+ void* ptr4 = PartitionAllocGeneric(generic_allocator.root(),
+ 9216 - kExtraAllocSize, type_name);
+ memset(ptr1, 'A', 9216 - kExtraAllocSize);
+ memset(ptr2, 'A', 9216 - kExtraAllocSize);
+ PartitionFreeGeneric(generic_allocator.root(), ptr2);
+ PartitionFreeGeneric(generic_allocator.root(), ptr1);
+ {
+ MockPartitionStatsDumper dumper;
+ PartitionDumpStatsGeneric(generic_allocator.root(),
+ "mock_generic_allocator",
+ false /* detailed dump */, &dumper);
+ EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
+
+ const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(9216);
+ EXPECT_TRUE(stats);
+ EXPECT_TRUE(stats->is_valid);
+ EXPECT_EQ(0u, stats->decommittable_bytes);
+ EXPECT_EQ(2 * kSystemPageSize, stats->discardable_bytes);
+ EXPECT_EQ(9216u * 2, stats->active_bytes);
+ EXPECT_EQ(9 * kSystemPageSize, stats->resident_bytes);
+ }
+ CheckPageInCore(ptr1 - kPointerOffset, true);
+ CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, true);
+ CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
+ CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), true);
+ CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 4), true);
+ PartitionPurgeMemoryGeneric(generic_allocator.root(),
+ PartitionPurgeDiscardUnusedSystemPages);
+ CheckPageInCore(ptr1 - kPointerOffset, true);
+ CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, false);
+ CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
+ CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), false);
+ CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 4), true);
+
+ PartitionFreeGeneric(generic_allocator.root(), ptr3);
+ PartitionFreeGeneric(generic_allocator.root(), ptr4);
+ }
+ {
+ char* ptr1 = reinterpret_cast<char*>(PartitionAllocGeneric(
+ generic_allocator.root(), (64 * kSystemPageSize) - kExtraAllocSize,
+ type_name));
+ memset(ptr1, 'A', (64 * kSystemPageSize) - kExtraAllocSize);
+ PartitionFreeGeneric(generic_allocator.root(), ptr1);
+ ptr1 = reinterpret_cast<char*>(PartitionAllocGeneric(
+ generic_allocator.root(), (61 * kSystemPageSize) - kExtraAllocSize,
+ type_name));
+ {
+ MockPartitionStatsDumper dumper;
+ PartitionDumpStatsGeneric(generic_allocator.root(),
+ "mock_generic_allocator",
+ false /* detailed dump */, &dumper);
+ EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
+
+ const PartitionBucketMemoryStats* stats =
+ dumper.GetBucketStats(64 * kSystemPageSize);
+ EXPECT_TRUE(stats);
+ EXPECT_TRUE(stats->is_valid);
+ EXPECT_EQ(0u, stats->decommittable_bytes);
+ EXPECT_EQ(3 * kSystemPageSize, stats->discardable_bytes);
+ EXPECT_EQ(61 * kSystemPageSize, stats->active_bytes);
+ EXPECT_EQ(64 * kSystemPageSize, stats->resident_bytes);
+ }
+ CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 60), true);
+ CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 61), true);
+ CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 62), true);
+ CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 63), true);
+ PartitionPurgeMemoryGeneric(generic_allocator.root(),
+ PartitionPurgeDiscardUnusedSystemPages);
+ CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 60), true);
+ CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 61), false);
+ CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 62), false);
+ CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 63), false);
+
+ PartitionFreeGeneric(generic_allocator.root(), ptr1);
+ }
+ // This sub-test tests truncation of the provisioned slots in a trickier
+ // case where the freelist is rewritten.
+ PartitionPurgeMemoryGeneric(generic_allocator.root(),
+ PartitionPurgeDecommitEmptyPages);
+ {
+ char* ptr1 = reinterpret_cast<char*>(
+ PartitionAllocGeneric(generic_allocator.root(),
+ kSystemPageSize - kExtraAllocSize, type_name));
+ void* ptr2 = PartitionAllocGeneric(
+ generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name);
+ void* ptr3 = PartitionAllocGeneric(
+ generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name);
+ void* ptr4 = PartitionAllocGeneric(
+ generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name);
+ ptr1[0] = 'A';
+ ptr1[kSystemPageSize] = 'A';
+ ptr1[kSystemPageSize * 2] = 'A';
+ ptr1[kSystemPageSize * 3] = 'A';
+ PartitionPage* page =
+ PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr1));
+ PartitionFreeGeneric(generic_allocator.root(), ptr2);
+ PartitionFreeGeneric(generic_allocator.root(), ptr4);
+ PartitionFreeGeneric(generic_allocator.root(), ptr1);
+ EXPECT_EQ(0u, page->num_unprovisioned_slots);
+
+ {
+ MockPartitionStatsDumper dumper;
+ PartitionDumpStatsGeneric(generic_allocator.root(),
+ "mock_generic_allocator",
+ false /* detailed dump */, &dumper);
+ EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
+
+ const PartitionBucketMemoryStats* stats =
+ dumper.GetBucketStats(kSystemPageSize);
+ EXPECT_TRUE(stats);
+ EXPECT_TRUE(stats->is_valid);
+ EXPECT_EQ(0u, stats->decommittable_bytes);
+ EXPECT_EQ(2 * kSystemPageSize, stats->discardable_bytes);
+ EXPECT_EQ(kSystemPageSize, stats->active_bytes);
+ EXPECT_EQ(4 * kSystemPageSize, stats->resident_bytes);
+ }
+ CheckPageInCore(ptr1 - kPointerOffset, true);
+ CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, true);
+ CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
+ CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), true);
+ PartitionPurgeMemoryGeneric(generic_allocator.root(),
+ PartitionPurgeDiscardUnusedSystemPages);
+ EXPECT_EQ(1u, page->num_unprovisioned_slots);
+ CheckPageInCore(ptr1 - kPointerOffset, true);
+ CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, false);
+ CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
+ CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), false);
+
+    // Check that the freelist was not corrupted.
+ void* ptr1b = PartitionAllocGeneric(
+ generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name);
+ EXPECT_EQ(ptr1, ptr1b);
+ void* ptr2b = PartitionAllocGeneric(
+ generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name);
+ EXPECT_EQ(ptr2, ptr2b);
+ EXPECT_FALSE(page->freelist_head);
+
+ PartitionFreeGeneric(generic_allocator.root(), ptr1);
+ PartitionFreeGeneric(generic_allocator.root(), ptr2);
+ PartitionFreeGeneric(generic_allocator.root(), ptr3);
+ }
+ // This sub-test is similar, but tests a double-truncation.
+ PartitionPurgeMemoryGeneric(generic_allocator.root(),
+ PartitionPurgeDecommitEmptyPages);
+ {
+ char* ptr1 = reinterpret_cast<char*>(
+ PartitionAllocGeneric(generic_allocator.root(),
+ kSystemPageSize - kExtraAllocSize, type_name));
+ void* ptr2 = PartitionAllocGeneric(
+ generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name);
+ void* ptr3 = PartitionAllocGeneric(
+ generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name);
+ void* ptr4 = PartitionAllocGeneric(
+ generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name);
+ ptr1[0] = 'A';
+ ptr1[kSystemPageSize] = 'A';
+ ptr1[kSystemPageSize * 2] = 'A';
+ ptr1[kSystemPageSize * 3] = 'A';
+ PartitionPage* page =
+ PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr1));
+ PartitionFreeGeneric(generic_allocator.root(), ptr4);
+ PartitionFreeGeneric(generic_allocator.root(), ptr3);
+ EXPECT_EQ(0u, page->num_unprovisioned_slots);
+
+ {
+ MockPartitionStatsDumper dumper;
+ PartitionDumpStatsGeneric(generic_allocator.root(),
+ "mock_generic_allocator",
+ false /* detailed dump */, &dumper);
+ EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
+
+ const PartitionBucketMemoryStats* stats =
+ dumper.GetBucketStats(kSystemPageSize);
+ EXPECT_TRUE(stats);
+ EXPECT_TRUE(stats->is_valid);
+ EXPECT_EQ(0u, stats->decommittable_bytes);
+ EXPECT_EQ(2 * kSystemPageSize, stats->discardable_bytes);
+ EXPECT_EQ(2 * kSystemPageSize, stats->active_bytes);
+ EXPECT_EQ(4 * kSystemPageSize, stats->resident_bytes);
+ }
+ CheckPageInCore(ptr1 - kPointerOffset, true);
+ CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, true);
+ CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
+ CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), true);
+ PartitionPurgeMemoryGeneric(generic_allocator.root(),
+ PartitionPurgeDiscardUnusedSystemPages);
+ EXPECT_EQ(2u, page->num_unprovisioned_slots);
+ CheckPageInCore(ptr1 - kPointerOffset, true);
+ CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, true);
+ CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), false);
+ CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), false);
+
+ EXPECT_FALSE(page->freelist_head);
+
+ PartitionFreeGeneric(generic_allocator.root(), ptr1);
+ PartitionFreeGeneric(generic_allocator.root(), ptr2);
+ }
+}
+
+} // namespace base
+
+#endif // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
diff --git a/chromium/base/at_exit.cc b/chromium/base/at_exit.cc
index cfe4cf9a587..5dcc83cb2f5 100644
--- a/chromium/base/at_exit.cc
+++ b/chromium/base/at_exit.cc
@@ -22,6 +22,8 @@ namespace base {
// this for thread-safe access, since it will only be modified in testing.
static AtExitManager* g_top_manager = NULL;
+static bool g_disable_managers = false;
+
AtExitManager::AtExitManager()
: processing_callbacks_(false), next_manager_(g_top_manager) {
// If multiple modules instantiate AtExitManagers they'll end up living in this
@@ -39,7 +41,8 @@ AtExitManager::~AtExitManager() {
}
DCHECK_EQ(this, g_top_manager);
- ProcessCallbacksNow();
+ if (!g_disable_managers)
+ ProcessCallbacksNow();
g_top_manager = next_manager_;
}
@@ -88,6 +91,11 @@ void AtExitManager::ProcessCallbacksNow() {
DCHECK(g_top_manager->stack_.empty());
}
+void AtExitManager::DisableAllAtExitManagers() {
+ AutoLock lock(g_top_manager->lock_);
+ g_disable_managers = true;
+}
+
AtExitManager::AtExitManager(bool shadow)
: processing_callbacks_(false), next_manager_(g_top_manager) {
DCHECK(shadow || !g_top_manager);
diff --git a/chromium/base/at_exit.h b/chromium/base/at_exit.h
index 02e18ed9eb7..6bf3f50350e 100644
--- a/chromium/base/at_exit.h
+++ b/chromium/base/at_exit.h
@@ -49,6 +49,10 @@ class BASE_EXPORT AtExitManager {
// is possible to register new callbacks after calling this function.
static void ProcessCallbacksNow();
+  // Disable all registered at-exit callbacks. This is used only in
+  // single-process mode.
+ static void DisableAllAtExitManagers();
+
protected:
// This constructor will allow this instance of AtExitManager to be created
// even if one already exists. This should only be used for testing!
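The new DisableAllAtExitManagers() hook above makes ~AtExitManager skip callback processing entirely. A minimal usage sketch for a hypothetical single-process embedder (the function name and flow are illustrative, not part of this patch):

    #include "base/at_exit.h"

    int RunEmbedderSingleProcess() {
      base::AtExitManager exit_manager;  // Owns the at-exit callback stack.
      // ... start threads, register at-exit callbacks, run the embedder ...

      // In single-process mode the registered callbacks would run while other
      // components are still alive, so suppress them before shutdown.
      base::AtExitManager::DisableAllAtExitManagers();
      return 0;
      // ~AtExitManager now returns without invoking the registered callbacks.
    }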
diff --git a/chromium/base/base_paths_posix.cc b/chromium/base/base_paths_posix.cc
index a60e1121c95..baba3cd83f3 100644
--- a/chromium/base/base_paths_posix.cc
+++ b/chromium/base/base_paths_posix.cc
@@ -36,8 +36,8 @@ namespace base {
bool PathProviderPosix(int key, FilePath* result) {
FilePath path;
switch (key) {
- case base::FILE_EXE:
- case base::FILE_MODULE: { // TODO(evanm): is this correct?
+ case FILE_EXE:
+ case FILE_MODULE: { // TODO(evanm): is this correct?
#if defined(OS_LINUX)
FilePath bin_dir;
if (!ReadSymbolicLink(FilePath(kProcSelfExe), &bin_dir)) {
@@ -77,24 +77,23 @@ bool PathProviderPosix(int key, FilePath* result) {
return true;
#endif
}
- case base::DIR_SOURCE_ROOT: {
+ case DIR_SOURCE_ROOT: {
// Allow passing this in the environment, for more flexibility in build
// tree configurations (sub-project builds, gyp --output_dir, etc.)
- std::unique_ptr<base::Environment> env(base::Environment::Create());
+ std::unique_ptr<Environment> env(Environment::Create());
std::string cr_source_root;
if (env->GetVar("CR_SOURCE_ROOT", &cr_source_root)) {
path = FilePath(cr_source_root);
- if (base::PathExists(path)) {
+ if (PathExists(path)) {
*result = path;
return true;
- } else {
- DLOG(WARNING) << "CR_SOURCE_ROOT is set, but it appears to not "
- << "point to a directory.";
}
+ DLOG(WARNING) << "CR_SOURCE_ROOT is set, but it appears to not "
+ << "point to a directory.";
}
// On POSIX, unit tests execute two levels deep from the source root.
// For example: out/{Debug|Release}/net_unittest
- if (PathService::Get(base::DIR_EXE, &path)) {
+ if (PathService::Get(DIR_EXE, &path)) {
*result = path.DirName().DirName();
return true;
}
@@ -103,13 +102,13 @@ bool PathProviderPosix(int key, FilePath* result) {
<< "Try running from your chromium/src directory.";
return false;
}
- case base::DIR_USER_DESKTOP:
- *result = base::nix::GetXDGUserDirectory("DESKTOP", "Desktop");
+ case DIR_USER_DESKTOP:
+ *result = nix::GetXDGUserDirectory("DESKTOP", "Desktop");
return true;
- case base::DIR_CACHE: {
- std::unique_ptr<base::Environment> env(base::Environment::Create());
- FilePath cache_dir(base::nix::GetXDGDirectory(env.get(), "XDG_CACHE_HOME",
- ".cache"));
+ case DIR_CACHE: {
+ std::unique_ptr<Environment> env(Environment::Create());
+ FilePath cache_dir(
+ nix::GetXDGDirectory(env.get(), "XDG_CACHE_HOME", ".cache"));
*result = cache_dir;
return true;
}
diff --git a/chromium/base/bind_helpers.h b/chromium/base/bind_helpers.h
index c7c7be8ee84..7b3d7d34740 100644
--- a/chromium/base/bind_helpers.h
+++ b/chromium/base/bind_helpers.h
@@ -21,7 +21,7 @@
// Owned() transfers ownership of an object to the Callback resulting from
// bind; the object will be deleted when the Callback is deleted.
//
-// Passed() is for transferring movable-but-not-copyable types (eg. scoped_ptr)
+// Passed() is for transferring movable-but-not-copyable types (e.g. unique_ptr)
// through a Callback. Logically, this signifies a destructive transfer of
// the state of the argument into the target function. Invoking
// Callback::Run() twice on a Callback that was created with a Passed()
@@ -179,6 +179,9 @@ struct BindUnwrapTraits;
namespace internal {
+template <typename Functor, typename SFINAE = void>
+struct FunctorTraits;
+
template <typename T>
class UnretainedWrapper {
public:
@@ -521,6 +524,48 @@ struct BindUnwrapTraits<internal::PassedWrapper<T>> {
}
};
+// CallbackCancellationTraits allows customization of Callback's cancellation
+// semantics. By default, callbacks are not cancellable. A specialization should
+// set is_cancellable = true and implement an IsCancelled() that returns
+// whether the callback should be cancelled.
+template <typename Functor, typename BoundArgsTuple, typename SFINAE = void>
+struct CallbackCancellationTraits {
+ static constexpr bool is_cancellable = false;
+};
+
+// Specialization for method bound to weak pointer receiver.
+template <typename Functor, typename... BoundArgs>
+struct CallbackCancellationTraits<
+ Functor,
+ std::tuple<BoundArgs...>,
+ typename std::enable_if<
+ internal::IsWeakMethod<internal::FunctorTraits<Functor>::is_method,
+ BoundArgs...>::value>::type> {
+ static constexpr bool is_cancellable = true;
+
+ template <typename Receiver, typename... Args>
+ static bool IsCancelled(const Functor&,
+ const Receiver& receiver,
+ const Args&...) {
+ return !receiver;
+ }
+};
+
+// Specialization for a nested bind.
+template <typename Signature,
+ typename... BoundArgs,
+ internal::CopyMode copy_mode,
+ internal::RepeatMode repeat_mode>
+struct CallbackCancellationTraits<Callback<Signature, copy_mode, repeat_mode>,
+ std::tuple<BoundArgs...>> {
+ static constexpr bool is_cancellable = true;
+
+ template <typename Functor>
+ static bool IsCancelled(const Functor& functor, const BoundArgs&...) {
+ return functor.IsCancelled();
+ }
+};
+
} // namespace base
#endif // BASE_BIND_HELPERS_H_
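The weak-pointer specialization above is easiest to read from the caller's side. A minimal behavioral sketch, where Worker and Example are hypothetical names not present in this patch:

    #include "base/bind.h"
    #include "base/callback.h"
    #include "base/memory/ptr_util.h"
    #include "base/memory/weak_ptr.h"

    class Worker {
     public:
      Worker() : weak_factory_(this) {}
      void DoWork() {}
      base::WeakPtr<Worker> AsWeakPtr() { return weak_factory_.GetWeakPtr(); }

     private:
      base::WeakPtrFactory<Worker> weak_factory_;
    };

    void Example() {
      auto worker = base::MakeUnique<Worker>();
      base::Closure cb = base::Bind(&Worker::DoWork, worker->AsWeakPtr());
      worker.reset();
      cb.Run();  // No-op: the specialization above reports the callback as
                 // cancelled once the bound WeakPtr is invalidated.
    }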
diff --git a/chromium/base/bind_internal.h b/chromium/base/bind_internal.h
index 88e764547f8..8988bdca226 100644
--- a/chromium/base/bind_internal.h
+++ b/chromium/base/bind_internal.h
@@ -130,7 +130,7 @@ struct ForceVoidReturn<R(Args...)> {
// FunctorTraits<>
//
// See description at top of file.
-template <typename Functor, typename SFINAE = void>
+template <typename Functor, typename SFINAE>
struct FunctorTraits;
// For a callable type that is convertible to the corresponding function type.
@@ -387,43 +387,24 @@ IsNull(const Functor&) {
return false;
}
-template <typename Functor, typename... BoundArgs>
-struct BindState;
-
-template <typename BindStateType, typename SFINAE = void>
-struct CancellationChecker {
- static constexpr bool is_cancellable = false;
- static bool Run(const BindStateBase*) {
- return false;
- }
-};
-
-template <typename Functor, typename... BoundArgs>
-struct CancellationChecker<
- BindState<Functor, BoundArgs...>,
- typename std::enable_if<IsWeakMethod<FunctorTraits<Functor>::is_method,
- BoundArgs...>::value>::type> {
- static constexpr bool is_cancellable = true;
- static bool Run(const BindStateBase* base) {
- using BindStateType = BindState<Functor, BoundArgs...>;
- const BindStateType* bind_state = static_cast<const BindStateType*>(base);
- return !base::get<0>(bind_state->bound_args_);
- }
-};
+// Used by ApplyCancellationTraits below.
+template <typename Functor, typename BoundArgsTuple, size_t... indices>
+bool ApplyCancellationTraitsImpl(const Functor& functor,
+ const BoundArgsTuple& bound_args,
+ IndexSequence<indices...>) {
+ return CallbackCancellationTraits<Functor, BoundArgsTuple>::IsCancelled(
+ functor, base::get<indices>(bound_args)...);
+}
-template <typename Signature,
- typename... BoundArgs,
- CopyMode copy_mode,
- RepeatMode repeat_mode>
-struct CancellationChecker<
- BindState<Callback<Signature, copy_mode, repeat_mode>, BoundArgs...>> {
- static constexpr bool is_cancellable = true;
- static bool Run(const BindStateBase* base) {
- using Functor = Callback<Signature, copy_mode, repeat_mode>;
- using BindStateType = BindState<Functor, BoundArgs...>;
- const BindStateType* bind_state = static_cast<const BindStateType*>(base);
- return bind_state->functor_.IsCancelled();
- }
+// Relays |base| to the corresponding CallbackCancellationTraits<>::IsCancelled().
+// Returns true if the callback that |base| represents has been cancelled.
+template <typename BindStateType>
+bool ApplyCancellationTraits(const BindStateBase* base) {
+ const BindStateType* storage = static_cast<const BindStateType*>(base);
+ static constexpr size_t num_bound_args =
+ std::tuple_size<decltype(storage->bound_args_)>::value;
+ return ApplyCancellationTraitsImpl(storage->functor_, storage->bound_args_,
+ MakeIndexSequence<num_bound_args>());
};
// Template helpers to detect using Bind() on a base::Callback without any
@@ -449,14 +430,17 @@ struct BindingCallbackWithNoArgs<Callback<Signature, copy_mode, repeat_mode>,
template <typename Functor, typename... BoundArgs>
struct BindState final : BindStateBase {
using IsCancellable = std::integral_constant<
- bool, CancellationChecker<BindState>::is_cancellable>;
+ bool,
+ CallbackCancellationTraits<Functor,
+ std::tuple<BoundArgs...>>::is_cancellable>;
template <typename ForwardFunctor, typename... ForwardBoundArgs>
explicit BindState(BindStateBase::InvokeFuncStorage invoke_func,
ForwardFunctor&& functor,
ForwardBoundArgs&&... bound_args)
- // IsCancellable is std::false_type if the CancellationChecker<>::Run
- // returns always false. Otherwise, it's std::true_type.
+ // IsCancellable is std::false_type if
+      // CallbackCancellationTraits<>::IsCancelled always returns false.
+ // Otherwise, it's std::true_type.
: BindState(IsCancellable{},
invoke_func,
std::forward<ForwardFunctor>(functor),
@@ -476,8 +460,9 @@ struct BindState final : BindStateBase {
BindStateBase::InvokeFuncStorage invoke_func,
ForwardFunctor&& functor,
ForwardBoundArgs&&... bound_args)
- : BindStateBase(invoke_func, &Destroy,
- &CancellationChecker<BindState>::Run),
+ : BindStateBase(invoke_func,
+ &Destroy,
+ &ApplyCancellationTraits<BindState>),
functor_(std::forward<ForwardFunctor>(functor)),
bound_args_(std::forward<ForwardBoundArgs>(bound_args)...) {
DCHECK(!IsNull(functor_));
diff --git a/chromium/base/bind_unittest.cc b/chromium/base/bind_unittest.cc
index c6b53d55f03..6e01f7f3fdb 100644
--- a/chromium/base/bind_unittest.cc
+++ b/chromium/base/bind_unittest.cc
@@ -1241,7 +1241,7 @@ TEST_F(BindTest, CapturelessLambda) {
EXPECT_TRUE(internal::IsConvertibleToRunType<decltype(f)>::value);
int i = 0;
- auto g = [i]() {};
+ auto g = [i]() { (void)i; };
EXPECT_FALSE(internal::IsConvertibleToRunType<decltype(g)>::value);
auto h = [](int, double) { return 'k'; };
diff --git a/chromium/base/bind_unittest.nc b/chromium/base/bind_unittest.nc
index bba2e025f2c..e6b25348087 100644
--- a/chromium/base/bind_unittest.nc
+++ b/chromium/base/bind_unittest.nc
@@ -203,7 +203,7 @@ void WontCompile() {
Closure callback_mismatches_bind_type = Bind(&VoidPolymorphic1<int>);
}
-#elif defined(NCTEST_DISALLOW_CAPTURING_LAMBDA) // [r"fatal error: implicit instantiation of undefined template 'base::internal::FunctorTraits<\(lambda at ../../base/bind_unittest.nc:[0-9]+:[0-9]+\), void>'"]
+#elif defined(NCTEST_DISALLOW_CAPTURING_LAMBDA) // [r"fatal error: implicit instantiation of undefined template 'base::internal::FunctorTraits<\(lambda at (\.\./)+base/bind_unittest.nc:[0-9]+:[0-9]+\), void>'"]
void WontCompile() {
int i = 0;
@@ -224,6 +224,27 @@ void WontCompile() {
Closure cb2 = Bind(cb);
}
+#elif defined(NCTEST_DISALLOW_ONCECALLBACK_RUN_ON_LVALUE) // [r"static_assert failed \"OnceCallback::Run\(\) may only be invoked on a non-const rvalue, i\.e\. std::move\(callback\)\.Run\(\)\.\""]
+
+void WontCompile() {
+ OnceClosure cb = Bind([] {});
+ cb.Run();
+}
+
+#elif defined(NCTEST_DISALLOW_ONCECALLBACK_RUN_ON_CONST_LVALUE) // [r"static_assert failed \"OnceCallback::Run\(\) may only be invoked on a non-const rvalue, i\.e\. std::move\(callback\)\.Run\(\)\.\""]
+
+void WontCompile() {
+ const OnceClosure cb = Bind([] {});
+ cb.Run();
+}
+
+#elif defined(NCTEST_DISALLOW_ONCECALLBACK_RUN_ON_CONST_RVALUE) // [r"static_assert failed \"OnceCallback::Run\(\) may only be invoked on a non-const rvalue, i\.e\. std::move\(callback\)\.Run\(\)\.\""]
+
+void WontCompile() {
+ const OnceClosure cb = Bind([] {});
+ std::move(cb).Run();
+}
+
#endif
} // namespace base
diff --git a/chromium/base/bit_cast.h b/chromium/base/bit_cast.h
index c9514bceef4..90dd925e86c 100644
--- a/chromium/base/bit_cast.h
+++ b/chromium/base/bit_cast.h
@@ -9,6 +9,7 @@
#include <type_traits>
#include "base/compiler_specific.h"
+#include "base/template_util.h"
#include "build/build_config.h"
// bit_cast<Dest,Source> is a template function that implements the equivalent
@@ -63,34 +64,10 @@ template <class Dest, class Source>
inline Dest bit_cast(const Source& source) {
static_assert(sizeof(Dest) == sizeof(Source),
"bit_cast requires source and destination to be the same size");
-
-#if (__GNUC__ > 5 || (__GNUC__ == 5 && __GNUC_MINOR__ >= 1) || \
- (defined(__clang__) && defined(_LIBCPP_VERSION)))
- // GCC 5.1 contains the first libstdc++ with is_trivially_copyable.
- // Assume libc++ Just Works: is_trivially_copyable added on May 13th 2011.
- // However, with libc++ when GCC is the compiler the trait is buggy, see
- // crbug.com/607158, so fall back to the less strict variant for non-clang.
- static_assert(std::is_trivially_copyable<Dest>::value,
- "non-trivially-copyable bit_cast is undefined");
- static_assert(std::is_trivially_copyable<Source>::value,
- "non-trivially-copyable bit_cast is undefined");
-#elif HAS_FEATURE(is_trivially_copyable)
- // The compiler supports an equivalent intrinsic.
- static_assert(__is_trivially_copyable(Dest),
- "non-trivially-copyable bit_cast is undefined");
- static_assert(__is_trivially_copyable(Source),
- "non-trivially-copyable bit_cast is undefined");
-#elif COMPILER_GCC
- // Fallback to compiler intrinsic on GCC and clang (which pretends to be
- // GCC). This isn't quite the same as is_trivially_copyable but it'll do for
- // our purpose.
- static_assert(__has_trivial_copy(Dest),
- "non-trivially-copyable bit_cast is undefined");
- static_assert(__has_trivial_copy(Source),
- "non-trivially-copyable bit_cast is undefined");
-#else
- // Do nothing, let the bots handle it.
-#endif
+ static_assert(base::is_trivially_copyable<Dest>::value,
+ "bit_cast requires the destination type to be copyable");
+ static_assert(base::is_trivially_copyable<Source>::value,
+ "bit_cast requires the source type to be copyable");
Dest dest;
memcpy(&dest, &source, sizeof(dest));
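The rewritten checks above restrict bit_cast to trivially copyable types; a typical call site looks like the following sketch (illustrative only; it assumes the unqualified bit_cast spelling this header provides):

    #include <stdint.h>

    #include "base/bit_cast.h"

    uint32_t FloatBits(float value) {
      // Both types are 4 bytes and trivially copyable, so the static_asserts
      // above pass and the bytes are copied with memcpy.
      return bit_cast<uint32_t>(value);
    }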
diff --git a/chromium/base/bit_cast_unittest.cc b/chromium/base/bit_cast_unittest.cc
index 757b0c65feb..f36d3fe64c4 100644
--- a/chromium/base/bit_cast_unittest.cc
+++ b/chromium/base/bit_cast_unittest.cc
@@ -27,6 +27,5 @@ TEST(BitCastTest, StructureInt) {
EXPECT_EQ(1, b);
}
-
} // namespace
} // namespace base
diff --git a/chromium/base/bits.h b/chromium/base/bits.h
index a3a59d1dfad..d101cb731a7 100644
--- a/chromium/base/bits.h
+++ b/chromium/base/bits.h
@@ -1,4 +1,4 @@
-// Copyright (c) 2009 The Chromium Authors. All rights reserved.
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
@@ -10,8 +10,13 @@
#include <stddef.h>
#include <stdint.h>
+#include "base/compiler_specific.h"
#include "base/logging.h"
+#if defined(COMPILER_MSVC)
+#include <intrin.h>
+#endif
+
namespace base {
namespace bits {
@@ -49,6 +54,58 @@ inline size_t Align(size_t size, size_t alignment) {
return (size + alignment - 1) & ~(alignment - 1);
}
+// These functions count the number of leading zeros in a binary value, starting
+// with the most significant bit. C does not have an operator to do this, but
+// fortunately the various compilers have built-ins that map to fast underlying
+// processor instructions.
+#if defined(COMPILER_MSVC)
+
+ALWAYS_INLINE uint32_t CountLeadingZeroBits32(uint32_t x) {
+ unsigned long index;
+ return LIKELY(_BitScanReverse(&index, x)) ? (31 - index) : 32;
+}
+
+#if defined(ARCH_CPU_64_BITS)
+
+// MSVC only supplies _BitScanReverse64 when building for a 64-bit target.
+ALWAYS_INLINE uint64_t CountLeadingZeroBits64(uint64_t x) {
+ unsigned long index;
+ return LIKELY(_BitScanReverse64(&index, x)) ? (63 - index) : 64;
+}
+
+#endif
+
+#elif defined(COMPILER_GCC)
+
+// This is very annoying. __builtin_clz has undefined behaviour for an input of
+// 0, even though there's clearly a return value that makes sense, and even
+// though some processor clz instructions have defined behaviour for 0. We could
+// drop to raw __asm__ to do better, but we'll avoid doing that unless we see
+// proof that we need to.
+ALWAYS_INLINE uint32_t CountLeadingZeroBits32(uint32_t x) {
+ return LIKELY(x) ? __builtin_clz(x) : 32;
+}
+
+ALWAYS_INLINE uint64_t CountLeadingZeroBits64(uint64_t x) {
+ return LIKELY(x) ? __builtin_clzll(x) : 64;
+}
+
+#endif
+
+#if defined(ARCH_CPU_64_BITS)
+
+ALWAYS_INLINE size_t CountLeadingZeroBitsSizeT(size_t x) {
+ return CountLeadingZeroBits64(x);
+}
+
+#else
+
+ALWAYS_INLINE size_t CountLeadingZeroBitsSizeT(size_t x) {
+ return CountLeadingZeroBits32(x);
+}
+
+#endif
+
} // namespace bits
} // namespace base
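The new CountLeadingZeroBits helpers are usually combined with the word width to obtain a bit position. A small sketch (the helper name is an illustration, not part of this patch):

    #include "base/bits.h"
    #include "base/logging.h"

    // Index of the most significant set bit of a non-zero value.
    size_t HighestSetBitIndex(size_t x) {
      DCHECK(x);
      return 8 * sizeof(size_t) - 1 - base::bits::CountLeadingZeroBitsSizeT(x);
    }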
diff --git a/chromium/base/bits_unittest.cc b/chromium/base/bits_unittest.cc
index 4f5b6ea49e5..270b8ef7d3b 100644
--- a/chromium/base/bits_unittest.cc
+++ b/chromium/base/bits_unittest.cc
@@ -61,5 +61,25 @@ TEST(BitsTest, Align) {
EXPECT_EQ(kSizeTMax / 2 + 1, Align(1, kSizeTMax / 2 + 1));
}
+TEST(BitsTest, CLZWorks) {
+ EXPECT_EQ(32u, CountLeadingZeroBits32(0u));
+ EXPECT_EQ(31u, CountLeadingZeroBits32(1u));
+ EXPECT_EQ(1u, CountLeadingZeroBits32(1u << 30));
+ EXPECT_EQ(0u, CountLeadingZeroBits32(1u << 31));
+
+#if defined(ARCH_CPU_64_BITS)
+ EXPECT_EQ(64u, CountLeadingZeroBitsSizeT(0ull));
+ EXPECT_EQ(63u, CountLeadingZeroBitsSizeT(1ull));
+ EXPECT_EQ(32u, CountLeadingZeroBitsSizeT(1ull << 31));
+ EXPECT_EQ(1u, CountLeadingZeroBitsSizeT(1ull << 62));
+ EXPECT_EQ(0u, CountLeadingZeroBitsSizeT(1ull << 63));
+#else
+ EXPECT_EQ(32u, CountLeadingZeroBitsSizeT(0u));
+ EXPECT_EQ(31u, CountLeadingZeroBitsSizeT(1u));
+ EXPECT_EQ(1u, CountLeadingZeroBitsSizeT(1u << 30));
+ EXPECT_EQ(0u, CountLeadingZeroBitsSizeT(1u << 31));
+#endif
+}
+
} // namespace bits
} // namespace base
diff --git a/chromium/base/callback.h b/chromium/base/callback.h
index c6b8ca3c448..9bb0c0f6679 100644
--- a/chromium/base/callback.h
+++ b/chromium/base/callback.h
@@ -21,6 +21,12 @@ namespace base {
namespace internal {
+template <typename CallbackType>
+struct IsOnceCallback : std::false_type {};
+
+template <typename Signature>
+struct IsOnceCallback<OnceCallback<Signature>> : std::true_type {};
+
// RunMixin provides different variants of `Run()` function to `Callback<>`
// based on the type of callback.
template <typename CallbackType>
@@ -28,14 +34,29 @@ class RunMixin;
// Specialization for OnceCallback.
template <typename R, typename... Args>
-class RunMixin<Callback<R(Args...), CopyMode::MoveOnly, RepeatMode::Once>> {
+class RunMixin<OnceCallback<R(Args...)>> {
private:
- using CallbackType =
- Callback<R(Args...), CopyMode::MoveOnly, RepeatMode::Once>;
+ using CallbackType = OnceCallback<R(Args...)>;
public:
using PolymorphicInvoke = R(*)(internal::BindStateBase*, Args&&...);
+ R Run(Args... args) const & {
+ // Note: even though this static_assert will trivially always fail, it
+ // cannot be simply replaced with static_assert(false, ...) because:
+ // - Per [dcl.dcl]/p4, a program is ill-formed if the constant-expression
+ // argument does not evaluate to true.
+ // - Per [temp.res]/p8, if no valid specialization can be generated for a
+ // template definition, and that template is not instantiated, the
+ // template definition is ill-formed, no diagnostic required.
+ // These two clauses, taken together, would allow a conforming C++ compiler
+ // to immediately reject static_assert(false, ...), even inside an
+ // uninstantiated template.
+ static_assert(!IsOnceCallback<CallbackType>::value,
+ "OnceCallback::Run() may only be invoked on a non-const "
+ "rvalue, i.e. std::move(callback).Run().");
+ }
+
R Run(Args... args) && {
// Move the callback instance into a local variable before the invocation,
// that ensures the internal state is cleared after the invocation.
@@ -49,10 +70,10 @@ class RunMixin<Callback<R(Args...), CopyMode::MoveOnly, RepeatMode::Once>> {
};
// Specialization for RepeatingCallback.
-template <typename R, typename... Args, CopyMode copy_mode>
-class RunMixin<Callback<R(Args...), copy_mode, RepeatMode::Repeating>> {
+template <typename R, typename... Args>
+class RunMixin<RepeatingCallback<R(Args...)>> {
private:
- using CallbackType = Callback<R(Args...), copy_mode, RepeatMode::Repeating>;
+ using CallbackType = RepeatingCallback<R(Args...)>;
public:
using PolymorphicInvoke = R(*)(internal::BindStateBase*, Args&&...);
@@ -69,10 +90,8 @@ template <typename From, typename To>
struct IsCallbackConvertible : std::false_type {};
template <typename Signature>
-struct IsCallbackConvertible<
- Callback<Signature, CopyMode::Copyable, RepeatMode::Repeating>,
- Callback<Signature, CopyMode::MoveOnly, RepeatMode::Once>> : std::true_type {
-};
+struct IsCallbackConvertible<RepeatingCallback<Signature>,
+ OnceCallback<Signature>> : std::true_type {};
} // namespace internal
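The lvalue Run() overload above exists only to produce a readable diagnostic; the enforced call pattern is sketched below (mirroring the .nc tests later in this patch; Example is an illustrative name):

    #include <utility>

    #include "base/bind.h"
    #include "base/callback.h"

    void Example() {
      base::OnceClosure cb = base::Bind([] {});
      // cb.Run();          // Rejected by the static_assert above.
      std::move(cb).Run();  // OK: Run() consumes the callback.
    }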
diff --git a/chromium/base/callback_list_unittest.nc b/chromium/base/callback_list_unittest.nc
index 0ddc135eaa8..7347f765dcc 100644
--- a/chromium/base/callback_list_unittest.nc
+++ b/chromium/base/callback_list_unittest.nc
@@ -35,7 +35,7 @@ class FooListener {
};
-#if defined(NCTEST_MOVE_ONLY_TYPE_PARAMETER) // [r"fatal error: call to deleted constructor"]
+#if defined(NCTEST_MOVE_ONLY_TYPE_PARAMETER) // [r"fatal error: call to (implicitly-)?deleted( copy)? constructor"]
// Callbacks run with a move-only typed parameter.
//
diff --git a/chromium/base/callback_unittest.cc b/chromium/base/callback_unittest.cc
index 783f3bad4f5..a41736946a1 100644
--- a/chromium/base/callback_unittest.cc
+++ b/chromium/base/callback_unittest.cc
@@ -115,6 +115,17 @@ TEST_F(CallbackTest, Reset) {
EXPECT_TRUE(callback_a_.Equals(null_callback_));
}
+TEST_F(CallbackTest, Move) {
+ // Moving should reset the callback.
+ ASSERT_FALSE(callback_a_.is_null());
+ ASSERT_FALSE(callback_a_.Equals(null_callback_));
+
+ auto tmp = std::move(callback_a_);
+
+ EXPECT_TRUE(callback_a_.is_null());
+ EXPECT_TRUE(callback_a_.Equals(null_callback_));
+}
+
struct TestForReentrancy {
TestForReentrancy()
: cb_already_run(false),
diff --git a/chromium/base/check_example.cc b/chromium/base/check_example.cc
index 4b3f4287dbf..7b9d8e6a80e 100644
--- a/chromium/base/check_example.cc
+++ b/chromium/base/check_example.cc
@@ -5,16 +5,27 @@
// This file is meant for analyzing the code generated by the CHECK
// macros in a small executable file that's easy to disassemble.
+#include "base/compiler_specific.h"
#include "base/logging.h"
// An official build shouldn't generate code to print out messages for
// the CHECK* macros, nor should it have the strings in the
-// executable.
+// executable. It is also important that the CHECK() function collapse to the
+// same implementation as RELEASE_ASSERT(), in particular on Windows x86.
+// Historically, the stream eating caused additional unnecessary instructions.
+// See https://crbug.com/672699.
+
+#define BLINK_RELEASE_ASSERT_EQUIVALENT(assertion) \
+ (UNLIKELY(!(assertion)) ? (IMMEDIATE_CRASH()) : (void)0)
void DoCheck(bool b) {
CHECK(b) << "DoCheck " << b;
}
+void DoBlinkReleaseAssert(bool b) {
+ BLINK_RELEASE_ASSERT_EQUIVALENT(b);
+}
+
void DoCheckEq(int x, int y) {
CHECK_EQ(x, y);
}
@@ -22,4 +33,5 @@ void DoCheckEq(int x, int y) {
int main(int argc, const char* argv[]) {
DoCheck(argc > 1);
DoCheckEq(argc, 1);
+ DoBlinkReleaseAssert(argc > 1);
}
diff --git a/chromium/base/compiler_specific.h b/chromium/base/compiler_specific.h
index bb4c52be45e..0f4c058b3be 100644
--- a/chromium/base/compiler_specific.h
+++ b/chromium/base/compiler_specific.h
@@ -157,6 +157,16 @@
// If available, it would look like:
// __attribute__((format(wprintf, format_param, dots_param)))
+// Sanitizers annotations.
+#if defined(__has_attribute)
+#if __has_attribute(no_sanitize)
+#define NO_SANITIZE(what) __attribute__((no_sanitize(what)))
+#endif
+#endif
+#if !defined(NO_SANITIZE)
+#define NO_SANITIZE(what)
+#endif
+
// MemorySanitizer annotations.
#if defined(MEMORY_SANITIZER) && !defined(OS_NACL)
#include <sanitizer/msan_interface.h>
@@ -206,7 +216,7 @@
#if !defined(LIKELY)
#if defined(COMPILER_GCC)
-#define LIKELY(x) __builtin_expect((x), 1)
+#define LIKELY(x) __builtin_expect(!!(x), 1)
#else
#define LIKELY(x) (x)
#endif // defined(COMPILER_GCC)
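The extra !! in LIKELY() matters for non-boolean conditions; a sketch of a call site that now gets a meaningful hint (the function is illustrative, not part of this patch):

    #include "base/compiler_specific.h"

    int FirstByteOrZero(const char* p) {
      // LIKELY(p) expands to __builtin_expect(!!(p), 1) under GCC/clang; the
      // !! folds any non-null pointer to exactly 1, so the branch hint matches
      // whenever the condition holds.
      if (LIKELY(p))
        return *p;
      return 0;
    }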
diff --git a/chromium/base/containers/mru_cache.h b/chromium/base/containers/mru_cache.h
index 6c1d6260f55..4005489d4b2 100644
--- a/chromium/base/containers/mru_cache.h
+++ b/chromium/base/containers/mru_cache.h
@@ -209,10 +209,12 @@ class MRUCacheBase {
// A container that does not do anything to free its data. Use this when storing
// value types (as opposed to pointers) in the list.
-template <class KeyType, class PayloadType>
-class MRUCache : public MRUCacheBase<KeyType, PayloadType, std::less<KeyType>> {
+template <class KeyType,
+ class PayloadType,
+ class CompareType = std::less<KeyType>>
+class MRUCache : public MRUCacheBase<KeyType, PayloadType, CompareType> {
private:
- using ParentType = MRUCacheBase<KeyType, PayloadType, std::less<KeyType>>;
+ using ParentType = MRUCacheBase<KeyType, PayloadType, CompareType>;
public:
// See MRUCacheBase, noting the possibility of using NO_AUTO_EVICT.
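The new CompareType parameter makes the key ordering pluggable. A hedged usage sketch, where CaseInsensitiveLess is illustrative and not part of this patch:

    #include <string>

    #include "base/containers/mru_cache.h"
    #include "base/strings/string_util.h"

    struct CaseInsensitiveLess {
      bool operator()(const std::string& a, const std::string& b) const {
        return base::CompareCaseInsensitiveASCII(a, b) < 0;
      }
    };

    // A cache of at most 16 entries whose string keys compare case-insensitively.
    base::MRUCache<std::string, int, CaseInsensitiveLess> cache(16);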
diff --git a/chromium/base/containers/scoped_ptr_hash_map.h b/chromium/base/containers/scoped_ptr_hash_map.h
deleted file mode 100644
index 72c6ff4152e..00000000000
--- a/chromium/base/containers/scoped_ptr_hash_map.h
+++ /dev/null
@@ -1,175 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_CONTAINERS_SCOPED_PTR_HASH_MAP_H_
-#define BASE_CONTAINERS_SCOPED_PTR_HASH_MAP_H_
-
-#include <stddef.h>
-
-#include <algorithm>
-#include <memory>
-#include <utility>
-
-#include "base/containers/hash_tables.h"
-#include "base/logging.h"
-#include "base/macros.h"
-
-namespace base {
-
-// Deprecated. Use std::unordered_map instead. https://crbug.com/579229
-//
-// This type acts like a hash_map<K, std::unique_ptr<V, D> >, based on top of
-// base::hash_map. The ScopedPtrHashMap has ownership of all values in the data
-// structure.
-template <typename Key, typename ScopedPtr>
-class ScopedPtrHashMap {
- typedef base::hash_map<Key, typename ScopedPtr::element_type*> Container;
-
- public:
- typedef typename Container::key_type key_type;
- typedef typename Container::mapped_type mapped_type;
- typedef typename Container::value_type value_type;
- typedef typename Container::iterator iterator;
- typedef typename Container::const_iterator const_iterator;
-
- ScopedPtrHashMap() {}
-
- ~ScopedPtrHashMap() { clear(); }
-
- void swap(ScopedPtrHashMap<Key, ScopedPtr>& other) {
- data_.swap(other.data_);
- }
-
- // Replaces value but not key if key is already present.
- iterator set(const Key& key, ScopedPtr data) {
- iterator it = find(key);
- if (it != end()) {
- // Let ScopedPtr decide how to delete. For example, it may use custom
- // deleter.
- ScopedPtr(it->second).reset();
- it->second = data.release();
- return it;
- }
-
- return data_.insert(std::make_pair(key, data.release())).first;
- }
-
- // Does nothing if key is already present
- std::pair<iterator, bool> add(const Key& key, ScopedPtr data) {
- std::pair<iterator, bool> result =
- data_.insert(std::make_pair(key, data.get()));
- if (result.second)
- ::ignore_result(data.release());
- return result;
- }
-
- void erase(iterator it) {
- // Let ScopedPtr decide how to delete.
- ScopedPtr(it->second).reset();
- data_.erase(it);
- }
-
- size_t erase(const Key& k) {
- iterator it = data_.find(k);
- if (it == data_.end())
- return 0;
- erase(it);
- return 1;
- }
-
- ScopedPtr take(iterator it) {
- DCHECK(it != data_.end());
- if (it == data_.end())
- return ScopedPtr();
-
- ScopedPtr ret(it->second);
- it->second = NULL;
- return ret;
- }
-
- ScopedPtr take(const Key& k) {
- iterator it = find(k);
- if (it == data_.end())
- return ScopedPtr();
-
- return take(it);
- }
-
- ScopedPtr take_and_erase(iterator it) {
- DCHECK(it != data_.end());
- if (it == data_.end())
- return ScopedPtr();
-
- ScopedPtr ret(it->second);
- data_.erase(it);
- return ret;
- }
-
- ScopedPtr take_and_erase(const Key& k) {
- iterator it = find(k);
- if (it == data_.end())
- return ScopedPtr();
-
- return take_and_erase(it);
- }
-
- // Returns the element in the hash_map that matches the given key.
- // If no such element exists it returns NULL.
- typename ScopedPtr::element_type* get(const Key& k) const {
- const_iterator it = find(k);
- if (it == end())
- return NULL;
- return it->second;
- }
-
- inline bool contains(const Key& k) const { return data_.count(k) > 0; }
-
- inline void clear() {
- auto it = data_.begin();
- while (it != data_.end()) {
- // NOTE: Deleting behind the iterator. Deleting the value does not always
- // invalidate the iterator, but it may do so if the key is a pointer into
- // the value object.
- auto temp = it;
- ++it;
- // Let ScopedPtr decide how to delete.
- ScopedPtr(temp->second).reset();
- }
- data_.clear();
- }
-
- inline const_iterator find(const Key& k) const { return data_.find(k); }
- inline iterator find(const Key& k) { return data_.find(k); }
-
- inline size_t count(const Key& k) const { return data_.count(k); }
- inline std::pair<const_iterator, const_iterator> equal_range(
- const Key& k) const {
- return data_.equal_range(k);
- }
- inline std::pair<iterator, iterator> equal_range(const Key& k) {
- return data_.equal_range(k);
- }
-
- inline size_t size() const { return data_.size(); }
- inline size_t max_size() const { return data_.max_size(); }
-
- inline bool empty() const { return data_.empty(); }
-
- inline size_t bucket_count() const { return data_.bucket_count(); }
- inline void resize(size_t size) { return data_.resize(size); }
-
- inline iterator begin() { return data_.begin(); }
- inline const_iterator begin() const { return data_.begin(); }
- inline iterator end() { return data_.end(); }
- inline const_iterator end() const { return data_.end(); }
-
- private:
- Container data_;
-
- DISALLOW_COPY_AND_ASSIGN(ScopedPtrHashMap);
-};
-
-} // namespace base
-
-#endif // BASE_CONTAINERS_SCOPED_PTR_HASH_MAP_H_
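The deprecation note in the deleted header points at std::unordered_map; a migration sketch for the common operations (Widget is a hypothetical value type):

    #include <memory>
    #include <unordered_map>
    #include <utility>

    #include "base/memory/ptr_util.h"

    struct Widget {};

    std::unique_ptr<Widget> MigrationSketch() {
      std::unordered_map<int, std::unique_ptr<Widget>> map;
      map[42] = base::MakeUnique<Widget>();                // was set()
      std::unique_ptr<Widget> taken = std::move(map[42]);  // was take()
      map.erase(42);                                       // was erase()
      return taken;
    }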
diff --git a/chromium/base/containers/scoped_ptr_hash_map_unittest.cc b/chromium/base/containers/scoped_ptr_hash_map_unittest.cc
deleted file mode 100644
index eddabafd2f6..00000000000
--- a/chromium/base/containers/scoped_ptr_hash_map_unittest.cc
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/containers/scoped_ptr_hash_map.h"
-
-#include <memory>
-
-#include "base/memory/ptr_util.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace base {
-namespace {
-
-namespace namespace_with_ignore_result {
-
-class Value {};
-
-template <typename T>
-void ignore_result(const T&) {}
-
-} // namespace namespace_with_ignore_result
-
-struct DeleteCounter {
- public:
- DeleteCounter() {}
- ~DeleteCounter() { g_delete_count++; }
-
- static void ResetCounter() { g_delete_count = 0; }
- static int delete_count() { return g_delete_count; }
-
- private:
- static int g_delete_count;
-};
-
-int DeleteCounter::g_delete_count = 0;
-
-struct CountingDeleter {
- public:
- inline void operator()(DeleteCounter* ptr) const {
- g_deleter_call_count++;
- delete ptr;
- }
-
- static int count() { return g_deleter_call_count; }
- static void ResetCounter() { g_deleter_call_count = 0; }
-
- private:
- static int g_deleter_call_count;
-};
-
-int CountingDeleter::g_deleter_call_count = 0;
-
-TEST(ScopedPtrHashMapTest, CustomDeleter) {
- int key = 123;
-
- // Test dtor.
- DeleteCounter::ResetCounter();
- CountingDeleter::ResetCounter();
- {
- ScopedPtrHashMap<int, std::unique_ptr<DeleteCounter, CountingDeleter>> map;
- map.set(key,
- std::unique_ptr<DeleteCounter, CountingDeleter>(new DeleteCounter));
- }
- EXPECT_EQ(1, DeleteCounter::delete_count());
- EXPECT_EQ(1, CountingDeleter::count());
-
- // Test set and erase.
- DeleteCounter::ResetCounter();
- CountingDeleter::ResetCounter();
- {
- ScopedPtrHashMap<int, std::unique_ptr<DeleteCounter, CountingDeleter>> map;
- map.erase(map.set(key, std::unique_ptr<DeleteCounter, CountingDeleter>(
- new DeleteCounter)));
- EXPECT_EQ(1, DeleteCounter::delete_count());
- EXPECT_EQ(1, CountingDeleter::count());
- }
- EXPECT_EQ(1, DeleteCounter::delete_count());
- EXPECT_EQ(1, CountingDeleter::count());
-
- // Test set more than once.
- DeleteCounter::ResetCounter();
- CountingDeleter::ResetCounter();
- {
- ScopedPtrHashMap<int, std::unique_ptr<DeleteCounter, CountingDeleter>> map;
- map.set(key,
- std::unique_ptr<DeleteCounter, CountingDeleter>(new DeleteCounter));
- map.set(key,
- std::unique_ptr<DeleteCounter, CountingDeleter>(new DeleteCounter));
- map.set(key,
- std::unique_ptr<DeleteCounter, CountingDeleter>(new DeleteCounter));
- EXPECT_EQ(2, DeleteCounter::delete_count());
- EXPECT_EQ(2, CountingDeleter::count());
- }
- EXPECT_EQ(3, DeleteCounter::delete_count());
- EXPECT_EQ(3, CountingDeleter::count());
-}
-
-// Test that using a value type from a namespace containing an ignore_result
-// function compiles correctly.
-TEST(ScopedPtrHashMapTest, IgnoreResultCompile) {
- ScopedPtrHashMap<int, std::unique_ptr<namespace_with_ignore_result::Value>>
- scoped_map;
- scoped_map.add(1, WrapUnique(new namespace_with_ignore_result::Value));
-}
-
-} // namespace
-} // namespace base
diff --git a/chromium/base/cpu.cc b/chromium/base/cpu.cc
index af9c23da1a1..25328167425 100644
--- a/chromium/base/cpu.cc
+++ b/chromium/base/cpu.cc
@@ -60,23 +60,22 @@ namespace {
#if defined(__pic__) && defined(__i386__)
void __cpuid(int cpu_info[4], int info_type) {
- __asm__ volatile (
- "mov %%ebx, %%edi\n"
- "cpuid\n"
- "xchg %%edi, %%ebx\n"
- : "=a"(cpu_info[0]), "=D"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
- : "a"(info_type)
- );
+ __asm__ volatile(
+ "mov %%ebx, %%edi\n"
+ "cpuid\n"
+ "xchg %%edi, %%ebx\n"
+ : "=a"(cpu_info[0]), "=D"(cpu_info[1]), "=c"(cpu_info[2]),
+ "=d"(cpu_info[3])
+ : "a"(info_type), "c"(0));
}
#else
void __cpuid(int cpu_info[4], int info_type) {
- __asm__ volatile (
- "cpuid\n"
- : "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
- : "a"(info_type)
- );
+ __asm__ volatile("cpuid\n"
+ : "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]),
+ "=d"(cpu_info[3])
+ : "a"(info_type), "c"(0));
}
#endif
diff --git a/chromium/base/debug/activity_analyzer.cc b/chromium/base/debug/activity_analyzer.cc
index cbac01e1e13..6a483f38fba 100644
--- a/chromium/base/debug/activity_analyzer.cc
+++ b/chromium/base/debug/activity_analyzer.cc
@@ -15,9 +15,12 @@
namespace base {
namespace debug {
+ThreadActivityAnalyzer::Snapshot::Snapshot() {}
+ThreadActivityAnalyzer::Snapshot::~Snapshot() {}
+
ThreadActivityAnalyzer::ThreadActivityAnalyzer(
const ThreadActivityTracker& tracker)
- : activity_snapshot_valid_(tracker.Snapshot(&activity_snapshot_)) {}
+ : activity_snapshot_valid_(tracker.CreateSnapshot(&activity_snapshot_)) {}
ThreadActivityAnalyzer::ThreadActivityAnalyzer(void* base, size_t size)
: ThreadActivityAnalyzer(ThreadActivityTracker(base, size)) {}
@@ -25,13 +28,30 @@ ThreadActivityAnalyzer::ThreadActivityAnalyzer(void* base, size_t size)
ThreadActivityAnalyzer::ThreadActivityAnalyzer(
PersistentMemoryAllocator* allocator,
PersistentMemoryAllocator::Reference reference)
- : ThreadActivityAnalyzer(allocator->GetAsObject<char>(
+ : ThreadActivityAnalyzer(allocator->GetAsArray<char>(
reference,
- GlobalActivityTracker::kTypeIdActivityTracker),
+ GlobalActivityTracker::kTypeIdActivityTracker,
+ 1),
allocator->GetAllocSize(reference)) {}
ThreadActivityAnalyzer::~ThreadActivityAnalyzer() {}
+void ThreadActivityAnalyzer::AddGlobalInformation(
+ GlobalActivityAnalyzer* global) {
+ if (!IsValid())
+ return;
+
+ // User-data is held at the global scope even though it's referenced at the
+ // thread scope.
+ activity_snapshot_.user_data_stack.clear();
+ for (auto& activity : activity_snapshot_.activity_stack) {
+ // The global GetUserDataSnapshot will return an empty snapshot if the ref
+ // or id is not valid.
+ activity_snapshot_.user_data_stack.push_back(global->GetUserDataSnapshot(
+ activity.user_data_ref, activity.user_data_id));
+ }
+}
+
GlobalActivityAnalyzer::GlobalActivityAnalyzer(
std::unique_ptr<PersistentMemoryAllocator> allocator)
: allocator_(std::move(allocator)), allocator_iterator_(allocator_.get()) {}
@@ -82,6 +102,63 @@ ThreadActivityAnalyzer* GlobalActivityAnalyzer::GetAnalyzerForThread(
return found->second.get();
}
+ActivityUserData::Snapshot GlobalActivityAnalyzer::GetUserDataSnapshot(
+ uint32_t ref,
+ uint32_t id) {
+ ActivityUserData::Snapshot snapshot;
+
+ void* memory = allocator_->GetAsArray<char>(
+ ref, GlobalActivityTracker::kTypeIdUserDataRecord,
+ PersistentMemoryAllocator::kSizeAny);
+ if (memory) {
+ size_t size = allocator_->GetAllocSize(ref);
+ const ActivityUserData user_data(memory, size);
+ user_data.CreateSnapshot(&snapshot);
+ if (user_data.id() != id) {
+ // This allocation has been overwritten since it was created. Return an
+ // empty snapshot because whatever was captured is incorrect.
+ snapshot.clear();
+ }
+ }
+
+ return snapshot;
+}
+
+ActivityUserData::Snapshot GlobalActivityAnalyzer::GetGlobalUserDataSnapshot() {
+ ActivityUserData::Snapshot snapshot;
+
+ PersistentMemoryAllocator::Reference ref =
+ PersistentMemoryAllocator::Iterator(allocator_.get())
+ .GetNextOfType(GlobalActivityTracker::kTypeIdGlobalDataRecord);
+ void* memory = allocator_->GetAsArray<char>(
+ ref, GlobalActivityTracker::kTypeIdGlobalDataRecord,
+ PersistentMemoryAllocator::kSizeAny);
+ if (memory) {
+ size_t size = allocator_->GetAllocSize(ref);
+ const ActivityUserData global_data(memory, size);
+ global_data.CreateSnapshot(&snapshot);
+ }
+
+ return snapshot;
+}
+
+std::vector<std::string> GlobalActivityAnalyzer::GetLogMessages() {
+ std::vector<std::string> messages;
+ PersistentMemoryAllocator::Reference ref;
+
+ PersistentMemoryAllocator::Iterator iter(allocator_.get());
+ while ((ref = iter.GetNextOfType(
+ GlobalActivityTracker::kTypeIdGlobalLogMessage)) != 0) {
+ const char* message = allocator_->GetAsArray<char>(
+ ref, GlobalActivityTracker::kTypeIdGlobalLogMessage,
+ PersistentMemoryAllocator::kSizeAny);
+ if (message)
+ messages.push_back(message);
+ }
+
+ return messages;
+}
+
GlobalActivityAnalyzer::ProgramLocation
GlobalActivityAnalyzer::GetProgramLocationFromAddress(uint64_t address) {
// TODO(bcwhite): Implement this.
@@ -109,8 +186,9 @@ void GlobalActivityAnalyzer::PrepareAllAnalyzers() {
for (PersistentMemoryAllocator::Reference tracker_ref : tracker_references_) {
// Get the actual data segment for the tracker. This can fail if the
// record has been marked "free" since the type will not match.
- void* base = allocator_->GetAsObject<char>(
- tracker_ref, GlobalActivityTracker::kTypeIdActivityTracker);
+ void* base = allocator_->GetAsArray<char>(
+ tracker_ref, GlobalActivityTracker::kTypeIdActivityTracker,
+ PersistentMemoryAllocator::kSizeAny);
if (!base)
continue;
@@ -121,6 +199,7 @@ void GlobalActivityAnalyzer::PrepareAllAnalyzers() {
base, allocator_->GetAllocSize(tracker_ref)));
if (!analyzer->IsValid())
continue;
+ analyzer->AddGlobalInformation(this);
// Add this analyzer to the map of known ones, indexed by a unique thread
// identifier.
diff --git a/chromium/base/debug/activity_analyzer.h b/chromium/base/debug/activity_analyzer.h
index 0a527332a62..48efd854a00 100644
--- a/chromium/base/debug/activity_analyzer.h
+++ b/chromium/base/debug/activity_analyzer.h
@@ -16,11 +16,22 @@
namespace base {
namespace debug {
+class GlobalActivityAnalyzer;
+
// This class provides analysis of data captured from a ThreadActivityTracker.
// When created, it takes a snapshot of the data held by the tracker and
// makes that information available to other code.
class BASE_EXPORT ThreadActivityAnalyzer {
public:
+ struct BASE_EXPORT Snapshot : ThreadActivityTracker::Snapshot {
+ Snapshot();
+ ~Snapshot();
+
+ // The user-data snapshot for an activity, matching the |activity_stack|
+ // of ThreadActivityTracker::Snapshot, if any.
+ std::vector<ActivityUserData::Snapshot> user_data_stack;
+ };
+
// This class provides keys that uniquely identify a thread, even across
// multiple processes.
class ThreadKey {
@@ -59,6 +70,9 @@ class BASE_EXPORT ThreadActivityAnalyzer {
~ThreadActivityAnalyzer();
+ // Adds information from the global analyzer.
+ void AddGlobalInformation(GlobalActivityAnalyzer* global);
+
// Returns true iff the contained data is valid. Results from all other
// methods are undefined if this returns false.
bool IsValid() { return activity_snapshot_valid_; }
@@ -74,13 +88,13 @@ class BASE_EXPORT ThreadActivityAnalyzer {
activity_snapshot_.thread_id);
}
- const ActivitySnapshot& activity_snapshot() { return activity_snapshot_; }
+ const Snapshot& activity_snapshot() { return activity_snapshot_; }
private:
friend class GlobalActivityAnalyzer;
// The snapshot of the activity tracker taken at the moment of construction.
- ActivitySnapshot activity_snapshot_;
+ Snapshot activity_snapshot_;
// Flag indicating if the snapshot data is valid.
bool activity_snapshot_valid_;
@@ -132,6 +146,15 @@ class BASE_EXPORT GlobalActivityAnalyzer {
// Ownership stays with the global analyzer object.
ThreadActivityAnalyzer* GetAnalyzerForThread(const ThreadKey& key);
+ // Extract user data based on a reference and its identifier.
+ ActivityUserData::Snapshot GetUserDataSnapshot(uint32_t ref, uint32_t id);
+
+ // Extract the global user data.
+ ActivityUserData::Snapshot GetGlobalUserDataSnapshot();
+
+  // Gets all log messages stored within the underlying persistent memory.
+ std::vector<std::string> GetLogMessages();
+
// Gets the corresponding "program location" for a given "program counter".
// This will return {0,0} if no mapping could be found.
ProgramLocation GetProgramLocationFromAddress(uint64_t address);
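The new user-data and log-message accessors are exercised by the unit test below; a condensed usage sketch (DumpGlobalActivity is illustrative, and it assumes ActivityUserData::Snapshot iterates as a key/value map, as the test's at()/ContainsKey() calls suggest):

    #include <string>

    #include "base/debug/activity_analyzer.h"
    #include "base/logging.h"

    void DumpGlobalActivity(base::debug::GlobalActivityAnalyzer* analyzer) {
      base::debug::ActivityUserData::Snapshot global_data =
          analyzer->GetGlobalUserDataSnapshot();
      for (const auto& entry : global_data)
        LOG(INFO) << "global key: " << entry.first;
      for (const std::string& message : analyzer->GetLogMessages())
        LOG(INFO) << "log: " << message;
    }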
diff --git a/chromium/base/debug/activity_analyzer_unittest.cc b/chromium/base/debug/activity_analyzer_unittest.cc
index 5cdb6c5fe48..08ed85c66db 100644
--- a/chromium/base/debug/activity_analyzer_unittest.cc
+++ b/chromium/base/debug/activity_analyzer_unittest.cc
@@ -16,6 +16,7 @@
#include "base/memory/ptr_util.h"
#include "base/pending_task.h"
#include "base/process/process.h"
+#include "base/stl_util.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
#include "base/synchronization/spin_wait.h"
@@ -176,5 +177,166 @@ TEST_F(ActivityAnalyzerTest, GlobalAnalyzerConstruction) {
EXPECT_EQ(tk1, tk2);
}
+TEST_F(ActivityAnalyzerTest, UserDataSnapshotTest) {
+ GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3);
+ ThreadActivityAnalyzer::Snapshot snapshot;
+
+ const char string1a[] = "string1a";
+ const char string1b[] = "string1b";
+ const char string2a[] = "string2a";
+ const char string2b[] = "string2b";
+
+ PersistentMemoryAllocator* allocator =
+ GlobalActivityTracker::Get()->allocator();
+ GlobalActivityAnalyzer global_analyzer(MakeUnique<PersistentMemoryAllocator>(
+ const_cast<void*>(allocator->data()), allocator->size(), 0, 0, "", true));
+
+ ThreadActivityTracker* tracker =
+ GlobalActivityTracker::Get()->GetOrCreateTrackerForCurrentThread();
+
+ {
+ ScopedActivity activity1(1, 11, 111);
+ ActivityUserData& user_data1 = activity1.user_data();
+ user_data1.Set("raw1", "foo1", 4);
+ user_data1.SetString("string1", "bar1");
+ user_data1.SetChar("char1", '1');
+ user_data1.SetInt("int1", -1111);
+ user_data1.SetUint("uint1", 1111);
+ user_data1.SetBool("bool1", true);
+ user_data1.SetReference("ref1", string1a, sizeof(string1a));
+ user_data1.SetStringReference("sref1", string1b);
+
+ {
+ ScopedActivity activity2(2, 22, 222);
+ ActivityUserData& user_data2 = activity2.user_data();
+ user_data2.Set("raw2", "foo2", 4);
+ user_data2.SetString("string2", "bar2");
+ user_data2.SetChar("char2", '2');
+ user_data2.SetInt("int2", -2222);
+ user_data2.SetUint("uint2", 2222);
+ user_data2.SetBool("bool2", false);
+ user_data2.SetReference("ref2", string2a, sizeof(string2a));
+ user_data2.SetStringReference("sref2", string2b);
+
+ ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
+ ASSERT_EQ(2U, snapshot.activity_stack.size());
+
+ ThreadActivityAnalyzer analyzer(*tracker);
+ analyzer.AddGlobalInformation(&global_analyzer);
+ const ThreadActivityAnalyzer::Snapshot& snapshot =
+ analyzer.activity_snapshot();
+ ASSERT_EQ(2U, snapshot.user_data_stack.size());
+ const ActivityUserData::Snapshot& user_data =
+ snapshot.user_data_stack.at(1);
+ EXPECT_EQ(8U, user_data.size());
+ ASSERT_TRUE(ContainsKey(user_data, "raw2"));
+ EXPECT_EQ("foo2", user_data.at("raw2").Get().as_string());
+ ASSERT_TRUE(ContainsKey(user_data, "string2"));
+ EXPECT_EQ("bar2", user_data.at("string2").GetString().as_string());
+ ASSERT_TRUE(ContainsKey(user_data, "char2"));
+ EXPECT_EQ('2', user_data.at("char2").GetChar());
+ ASSERT_TRUE(ContainsKey(user_data, "int2"));
+ EXPECT_EQ(-2222, user_data.at("int2").GetInt());
+ ASSERT_TRUE(ContainsKey(user_data, "uint2"));
+ EXPECT_EQ(2222U, user_data.at("uint2").GetUint());
+ ASSERT_TRUE(ContainsKey(user_data, "bool2"));
+ EXPECT_FALSE(user_data.at("bool2").GetBool());
+ ASSERT_TRUE(ContainsKey(user_data, "ref2"));
+ EXPECT_EQ(string2a, user_data.at("ref2").GetReference().data());
+ EXPECT_EQ(sizeof(string2a), user_data.at("ref2").GetReference().size());
+ ASSERT_TRUE(ContainsKey(user_data, "sref2"));
+ EXPECT_EQ(string2b, user_data.at("sref2").GetStringReference().data());
+ EXPECT_EQ(strlen(string2b),
+ user_data.at("sref2").GetStringReference().size());
+ }
+
+ ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
+ ASSERT_EQ(1U, snapshot.activity_stack.size());
+
+ ThreadActivityAnalyzer analyzer(*tracker);
+ analyzer.AddGlobalInformation(&global_analyzer);
+ const ThreadActivityAnalyzer::Snapshot& snapshot =
+ analyzer.activity_snapshot();
+ ASSERT_EQ(1U, snapshot.user_data_stack.size());
+ const ActivityUserData::Snapshot& user_data =
+ snapshot.user_data_stack.at(0);
+ EXPECT_EQ(8U, user_data.size());
+ EXPECT_EQ("foo1", user_data.at("raw1").Get().as_string());
+ EXPECT_EQ("bar1", user_data.at("string1").GetString().as_string());
+ EXPECT_EQ('1', user_data.at("char1").GetChar());
+ EXPECT_EQ(-1111, user_data.at("int1").GetInt());
+ EXPECT_EQ(1111U, user_data.at("uint1").GetUint());
+ EXPECT_TRUE(user_data.at("bool1").GetBool());
+ EXPECT_EQ(string1a, user_data.at("ref1").GetReference().data());
+ EXPECT_EQ(sizeof(string1a), user_data.at("ref1").GetReference().size());
+ EXPECT_EQ(string1b, user_data.at("sref1").GetStringReference().data());
+ EXPECT_EQ(strlen(string1b),
+ user_data.at("sref1").GetStringReference().size());
+ }
+
+ ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
+ ASSERT_EQ(0U, snapshot.activity_stack.size());
+}
+
+TEST_F(ActivityAnalyzerTest, GlobalUserDataTest) {
+ GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3);
+
+ const char string1[] = "foo";
+ const char string2[] = "bar";
+
+ PersistentMemoryAllocator* allocator =
+ GlobalActivityTracker::Get()->allocator();
+ GlobalActivityAnalyzer global_analyzer(MakeUnique<PersistentMemoryAllocator>(
+ const_cast<void*>(allocator->data()), allocator->size(), 0, 0, "", true));
+
+ ActivityUserData& global_data = GlobalActivityTracker::Get()->user_data();
+ global_data.Set("raw", "foo", 3);
+ global_data.SetString("string", "bar");
+ global_data.SetChar("char", '9');
+ global_data.SetInt("int", -9999);
+ global_data.SetUint("uint", 9999);
+ global_data.SetBool("bool", true);
+ global_data.SetReference("ref", string1, sizeof(string1));
+ global_data.SetStringReference("sref", string2);
+
+ ActivityUserData::Snapshot snapshot =
+ global_analyzer.GetGlobalUserDataSnapshot();
+ ASSERT_TRUE(ContainsKey(snapshot, "raw"));
+ EXPECT_EQ("foo", snapshot.at("raw").Get().as_string());
+ ASSERT_TRUE(ContainsKey(snapshot, "string"));
+ EXPECT_EQ("bar", snapshot.at("string").GetString().as_string());
+ ASSERT_TRUE(ContainsKey(snapshot, "char"));
+ EXPECT_EQ('9', snapshot.at("char").GetChar());
+ ASSERT_TRUE(ContainsKey(snapshot, "int"));
+ EXPECT_EQ(-9999, snapshot.at("int").GetInt());
+ ASSERT_TRUE(ContainsKey(snapshot, "uint"));
+ EXPECT_EQ(9999U, snapshot.at("uint").GetUint());
+ ASSERT_TRUE(ContainsKey(snapshot, "bool"));
+ EXPECT_TRUE(snapshot.at("bool").GetBool());
+ ASSERT_TRUE(ContainsKey(snapshot, "ref"));
+ EXPECT_EQ(string1, snapshot.at("ref").GetReference().data());
+ EXPECT_EQ(sizeof(string1), snapshot.at("ref").GetReference().size());
+ ASSERT_TRUE(ContainsKey(snapshot, "sref"));
+ EXPECT_EQ(string2, snapshot.at("sref").GetStringReference().data());
+ EXPECT_EQ(strlen(string2), snapshot.at("sref").GetStringReference().size());
+}
+
+TEST_F(ActivityAnalyzerTest, GlobalLogMessages) {
+ GlobalActivityTracker::CreateWithLocalMemory(kMemorySize, 0, "", 3);
+
+ PersistentMemoryAllocator* allocator =
+ GlobalActivityTracker::Get()->allocator();
+ GlobalActivityAnalyzer analyzer(MakeUnique<PersistentMemoryAllocator>(
+ const_cast<void*>(allocator->data()), allocator->size(), 0, 0, "", true));
+
+ GlobalActivityTracker::Get()->RecordLogMessage("hello world");
+ GlobalActivityTracker::Get()->RecordLogMessage("foo bar");
+
+ std::vector<std::string> messages = analyzer.GetLogMessages();
+ ASSERT_EQ(2U, messages.size());
+ EXPECT_EQ("hello world", messages[0]);
+ EXPECT_EQ("foo bar", messages[1]);
+}
+
} // namespace debug
} // namespace base
diff --git a/chromium/base/debug/activity_tracker.cc b/chromium/base/debug/activity_tracker.cc
index 678e740f584..c728fa052a5 100644
--- a/chromium/base/debug/activity_tracker.cc
+++ b/chromium/base/debug/activity_tracker.cc
@@ -5,6 +5,8 @@
#include "base/debug/activity_tracker.h"
#include <algorithm>
+#include <limits>
+#include <utility>
#include "base/debug/stack_trace.h"
#include "base/files/file.h"
@@ -38,7 +40,7 @@ const int kMinStackDepth = 2;
// The amount of memory set aside for holding arbitrary user data (key/value
// pairs) globally or associated with ActivityData entries.
const size_t kUserDataSize = 1024; // bytes
-const size_t kGlobalDataSize = 1024; // bytes
+const size_t kGlobalDataSize = 4096; // bytes
const size_t kMaxUserDataNameLength =
static_cast<size_t>(std::numeric_limits<uint8_t>::max());
@@ -57,6 +59,11 @@ union ThreadRef {
#endif
};
+// Determines the previous aligned index.
+size_t RoundDownToAlignment(size_t index, size_t alignment) {
+ return index & (0 - alignment);
+}
+
// Determines the next aligned index.
size_t RoundUpToAlignment(size_t index, size_t alignment) {
return (index + (alignment - 1)) & (0 - alignment);
@@ -150,7 +157,8 @@ ActivityTrackerMemoryAllocator::GetObjectReference() {
void ActivityTrackerMemoryAllocator::ReleaseObjectReference(Reference ref) {
// Zero the memory so that it is ready for immediate use if needed later.
- char* mem_base = allocator_->GetAsObject<char>(ref, object_type_);
+ char* mem_base = allocator_->GetAsArray<char>(
+ ref, object_type_, PersistentMemoryAllocator::kSizeAny);
DCHECK(mem_base);
memset(mem_base, 0, object_size_);
@@ -193,15 +201,81 @@ void Activity::FillFrom(Activity* activity,
#endif
}
-ActivitySnapshot::ActivitySnapshot() {}
-ActivitySnapshot::~ActivitySnapshot() {}
+ActivityUserData::TypedValue::TypedValue() {}
+ActivityUserData::TypedValue::TypedValue(const TypedValue& other) = default;
+ActivityUserData::TypedValue::~TypedValue() {}
+
+StringPiece ActivityUserData::TypedValue::Get() const {
+ DCHECK_EQ(RAW_VALUE, type_);
+ return long_value_;
+}
+
+StringPiece ActivityUserData::TypedValue::GetString() const {
+ DCHECK_EQ(STRING_VALUE, type_);
+ return long_value_;
+}
+
+bool ActivityUserData::TypedValue::GetBool() const {
+ DCHECK_EQ(BOOL_VALUE, type_);
+ return short_value_ != 0;
+}
+
+char ActivityUserData::TypedValue::GetChar() const {
+ DCHECK_EQ(CHAR_VALUE, type_);
+ return static_cast<char>(short_value_);
+}
+
+int64_t ActivityUserData::TypedValue::GetInt() const {
+ DCHECK_EQ(SIGNED_VALUE, type_);
+ return static_cast<int64_t>(short_value_);
+}
+
+uint64_t ActivityUserData::TypedValue::GetUint() const {
+ DCHECK_EQ(UNSIGNED_VALUE, type_);
+ return static_cast<uint64_t>(short_value_);
+}
+
+StringPiece ActivityUserData::TypedValue::GetReference() const {
+ DCHECK_EQ(RAW_VALUE_REFERENCE, type_);
+ return ref_value_;
+}
+
+StringPiece ActivityUserData::TypedValue::GetStringReference() const {
+ DCHECK_EQ(STRING_VALUE_REFERENCE, type_);
+ return ref_value_;
+}
ActivityUserData::ValueInfo::ValueInfo() {}
ActivityUserData::ValueInfo::ValueInfo(ValueInfo&&) = default;
ActivityUserData::ValueInfo::~ValueInfo() {}
+std::atomic<uint32_t> ActivityUserData::next_id_;
+
ActivityUserData::ActivityUserData(void* memory, size_t size)
- : memory_(static_cast<char*>(memory)), available_(size) {}
+ : memory_(reinterpret_cast<char*>(memory)),
+ available_(RoundDownToAlignment(size, kMemoryAlignment)),
+ id_(reinterpret_cast<std::atomic<uint32_t>*>(memory)) {
+ // It's possible that no user data is being stored.
+ if (!memory_)
+ return;
+
+ DCHECK_LT(kMemoryAlignment, available_);
+ if (id_->load(std::memory_order_relaxed) == 0) {
+ // Generate a new ID and store it in the first 32-bit word of memory_.
+ // |id_| must be non-zero for non-sink instances.
+ uint32_t id;
+ while ((id = next_id_.fetch_add(1, std::memory_order_relaxed)) == 0)
+ ;
+ id_->store(id, std::memory_order_relaxed);
+ DCHECK_NE(0U, id_->load(std::memory_order_relaxed));
+ }
+ memory_ += kMemoryAlignment;
+ available_ -= kMemoryAlignment;
+
+ // If there is already data present, load that. This allows the same class
+ // to be used for analysis through snapshots.
+ ImportExistingData();
+}
ActivityUserData::~ActivityUserData() {}
@@ -238,18 +312,28 @@ void ActivityUserData::Set(StringPiece name,
sizeof(Header);
size_t value_extent = RoundUpToAlignment(size, kMemoryAlignment);
- // The "basic size" is the minimum size of the record. It's possible that
- // lengthy values will get truncated but there must be at least some bytes
- // available.
- size_t basic_size = sizeof(Header) + name_extent + kMemoryAlignment;
- if (basic_size > available_)
- return; // No space to store even the smallest value.
+ // The "base size" is the size of the header and (padded) string key. Stop
+ // now if there's not room enough for even this.
+ size_t base_size = sizeof(Header) + name_extent;
+ if (base_size > available_)
+ return;
+
+ // The "full size" is the size for storing the entire value.
+ size_t full_size = std::min(base_size + value_extent, available_);
+
+ // If the value is actually a single byte, see if it can be stuffed at the
+ // end of the name extent rather than wasting kMemoryAlignment bytes.
+ if (size == 1 && name_extent > name_size) {
+ full_size = base_size;
+ --name_extent;
+ --base_size;
+ }
- // The "full size" is the size for storing the entire value, truncated
- // to the amount of available memory.
- size_t full_size =
- std::min(sizeof(Header) + name_extent + value_extent, available_);
- size = std::min(full_size - sizeof(Header) - name_extent, size);
+ // Truncate the stored size to the amount of available memory. Stop now if
+ // there's not any room for even part of the value.
+ size = std::min(full_size - base_size, size);
+ if (size == 0)
+ return;
// Allocate a chunk of memory.
Header* header = reinterpret_cast<Header*>(memory_);
@@ -302,13 +386,107 @@ void ActivityUserData::SetReference(StringPiece name,
Set(name, type, &rec, sizeof(rec));
}
+void ActivityUserData::ImportExistingData() const {
+ while (available_ > sizeof(Header)) {
+ Header* header = reinterpret_cast<Header*>(memory_);
+ ValueType type =
+ static_cast<ValueType>(header->type.load(std::memory_order_acquire));
+ if (type == END_OF_VALUES)
+ return;
+ if (header->record_size > available_)
+ return;
+
+ size_t value_offset = RoundUpToAlignment(sizeof(Header) + header->name_size,
+ kMemoryAlignment);
+ if (header->record_size == value_offset &&
+ header->value_size.load(std::memory_order_relaxed) == 1) {
+ value_offset -= 1;
+ }
+ if (value_offset + header->value_size > header->record_size)
+ return;
+
+ ValueInfo info;
+ info.name = StringPiece(memory_ + sizeof(Header), header->name_size);
+ info.type = type;
+ info.memory = memory_ + value_offset;
+ info.size_ptr = &header->value_size;
+ info.extent = header->record_size - value_offset;
+
+ StringPiece key(info.name);
+ values_.insert(std::make_pair(key, std::move(info)));
+
+ memory_ += header->record_size;
+ available_ -= header->record_size;
+ }
+}
+
+bool ActivityUserData::CreateSnapshot(Snapshot* output_snapshot) const {
+ DCHECK(output_snapshot);
+ DCHECK(output_snapshot->empty());
+
+ // Find any new data that may have been added by an active instance of this
+ // class that is adding records.
+ ImportExistingData();
+
+ for (const auto& entry : values_) {
+ TypedValue value;
+ value.type_ = entry.second.type;
+ DCHECK_GE(entry.second.extent,
+ entry.second.size_ptr->load(std::memory_order_relaxed));
+
+ switch (entry.second.type) {
+ case RAW_VALUE:
+ case STRING_VALUE:
+ value.long_value_ =
+ std::string(reinterpret_cast<char*>(entry.second.memory),
+ entry.second.size_ptr->load(std::memory_order_relaxed));
+ break;
+ case RAW_VALUE_REFERENCE:
+ case STRING_VALUE_REFERENCE: {
+ ReferenceRecord* ref =
+ reinterpret_cast<ReferenceRecord*>(entry.second.memory);
+ value.ref_value_ = StringPiece(
+ reinterpret_cast<char*>(static_cast<uintptr_t>(ref->address)),
+ static_cast<size_t>(ref->size));
+ } break;
+ case BOOL_VALUE:
+ case CHAR_VALUE:
+ value.short_value_ = *reinterpret_cast<char*>(entry.second.memory);
+ break;
+ case SIGNED_VALUE:
+ case UNSIGNED_VALUE:
+ value.short_value_ = *reinterpret_cast<uint64_t*>(entry.second.memory);
+ break;
+ case END_OF_VALUES: // Included for completeness purposes.
+ NOTREACHED();
+ }
+ auto inserted = output_snapshot->insert(
+ std::make_pair(entry.second.name.as_string(), std::move(value)));
+ DCHECK(inserted.second); // True if inserted, false if existed.
+ }
+
+ return true;
+}
+
+const void* ActivityUserData::GetBaseAddress() {
+ // The |memory_| pointer advances as elements are written but the |id_|
+ // value is always at the start of the block so just return that.
+ return id_;
+}
+
// This information is kept for every thread that is tracked. It is filled
// the very first time the thread is seen. All fields must be of exact sizes
// so there is no issue moving between 32 and 64-bit builds.
struct ThreadActivityTracker::Header {
+ // Expected size for 32/64-bit check.
+ static constexpr size_t kExpectedInstanceSize = 80;
+
// This unique number indicates a valid initialization of the memory.
std::atomic<uint32_t> cookie;
- uint32_t reserved; // pad out to 64 bits
+
+ // The number of Activity slots (spaces that can hold an Activity) that
+ // immediately follow this structure in memory.
+ uint32_t stack_slots;
// The process-id and thread-id (thread_ref.as_id) to which this data belongs.
// These identifiers are not guaranteed to mean anything but are unique, in
@@ -330,9 +508,6 @@ struct ThreadActivityTracker::Header {
int64_t start_time;
int64_t start_ticks;
- // The number of Activity slots in the data.
- uint32_t stack_slots;
-
// The current depth of the stack. This may be greater than the number of
// slots. If the depth exceeds the number of slots, the newest entries
// won't be recorded.
@@ -355,6 +530,9 @@ struct ThreadActivityTracker::Header {
char thread_name[32];
};
+ThreadActivityTracker::Snapshot::Snapshot() {}
+ThreadActivityTracker::Snapshot::~Snapshot() {}
+
ThreadActivityTracker::ScopedActivity::ScopedActivity(
ThreadActivityTracker* tracker,
const void* program_counter,
@@ -378,16 +556,6 @@ void ThreadActivityTracker::ScopedActivity::ChangeTypeAndData(
tracker_->ChangeActivity(activity_id_, type, data);
}
-ActivityUserData& ThreadActivityTracker::ScopedActivity::user_data() {
- if (!user_data_) {
- if (tracker_)
- user_data_ = tracker_->GetUserData(activity_id_);
- else
- user_data_ = MakeUnique<ActivityUserData>(nullptr, 0);
- }
- return *user_data_;
-}
-
ThreadActivityTracker::ThreadActivityTracker(void* base, size_t size)
: header_(static_cast<Header*>(base)),
stack_(reinterpret_cast<Activity*>(reinterpret_cast<char*>(base) +
@@ -543,11 +711,6 @@ void ThreadActivityTracker::PopActivity(ActivityId id) {
DCHECK(stack_[depth].activity_type == Activity::ACT_LOCK_ACQUIRE ||
thread_checker_.CalledOnValidThread());
- // Check if there was any user-data memory. It isn't free'd until later
- // because the call to release it can push something on the stack.
- PersistentMemoryAllocator::Reference user_data = stack_[depth].user_data;
- stack_[depth].user_data = 0;
-
// The stack has shrunk meaning that some other thread trying to copy the
// contents for reporting purposes could get bad data. That thread would
// have written a non-zero value into |stack_unchanged|; clearing it here
@@ -555,27 +718,52 @@ void ThreadActivityTracker::PopActivity(ActivityId id) {
// happen after the atomic |depth| operation above so a "release" store
// is required.
header_->stack_unchanged.store(0, std::memory_order_release);
-
- // Release resources located above. All stack processing is done so it's
- // safe if some outside code does another push.
- if (user_data)
- GlobalActivityTracker::Get()->ReleaseUserDataMemory(&user_data);
}
std::unique_ptr<ActivityUserData> ThreadActivityTracker::GetUserData(
- ActivityId id) {
+ ActivityId id,
+ ActivityTrackerMemoryAllocator* allocator) {
// User-data is only stored for activities actually held in the stack.
if (id < stack_slots_) {
+ // Don't allow user data for lock acquisition as recursion may occur.
+ if (stack_[id].activity_type == Activity::ACT_LOCK_ACQUIRE) {
+ NOTREACHED();
+ return MakeUnique<ActivityUserData>(nullptr, 0);
+ }
+
+ // Get (or reuse) a block of memory and create a real UserData object
+ // on it.
+ PersistentMemoryAllocator::Reference ref = allocator->GetObjectReference();
void* memory =
- GlobalActivityTracker::Get()->GetUserDataMemory(&stack_[id].user_data);
- if (memory)
- return MakeUnique<ActivityUserData>(memory, kUserDataSize);
+ allocator->GetAsArray<char>(ref, PersistentMemoryAllocator::kSizeAny);
+ if (memory) {
+ std::unique_ptr<ActivityUserData> user_data =
+ MakeUnique<ActivityUserData>(memory, kUserDataSize);
+ stack_[id].user_data_ref = ref;
+ stack_[id].user_data_id = user_data->id();
+ return user_data;
+ }
}
// Return a dummy object that will still accept (but ignore) Set() calls.
return MakeUnique<ActivityUserData>(nullptr, 0);
}
+bool ThreadActivityTracker::HasUserData(ActivityId id) {
+ // User-data is only stored for activities actually held in the stack.
+ return (id < stack_slots_ && stack_[id].user_data_ref);
+}
+
+void ThreadActivityTracker::ReleaseUserData(
+ ActivityId id,
+ ActivityTrackerMemoryAllocator* allocator) {
+ // User-data is only stored for activities actually held in the stack.
+ if (id < stack_slots_ && stack_[id].user_data_ref) {
+ allocator->ReleaseObjectReference(stack_[id].user_data_ref);
+ stack_[id].user_data_ref = 0;
+ }
+}
+
bool ThreadActivityTracker::IsValid() const {
if (header_->cookie.load(std::memory_order_acquire) != kHeaderCookie ||
header_->process_id.load(std::memory_order_relaxed) == 0 ||
@@ -590,7 +778,7 @@ bool ThreadActivityTracker::IsValid() const {
return valid_;
}
-bool ThreadActivityTracker::Snapshot(ActivitySnapshot* output_snapshot) const {
+bool ThreadActivityTracker::CreateSnapshot(Snapshot* output_snapshot) const {
DCHECK(output_snapshot);
// There is no "called on valid thread" check for this method as it can be
@@ -706,6 +894,40 @@ size_t ThreadActivityTracker::SizeForStackDepth(int stack_depth) {
GlobalActivityTracker* GlobalActivityTracker::g_tracker_ = nullptr;
+GlobalActivityTracker::ScopedThreadActivity::ScopedThreadActivity(
+ const void* program_counter,
+ const void* origin,
+ Activity::Type type,
+ const ActivityData& data,
+ bool lock_allowed)
+ : ThreadActivityTracker::ScopedActivity(GetOrCreateTracker(lock_allowed),
+ program_counter,
+ origin,
+ type,
+ data) {}
+
+GlobalActivityTracker::ScopedThreadActivity::~ScopedThreadActivity() {
+ if (tracker_ && tracker_->HasUserData(activity_id_)) {
+ GlobalActivityTracker* global = GlobalActivityTracker::Get();
+ AutoLock lock(global->user_data_allocator_lock_);
+ tracker_->ReleaseUserData(activity_id_, &global->user_data_allocator_);
+ }
+}
+
+ActivityUserData& GlobalActivityTracker::ScopedThreadActivity::user_data() {
+ if (!user_data_) {
+ if (tracker_) {
+ GlobalActivityTracker* global = GlobalActivityTracker::Get();
+ AutoLock lock(global->user_data_allocator_lock_);
+ user_data_ =
+ tracker_->GetUserData(activity_id_, &global->user_data_allocator_);
+ } else {
+ user_data_ = MakeUnique<ActivityUserData>(nullptr, 0);
+ }
+ }
+ return *user_data_;
+}
+
GlobalActivityTracker::ManagedActivityTracker::ManagedActivityTracker(
PersistentMemoryAllocator::Reference mem_reference,
void* base,
@@ -791,9 +1013,23 @@ ThreadActivityTracker* GlobalActivityTracker::CreateTrackerForCurrentThread() {
}
// Convert the memory block found above into an actual memory address.
+ // Doing the conversion as a Header object enacts the 32/64-bit size
+ // consistency checks which would not otherwise be done. Unfortunately,
+ // some older compilers and MSVC don't have standard-conforming definitions
+ // of std::atomic which cause it not to be plain-old-data. Don't check on
+ // those platforms assuming that the checks on other platforms will be
+ // sufficient.
+ // TODO(bcwhite): Review this after major compiler releases.
DCHECK(mem_reference);
- void* mem_base =
- allocator_->GetAsObject<char>(mem_reference, kTypeIdActivityTracker);
+ void* mem_base;
+#if 0 // TODO(bcwhite): Update this for new GetAsObject functionality.
+ mem_base = allocator_->GetAsObject<ThreadActivityTracker::Header>(
+ mem_reference, kTypeIdActivityTracker);
+#else
+ mem_base = allocator_->GetAsArray<char>(mem_reference, kTypeIdActivityTracker,
+ PersistentMemoryAllocator::kSizeAny);
+#endif
+
DCHECK(mem_base);
DCHECK_LE(stack_memory_size_, allocator_->GetAllocSize(mem_reference));
@@ -817,27 +1053,17 @@ void GlobalActivityTracker::ReleaseTrackerForCurrentThreadForTesting() {
delete tracker;
}
-void* GlobalActivityTracker::GetUserDataMemory(
- PersistentMemoryAllocator::Reference* reference) {
- if (!*reference) {
- base::AutoLock autolock(user_data_allocator_lock_);
- *reference = user_data_allocator_.GetObjectReference();
- if (!*reference)
- return nullptr;
+void GlobalActivityTracker::RecordLogMessage(StringPiece message) {
+ // Allocate at least one extra byte so the string is NUL terminated. All
+ // memory returned by the allocator is guaranteed to be zeroed.
+ PersistentMemoryAllocator::Reference ref =
+ allocator_->Allocate(message.size() + 1, kTypeIdGlobalLogMessage);
+ char* memory = allocator_->GetAsArray<char>(ref, kTypeIdGlobalLogMessage,
+ message.size() + 1);
+ if (memory) {
+ memcpy(memory, message.data(), message.size());
+ allocator_->MakeIterable(ref);
}
-
- void* memory =
- allocator_->GetAsObject<char>(*reference, kTypeIdUserDataRecord);
- DCHECK(memory);
- return memory;
-}
-
-void GlobalActivityTracker::ReleaseUserDataMemory(
- PersistentMemoryAllocator::Reference* reference) {
- DCHECK(*reference);
- base::AutoLock autolock(user_data_allocator_lock_);
- user_data_allocator_.ReleaseObjectReference(*reference);
- *reference = PersistentMemoryAllocator::kReferenceNull;
}
GlobalActivityTracker::GlobalActivityTracker(
@@ -860,9 +1086,10 @@ GlobalActivityTracker::GlobalActivityTracker(
kCachedUserDataMemories,
/*make_iterable=*/false),
user_data_(
- allocator_->GetAsObject<char>(
+ allocator_->GetAsArray<char>(
allocator_->Allocate(kGlobalDataSize, kTypeIdGlobalDataRecord),
- kTypeIdGlobalDataRecord),
+ kTypeIdGlobalDataRecord,
+ PersistentMemoryAllocator::kSizeAny),
kGlobalDataSize) {
// Ensure the passed memory is valid and empty (iterator finds nothing).
uint32_t type;
@@ -871,6 +1098,11 @@ GlobalActivityTracker::GlobalActivityTracker(
// Ensure that there is no other global object and then make this one such.
DCHECK(!g_tracker_);
g_tracker_ = this;
+
+ // The global user-data record must be iterable in order to be found by an
+ // analyzer.
+ allocator_->MakeIterable(allocator_->GetAsReference(
+ user_data_.GetBaseAddress(), kTypeIdGlobalDataRecord));
}
GlobalActivityTracker::~GlobalActivityTracker() {
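
The record sizing in ActivityUserData::Set() above rests on the two power-of-two rounding helpers added near the top of this file; a self-contained sketch of the same bit-trick with worked values, assuming (as the tracker does) that the alignment is a power of two:

#include <cassert>
#include <cstddef>

// Same bit-trick as the helpers in activity_tracker.cc; only valid when
// |alignment| is a power of two.
size_t RoundDownToAlignment(size_t index, size_t alignment) {
  return index & (0 - alignment);
}
size_t RoundUpToAlignment(size_t index, size_t alignment) {
  return (index + (alignment - 1)) & (0 - alignment);
}

int main() {
  // With 8-byte alignment, 13 rounds down to 8 and up to 16, while a value
  // that is already aligned is left unchanged.
  assert(RoundDownToAlignment(13, 8) == 8);
  assert(RoundUpToAlignment(13, 8) == 16);
  assert(RoundUpToAlignment(16, 8) == 16);
  return 0;
}
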
diff --git a/chromium/base/debug/activity_tracker.h b/chromium/base/debug/activity_tracker.h
index 24de4317448..62f983b5e87 100644
--- a/chromium/base/debug/activity_tracker.h
+++ b/chromium/base/debug/activity_tracker.h
@@ -26,6 +26,7 @@
#include "base/gtest_prod_util.h"
#include "base/location.h"
#include "base/metrics/persistent_memory_allocator.h"
+#include "base/strings/utf_string_conversions.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_checker.h"
#include "base/threading/thread_local_storage.h"
@@ -131,7 +132,7 @@ extern const ActivityData kNullActivityData;
// A helper class that is used for managing memory allocations within a
// persistent memory allocator. Instances of this class are NOT thread-safe.
// Use from a single thread or protect access with a lock.
-class ActivityTrackerMemoryAllocator {
+class BASE_EXPORT ActivityTrackerMemoryAllocator {
public:
using Reference = PersistentMemoryAllocator::Reference;
@@ -155,6 +156,18 @@ class ActivityTrackerMemoryAllocator {
// Returns an object to the "free" pool.
void ReleaseObjectReference(Reference ref);
+ // Helper function to access an object allocated using this instance.
+ template <typename T>
+ T* GetAsObject(Reference ref) {
+ return allocator_->GetAsObject<T>(ref, object_type_);
+ }
+
+ // Similar to GetAsObject() but converts references to arrays of objects.
+ template <typename T>
+ T* GetAsArray(Reference ref, size_t count) {
+ return allocator_->GetAsArray<T>(ref, object_type_, count);
+ }
+
// The current "used size" of the internal cache, visible for testing.
size_t cache_used() const { return cache_used_; }
@@ -244,8 +257,10 @@ struct Activity {
// enabled.
uint64_t call_stack[kActivityCallStackSize];
- // Reference to arbitrary user data within the persistent memory segment.
- uint32_t user_data;
+ // Reference to arbitrary user data within the persistent memory segment
+ // and a unique identifier for it.
+ uint32_t user_data_ref;
+ uint32_t user_data_id;
// The (enumerated) type of the activity. This defines what fields of the
// |data| record are valid.
@@ -254,7 +269,7 @@ struct Activity {
// Padding to ensure that the next member begins on a 64-bit boundary
// even on 32-bit builds which ensures inter-operability between CPU
// architectures. New fields can be taken from this space.
- uint8_t padding[3];
+ uint8_t padding[7];
// Information specific to the |activity_type|.
ActivityData data;
@@ -266,40 +281,12 @@ struct Activity {
const ActivityData& data);
};
-// This structure holds a copy of all the internal data at the moment the
-// "snapshot" operation is done. It is disconnected from the live tracker
-// so that continued operation of the thread will not cause changes here.
-struct BASE_EXPORT ActivitySnapshot {
- // Explicit constructor/destructor are needed because of complex types
- // with non-trivial default constructors and destructors.
- ActivitySnapshot();
- ~ActivitySnapshot();
-
- // The name of the thread as set when it was created. The name may be
- // truncated due to internal length limitations.
- std::string thread_name;
-
- // The process and thread IDs. These values have no meaning other than
- // they uniquely identify a running process and a running thread within
- // that process. Thread-IDs can be re-used across different processes
- // and both can be re-used after the process/thread exits.
- int64_t process_id = 0;
- int64_t thread_id = 0;
-
- // The current stack of activities that are underway for this thread. It
- // is limited in its maximum size with later entries being left off.
- std::vector<Activity> activity_stack;
-
- // The current total depth of the activity stack, including those later
- // entries not recorded in the |activity_stack| vector.
- uint32_t activity_stack_depth = 0;
-};
-
// This class manages arbitrary user data that can be associated with activities
// done by a thread by supporting key/value pairs of any type. This can provide
// additional information during debugging. It is also used to store arbitrary
// global data. All updates must be done from the same thread.
class BASE_EXPORT ActivityUserData {
+ public:
// List of known value type. REFERENCE types must immediately follow the non-
// external types.
enum ValueType : uint8_t {
@@ -309,14 +296,59 @@ class BASE_EXPORT ActivityUserData {
STRING_VALUE,
STRING_VALUE_REFERENCE,
CHAR_VALUE,
+ BOOL_VALUE,
SIGNED_VALUE,
UNSIGNED_VALUE,
};
- public:
+ class BASE_EXPORT TypedValue {
+ public:
+ TypedValue();
+ TypedValue(const TypedValue& other);
+ ~TypedValue();
+
+ ValueType type() const { return type_; }
+
+ // These methods return the extracted value in the correct format.
+ StringPiece Get() const;
+ StringPiece GetString() const;
+ bool GetBool() const;
+ char GetChar() const;
+ int64_t GetInt() const;
+ uint64_t GetUint() const;
+
+ // These methods return references to process memory as originally provided
+ // to corresponding Set calls. USE WITH CAUTION! There is no guarantee that
+    // the referenced memory is accessible or useful. It's possible that:
+ // - the memory was free'd and reallocated for a different purpose
+ // - the memory has been released back to the OS
+ // - the memory belongs to a different process's address space
+ // Dereferencing the returned StringPiece when the memory is not accessible
+ // will cause the program to SEGV!
+ StringPiece GetReference() const;
+ StringPiece GetStringReference() const;
+
+ private:
+ friend class ActivityUserData;
+
+ ValueType type_;
+ uint64_t short_value_; // Used to hold copy of numbers, etc.
+ std::string long_value_; // Used to hold copy of raw/string data.
+ StringPiece ref_value_; // Used to hold reference to external data.
+ };
+
+ using Snapshot = std::map<std::string, TypedValue>;
+
ActivityUserData(void* memory, size_t size);
~ActivityUserData();
+ // Gets the unique ID number for this user data. If this changes then the
+ // contents have been overwritten by another thread. The return value is
+ // always non-zero unless it's actually just a data "sink".
+ uint32_t id() const {
+ return memory_ ? id_->load(std::memory_order_relaxed) : 0;
+ }
+
// Writes a |value| (as part of a key/value pair) that will be included with
// the activity in any reports. The same |name| can be written multiple times
// with each successive call overwriting the previously stored |value|. For
@@ -332,6 +364,13 @@ class BASE_EXPORT ActivityUserData {
void SetString(StringPiece name, StringPiece value) {
Set(name, STRING_VALUE, value.data(), value.length());
}
+ void SetString(StringPiece name, StringPiece16 value) {
+ SetString(name, UTF16ToUTF8(value));
+ }
+ void SetBool(StringPiece name, bool value) {
+ char cvalue = value ? 1 : 0;
+ Set(name, BOOL_VALUE, &cvalue, sizeof(cvalue));
+ }
void SetChar(StringPiece name, char value) {
Set(name, CHAR_VALUE, &value, sizeof(value));
}
@@ -353,6 +392,16 @@ class BASE_EXPORT ActivityUserData {
SetReference(name, STRING_VALUE_REFERENCE, value.data(), value.length());
}
+ // Creates a snapshot of the key/value pairs contained within. The returned
+ // data will be fixed, independent of whatever changes afterward. There is
+ // protection against concurrent modification of the values but no protection
+ // against a complete overwrite of the contents; the caller must ensure that
+ // the memory segment is not going to be re-initialized while this runs.
+ bool CreateSnapshot(Snapshot* output_snapshot) const;
+
+ // Gets the base memory address used for storing data.
+ const void* GetBaseAddress();
+
private:
FRIEND_TEST_ALL_PREFIXES(ActivityTrackerTest, UserDataTest);
@@ -392,15 +441,30 @@ class BASE_EXPORT ActivityUserData {
const void* memory,
size_t size);
- // TODO(bcwhite): Add Get() methods for Analyzer to use.
+ // Loads any data already in the memory segment. This allows for accessing
+ // records created previously.
+ void ImportExistingData() const;
+
+ // A map of all the values within the memory block, keyed by name for quick
+ // updates of the values. This is "mutable" because it changes on "const"
+ // objects even when the actual data values can't change.
+ mutable std::map<StringPiece, ValueInfo> values_;
- std::map<StringPiece, ValueInfo> values_;
+ // Information about the memory block in which new data can be stored. These
+ // are "mutable" because they change even on "const" objects that are just
+ // skipping already set values.
+ mutable char* memory_;
+ mutable size_t available_;
- char* memory_;
- size_t available_;
+ // A pointer to the unique ID for this instance.
+ std::atomic<uint32_t>* const id_;
base::ThreadChecker thread_checker_;
+  // This ID is used to create unique identifiers for user data so that it's
+ // possible to tell if the information has been overwritten.
+ static std::atomic<uint32_t> next_id_;
+
DISALLOW_COPY_AND_ASSIGN(ActivityUserData);
};
@@ -417,6 +481,40 @@ class BASE_EXPORT ThreadActivityTracker {
public:
using ActivityId = uint32_t;
+ // This structure contains all the common information about the thread so
+ // it doesn't have to be repeated in every entry on the stack. It is defined
+ // and used completely within the .cc file.
+ struct Header;
+
+ // This structure holds a copy of all the internal data at the moment the
+ // "snapshot" operation is done. It is disconnected from the live tracker
+ // so that continued operation of the thread will not cause changes here.
+ struct BASE_EXPORT Snapshot {
+ // Explicit constructor/destructor are needed because of complex types
+ // with non-trivial default constructors and destructors.
+ Snapshot();
+ ~Snapshot();
+
+ // The name of the thread as set when it was created. The name may be
+ // truncated due to internal length limitations.
+ std::string thread_name;
+
+ // The process and thread IDs. These values have no meaning other than
+ // they uniquely identify a running process and a running thread within
+ // that process. Thread-IDs can be re-used across different processes
+ // and both can be re-used after the process/thread exits.
+ int64_t process_id = 0;
+ int64_t thread_id = 0;
+
+ // The current stack of activities that are underway for this thread. It
+ // is limited in its maximum size with later entries being left off.
+ std::vector<Activity> activity_stack;
+
+ // The current total depth of the activity stack, including those later
+ // entries not recorded in the |activity_stack| vector.
+ uint32_t activity_stack_depth = 0;
+ };
+
// This is the base class for having the compiler manage an activity on the
// tracker's stack. It does nothing but call methods on the passed |tracker|
// if it is not null, making it safe (and cheap) to create these objects
@@ -433,10 +531,7 @@ class BASE_EXPORT ThreadActivityTracker {
// Changes some basic metadata about the activity.
void ChangeTypeAndData(Activity::Type type, const ActivityData& data);
- // Returns an object for manipulating user data.
- ActivityUserData& user_data();
-
- private:
+ protected:
// The thread tracker to which this object reports. It can be null if
// activity tracking is not (yet) enabled.
ThreadActivityTracker* const tracker_;
@@ -444,9 +539,7 @@ class BASE_EXPORT ThreadActivityTracker {
// An identifier that indicates a specific activity on the stack.
ActivityId activity_id_;
- // An object that manages additional user data, created only upon request.
- std::unique_ptr<ActivityUserData> user_data_;
-
+ private:
DISALLOW_COPY_AND_ASSIGN(ScopedActivity);
};
@@ -490,8 +583,18 @@ class BASE_EXPORT ThreadActivityTracker {
// Indicates that an activity has completed.
void PopActivity(ActivityId id);
- // Returns an object capable of storing arbitrary user data.
- std::unique_ptr<ActivityUserData> GetUserData(ActivityId id);
+  // Gets (or creates) the user-data object associated with an activity.
+ std::unique_ptr<ActivityUserData> GetUserData(
+ ActivityId id,
+ ActivityTrackerMemoryAllocator* allocator);
+
+  // Returns whether there is real user-data associated with a given
+  // ActivityId, since it's possible that any returned object is just a sink.
+ bool HasUserData(ActivityId id);
+
+  // Releases the user-data information for an activity.
+ void ReleaseUserData(ActivityId id,
+ ActivityTrackerMemoryAllocator* allocator);
// Returns whether the current data is valid or not. It is not valid if
// corruption has been detected in the header or other data structures.
@@ -501,7 +604,7 @@ class BASE_EXPORT ThreadActivityTracker {
// snapshot was not possible, perhaps because the data is not valid; the
// contents of |output_snapshot| are undefined in that case. The current
// implementation does not support concurrent snapshot operations.
- bool Snapshot(ActivitySnapshot* output_snapshot) const;
+ bool CreateSnapshot(Snapshot* output_snapshot) const;
// Calculates the memory size required for a given stack depth, including
// the internal header structure for the stack.
@@ -510,11 +613,6 @@ class BASE_EXPORT ThreadActivityTracker {
private:
friend class ActivityTrackerTest;
- // This structure contains all the common information about the thread so
- // it doesn't have to be repeated in every entry on the stack. It is defined
- // and used completely within the .cc file.
- struct Header;
-
Header* const header_; // Pointer to the Header structure.
Activity* const stack_; // The stack of activities.
const uint32_t stack_slots_; // The total number of stack slots.
@@ -539,9 +637,10 @@ class BASE_EXPORT GlobalActivityTracker {
// will be safely ignored. These are public so that an external process
// can recognize records of this type within an allocator.
enum : uint32_t {
- kTypeIdActivityTracker = 0x5D7381AF + 2, // SHA1(ActivityTracker) v2
- kTypeIdUserDataRecord = 0x615EDDD7 + 1, // SHA1(UserDataRecord) v1
- kTypeIdGlobalDataRecord = 0xAFE61ABE + 1, // SHA1(GlobalDataRecord) v1
+ kTypeIdActivityTracker = 0x5D7381AF + 3, // SHA1(ActivityTracker) v3
+ kTypeIdUserDataRecord = 0x615EDDD7 + 2, // SHA1(UserDataRecord) v2
+ kTypeIdGlobalLogMessage = 0x4CF434F9 + 1, // SHA1(GlobalLogMessage) v1
+ kTypeIdGlobalDataRecord = kTypeIdUserDataRecord + 1000,
kTypeIdActivityTrackerFree = ~kTypeIdActivityTracker,
kTypeIdUserDataRecordFree = ~kTypeIdUserDataRecord,
@@ -558,13 +657,11 @@ class BASE_EXPORT GlobalActivityTracker {
const void* origin,
Activity::Type type,
const ActivityData& data,
- bool lock_allowed)
- : ThreadActivityTracker::ScopedActivity(
- GetOrCreateTracker(lock_allowed),
- program_counter,
- origin,
- type,
- data) {}
+ bool lock_allowed);
+ ~ScopedThreadActivity();
+
+ // Returns an object for manipulating user data.
+ ActivityUserData& user_data();
private:
// Gets (or creates) a tracker for the current thread. If locking is not
@@ -582,6 +679,9 @@ class BASE_EXPORT GlobalActivityTracker {
return global_tracker->GetTrackerForCurrentThread();
}
+ // An object that manages additional user data, created only upon request.
+ std::unique_ptr<ActivityUserData> user_data_;
+
DISALLOW_COPY_AND_ASSIGN(ScopedThreadActivity);
};
@@ -645,18 +745,15 @@ class BASE_EXPORT GlobalActivityTracker {
// Releases the activity-tracker for the current thread (for testing only).
void ReleaseTrackerForCurrentThreadForTesting();
- // Gets a reference to memory for holding user-defined activity data. If
- // the reference is valid, it's memory will be returned. If not, then a
- // new reference will be created (and stored) and that memory returned.
- void* GetUserDataMemory(PersistentMemoryAllocator::Reference* reference);
-
- // Releases memory for user-defined activity data.
- void ReleaseUserDataMemory(PersistentMemoryAllocator::Reference* reference);
+  // Records a log message. The current implementation does NOT recycle these,
+  // so only store critical messages such as FATAL ones.
+ void RecordLogMessage(StringPiece message);
// Accesses the global data record for storing arbitrary key/value pairs.
ActivityUserData& user_data() { return user_data_; }
private:
+ friend class ScopedThreadActivity;
friend class ActivityTrackerTest;
enum : int {
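
A hedged usage sketch for the ActivityUserData API declared above, modeled on the unit tests later in this patch: values are written into a raw buffer and read back through CreateSnapshot(). The 256-byte buffer and the key names are illustrative choices, not part of the API.

// Sketch only: a zeroed stack buffer stands in for the persistent memory
// segment that the tracker normally supplies.
char buffer[256] = {};
base::debug::ActivityUserData data(buffer, sizeof(buffer));
data.SetString("state", "loading");
data.SetInt("attempt", -3);
data.SetBool("cached", true);

base::debug::ActivityUserData::Snapshot snapshot;
if (data.CreateSnapshot(&snapshot)) {
  DCHECK_EQ("loading", snapshot.at("state").GetString().as_string());
  DCHECK_EQ(-3, snapshot.at("attempt").GetInt());
  DCHECK(snapshot.at("cached").GetBool());
}
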
diff --git a/chromium/base/debug/activity_tracker_unittest.cc b/chromium/base/debug/activity_tracker_unittest.cc
index 6e7630e0628..c46b81686b9 100644
--- a/chromium/base/debug/activity_tracker_unittest.cc
+++ b/chromium/base/debug/activity_tracker_unittest.cc
@@ -91,45 +91,48 @@ TEST_F(ActivityTrackerTest, UserDataTest) {
char buffer[256];
memset(buffer, 0, sizeof(buffer));
ActivityUserData data(buffer, sizeof(buffer));
- ASSERT_EQ(sizeof(buffer), data.available_);
+ ASSERT_EQ(sizeof(buffer) - 8, data.available_);
data.SetInt("foo", 1);
- ASSERT_EQ(sizeof(buffer) - 24, data.available_);
+ ASSERT_EQ(sizeof(buffer) - 8 - 24, data.available_);
data.SetUint("b", 1U); // Small names fit beside header in a word.
- ASSERT_EQ(sizeof(buffer) - 24 - 16, data.available_);
+ ASSERT_EQ(sizeof(buffer) - 8 - 24 - 16, data.available_);
data.Set("c", buffer, 10);
- ASSERT_EQ(sizeof(buffer) - 24 - 16 - 24, data.available_);
+ ASSERT_EQ(sizeof(buffer) - 8 - 24 - 16 - 24, data.available_);
data.SetString("dear john", "it's been fun");
- ASSERT_EQ(sizeof(buffer) - 24 - 16 - 24 - 32, data.available_);
+ ASSERT_EQ(sizeof(buffer) - 8 - 24 - 16 - 24 - 32, data.available_);
data.Set("c", buffer, 20);
- ASSERT_EQ(sizeof(buffer) - 24 - 16 - 24 - 32, data.available_);
+ ASSERT_EQ(sizeof(buffer) - 8 - 24 - 16 - 24 - 32, data.available_);
data.SetString("dear john", "but we're done together");
- ASSERT_EQ(sizeof(buffer) - 24 - 16 - 24 - 32, data.available_);
+ ASSERT_EQ(sizeof(buffer) - 8 - 24 - 16 - 24 - 32, data.available_);
data.SetString("dear john", "bye");
- ASSERT_EQ(sizeof(buffer) - 24 - 16 - 24 - 32, data.available_);
+ ASSERT_EQ(sizeof(buffer) - 8 - 24 - 16 - 24 - 32, data.available_);
data.SetChar("d", 'x');
- ASSERT_EQ(sizeof(buffer) - 24 - 16 - 24 - 32 - 16, data.available_);
+ ASSERT_EQ(sizeof(buffer) - 8 - 24 - 16 - 24 - 32 - 8, data.available_);
+
+ data.SetBool("ee", true);
+ ASSERT_EQ(sizeof(buffer) - 8 - 24 - 16 - 24 - 32 - 8 - 16, data.available_);
}
TEST_F(ActivityTrackerTest, PushPopTest) {
std::unique_ptr<ThreadActivityTracker> tracker = CreateActivityTracker();
- ActivitySnapshot snapshot;
+ ThreadActivityTracker::Snapshot snapshot;
- ASSERT_TRUE(tracker->Snapshot(&snapshot));
+ ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
ASSERT_EQ(0U, snapshot.activity_stack_depth);
ASSERT_EQ(0U, snapshot.activity_stack.size());
char origin1;
ActivityId id1 = tracker->PushActivity(&origin1, Activity::ACT_TASK,
ActivityData::ForTask(11));
- ASSERT_TRUE(tracker->Snapshot(&snapshot));
+ ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
ASSERT_EQ(1U, snapshot.activity_stack_depth);
ASSERT_EQ(1U, snapshot.activity_stack.size());
EXPECT_NE(0, snapshot.activity_stack[0].time_internal);
@@ -142,7 +145,7 @@ TEST_F(ActivityTrackerTest, PushPopTest) {
char lock2;
ActivityId id2 = tracker->PushActivity(&origin2, Activity::ACT_LOCK,
ActivityData::ForLock(&lock2));
- ASSERT_TRUE(tracker->Snapshot(&snapshot));
+ ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
ASSERT_EQ(2U, snapshot.activity_stack_depth);
ASSERT_EQ(2U, snapshot.activity_stack.size());
EXPECT_LE(snapshot.activity_stack[0].time_internal,
@@ -154,7 +157,7 @@ TEST_F(ActivityTrackerTest, PushPopTest) {
snapshot.activity_stack[1].data.lock.lock_address);
tracker->PopActivity(id2);
- ASSERT_TRUE(tracker->Snapshot(&snapshot));
+ ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
ASSERT_EQ(1U, snapshot.activity_stack_depth);
ASSERT_EQ(1U, snapshot.activity_stack.size());
EXPECT_EQ(Activity::ACT_TASK, snapshot.activity_stack[0].activity_type);
@@ -163,7 +166,7 @@ TEST_F(ActivityTrackerTest, PushPopTest) {
EXPECT_EQ(11U, snapshot.activity_stack[0].data.task.sequence_id);
tracker->PopActivity(id1);
- ASSERT_TRUE(tracker->Snapshot(&snapshot));
+ ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
ASSERT_EQ(0U, snapshot.activity_stack_depth);
ASSERT_EQ(0U, snapshot.activity_stack.size());
}
@@ -173,10 +176,10 @@ TEST_F(ActivityTrackerTest, ScopedTaskTest) {
ThreadActivityTracker* tracker =
GlobalActivityTracker::Get()->GetOrCreateTrackerForCurrentThread();
- ActivitySnapshot snapshot;
+ ThreadActivityTracker::Snapshot snapshot;
ASSERT_EQ(0U, GetGlobalUserDataMemoryCacheUsed());
- ASSERT_TRUE(tracker->Snapshot(&snapshot));
+ ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
ASSERT_EQ(0U, snapshot.activity_stack_depth);
ASSERT_EQ(0U, snapshot.activity_stack.size());
@@ -186,7 +189,7 @@ TEST_F(ActivityTrackerTest, ScopedTaskTest) {
ActivityUserData& user_data1 = activity1.user_data();
(void)user_data1; // Tell compiler it's been used.
- ASSERT_TRUE(tracker->Snapshot(&snapshot));
+ ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
ASSERT_EQ(1U, snapshot.activity_stack_depth);
ASSERT_EQ(1U, snapshot.activity_stack.size());
EXPECT_EQ(Activity::ACT_TASK, snapshot.activity_stack[0].activity_type);
@@ -197,19 +200,19 @@ TEST_F(ActivityTrackerTest, ScopedTaskTest) {
ActivityUserData& user_data2 = activity2.user_data();
(void)user_data2; // Tell compiler it's been used.
- ASSERT_TRUE(tracker->Snapshot(&snapshot));
+ ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
ASSERT_EQ(2U, snapshot.activity_stack_depth);
ASSERT_EQ(2U, snapshot.activity_stack.size());
EXPECT_EQ(Activity::ACT_TASK, snapshot.activity_stack[1].activity_type);
}
- ASSERT_TRUE(tracker->Snapshot(&snapshot));
+ ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
ASSERT_EQ(1U, snapshot.activity_stack_depth);
ASSERT_EQ(1U, snapshot.activity_stack.size());
EXPECT_EQ(Activity::ACT_TASK, snapshot.activity_stack[0].activity_type);
}
- ASSERT_TRUE(tracker->Snapshot(&snapshot));
+ ASSERT_TRUE(tracker->CreateSnapshot(&snapshot));
ASSERT_EQ(0U, snapshot.activity_stack_depth);
ASSERT_EQ(0U, snapshot.activity_stack.size());
ASSERT_EQ(2U, GetGlobalUserDataMemoryCacheUsed());
diff --git a/chromium/base/debug/dump_without_crashing.cc b/chromium/base/debug/dump_without_crashing.cc
index 47fd873c19f..4b338ca2930 100644
--- a/chromium/base/debug/dump_without_crashing.cc
+++ b/chromium/base/debug/dump_without_crashing.cc
@@ -18,9 +18,12 @@ namespace base {
namespace debug {
-void DumpWithoutCrashing() {
- if (dump_without_crashing_function_)
+bool DumpWithoutCrashing() {
+ if (dump_without_crashing_function_) {
(*dump_without_crashing_function_)();
+ return true;
+ }
+ return false;
}
void SetDumpWithoutCrashingFunction(void (CDECL *function)()) {
diff --git a/chromium/base/debug/dump_without_crashing.h b/chromium/base/debug/dump_without_crashing.h
index b8ed17414ac..a5c85d5ebea 100644
--- a/chromium/base/debug/dump_without_crashing.h
+++ b/chromium/base/debug/dump_without_crashing.h
@@ -17,7 +17,8 @@ namespace debug {
// Before calling this function, call SetDumpWithoutCrashingFunction to pass a
// function pointer, typically chrome!DumpProcessWithoutCrash. See example code
// in chrome_main.cc that does this for chrome.dll.
-BASE_EXPORT void DumpWithoutCrashing();
+// Returns false if called before SetDumpWithoutCrashingFunction.
+BASE_EXPORT bool DumpWithoutCrashing();
// Sets a function that'll be invoked to dump the current process when
// DumpWithoutCrashing() is called.
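
With the new bool return value, a caller can react when no dump handler has been registered yet. A minimal sketch (the fallback behavior is a hypothetical choice):

// Fall back to plain logging if SetDumpWithoutCrashingFunction() has not
// been called yet, so the event is not silently lost.
if (!base::debug::DumpWithoutCrashing())
  LOG(WARNING) << "No dump handler registered; skipping diagnostic dump.";
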
diff --git a/chromium/base/debug/leak_tracker_unittest.cc b/chromium/base/debug/leak_tracker_unittest.cc
index 8b4c5681e0d..b9ecdcf3c97 100644
--- a/chromium/base/debug/leak_tracker_unittest.cc
+++ b/chromium/base/debug/leak_tracker_unittest.cc
@@ -30,7 +30,7 @@ TEST(LeakTrackerTest, NotEnabled) {
EXPECT_EQ(-1, LeakTracker<ClassA>::NumLiveInstances());
EXPECT_EQ(-1, LeakTracker<ClassB>::NumLiveInstances());
- // Use scoped_ptr so compiler doesn't complain about unused variables.
+ // Use unique_ptr so compiler doesn't complain about unused variables.
std::unique_ptr<ClassA> a1(new ClassA);
std::unique_ptr<ClassB> b1(new ClassB);
std::unique_ptr<ClassB> b2(new ClassB);
diff --git a/chromium/base/debug/proc_maps_linux_unittest.cc b/chromium/base/debug/proc_maps_linux_unittest.cc
index 2e35ca64c01..9b5bcaca0eb 100644
--- a/chromium/base/debug/proc_maps_linux_unittest.cc
+++ b/chromium/base/debug/proc_maps_linux_unittest.cc
@@ -227,8 +227,12 @@ void CheckProcMapsRegions(const std::vector<MappedMemoryRegion> &regions) {
// ignore checking for the stack and address when running under Valgrind.
// See http://crbug.com/431702 for more details.
if (!RunningOnValgrind() && regions[i].path == "[stack]") {
+// On Android the test is run on a background thread, since [stack] is for
+// the main thread, we cannot test this.
+#if !defined(OS_ANDROID)
EXPECT_GE(address, regions[i].start);
EXPECT_LT(address, regions[i].end);
+#endif
EXPECT_TRUE(regions[i].permissions & MappedMemoryRegion::READ);
EXPECT_TRUE(regions[i].permissions & MappedMemoryRegion::WRITE);
EXPECT_FALSE(regions[i].permissions & MappedMemoryRegion::EXECUTE);
diff --git a/chromium/base/debug/stack_trace_posix.cc b/chromium/base/debug/stack_trace_posix.cc
index 27a656e6caf..db999b783f2 100644
--- a/chromium/base/debug/stack_trace_posix.cc
+++ b/chromium/base/debug/stack_trace_posix.cc
@@ -33,8 +33,11 @@
#include <AvailabilityMacros.h>
#endif
-#include "base/debug/debugger.h"
+#if defined(OS_LINUX)
#include "base/debug/proc_maps_linux.h"
+#endif
+
+#include "base/debug/debugger.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/free_deleter.h"
diff --git a/chromium/base/debug/thread_heap_usage_tracker.cc b/chromium/base/debug/thread_heap_usage_tracker.cc
index b9018e0c705..25af20b0aa7 100644
--- a/chromium/base/debug/thread_heap_usage_tracker.cc
+++ b/chromium/base/debug/thread_heap_usage_tracker.cc
@@ -6,6 +6,7 @@
#include <stdint.h>
#include <algorithm>
+#include <new>
#include <type_traits>
#include "base/allocator/allocator_shim.h"
@@ -144,7 +145,14 @@ ThreadHeapUsage* GetOrCreateThreadUsage() {
// Prevent reentrancy due to the allocation below.
g_thread_allocator_usage.Set(kInitializingSentinel);
- allocator_usage = new ThreadHeapUsage;
+ // Delegate the allocation of the per-thread structure to the underlying
+ // heap shim, for symmetry with the deallocation. Otherwise interposing
+ // shims may mis-attribute or mis-direct this allocation.
+ const AllocatorDispatch* next = allocator_dispatch.next;
+ allocator_usage = new (next->alloc_function(next, sizeof(ThreadHeapUsage)))
+ ThreadHeapUsage();
+ static_assert(std::is_pod<ThreadHeapUsage>::value,
+                "ThreadHeapUsage must be POD");
memset(allocator_usage, 0, sizeof(*allocator_usage));
g_thread_allocator_usage.Set(allocator_usage);
}
@@ -254,7 +262,11 @@ ThreadHeapUsageTracker::GetDispatchForTesting() {
void ThreadHeapUsageTracker::EnsureTLSInitialized() {
if (!g_thread_allocator_usage.initialized()) {
g_thread_allocator_usage.Initialize([](void* allocator_usage) {
- delete static_cast<ThreadHeapUsage*>(allocator_usage);
+ // Delegate the freeing of the per-thread structure to the next-lower
+ // heap shim. Otherwise this free will re-initialize the TLS on thread
+ // exit.
+ allocator_dispatch.next->free_function(allocator_dispatch.next,
+ allocator_usage);
});
}
}
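
The pairing above, raw storage from a specific source plus placement new and a matching explicit free, is the general pattern being applied; a generic sketch of it using malloc/free (the Chromium code routes through the next AllocatorDispatch instead, and ThreadStats here is an illustrative POD, not the real ThreadHeapUsage):

#include <cstdlib>
#include <new>

struct ThreadStats {
  unsigned long long alloc_count;
  unsigned long long free_count;
};

ThreadStats* CreateThreadStats() {
  // Take storage from a specific source rather than global operator new...
  void* memory = std::malloc(sizeof(ThreadStats));
  if (!memory)
    return nullptr;
  // ...and construct in place so allocation and construction stay decoupled.
  return new (memory) ThreadStats();
}

void DestroyThreadStats(ThreadStats* stats) {
  // ThreadStats is trivially destructible, so releasing the storage through
  // the matching deallocation function is sufficient.
  std::free(stats);
}
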
diff --git a/chromium/base/feature_list.cc b/chromium/base/feature_list.cc
index 89b105defc6..234d5be190e 100644
--- a/chromium/base/feature_list.cc
+++ b/chromium/base/feature_list.cc
@@ -12,6 +12,7 @@
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/field_trial.h"
+#include "base/pickle.h"
#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
@@ -27,6 +28,42 @@ FeatureList* g_instance = nullptr;
// Tracks whether the FeatureList instance was initialized via an accessor.
bool g_initialized_from_accessor = false;
+// An allocator entry for a feature in shared memory. The FeatureEntry is
+// followed by a base::Pickle object that contains the feature and trial name.
+struct FeatureEntry {
+ // SHA1(FeatureEntry): Increment this if structure changes!
+ static constexpr uint32_t kPersistentTypeId = 0x06567CA6 + 1;
+
+ // Expected size for 32/64-bit check.
+ static constexpr size_t kExpectedInstanceSize = 8;
+
+ // Specifies whether a feature override enables or disables the feature. Same
+ // values as the OverrideState enum in feature_list.h
+ uint32_t override_state;
+
+ // Size of the pickled structure, NOT the total size of this entry.
+ uint32_t pickle_size;
+
+ // Reads the feature and trial name from the pickle. Calling this is only
+ // valid on an initialized entry that's in shared memory.
+ bool GetFeatureAndTrialName(StringPiece* feature_name,
+ StringPiece* trial_name) const {
+ const char* src =
+ reinterpret_cast<const char*>(this) + sizeof(FeatureEntry);
+
+ Pickle pickle(src, pickle_size);
+ PickleIterator pickle_iter(pickle);
+
+ if (!pickle_iter.ReadStringPiece(feature_name))
+ return false;
+
+    // Return true because we are not guaranteed to have a trial name anyway.
+ auto sink = pickle_iter.ReadStringPiece(trial_name);
+ ALLOW_UNUSED_LOCAL(sink);
+ return true;
+ }
+};
+
// Some characters are not allowed to appear in feature names or the associated
// field trial names, as they are used as special characters for command-line
// serialization. This function checks that the strings are ASCII (since they
@@ -56,6 +93,26 @@ void FeatureList::InitializeFromCommandLine(
initialized_from_command_line_ = true;
}
+void FeatureList::InitializeFromSharedMemory(
+ PersistentMemoryAllocator* allocator) {
+ DCHECK(!initialized_);
+
+ PersistentMemoryAllocator::Iterator iter(allocator);
+ const FeatureEntry* entry;
+ while ((entry = iter.GetNextOfObject<FeatureEntry>()) != nullptr) {
+ OverrideState override_state =
+ static_cast<OverrideState>(entry->override_state);
+
+ StringPiece feature_name;
+ StringPiece trial_name;
+ if (!entry->GetFeatureAndTrialName(&feature_name, &trial_name))
+ continue;
+
+ FieldTrial* trial = FieldTrialList::Find(trial_name.as_string());
+ RegisterOverride(feature_name, override_state, trial);
+ }
+}
+
bool FeatureList::IsFeatureOverriddenFromCommandLine(
const std::string& feature_name,
OverrideState state) const {
@@ -98,6 +155,30 @@ void FeatureList::RegisterFieldTrialOverride(const std::string& feature_name,
RegisterOverride(feature_name, override_state, field_trial);
}
+void FeatureList::AddFeaturesToAllocator(PersistentMemoryAllocator* allocator) {
+ DCHECK(initialized_);
+
+ for (const auto& override : overrides_) {
+ Pickle pickle;
+ pickle.WriteString(override.first);
+ if (override.second.field_trial)
+ pickle.WriteString(override.second.field_trial->trial_name());
+
+ size_t total_size = sizeof(FeatureEntry) + pickle.size();
+ FeatureEntry* entry = allocator->AllocateObject<FeatureEntry>(total_size);
+ if (!entry)
+ return;
+
+ entry->override_state = override.second.overridden_state;
+ entry->pickle_size = pickle.size();
+
+ char* dst = reinterpret_cast<char*>(entry) + sizeof(FeatureEntry);
+ memcpy(dst, pickle.data(), pickle.size());
+
+ allocator->MakeIterable(entry);
+ }
+}
+
void FeatureList::GetFeatureOverrides(std::string* enable_overrides,
std::string* disable_overrides) {
DCHECK(initialized_);
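
A FeatureEntry is just the fixed header above followed by a base::Pickle blob holding the feature name and, optionally, the trial name. A hedged sketch of that serialization round-trip in isolation, with hypothetical names and the buffer handling simplified (the real code places the pickle directly after the entry in persistent memory):

base::Pickle pickle;
pickle.WriteString("MyFeature");  // feature name (hypothetical)
pickle.WriteString("MyTrial");    // optional trial name (hypothetical)

// Reading mirrors FeatureEntry::GetFeatureAndTrialName().
base::Pickle read_back(reinterpret_cast<const char*>(pickle.data()),
                       static_cast<int>(pickle.size()));
base::PickleIterator iter(read_back);
base::StringPiece feature_name;
base::StringPiece trial_name;
bool ok = iter.ReadStringPiece(&feature_name) &&
          iter.ReadStringPiece(&trial_name);
DCHECK(ok);
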
diff --git a/chromium/base/feature_list.h b/chromium/base/feature_list.h
index 80209ba6588..09e8408aa84 100644
--- a/chromium/base/feature_list.h
+++ b/chromium/base/feature_list.h
@@ -13,6 +13,7 @@
#include "base/base_export.h"
#include "base/gtest_prod_util.h"
#include "base/macros.h"
+#include "base/metrics/persistent_memory_allocator.h"
#include "base/strings/string_piece.h"
#include "base/synchronization/lock.h"
@@ -31,6 +32,8 @@ enum FeatureState {
// for a given feature name - generally defined as a constant global variable or
// file static.
struct BASE_EXPORT Feature {
+ constexpr Feature(const char* name, FeatureState default_state)
+ : name(name), default_state(default_state) {}
// The name of the feature. This should be unique to each feature and is used
// for enabling/disabling features via command line flags and experiments.
const char* const name;
@@ -92,6 +95,11 @@ class BASE_EXPORT FeatureList {
void InitializeFromCommandLine(const std::string& enable_features,
const std::string& disable_features);
+ // Initializes feature overrides through the field trial allocator, which
+ // we're using to store the feature names, their override state, and the name
+ // of the associated field trial.
+ void InitializeFromSharedMemory(PersistentMemoryAllocator* allocator);
+
// Specifies whether a feature override enables or disables the feature.
enum OverrideState {
OVERRIDE_USE_DEFAULT,
@@ -124,6 +132,9 @@ class BASE_EXPORT FeatureList {
OverrideState override_state,
FieldTrial* field_trial);
+ // Loops through feature overrides and serializes them all into |allocator|.
+ void AddFeaturesToAllocator(PersistentMemoryAllocator* allocator);
+
// Returns comma-separated lists of feature names (in the same format that is
// accepted by InitializeFromCommandLine()) corresponding to features that
// have been overridden - either through command-line or via FieldTrials. For
@@ -180,6 +191,10 @@ class BASE_EXPORT FeatureList {
private:
FRIEND_TEST_ALL_PREFIXES(FeatureListTest, CheckFeatureIdentity);
+ FRIEND_TEST_ALL_PREFIXES(FeatureListTest,
+ StoreAndRetrieveFeaturesFromSharedMemory);
+ FRIEND_TEST_ALL_PREFIXES(FeatureListTest,
+ StoreAndRetrieveAssociatedFeaturesFromSharedMemory);
struct OverrideEntry {
// The overridden enable (on/off) state of the feature.
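
The constexpr Feature constructor added in the feature_list.h hunk above lets feature constants be declared as simple globals; a typical definition and query might look like the sketch below (the feature name is hypothetical, and the enumerator comes from the FeatureState enum in base/feature_list.h):

// Hypothetical feature definition; most features default to disabled.
const base::Feature kMyExperimentalFeature{"MyExperimentalFeature",
                                           base::FEATURE_DISABLED_BY_DEFAULT};

// Query it once a FeatureList instance has been set for the process.
if (base::FeatureList::IsEnabled(kMyExperimentalFeature)) {
  // Take the experimental code path.
}
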
diff --git a/chromium/base/feature_list_unittest.cc b/chromium/base/feature_list_unittest.cc
index 9d1dcb72f30..189e9740176 100644
--- a/chromium/base/feature_list_unittest.cc
+++ b/chromium/base/feature_list_unittest.cc
@@ -13,6 +13,7 @@
#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/field_trial.h"
+#include "base/metrics/persistent_memory_allocator.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -468,4 +469,68 @@ TEST_F(FeatureListTest, UninitializedInstance_IsEnabledReturnsFalse) {
EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
}
+TEST_F(FeatureListTest, StoreAndRetrieveFeaturesFromSharedMemory) {
+ std::unique_ptr<base::FeatureList> feature_list(new base::FeatureList);
+
+ // Create some overrides.
+ feature_list->RegisterOverride(kFeatureOffByDefaultName,
+ FeatureList::OVERRIDE_ENABLE_FEATURE, nullptr);
+ feature_list->RegisterOverride(
+ kFeatureOnByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE, nullptr);
+ feature_list->FinalizeInitialization();
+
+ // Create an allocator and store the overrides.
+ std::unique_ptr<SharedMemory> shm(new SharedMemory());
+ shm->CreateAndMapAnonymous(4 << 10);
+ SharedPersistentMemoryAllocator allocator(std::move(shm), 1, "", false);
+ feature_list->AddFeaturesToAllocator(&allocator);
+
+ std::unique_ptr<base::FeatureList> feature_list2(new base::FeatureList);
+
+ // Check that the new feature list is empty.
+ EXPECT_FALSE(feature_list2->IsFeatureOverriddenFromCommandLine(
+ kFeatureOffByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE));
+ EXPECT_FALSE(feature_list2->IsFeatureOverriddenFromCommandLine(
+ kFeatureOnByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE));
+
+ feature_list2->InitializeFromSharedMemory(&allocator);
+ // Check that the new feature list now has 2 overrides.
+ EXPECT_TRUE(feature_list2->IsFeatureOverriddenFromCommandLine(
+ kFeatureOffByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE));
+ EXPECT_TRUE(feature_list2->IsFeatureOverriddenFromCommandLine(
+ kFeatureOnByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE));
+}
+
+TEST_F(FeatureListTest, StoreAndRetrieveAssociatedFeaturesFromSharedMemory) {
+ FieldTrialList field_trial_list(nullptr);
+ std::unique_ptr<base::FeatureList> feature_list(new base::FeatureList);
+
+ // Create some overrides.
+ FieldTrial* trial1 = FieldTrialList::CreateFieldTrial("TrialExample1", "A");
+ FieldTrial* trial2 = FieldTrialList::CreateFieldTrial("TrialExample2", "B");
+ feature_list->RegisterFieldTrialOverride(
+ kFeatureOnByDefaultName, FeatureList::OVERRIDE_USE_DEFAULT, trial1);
+ feature_list->RegisterFieldTrialOverride(
+ kFeatureOffByDefaultName, FeatureList::OVERRIDE_USE_DEFAULT, trial2);
+ feature_list->FinalizeInitialization();
+
+ // Create an allocator and store the overrides.
+ std::unique_ptr<SharedMemory> shm(new SharedMemory());
+ shm->CreateAndMapAnonymous(4 << 10);
+ SharedPersistentMemoryAllocator allocator(std::move(shm), 1, "", false);
+ feature_list->AddFeaturesToAllocator(&allocator);
+
+ std::unique_ptr<base::FeatureList> feature_list2(new base::FeatureList);
+ feature_list2->InitializeFromSharedMemory(&allocator);
+ feature_list2->FinalizeInitialization();
+
+ // Check that the field trials are still associated.
+ FieldTrial* associated_trial1 =
+ feature_list2->GetAssociatedFieldTrial(kFeatureOnByDefault);
+ FieldTrial* associated_trial2 =
+ feature_list2->GetAssociatedFieldTrial(kFeatureOffByDefault);
+ EXPECT_EQ(associated_trial1, trial1);
+ EXPECT_EQ(associated_trial2, trial2);
+}
+
} // namespace base
diff --git a/chromium/base/files/file.h b/chromium/base/files/file.h
index 885e00b9577..0155c7c259e 100644
--- a/chromium/base/files/file.h
+++ b/chromium/base/files/file.h
@@ -63,28 +63,31 @@ class BASE_EXPORT File {
// FLAG_EXCLUSIVE_(READ|WRITE) only grant exclusive access to the file on
// creation on POSIX; for existing files, consider using Lock().
enum Flags {
- FLAG_OPEN = 1 << 0, // Opens a file, only if it exists.
- FLAG_CREATE = 1 << 1, // Creates a new file, only if it does not
- // already exist.
- FLAG_OPEN_ALWAYS = 1 << 2, // May create a new file.
- FLAG_CREATE_ALWAYS = 1 << 3, // May overwrite an old file.
- FLAG_OPEN_TRUNCATED = 1 << 4, // Opens a file and truncates it, only if it
- // exists.
+ FLAG_OPEN = 1 << 0, // Opens a file, only if it exists.
+ FLAG_CREATE = 1 << 1, // Creates a new file, only if it does not
+ // already exist.
+ FLAG_OPEN_ALWAYS = 1 << 2, // May create a new file.
+ FLAG_CREATE_ALWAYS = 1 << 3, // May overwrite an old file.
+ FLAG_OPEN_TRUNCATED = 1 << 4, // Opens a file and truncates it, only if it
+ // exists.
FLAG_READ = 1 << 5,
FLAG_WRITE = 1 << 6,
FLAG_APPEND = 1 << 7,
- FLAG_EXCLUSIVE_READ = 1 << 8, // EXCLUSIVE is opposite of Windows SHARE.
+ FLAG_EXCLUSIVE_READ = 1 << 8, // EXCLUSIVE is opposite of Windows SHARE.
FLAG_EXCLUSIVE_WRITE = 1 << 9,
FLAG_ASYNC = 1 << 10,
- FLAG_TEMPORARY = 1 << 11, // Used on Windows only.
- FLAG_HIDDEN = 1 << 12, // Used on Windows only.
+ FLAG_TEMPORARY = 1 << 11, // Used on Windows only.
+ FLAG_HIDDEN = 1 << 12, // Used on Windows only.
FLAG_DELETE_ON_CLOSE = 1 << 13,
- FLAG_WRITE_ATTRIBUTES = 1 << 14, // Used on Windows only.
- FLAG_SHARE_DELETE = 1 << 15, // Used on Windows only.
- FLAG_TERMINAL_DEVICE = 1 << 16, // Serial port flags.
- FLAG_BACKUP_SEMANTICS = 1 << 17, // Used on Windows only.
- FLAG_EXECUTE = 1 << 18, // Used on Windows only.
- FLAG_SEQUENTIAL_SCAN = 1 << 19, // Used on Windows only.
+ FLAG_WRITE_ATTRIBUTES = 1 << 14, // Used on Windows only.
+ FLAG_SHARE_DELETE = 1 << 15, // Used on Windows only.
+ FLAG_TERMINAL_DEVICE = 1 << 16, // Serial port flags.
+ FLAG_BACKUP_SEMANTICS = 1 << 17, // Used on Windows only.
+ FLAG_EXECUTE = 1 << 18, // Used on Windows only.
+ FLAG_SEQUENTIAL_SCAN = 1 << 19, // Used on Windows only.
+ FLAG_CAN_DELETE_ON_CLOSE = 1 << 20, // Requests permission to delete a file
+ // via DeleteOnClose() (Windows only).
+ // See DeleteOnClose() for details.
};
// This enum has been recorded in multiple histograms. If the order of the
@@ -305,6 +308,36 @@ class BASE_EXPORT File {
bool async() const { return async_; }
#if defined(OS_WIN)
+ // Sets or clears the DeleteFile disposition on the handle. Returns true if
+ // the disposition was set or cleared, as indicated by |delete_on_close|.
+ //
+ // Microsoft Windows deletes a file only when the last handle to the
+ // underlying kernel object is closed and the DeleteFile disposition has been
+ // set by any handle holder. The disposition may be set by:
+ // - Calling the Win32 DeleteFile function with the path to a file.
+ // - Opening/creating a file with FLAG_DELETE_ON_CLOSE.
+ // - Opening/creating a file with FLAG_CAN_DELETE_ON_CLOSE and subsequently
+ // calling DeleteOnClose(true).
+ //
+ // In all cases, all pre-existing handles to the file must have been opened
+ // with FLAG_SHARE_DELETE.
+ //
+ // So:
+ // - Use FLAG_SHARE_DELETE when creating/opening a file to allow another
+ // entity on the system to cause it to be deleted when it is closed. (Note:
+ // another entity can delete the file the moment after it is closed, so not
+ // using this permission doesn't provide any protections.)
+ // - Use FLAG_DELETE_ON_CLOSE for any file that is to be deleted after use.
+ // The OS will ensure it is deleted even in the face of process termination.
+ // - Use FLAG_CAN_DELETE_ON_CLOSE in conjunction with DeleteOnClose() to alter
+ // the DeleteFile disposition on an open handle. This fine-grained control
+ // allows for marking a file for deletion during processing so that it is
+ // deleted in the event of untimely process termination, and then clearing
+ // this state once the file is suitable for persistence.
+ bool DeleteOnClose(bool delete_on_close);
+#endif
+
+#if defined(OS_WIN)
static Error OSErrorToFileError(DWORD last_error);
#elif defined(OS_POSIX)
static Error OSErrorToFileError(int saved_errno);
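The comment block above lays out a three-step workflow for FLAG_CAN_DELETE_ON_CLOSE: open with the flag, mark the file for deletion while it is being produced so the OS cleans it up on untimely termination, and clear the mark once the contents are complete. A minimal sketch of that workflow (Windows-only), assuming a caller-supplied path and a hypothetical WriteContents() helper, with error handling trimmed:

#include "base/files/file.h"
#include "base/files/file_path.h"

// Hypothetical helper that produces the file's contents.
void WriteContents(base::File* file);

void WriteReplaceableFile(const base::FilePath& path) {
  base::File file(path, base::File::FLAG_CREATE | base::File::FLAG_WRITE |
                            base::File::FLAG_CAN_DELETE_ON_CLOSE);
  if (!file.IsValid())
    return;
  file.DeleteOnClose(true);   // Deleted on close unless cleared below.
  WriteContents(&file);       // May take a while; safe against termination.
  file.DeleteOnClose(false);  // Contents are complete; keep the file.
  file.Close();
}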
diff --git a/chromium/base/files/file_locking_unittest.cc b/chromium/base/files/file_locking_unittest.cc
index beb93998c4c..b709b7536c4 100644
--- a/chromium/base/files/file_locking_unittest.cc
+++ b/chromium/base/files/file_locking_unittest.cc
@@ -165,8 +165,8 @@ class FileLockingTest : public testing::Test {
void ExitChildCleanly() {
ASSERT_TRUE(SignalEvent(kSignalExit));
int rv = -1;
- ASSERT_TRUE(lock_child_.WaitForExitWithTimeout(
- TestTimeouts::action_timeout(), &rv));
+ ASSERT_TRUE(WaitForMultiprocessTestChildExit(
+ lock_child_, TestTimeouts::action_timeout(), &rv));
ASSERT_EQ(0, rv);
}
@@ -220,7 +220,7 @@ TEST_F(FileLockingTest, UnlockOnTerminate) {
StartChildAndSignalLock(kExitUnlock);
ASSERT_NE(File::FILE_OK, lock_file_.Lock());
- ASSERT_TRUE(lock_child_.Terminate(0, true));
+ ASSERT_TRUE(TerminateMultiProcessTestChild(lock_child_, 0, true));
ASSERT_EQ(File::FILE_OK, lock_file_.Lock());
ASSERT_EQ(File::FILE_OK, lock_file_.Unlock());
}
diff --git a/chromium/base/files/file_path.cc b/chromium/base/files/file_path.cc
index cff862ae19d..21a44c6dd53 100644
--- a/chromium/base/files/file_path.cc
+++ b/chromium/base/files/file_path.cc
@@ -174,6 +174,7 @@ FilePath::FilePath() {
FilePath::FilePath(const FilePath& that) : path_(that.path_) {
}
+FilePath::FilePath(FilePath&& that) = default;
FilePath::FilePath(StringPieceType path) {
path.CopyToString(&path_);
@@ -190,6 +191,8 @@ FilePath& FilePath::operator=(const FilePath& that) {
return *this;
}
+FilePath& FilePath::operator=(FilePath&& that) = default;
+
bool FilePath::operator==(const FilePath& that) const {
#if defined(FILE_PATH_USES_DRIVE_LETTERS)
return EqualDriveLetterCaseInsensitive(this->path_, that.path_);
diff --git a/chromium/base/files/file_path.h b/chromium/base/files/file_path.h
index 3234df7bfb6..02846f68921 100644
--- a/chromium/base/files/file_path.h
+++ b/chromium/base/files/file_path.h
@@ -182,6 +182,13 @@ class BASE_EXPORT FilePath {
~FilePath();
FilePath& operator=(const FilePath& that);
+ // Constructs FilePath with the contents of |that|, which is left in a valid
+ // but unspecified state.
+ FilePath(FilePath&& that);
+ // Replaces the contents with those of |that|, which is left in a valid but
+ // unspecified state.
+ FilePath& operator=(FilePath&& that);
+
bool operator==(const FilePath& that) const;
bool operator!=(const FilePath& that) const;
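A short illustration of what the new move overloads enable: handing a FilePath to a container without copying the underlying string. The function name below is illustrative.

#include <utility>
#include <vector>

#include "base/files/file_path.h"

std::vector<base::FilePath> CollectPaths() {
  std::vector<base::FilePath> paths;
  base::FilePath temp(FILE_PATH_LITERAL("/tmp/example"));
  // Uses FilePath(FilePath&&); |temp| is left valid but unspecified.
  paths.push_back(std::move(temp));
  return paths;
}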
diff --git a/chromium/base/files/file_path_watcher.h b/chromium/base/files/file_path_watcher.h
index 267c03ae00b..9e29d0a9d53 100644
--- a/chromium/base/files/file_path_watcher.h
+++ b/chromium/base/files/file_path_watcher.h
@@ -7,13 +7,15 @@
#ifndef BASE_FILES_FILE_PATH_WATCHER_H_
#define BASE_FILES_FILE_PATH_WATCHER_H_
+#include <memory>
+
#include "base/base_export.h"
#include "base/callback.h"
#include "base/files/file_path.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/sequence_checker.h"
-#include "base/single_thread_task_runner.h"
+#include "base/sequenced_task_runner.h"
namespace base {
@@ -36,9 +38,10 @@ class BASE_EXPORT FilePathWatcher {
typedef base::Callback<void(const FilePath& path, bool error)> Callback;
// Used internally to encapsulate different members on different platforms.
- class PlatformDelegate : public base::RefCountedThreadSafe<PlatformDelegate> {
+ class PlatformDelegate {
public:
PlatformDelegate();
+ virtual ~PlatformDelegate();
// Start watching for the given |path| and notify |delegate| about changes.
virtual bool Watch(const FilePath& path,
@@ -47,20 +50,16 @@ class BASE_EXPORT FilePathWatcher {
// Stop watching. This is called from FilePathWatcher's dtor in order to
// allow to shut down properly while the object is still alive.
- // It can be called from any thread.
virtual void Cancel() = 0;
protected:
- friend class base::RefCountedThreadSafe<PlatformDelegate>;
friend class FilePathWatcher;
- virtual ~PlatformDelegate();
-
- scoped_refptr<base::SingleThreadTaskRunner> task_runner() const {
+ scoped_refptr<SequencedTaskRunner> task_runner() const {
return task_runner_;
}
- void set_task_runner(scoped_refptr<base::SingleThreadTaskRunner> runner) {
+ void set_task_runner(scoped_refptr<SequencedTaskRunner> runner) {
task_runner_ = std::move(runner);
}
@@ -74,32 +73,32 @@ class BASE_EXPORT FilePathWatcher {
}
private:
- scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+ scoped_refptr<SequencedTaskRunner> task_runner_;
bool cancelled_;
+
+ DISALLOW_COPY_AND_ASSIGN(PlatformDelegate);
};
FilePathWatcher();
~FilePathWatcher();
- // A callback that always cleans up the PlatformDelegate, either when executed
- // or when deleted without having been executed at all, as can happen during
- // shutdown.
- static void CancelWatch(const scoped_refptr<PlatformDelegate>& delegate);
-
// Returns true if the platform and OS version support recursive watches.
static bool RecursiveWatchAvailable();
// Invokes |callback| whenever updates to |path| are detected. This should be
- // called at most once, and from a MessageLoop of TYPE_IO. Set |recursive| to
- // true, to watch |path| and its children. The callback will be invoked on
- // the same loop. Returns true on success.
+ // called at most once. Set |recursive| to true to watch |path| and its
+ // children. The callback will be invoked on the same sequence. Returns true
+ // on success.
+ //
+ // On POSIX, this must be called from a thread that supports
+ // FileDescriptorWatcher.
//
// Recursive watch is not supported on all platforms and file systems.
// Watch() will return false in the case of failure.
bool Watch(const FilePath& path, bool recursive, const Callback& callback);
private:
- scoped_refptr<PlatformDelegate> impl_;
+ std::unique_ptr<PlatformDelegate> impl_;
SequenceChecker sequence_checker_;
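A minimal usage sketch of the Watch() API documented above. Everything other than the FilePathWatcher API itself (the callback and wrapper names) is illustrative; the watcher must outlive the watch and, per the comment above, be used from a suitable sequence.

#include "base/bind.h"
#include "base/files/file_path.h"
#include "base/files/file_path_watcher.h"

namespace {

void OnPathChanged(const base::FilePath& path, bool error) {
  // React to the change; |error| is true if the watch failed.
}

}  // namespace

bool WatchConfigFile(base::FilePathWatcher* watcher,
                     const base::FilePath& path) {
  return watcher->Watch(path, false /* recursive */,
                        base::Bind(&OnPathChanged));
}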
diff --git a/chromium/base/files/file_path_watcher_fsevents.cc b/chromium/base/files/file_path_watcher_fsevents.cc
index e9d25080e7d..e9a87b0e052 100644
--- a/chromium/base/files/file_path_watcher_fsevents.cc
+++ b/chromium/base/files/file_path_watcher_fsevents.cc
@@ -13,10 +13,8 @@
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/mac/scoped_cftyperef.h"
-#include "base/macros.h"
-#include "base/message_loop/message_loop.h"
#include "base/strings/stringprintf.h"
-#include "base/threading/thread_task_runner_handle.h"
+#include "base/threading/sequenced_task_runner_handle.h"
namespace base {
@@ -70,16 +68,21 @@ FilePath ResolvePath(const FilePath& path) {
FilePathWatcherFSEvents::FilePathWatcherFSEvents()
: queue_(dispatch_queue_create(
- base::StringPrintf(
- "org.chromium.base.FilePathWatcher.%p", this).c_str(),
+ base::StringPrintf("org.chromium.base.FilePathWatcher.%p", this)
+ .c_str(),
DISPATCH_QUEUE_SERIAL)),
- fsevent_stream_(nullptr) {
+ fsevent_stream_(nullptr),
+ weak_factory_(this) {}
+
+FilePathWatcherFSEvents::~FilePathWatcherFSEvents() {
+ DCHECK(!task_runner() || task_runner()->RunsTasksOnCurrentThread());
+ DCHECK(callback_.is_null())
+ << "Cancel() must be called before FilePathWatcher is destroyed.";
}
bool FilePathWatcherFSEvents::Watch(const FilePath& path,
bool recursive,
const FilePathWatcher::Callback& callback) {
- DCHECK(MessageLoopForIO::current());
DCHECK(!callback.is_null());
DCHECK(callback_.is_null());
@@ -88,7 +91,7 @@ bool FilePathWatcherFSEvents::Watch(const FilePath& path,
if (!recursive)
return false;
- set_task_runner(ThreadTaskRunnerHandle::Get());
+ set_task_runner(SequencedTaskRunnerHandle::Get());
callback_ = callback;
FSEventStreamEventId start_event = FSEventsGetCurrentEventId();
@@ -107,11 +110,15 @@ void FilePathWatcherFSEvents::Cancel() {
set_cancelled();
callback_.Reset();
- // Switch to the dispatch queue to tear down the event stream. As the queue
- // is owned by this object, and this method is called from the destructor,
- // execute the block synchronously.
+ // Switch to the dispatch queue to tear down the event stream. As the queue is
+ // owned by |this|, and this method is called from the destructor, execute the
+ // block synchronously.
dispatch_sync(queue_, ^{
- CancelOnMessageLoopThread();
+ if (fsevent_stream_) {
+ DestroyEventStream();
+ target_.clear();
+ resolved_target_.clear();
+ }
});
}
@@ -142,31 +149,40 @@ void FilePathWatcherFSEvents::FSEventsCallback(
// the directory to be watched gets created.
if (root_changed) {
// Resetting the event stream from within the callback fails (FSEvents spews
- // bad file descriptor errors), so post a task to do the reset.
- dispatch_async(watcher->queue_, ^{
- watcher->UpdateEventStream(root_change_at);
- });
+ // bad file descriptor errors), so do the reset asynchronously.
+ //
+ // We can't dispatch_async a call to UpdateEventStream() directly because
+ // there would be no guarantee that |watcher| still exists when it runs.
+ //
+ // Instead, bounce on task_runner() and use a WeakPtr to verify that
+ // |watcher| still exists. If it does, dispatch_async a call to
+ // UpdateEventStream(). Because the destructor of |watcher| runs on
+ // task_runner() and calls dispatch_sync, it is guaranteed that |watcher|
+ // still exists when UpdateEventStream() runs.
+ watcher->task_runner()->PostTask(
+ FROM_HERE, Bind(
+ [](WeakPtr<FilePathWatcherFSEvents> weak_watcher,
+ FSEventStreamEventId root_change_at) {
+ if (!weak_watcher)
+ return;
+ FilePathWatcherFSEvents* watcher = weak_watcher.get();
+ dispatch_async(watcher->queue_, ^{
+ watcher->UpdateEventStream(root_change_at);
+ });
+ },
+ watcher->weak_factory_.GetWeakPtr(), root_change_at));
}
watcher->OnFilePathsChanged(paths);
}
-FilePathWatcherFSEvents::~FilePathWatcherFSEvents() {
- // This method may be called on either the libdispatch or task_runner()
- // thread. Checking callback_ on the libdispatch thread here is safe because
- // it is executing in a task posted by Cancel() which first reset callback_.
- // PostTask forms a sufficient memory barrier to ensure that the value is
- // consistent on the target thread.
- DCHECK(callback_.is_null())
- << "Cancel() must be called before FilePathWatcher is destroyed.";
-}
-
void FilePathWatcherFSEvents::OnFilePathsChanged(
const std::vector<FilePath>& paths) {
DCHECK(!resolved_target_.empty());
task_runner()->PostTask(
- FROM_HERE, Bind(&FilePathWatcherFSEvents::DispatchEvents, this, paths,
- target_, resolved_target_));
+ FROM_HERE,
+ Bind(&FilePathWatcherFSEvents::DispatchEvents, weak_factory_.GetWeakPtr(),
+ paths, target_, resolved_target_));
}
void FilePathWatcherFSEvents::DispatchEvents(const std::vector<FilePath>& paths,
@@ -187,18 +203,6 @@ void FilePathWatcherFSEvents::DispatchEvents(const std::vector<FilePath>& paths,
}
}
-void FilePathWatcherFSEvents::CancelOnMessageLoopThread() {
- // For all other implementations, the "message loop thread" is the IO thread,
- // as returned by task_runner(). This implementation, however, needs to
- // cancel pending work on the Dispatch Queue thread.
-
- if (fsevent_stream_) {
- DestroyEventStream();
- target_.clear();
- resolved_target_.clear();
- }
-}
-
void FilePathWatcherFSEvents::UpdateEventStream(
FSEventStreamEventId start_event) {
// It can happen that the watcher gets canceled while tasks that call this
@@ -234,8 +238,9 @@ void FilePathWatcherFSEvents::UpdateEventStream(
FSEventStreamSetDispatchQueue(fsevent_stream_, queue_);
if (!FSEventStreamStart(fsevent_stream_)) {
- task_runner()->PostTask(
- FROM_HERE, Bind(&FilePathWatcherFSEvents::ReportError, this, target_));
+ task_runner()->PostTask(FROM_HERE,
+ Bind(&FilePathWatcherFSEvents::ReportError,
+ weak_factory_.GetWeakPtr(), target_));
}
}
@@ -244,8 +249,9 @@ bool FilePathWatcherFSEvents::ResolveTargetPath() {
bool changed = resolved != resolved_target_;
resolved_target_ = resolved;
if (resolved_target_.empty()) {
- task_runner()->PostTask(
- FROM_HERE, Bind(&FilePathWatcherFSEvents::ReportError, this, target_));
+ task_runner()->PostTask(FROM_HERE,
+ Bind(&FilePathWatcherFSEvents::ReportError,
+ weak_factory_.GetWeakPtr(), target_));
}
return changed;
}
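The WeakPtr bounce described in the FSEventsCallback comment above is a general pattern: work arriving on a foreign thread is posted to the owning sequence through a WeakPtr-bound task, so it is silently dropped if the object has already been destroyed. A generic sketch with illustrative class and method names (not Chromium code):

#include <utility>

#include "base/bind.h"
#include "base/location.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "base/sequenced_task_runner.h"

class Widget {
 public:
  explicit Widget(scoped_refptr<base::SequencedTaskRunner> task_runner)
      : task_runner_(std::move(task_runner)), weak_factory_(this) {}

  // May be called from any thread.
  void OnExternalEvent(int value) {
    task_runner_->PostTask(
        FROM_HERE,
        base::Bind(&Widget::HandleEvent, weak_factory_.GetWeakPtr(), value));
  }

 private:
  // Runs on |task_runner_|, and only if |this| is still alive.
  void HandleEvent(int value) {}

  scoped_refptr<base::SequencedTaskRunner> task_runner_;
  base::WeakPtrFactory<Widget> weak_factory_;  // Must be the last member.
};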
diff --git a/chromium/base/files/file_path_watcher_fsevents.h b/chromium/base/files/file_path_watcher_fsevents.h
index fbcca1f8101..dcdf2fbf9d1 100644
--- a/chromium/base/files/file_path_watcher_fsevents.h
+++ b/chromium/base/files/file_path_watcher_fsevents.h
@@ -14,6 +14,7 @@
#include "base/files/file_path_watcher.h"
#include "base/mac/scoped_dispatch_object.h"
#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
namespace base {
@@ -26,6 +27,7 @@ namespace base {
class FilePathWatcherFSEvents : public FilePathWatcher::PlatformDelegate {
public:
FilePathWatcherFSEvents();
+ ~FilePathWatcherFSEvents() override;
// FilePathWatcher::PlatformDelegate overrides.
bool Watch(const FilePath& path,
@@ -41,8 +43,6 @@ class FilePathWatcherFSEvents : public FilePathWatcher::PlatformDelegate {
const FSEventStreamEventFlags flags[],
const FSEventStreamEventId event_ids[]);
- ~FilePathWatcherFSEvents() override;
-
// Called from FSEventsCallback whenever there is a change to the paths.
void OnFilePathsChanged(const std::vector<FilePath>& paths);
@@ -53,9 +53,6 @@ class FilePathWatcherFSEvents : public FilePathWatcher::PlatformDelegate {
const FilePath& target,
const FilePath& resolved_target);
- // Cleans up and stops the event stream.
- void CancelOnMessageLoopThread();
-
// (Re-)Initialize the event stream to start reporting events from
// |start_event|.
void UpdateEventStream(FSEventStreamEventId start_event);
@@ -92,6 +89,8 @@ class FilePathWatcherFSEvents : public FilePathWatcher::PlatformDelegate {
// (Only accessed from the libdispatch queue.)
FSEventStreamRef fsevent_stream_;
+ WeakPtrFactory<FilePathWatcherFSEvents> weak_factory_;
+
DISALLOW_COPY_AND_ASSIGN(FilePathWatcherFSEvents);
};
diff --git a/chromium/base/files/file_path_watcher_kqueue.cc b/chromium/base/files/file_path_watcher_kqueue.cc
index 8a7b5c54fd7..a28726acb0c 100644
--- a/chromium/base/files/file_path_watcher_kqueue.cc
+++ b/chromium/base/files/file_path_watcher_kqueue.cc
@@ -12,7 +12,7 @@
#include "base/files/file_util.h"
#include "base/logging.h"
#include "base/strings/stringprintf.h"
-#include "base/threading/thread_task_runner_handle.h"
+#include "base/threading/sequenced_task_runner_handle.h"
// On some platforms these are not defined.
#if !defined(EV_RECEIPT)
@@ -26,7 +26,9 @@ namespace base {
FilePathWatcherKQueue::FilePathWatcherKQueue() : kqueue_(-1) {}
-FilePathWatcherKQueue::~FilePathWatcherKQueue() {}
+FilePathWatcherKQueue::~FilePathWatcherKQueue() {
+ DCHECK(!task_runner() || task_runner()->RunsTasksOnCurrentThread());
+}
void FilePathWatcherKQueue::ReleaseEvent(struct kevent& event) {
CloseFileDescriptor(&event.ident);
@@ -229,10 +231,6 @@ bool FilePathWatcherKQueue::UpdateWatches(bool* target_file_affected) {
return true;
}
-void FilePathWatcherKQueue::WillDestroyCurrentMessageLoop() {
- CancelOnMessageLoopThread();
-}
-
bool FilePathWatcherKQueue::Watch(const FilePath& path,
bool recursive,
const FilePathWatcher::Callback& callback) {
@@ -245,8 +243,7 @@ bool FilePathWatcherKQueue::Watch(const FilePath& path,
callback_ = callback;
target_ = path;
- MessageLoop::current()->AddDestructionObserver(this);
- set_task_runner(ThreadTaskRunnerHandle::Get());
+ set_task_runner(SequencedTaskRunnerHandle::Get());
kqueue_ = kqueue();
if (kqueue_ == -1) {
@@ -270,11 +267,13 @@ bool FilePathWatcherKQueue::Watch(const FilePath& path,
return false;
}
- // This creates an ownership cycle (|this| owns |kqueue_watch_controller_|
- // which owns a callback which owns |this|). The cycle is broken when
- // |kqueue_watch_controller_| is reset in CancelOnMessageLoopThread().
+ // It's safe to use Unretained() because the watch is cancelled and the
+ // callback cannot be invoked after |kqueue_watch_controller_| (which is a
+ // member of |this|) has been deleted.
kqueue_watch_controller_ = FileDescriptorWatcher::WatchReadable(
- kqueue_, Bind(&FilePathWatcherKQueue::OnKQueueReadable, this));
+ kqueue_,
+ Bind(&FilePathWatcherKQueue::OnKQueueReadable, Unretained(this)));
+
return true;
}
@@ -283,16 +282,23 @@ void FilePathWatcherKQueue::Cancel() {
set_cancelled();
return;
}
- if (!task_runner()->BelongsToCurrentThread()) {
- task_runner()->PostTask(FROM_HERE,
- base::Bind(&FilePathWatcherKQueue::Cancel, this));
- return;
+
+ DCHECK(task_runner()->RunsTasksOnCurrentThread());
+ if (!is_cancelled()) {
+ set_cancelled();
+ kqueue_watch_controller_.reset();
+ if (IGNORE_EINTR(close(kqueue_)) != 0) {
+ DPLOG(ERROR) << "close kqueue";
+ }
+ kqueue_ = -1;
+ std::for_each(events_.begin(), events_.end(), ReleaseEvent);
+ events_.clear();
+ callback_.Reset();
}
- CancelOnMessageLoopThread();
}
void FilePathWatcherKQueue::OnKQueueReadable() {
- DCHECK(task_runner()->BelongsToCurrentThread());
+ DCHECK(task_runner()->RunsTasksOnCurrentThread());
DCHECK(events_.size());
// Request the file system update notifications that have occurred and return
@@ -363,20 +369,4 @@ void FilePathWatcherKQueue::OnKQueueReadable() {
}
}
-void FilePathWatcherKQueue::CancelOnMessageLoopThread() {
- DCHECK(!task_runner() || task_runner()->BelongsToCurrentThread());
- if (!is_cancelled()) {
- set_cancelled();
- kqueue_watch_controller_.reset();
- if (IGNORE_EINTR(close(kqueue_)) != 0) {
- DPLOG(ERROR) << "close kqueue";
- }
- kqueue_ = -1;
- std::for_each(events_.begin(), events_.end(), ReleaseEvent);
- events_.clear();
- MessageLoop::current()->RemoveDestructionObserver(this);
- callback_.Reset();
- }
-}
-
} // namespace base
diff --git a/chromium/base/files/file_path_watcher_kqueue.h b/chromium/base/files/file_path_watcher_kqueue.h
index 53205dd7ea5..ef79be5596b 100644
--- a/chromium/base/files/file_path_watcher_kqueue.h
+++ b/chromium/base/files/file_path_watcher_kqueue.h
@@ -14,8 +14,6 @@
#include "base/files/file_path.h"
#include "base/files/file_path_watcher.h"
#include "base/macros.h"
-#include "base/message_loop/message_loop.h"
-#include "base/single_thread_task_runner.h"
namespace base {
@@ -30,13 +28,10 @@ namespace base {
// detect the creation and deletion of files, just not the modification of
// files. It does however detect the attribute changes that the FSEvents impl
// would miss.
-class FilePathWatcherKQueue : public FilePathWatcher::PlatformDelegate,
- public MessageLoop::DestructionObserver {
+class FilePathWatcherKQueue : public FilePathWatcher::PlatformDelegate {
public:
FilePathWatcherKQueue();
-
- // MessageLoop::DestructionObserver overrides.
- void WillDestroyCurrentMessageLoop() override;
+ ~FilePathWatcherKQueue() override;
// FilePathWatcher::PlatformDelegate overrides.
bool Watch(const FilePath& path,
@@ -44,9 +39,6 @@ class FilePathWatcherKQueue : public FilePathWatcher::PlatformDelegate,
const FilePathWatcher::Callback& callback) override;
void Cancel() override;
- protected:
- ~FilePathWatcherKQueue() override;
-
private:
class EventData {
public:
@@ -61,9 +53,6 @@ class FilePathWatcherKQueue : public FilePathWatcher::PlatformDelegate,
// Called when data is available in |kqueue_|.
void OnKQueueReadable();
- // Can only be called on |io_task_runner_|'s thread.
- void CancelOnMessageLoopThread();
-
// Returns true if the kevent values are error free.
bool AreKeventValuesValid(struct kevent* kevents, int count);
diff --git a/chromium/base/files/file_path_watcher_linux.cc b/chromium/base/files/file_path_watcher_linux.cc
index 3b0ff6236ec..9589e9b788d 100644
--- a/chromium/base/files/file_path_watcher_linux.cc
+++ b/chromium/base/files/file_path_watcher_linux.cc
@@ -28,12 +28,14 @@
#include "base/location.h"
#include "base/logging.h"
#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/weak_ptr.h"
#include "base/posix/eintr_wrapper.h"
#include "base/single_thread_task_runner.h"
#include "base/stl_util.h"
#include "base/synchronization/lock.h"
+#include "base/threading/sequenced_task_runner_handle.h"
#include "base/threading/thread.h"
-#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
namespace base {
@@ -91,6 +93,7 @@ class InotifyReader {
class FilePathWatcherImpl : public FilePathWatcher::PlatformDelegate {
public:
FilePathWatcherImpl();
+ ~FilePathWatcherImpl() override;
// Called for each event coming from the watch. |fired_watch| identifies the
// watch that fired, |child| indicates what has changed, and is relative to
@@ -105,13 +108,13 @@ class FilePathWatcherImpl : public FilePathWatcher::PlatformDelegate {
bool deleted,
bool is_dir);
- protected:
- ~FilePathWatcherImpl() override {
- in_destructor_ = true;
- CancelOnMessageLoopThreadOrInDestructor();
- }
-
private:
+ void OnFilePathChangedOnOriginSequence(InotifyReader::Watch fired_watch,
+ const FilePath::StringType& child,
+ bool created,
+ bool deleted,
+ bool is_dir);
+
// Start watching |path| for changes and notify |delegate| on each change.
// Returns true if watch for |path| has been added successfully.
bool Watch(const FilePath& path,
@@ -120,7 +123,6 @@ class FilePathWatcherImpl : public FilePathWatcher::PlatformDelegate {
// Cancel the watch. This unregisters the instance with InotifyReader.
void Cancel() override;
- void CancelOnMessageLoopThreadOrInDestructor();
// Inotify watches are installed for all directory components of |target_|.
// A WatchEntry instance holds:
@@ -185,7 +187,7 @@ class FilePathWatcherImpl : public FilePathWatcher::PlatformDelegate {
hash_map<InotifyReader::Watch, FilePath> recursive_paths_by_watch_;
std::map<FilePath, InotifyReader::Watch> recursive_watches_by_path_;
- bool in_destructor_ = false;
+ WeakPtrFactory<FilePathWatcherImpl> weak_factory_;
DISALLOW_COPY_AND_ASSIGN(FilePathWatcherImpl);
};
@@ -312,7 +314,10 @@ void InotifyReader::OnInotifyEvent(const inotify_event* event) {
}
FilePathWatcherImpl::FilePathWatcherImpl()
- : recursive_(false) {
+ : recursive_(false), weak_factory_(this) {}
+
+FilePathWatcherImpl::~FilePathWatcherImpl() {
+ DCHECK(!task_runner() || task_runner()->RunsTasksOnCurrentThread());
}
void FilePathWatcherImpl::OnFilePathChanged(InotifyReader::Watch fired_watch,
@@ -320,22 +325,25 @@ void FilePathWatcherImpl::OnFilePathChanged(InotifyReader::Watch fired_watch,
bool created,
bool deleted,
bool is_dir) {
- if (!task_runner()->BelongsToCurrentThread()) {
- // Switch to task_runner() to access |watches_| safely.
- task_runner()->PostTask(FROM_HERE,
- Bind(&FilePathWatcherImpl::OnFilePathChanged, this,
- fired_watch, child, created, deleted, is_dir));
- return;
- }
-
- // Check to see if CancelOnMessageLoopThreadOrInDestructor() has already been
- // called. May happen when code flow reaches here from the PostTask() above.
- if (watches_.empty()) {
- DCHECK(target_.empty());
- return;
- }
+ DCHECK(!task_runner()->RunsTasksOnCurrentThread());
+
+ // This method is invoked on the Inotify thread. Switch to task_runner() to
+ // access |watches_| safely. Use a WeakPtr to prevent the callback from
+ // running after |this| is destroyed (i.e. after the watch is cancelled).
+ task_runner()->PostTask(
+ FROM_HERE, Bind(&FilePathWatcherImpl::OnFilePathChangedOnOriginSequence,
+ weak_factory_.GetWeakPtr(), fired_watch, child, created,
+ deleted, is_dir));
+}
- DCHECK(MessageLoopForIO::current());
+void FilePathWatcherImpl::OnFilePathChangedOnOriginSequence(
+ InotifyReader::Watch fired_watch,
+ const FilePath::StringType& child,
+ bool created,
+ bool deleted,
+ bool is_dir) {
+ DCHECK(task_runner()->RunsTasksOnCurrentThread());
+ DCHECK(!watches_.empty());
DCHECK(HasValidWatchVector());
// Used below to avoid multiple recursive updates.
@@ -420,9 +428,8 @@ bool FilePathWatcherImpl::Watch(const FilePath& path,
bool recursive,
const FilePathWatcher::Callback& callback) {
DCHECK(target_.empty());
- DCHECK(MessageLoopForIO::current());
- set_task_runner(ThreadTaskRunnerHandle::Get());
+ set_task_runner(SequencedTaskRunnerHandle::Get());
callback_ = callback;
target_ = path;
recursive_ = recursive;
@@ -438,47 +445,29 @@ bool FilePathWatcherImpl::Watch(const FilePath& path,
}
void FilePathWatcherImpl::Cancel() {
- if (callback_.is_null()) {
- // Watch was never called, or the message_loop() thread is already gone.
+ if (!callback_) {
+ // Watch() was never called.
set_cancelled();
return;
}
- // Switch to the task_runner() if necessary so we can access |watches_|.
- if (!task_runner()->BelongsToCurrentThread()) {
- task_runner()->PostTask(
- FROM_HERE,
- Bind(&FilePathWatcherImpl::CancelOnMessageLoopThreadOrInDestructor,
- this));
- } else {
- CancelOnMessageLoopThreadOrInDestructor();
- }
-}
-
-void FilePathWatcherImpl::CancelOnMessageLoopThreadOrInDestructor() {
- DCHECK(in_destructor_ || task_runner()->BelongsToCurrentThread());
-
- if (is_cancelled())
- return;
+ DCHECK(task_runner()->RunsTasksOnCurrentThread());
+ DCHECK(!is_cancelled());
set_cancelled();
-
- if (!callback_.is_null())
- callback_.Reset();
+ callback_.Reset();
for (size_t i = 0; i < watches_.size(); ++i)
g_inotify_reader.Get().RemoveWatch(watches_[i].watch, this);
watches_.clear();
target_.clear();
-
- if (recursive_)
- RemoveRecursiveWatches();
+ RemoveRecursiveWatches();
}
void FilePathWatcherImpl::UpdateWatches() {
// Ensure this runs on the task_runner() exclusively in order to avoid
// concurrency issues.
- DCHECK(task_runner()->BelongsToCurrentThread());
+ DCHECK(task_runner()->RunsTasksOnCurrentThread());
DCHECK(HasValidWatchVector());
// Walk the list of watches and update them as we go.
@@ -658,7 +647,7 @@ bool FilePathWatcherImpl::HasValidWatchVector() const {
FilePathWatcher::FilePathWatcher() {
sequence_checker_.DetachFromSequence();
- impl_ = new FilePathWatcherImpl();
+ impl_ = MakeUnique<FilePathWatcherImpl>();
}
} // namespace base
diff --git a/chromium/base/files/file_path_watcher_mac.cc b/chromium/base/files/file_path_watcher_mac.cc
index d59ca2156be..2520b9288ae 100644
--- a/chromium/base/files/file_path_watcher_mac.cc
+++ b/chromium/base/files/file_path_watcher_mac.cc
@@ -2,8 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <memory>
+
#include "base/files/file_path_watcher.h"
#include "base/files/file_path_watcher_kqueue.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
#include "build/build_config.h"
#if !defined(OS_IOS)
@@ -16,6 +20,9 @@ namespace {
class FilePathWatcherImpl : public FilePathWatcher::PlatformDelegate {
public:
+ FilePathWatcherImpl() = default;
+ ~FilePathWatcherImpl() override = default;
+
bool Watch(const FilePath& path,
bool recursive,
const FilePathWatcher::Callback& callback) override {
@@ -25,10 +32,10 @@ class FilePathWatcherImpl : public FilePathWatcher::PlatformDelegate {
if (!FilePathWatcher::RecursiveWatchAvailable())
return false;
#if !defined(OS_IOS)
- impl_ = new FilePathWatcherFSEvents();
+ impl_ = MakeUnique<FilePathWatcherFSEvents>();
#endif // OS_IOS
} else {
- impl_ = new FilePathWatcherKQueue();
+ impl_ = MakeUnique<FilePathWatcherKQueue>();
}
DCHECK(impl_.get());
return impl_->Watch(path, recursive, callback);
@@ -40,17 +47,17 @@ class FilePathWatcherImpl : public FilePathWatcher::PlatformDelegate {
set_cancelled();
}
- protected:
- ~FilePathWatcherImpl() override {}
+ private:
+ std::unique_ptr<PlatformDelegate> impl_;
- scoped_refptr<PlatformDelegate> impl_;
+ DISALLOW_COPY_AND_ASSIGN(FilePathWatcherImpl);
};
} // namespace
FilePathWatcher::FilePathWatcher() {
sequence_checker_.DetachFromSequence();
- impl_ = new FilePathWatcherImpl();
+ impl_ = MakeUnique<FilePathWatcherImpl>();
}
} // namespace base
diff --git a/chromium/base/files/file_path_watcher_stub.cc b/chromium/base/files/file_path_watcher_stub.cc
index c224e379f37..ae22c1ff06a 100644
--- a/chromium/base/files/file_path_watcher_stub.cc
+++ b/chromium/base/files/file_path_watcher_stub.cc
@@ -7,12 +7,18 @@
#include "base/files/file_path_watcher.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+
namespace base {
namespace {
class FilePathWatcherImpl : public FilePathWatcher::PlatformDelegate {
public:
+ FilePathWatcherImpl() = default;
+ ~FilePathWatcherImpl() override = default;
+
bool Watch(const FilePath& path,
bool recursive,
const FilePathWatcher::Callback& callback) override {
@@ -21,15 +27,15 @@ class FilePathWatcherImpl : public FilePathWatcher::PlatformDelegate {
void Cancel() override {}
- protected:
- ~FilePathWatcherImpl() override {}
+ private:
+ DISALLOW_COPY_AND_ASSIGN(FilePathWatcherImpl);
};
} // namespace
FilePathWatcher::FilePathWatcher() {
sequence_checker_.DetachFromSequence();
- impl_ = new FilePathWatcherImpl();
+ impl_ = MakeUnique<FilePathWatcherImpl>();
}
} // namespace base
diff --git a/chromium/base/files/file_path_watcher_win.cc b/chromium/base/files/file_path_watcher_win.cc
index 48470b6b3db..fba6625fb8c 100644
--- a/chromium/base/files/file_path_watcher_win.cc
+++ b/chromium/base/files/file_path_watcher_win.cc
@@ -10,9 +10,8 @@
#include "base/files/file_util.h"
#include "base/logging.h"
#include "base/macros.h"
-#include "base/memory/ref_counted.h"
-#include "base/message_loop/message_loop.h"
-#include "base/threading/thread_task_runner_handle.h"
+#include "base/memory/ptr_util.h"
+#include "base/threading/sequenced_task_runner_handle.h"
#include "base/time/time.h"
#include "base/win/object_watcher.h"
@@ -21,30 +20,23 @@ namespace base {
namespace {
class FilePathWatcherImpl : public FilePathWatcher::PlatformDelegate,
- public base::win::ObjectWatcher::Delegate,
- public MessageLoop::DestructionObserver {
+ public base::win::ObjectWatcher::Delegate {
public:
FilePathWatcherImpl()
: handle_(INVALID_HANDLE_VALUE),
recursive_watch_(false) {}
+ ~FilePathWatcherImpl() override;
- // FilePathWatcher::PlatformDelegate overrides.
+ // FilePathWatcher::PlatformDelegate:
bool Watch(const FilePath& path,
bool recursive,
const FilePathWatcher::Callback& callback) override;
void Cancel() override;
- // Deletion of the FilePathWatcher will call Cancel() to dispose of this
- // object in the right thread. This also observes destruction of the required
- // cleanup thread, in case it quits before Cancel() is called.
- void WillDestroyCurrentMessageLoop() override;
-
- // Callback from MessageLoopForIO.
+ // base::win::ObjectWatcher::Delegate:
void OnObjectSignaled(HANDLE object) override;
private:
- ~FilePathWatcherImpl() override {}
-
// Set up a watch handle for directory |dir|. Set |recursive| to true to watch
// the directory sub trees. Returns true if no fatal error occurs. |handle|
// will receive the handle value if |dir| is watchable, otherwise
@@ -59,15 +51,15 @@ class FilePathWatcherImpl : public FilePathWatcher::PlatformDelegate,
// Destroy the watch handle.
void DestroyWatch();
- // Cleans up and stops observing the |task_runner_| thread.
- void CancelOnMessageLoopThread();
-
// Callback to notify upon changes.
FilePathWatcher::Callback callback_;
// Path we're supposed to watch (passed to callback).
FilePath target_;
+ // Set to true in the destructor.
+ bool* was_deleted_ptr_ = nullptr;
+
// Handle for FindFirstChangeNotification.
HANDLE handle_;
@@ -88,16 +80,21 @@ class FilePathWatcherImpl : public FilePathWatcher::PlatformDelegate,
DISALLOW_COPY_AND_ASSIGN(FilePathWatcherImpl);
};
+FilePathWatcherImpl::~FilePathWatcherImpl() {
+ DCHECK(!task_runner() || task_runner()->RunsTasksOnCurrentThread());
+ if (was_deleted_ptr_)
+ *was_deleted_ptr_ = true;
+}
+
bool FilePathWatcherImpl::Watch(const FilePath& path,
bool recursive,
const FilePathWatcher::Callback& callback) {
DCHECK(target_.value().empty()); // Can only watch one path.
- set_task_runner(ThreadTaskRunnerHandle::Get());
+ set_task_runner(SequencedTaskRunnerHandle::Get());
callback_ = callback;
target_ = path;
recursive_watch_ = recursive;
- MessageLoop::current()->AddDestructionObserver(this);
File::Info file_info;
if (GetFileInfo(target_, &file_info)) {
@@ -120,36 +117,22 @@ void FilePathWatcherImpl::Cancel() {
return;
}
- // Switch to the file thread if necessary so we can stop |watcher_|.
- if (!task_runner()->BelongsToCurrentThread()) {
- task_runner()->PostTask(
- FROM_HERE, Bind(&FilePathWatcherImpl::CancelOnMessageLoopThread, this));
- } else {
- CancelOnMessageLoopThread();
- }
-}
-
-void FilePathWatcherImpl::CancelOnMessageLoopThread() {
- DCHECK(task_runner()->BelongsToCurrentThread());
+ DCHECK(task_runner()->RunsTasksOnCurrentThread());
set_cancelled();
if (handle_ != INVALID_HANDLE_VALUE)
DestroyWatch();
- if (!callback_.is_null()) {
- MessageLoop::current()->RemoveDestructionObserver(this);
- callback_.Reset();
- }
-}
-
-void FilePathWatcherImpl::WillDestroyCurrentMessageLoop() {
- CancelOnMessageLoopThread();
+ callback_.Reset();
}
void FilePathWatcherImpl::OnObjectSignaled(HANDLE object) {
- DCHECK(object == handle_);
- // Make sure we stay alive through the body of this function.
- scoped_refptr<FilePathWatcherImpl> keep_alive(this);
+ DCHECK(task_runner()->RunsTasksOnCurrentThread());
+ DCHECK_EQ(object, handle_);
+ DCHECK(!was_deleted_ptr_);
+
+ bool was_deleted = false;
+ was_deleted_ptr_ = &was_deleted;
if (!UpdateWatch()) {
callback_.Run(target_, true /* error */);
@@ -199,8 +182,10 @@ void FilePathWatcherImpl::OnObjectSignaled(HANDLE object) {
}
// The watch may have been cancelled by the callback.
- if (handle_ != INVALID_HANDLE_VALUE)
+ if (!was_deleted) {
watcher_.StartWatchingOnce(handle_, this);
+ was_deleted_ptr_ = nullptr;
+ }
}
// static
@@ -297,7 +282,7 @@ void FilePathWatcherImpl::DestroyWatch() {
FilePathWatcher::FilePathWatcher() {
sequence_checker_.DetachFromSequence();
- impl_ = new FilePathWatcherImpl();
+ impl_ = MakeUnique<FilePathWatcherImpl>();
}
} // namespace base
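The |was_deleted_ptr_| idiom introduced above is worth spelling out: before running a callback that may delete |this|, the object points a member at a stack-allocated flag that the destructor sets, so the caller can tell afterwards whether it still exists and avoid touching freed members. A stripped-down sketch with illustrative names (not the actual watcher code):

#include <utility>

#include "base/callback.h"

class Notifier {
 public:
  explicit Notifier(base::Closure callback) : callback_(std::move(callback)) {}

  ~Notifier() {
    if (was_deleted_ptr_)
      *was_deleted_ptr_ = true;
  }

  void RunCallbackThatMayDeleteThis() {
    bool was_deleted = false;
    was_deleted_ptr_ = &was_deleted;

    callback_.Run();  // May delete |this|.

    if (was_deleted)
      return;  // |this| is gone; do not touch any members.
    was_deleted_ptr_ = nullptr;
    // Safe to keep using members here.
  }

 private:
  base::Closure callback_;
  bool* was_deleted_ptr_ = nullptr;
};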
diff --git a/chromium/base/files/file_posix.cc b/chromium/base/files/file_posix.cc
index a0f2328fe20..fcd190ac16f 100644
--- a/chromium/base/files/file_posix.cc
+++ b/chromium/base/files/file_posix.cc
@@ -321,7 +321,7 @@ int64_t File::GetLength() {
stat_wrapper_t file_info;
if (CallFstat(file_.get(), &file_info))
- return false;
+ return -1;
return file_info.st_size;
}
diff --git a/chromium/base/files/file_unittest.cc b/chromium/base/files/file_unittest.cc
index d3a5cdfa9b7..66c312b60d4 100644
--- a/chromium/base/files/file_unittest.cc
+++ b/chromium/base/files/file_unittest.cc
@@ -9,6 +9,7 @@
#include <utility>
#include "base/files/file_util.h"
+#include "base/files/memory_mapped_file.h"
#include "base/files/scoped_temp_dir.h"
#include "base/time/time.h"
#include "build/build_config.h"
@@ -517,4 +518,158 @@ TEST(FileTest, GetInfoForDirectory) {
EXPECT_FALSE(info.is_symbolic_link);
EXPECT_EQ(0, info.size);
}
+
+TEST(FileTest, DeleteNoop) {
+ base::ScopedTempDir temp_dir;
+ ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+ FilePath file_path = temp_dir.GetPath().AppendASCII("file");
+
+ // Creating and closing a file with DELETE perms should do nothing special.
+ File file(file_path,
+ (base::File::FLAG_CREATE | base::File::FLAG_READ |
+ base::File::FLAG_WRITE | base::File::FLAG_CAN_DELETE_ON_CLOSE));
+ ASSERT_TRUE(file.IsValid());
+ file.Close();
+ ASSERT_TRUE(base::PathExists(file_path));
+}
+
+TEST(FileTest, Delete) {
+ base::ScopedTempDir temp_dir;
+ ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+ FilePath file_path = temp_dir.GetPath().AppendASCII("file");
+
+ // Creating a file with DELETE and then marking for delete on close should
+ // delete it.
+ File file(file_path,
+ (base::File::FLAG_CREATE | base::File::FLAG_READ |
+ base::File::FLAG_WRITE | base::File::FLAG_CAN_DELETE_ON_CLOSE));
+ ASSERT_TRUE(file.IsValid());
+ ASSERT_TRUE(file.DeleteOnClose(true));
+ file.Close();
+ ASSERT_FALSE(base::PathExists(file_path));
+}
+
+TEST(FileTest, DeleteThenRevoke) {
+ base::ScopedTempDir temp_dir;
+ ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+ FilePath file_path = temp_dir.GetPath().AppendASCII("file");
+
+ // Creating a file with DELETE, marking it for delete, then clearing delete on
+ // close should not delete it.
+ File file(file_path,
+ (base::File::FLAG_CREATE | base::File::FLAG_READ |
+ base::File::FLAG_WRITE | base::File::FLAG_CAN_DELETE_ON_CLOSE));
+ ASSERT_TRUE(file.IsValid());
+ ASSERT_TRUE(file.DeleteOnClose(true));
+ ASSERT_TRUE(file.DeleteOnClose(false));
+ file.Close();
+ ASSERT_TRUE(base::PathExists(file_path));
+}
+
+TEST(FileTest, IrrevokableDeleteOnClose) {
+ base::ScopedTempDir temp_dir;
+ ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+ FilePath file_path = temp_dir.GetPath().AppendASCII("file");
+
+ // DELETE_ON_CLOSE cannot be revoked by this opener.
+ File file(
+ file_path,
+ (base::File::FLAG_CREATE | base::File::FLAG_READ |
+ base::File::FLAG_WRITE | base::File::FLAG_DELETE_ON_CLOSE |
+ base::File::FLAG_SHARE_DELETE | base::File::FLAG_CAN_DELETE_ON_CLOSE));
+ ASSERT_TRUE(file.IsValid());
+ // https://msdn.microsoft.com/library/windows/desktop/aa364221.aspx says that
+ // setting the disposition has no effect if the handle was opened with
+ // FLAG_DELETE_ON_CLOSE. Do not make the test's success dependent on whether
+ // SetFileInformationByHandle indicates success or failure. (It happens to
+ // indicate success on Windows 10.)
+ file.DeleteOnClose(false);
+ file.Close();
+ ASSERT_FALSE(base::PathExists(file_path));
+}
+
+TEST(FileTest, IrrevokableDeleteOnCloseOther) {
+ base::ScopedTempDir temp_dir;
+ ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+ FilePath file_path = temp_dir.GetPath().AppendASCII("file");
+
+ // DELETE_ON_CLOSE cannot be revoked by another opener.
+ File file(
+ file_path,
+ (base::File::FLAG_CREATE | base::File::FLAG_READ |
+ base::File::FLAG_WRITE | base::File::FLAG_DELETE_ON_CLOSE |
+ base::File::FLAG_SHARE_DELETE | base::File::FLAG_CAN_DELETE_ON_CLOSE));
+ ASSERT_TRUE(file.IsValid());
+
+ File file2(
+ file_path,
+ (base::File::FLAG_OPEN | base::File::FLAG_READ | base::File::FLAG_WRITE |
+ base::File::FLAG_SHARE_DELETE | base::File::FLAG_CAN_DELETE_ON_CLOSE));
+ ASSERT_TRUE(file2.IsValid());
+
+ file2.DeleteOnClose(false);
+ file2.Close();
+ ASSERT_TRUE(base::PathExists(file_path));
+ file.Close();
+ ASSERT_FALSE(base::PathExists(file_path));
+}
+
+TEST(FileTest, DeleteWithoutPermission) {
+ base::ScopedTempDir temp_dir;
+ ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+ FilePath file_path = temp_dir.GetPath().AppendASCII("file");
+
+ // It should not be possible to mark a file for deletion when it was not
+ // created/opened with DELETE.
+ File file(file_path, (base::File::FLAG_CREATE | base::File::FLAG_READ |
+ base::File::FLAG_WRITE));
+ ASSERT_TRUE(file.IsValid());
+ ASSERT_FALSE(file.DeleteOnClose(true));
+ file.Close();
+ ASSERT_TRUE(base::PathExists(file_path));
+}
+
+TEST(FileTest, UnsharedDeleteOnClose) {
+ base::ScopedTempDir temp_dir;
+ ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+ FilePath file_path = temp_dir.GetPath().AppendASCII("file");
+
+ // Opening with DELETE_ON_CLOSE when a previous opener hasn't enabled sharing
+ // will fail.
+ File file(file_path, (base::File::FLAG_CREATE | base::File::FLAG_READ |
+ base::File::FLAG_WRITE));
+ ASSERT_TRUE(file.IsValid());
+ File file2(
+ file_path,
+ (base::File::FLAG_OPEN | base::File::FLAG_READ | base::File::FLAG_WRITE |
+ base::File::FLAG_DELETE_ON_CLOSE | base::File::FLAG_SHARE_DELETE));
+ ASSERT_FALSE(file2.IsValid());
+
+ file.Close();
+ ASSERT_TRUE(base::PathExists(file_path));
+}
+
+TEST(FileTest, NoDeleteOnCloseWithMappedFile) {
+ base::ScopedTempDir temp_dir;
+ ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+ FilePath file_path = temp_dir.GetPath().AppendASCII("file");
+
+ // Mapping a file into memory blocks DeleteOnClose.
+ File file(file_path,
+ (base::File::FLAG_CREATE | base::File::FLAG_READ |
+ base::File::FLAG_WRITE | base::File::FLAG_CAN_DELETE_ON_CLOSE));
+ ASSERT_TRUE(file.IsValid());
+ ASSERT_EQ(5, file.WriteAtCurrentPos("12345", 5));
+
+ {
+ base::MemoryMappedFile mapping;
+ ASSERT_TRUE(mapping.Initialize(file.Duplicate()));
+ ASSERT_EQ(5U, mapping.length());
+
+ EXPECT_FALSE(file.DeleteOnClose(true));
+ }
+
+ file.Close();
+ ASSERT_TRUE(base::PathExists(file_path));
+}
#endif // defined(OS_WIN)
diff --git a/chromium/base/files/file_util_posix.cc b/chromium/base/files/file_util_posix.cc
index a8db2599c64..34a1c4d1e6e 100644
--- a/chromium/base/files/file_util_posix.cc
+++ b/chromium/base/files/file_util_posix.cc
@@ -276,11 +276,8 @@ bool CopyDirectory(const FilePath& from_path,
FilePath real_from_path = MakeAbsoluteFilePath(from_path);
if (real_from_path.empty())
return false;
- if (real_to_path.value().size() >= real_from_path.value().size() &&
- real_to_path.value().compare(0, real_from_path.value().size(),
- real_from_path.value()) == 0) {
+ if (real_to_path == real_from_path || real_from_path.IsParent(real_to_path))
return false;
- }
int traverse_type = FileEnumerator::FILES | FileEnumerator::SHOW_SYM_LINKS;
if (recursive)
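The motivation for the IsParent() check above: a raw string-prefix comparison treats "/tmp/foo" as an ancestor of "/tmp/foobar", which it is not, so legitimate copies were rejected. IsParent() compares whole path components instead of raw characters. A tiny sketch of the semantics (illustrative function name):

#include "base/files/file_path.h"
#include "base/logging.h"

void IsParentExamples() {
  base::FilePath from(FILE_PATH_LITERAL("/tmp/foo"));
  DCHECK(from.IsParent(base::FilePath(FILE_PATH_LITERAL("/tmp/foo/sub"))));
  DCHECK(!from.IsParent(base::FilePath(FILE_PATH_LITERAL("/tmp/foobar"))));
}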
diff --git a/chromium/base/files/file_util_proxy.cc b/chromium/base/files/file_util_proxy.cc
index 633d162599c..54a56610630 100644
--- a/chromium/base/files/file_util_proxy.cc
+++ b/chromium/base/files/file_util_proxy.cc
@@ -48,19 +48,6 @@ class GetFileInfoHelper {
DISALLOW_COPY_AND_ASSIGN(GetFileInfoHelper);
};
-File::Error DeleteAdapter(const FilePath& file_path, bool recursive) {
- if (!PathExists(file_path)) {
- return File::FILE_ERROR_NOT_FOUND;
- }
- if (!base::DeleteFile(file_path, recursive)) {
- if (!recursive && !base::IsDirectoryEmpty(file_path)) {
- return File::FILE_ERROR_NOT_EMPTY;
- }
- return File::FILE_ERROR_FAILED;
- }
- return File::FILE_OK;
-}
-
} // namespace
// Retrieves the information about a file. It is invalid to pass NULL for the
@@ -78,17 +65,6 @@ bool FileUtilProxy::GetFileInfo(
}
// static
-bool FileUtilProxy::DeleteFile(TaskRunner* task_runner,
- const FilePath& file_path,
- bool recursive,
- const StatusCallback& callback) {
- return base::PostTaskAndReplyWithResult(
- task_runner, FROM_HERE,
- Bind(&DeleteAdapter, file_path, recursive),
- callback);
-}
-
-// static
bool FileUtilProxy::Touch(
TaskRunner* task_runner,
const FilePath& file_path,
diff --git a/chromium/base/files/file_util_proxy.h b/chromium/base/files/file_util_proxy.h
index db69737648b..2e8a1ea0064 100644
--- a/chromium/base/files/file_util_proxy.h
+++ b/chromium/base/files/file_util_proxy.h
@@ -35,14 +35,6 @@ class BASE_EXPORT FileUtilProxy {
const FilePath& file_path,
const GetFileInfoCallback& callback);
- // Deletes a file or a directory.
- // It is an error to delete a non-empty directory with recursive=false.
- // This returns false if task posting to |task_runner| has failed.
- static bool DeleteFile(TaskRunner* task_runner,
- const FilePath& file_path,
- bool recursive,
- const StatusCallback& callback);
-
// Touches a file. The callback can be null.
// This returns false if task posting to |task_runner| has failed.
static bool Touch(
diff --git a/chromium/base/files/file_util_win.cc b/chromium/base/files/file_util_win.cc
index d70454df383..4e67b5a3486 100644
--- a/chromium/base/files/file_util_win.cc
+++ b/chromium/base/files/file_util_win.cc
@@ -177,11 +177,8 @@ bool CopyDirectory(const FilePath& from_path, const FilePath& to_path,
FilePath real_from_path = MakeAbsoluteFilePath(from_path);
if (real_from_path.empty())
return false;
- if (real_to_path.value().size() >= real_from_path.value().size() &&
- real_to_path.value().compare(0, real_from_path.value().size(),
- real_from_path.value()) == 0) {
+ if (real_to_path == real_from_path || real_from_path.IsParent(real_to_path))
return false;
- }
int traverse_type = FileEnumerator::FILES;
if (recursive)
diff --git a/chromium/base/files/file_win.cc b/chromium/base/files/file_win.cc
index 20c8908375c..acd3334f6af 100644
--- a/chromium/base/files/file_win.cc
+++ b/chromium/base/files/file_win.cc
@@ -273,6 +273,12 @@ File File::Duplicate() const {
return other;
}
+bool File::DeleteOnClose(bool delete_on_close) {
+ FILE_DISPOSITION_INFO disposition = {delete_on_close ? TRUE : FALSE};
+ return ::SetFileInformationByHandle(GetPlatformFile(), FileDispositionInfo,
+ &disposition, sizeof(disposition)) != 0;
+}
+
// Static.
File::Error File::OSErrorToFileError(DWORD last_error) {
switch (last_error) {
@@ -359,6 +365,8 @@ void File::DoInitialize(const FilePath& path, uint32_t flags) {
access |= FILE_WRITE_ATTRIBUTES;
if (flags & FLAG_EXECUTE)
access |= GENERIC_EXECUTE;
+ if (flags & FLAG_CAN_DELETE_ON_CLOSE)
+ access |= DELETE;
DWORD sharing = (flags & FLAG_EXCLUSIVE_READ) ? 0 : FILE_SHARE_READ;
if (!(flags & FLAG_EXCLUSIVE_WRITE))
diff --git a/chromium/base/files/memory_mapped_file_posix.cc b/chromium/base/files/memory_mapped_file_posix.cc
index 4899cf0cda6..90ba6f49c15 100644
--- a/chromium/base/files/memory_mapped_file_posix.cc
+++ b/chromium/base/files/memory_mapped_file_posix.cc
@@ -31,7 +31,7 @@ bool MemoryMappedFile::MapFileRegionToMemory(
if (region == MemoryMappedFile::Region::kWholeFile) {
int64_t file_len = file_.GetLength();
- if (file_len == -1) {
+ if (file_len < 0) {
DPLOG(ERROR) << "fstat " << file_.GetPlatformFile();
return false;
}
@@ -78,7 +78,12 @@ bool MemoryMappedFile::MapFileRegionToMemory(
// POSIX won't auto-extend the file when it is written so it must first
// be explicitly extended to the maximum size. Zeros will fill the new
// space.
- file_.SetLength(std::max(file_.GetLength(), region.offset + region.size));
+ auto file_len = file_.GetLength();
+ if (file_len < 0) {
+ DPLOG(ERROR) << "fstat " << file_.GetPlatformFile();
+ return false;
+ }
+ file_.SetLength(std::max(file_len, region.offset + region.size));
flags |= PROT_READ | PROT_WRITE;
break;
}
diff --git a/chromium/base/i18n/character_encoding.cc b/chromium/base/i18n/character_encoding.cc
new file mode 100644
index 00000000000..05d2f50fff2
--- /dev/null
+++ b/chromium/base/i18n/character_encoding.cc
@@ -0,0 +1,42 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/i18n/character_encoding.h"
+
+#include "base/macros.h"
+#include "third_party/icu/source/common/unicode/ucnv.h"
+
+namespace base {
+namespace {
+
+// An array of all supported canonical encoding names.
+const char* const kCanonicalEncodingNames[] = {
+ "Big5", "EUC-JP", "EUC-KR", "gb18030",
+ "GBK", "IBM866", "ISO-2022-JP", "ISO-8859-10",
+ "ISO-8859-13", "ISO-8859-14", "ISO-8859-15", "ISO-8859-16",
+ "ISO-8859-2", "ISO-8859-3", "ISO-8859-4", "ISO-8859-5",
+ "ISO-8859-6", "ISO-8859-7", "ISO-8859-8", "ISO-8859-8-I",
+ "KOI8-R", "KOI8-U", "macintosh", "Shift_JIS",
+ "UTF-16LE", "UTF-8", "windows-1250", "windows-1251",
+ "windows-1252", "windows-1253", "windows-1254", "windows-1255",
+ "windows-1256", "windows-1257", "windows-1258", "windows-874"};
+
+} // namespace
+
+std::string GetCanonicalEncodingNameByAliasName(const std::string& alias_name) {
+ for (auto& encoding_name : kCanonicalEncodingNames) {
+ if (alias_name == encoding_name)
+ return alias_name;
+ }
+ static const char* kStandards[3] = {"HTML", "MIME", "IANA"};
+ for (auto& standard : kStandards) {
+ UErrorCode error_code = U_ZERO_ERROR;
+ const char* canonical_name =
+ ucnv_getStandardName(alias_name.c_str(), standard, &error_code);
+ if (U_SUCCESS(error_code) && canonical_name)
+ return canonical_name;
+ }
+ return std::string();
+}
+} // namespace base
diff --git a/chromium/base/i18n/character_encoding.h b/chromium/base/i18n/character_encoding.h
new file mode 100644
index 00000000000..974cb5a6f92
--- /dev/null
+++ b/chromium/base/i18n/character_encoding.h
@@ -0,0 +1,20 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_I18N_CHARACTER_ENCODING_H_
+#define BASE_I18N_CHARACTER_ENCODING_H_
+
+#include <string>
+
+#include "base/i18n/base_i18n_export.h"
+
+namespace base {
+
+// Return canonical encoding name according to the encoding alias name.
+BASE_I18N_EXPORT std::string GetCanonicalEncodingNameByAliasName(
+ const std::string& alias_name);
+
+} // namespace base
+
+#endif // BASE_I18N_CHARACTER_ENCODING_H_
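A small usage sketch for the new helper, assuming the caller wants a fallback when the alias is unknown (an empty return value); the wrapper name and fallback choice are illustrative.

#include <string>

#include "base/i18n/character_encoding.h"

std::string ResolveEncoding(const std::string& alias) {
  std::string canonical = base::GetCanonicalEncodingNameByAliasName(alias);
  return canonical.empty() ? "UTF-8" : canonical;
}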
diff --git a/chromium/base/i18n/character_encoding_unittest.cc b/chromium/base/i18n/character_encoding_unittest.cc
new file mode 100644
index 00000000000..3c11ba30aa1
--- /dev/null
+++ b/chromium/base/i18n/character_encoding_unittest.cc
@@ -0,0 +1,23 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/i18n/character_encoding.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(CharacterEncodingTest, GetCanonicalEncodingNameByAliasName) {
+ EXPECT_EQ("Big5", GetCanonicalEncodingNameByAliasName("Big5"));
+ EXPECT_EQ("windows-874", GetCanonicalEncodingNameByAliasName("windows-874"));
+ EXPECT_EQ("ISO-8859-8", GetCanonicalEncodingNameByAliasName("ISO-8859-8"));
+
+ // Non-canonical alias names should be converted to their canonical names.
+ EXPECT_EQ("UTF-8", GetCanonicalEncodingNameByAliasName("utf8"));
+ EXPECT_EQ("gb18030", GetCanonicalEncodingNameByAliasName("GB18030"));
+ EXPECT_EQ("windows-874", GetCanonicalEncodingNameByAliasName("tis-620"));
+ EXPECT_EQ("EUC-KR", GetCanonicalEncodingNameByAliasName("ks_c_5601-1987"));
+}
+
+} // namespace base
diff --git a/chromium/base/i18n/rtl.h b/chromium/base/i18n/rtl.h
index bba93ce8f56..df15cd0208a 100644
--- a/chromium/base/i18n/rtl.h
+++ b/chromium/base/i18n/rtl.h
@@ -31,7 +31,7 @@ enum TextDirection {
UNKNOWN_DIRECTION = 0,
RIGHT_TO_LEFT = 1,
LEFT_TO_RIGHT = 2,
- TEXT_DIRECTION_NUM_DIRECTIONS = 3,
+ TEXT_DIRECTION_MAX = LEFT_TO_RIGHT,
};
// Get the locale that the currently running process has been configured to use.
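The rename above turns the sentinel into an inclusive upper bound rather than a count. Below is a hypothetical bounds check of the kind such a *_MAX alias typically enables (for example when validating integers received over IPC); it is an illustration only, not code from this commit.

// Hypothetical validation helper built on the inclusive TEXT_DIRECTION_MAX.
#include "base/i18n/rtl.h"

bool IsKnownTextDirection(int raw_value) {
  return raw_value >= base::i18n::UNKNOWN_DIRECTION &&
         raw_value <= base::i18n::TEXT_DIRECTION_MAX;
}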
diff --git a/chromium/base/i18n/time_formatting.cc b/chromium/base/i18n/time_formatting.cc
index 024b86510b5..f245ca5a6bd 100644
--- a/chromium/base/i18n/time_formatting.cc
+++ b/chromium/base/i18n/time_formatting.cc
@@ -11,6 +11,7 @@
#include "base/logging.h"
#include "base/strings/utf_string_conversions.h"
#include "base/time/time.h"
+#include "third_party/icu/source/common/unicode/utypes.h"
#include "third_party/icu/source/i18n/unicode/datefmt.h"
#include "third_party/icu/source/i18n/unicode/dtptngen.h"
#include "third_party/icu/source/i18n/unicode/fmtable.h"
@@ -153,22 +154,68 @@ string16 TimeFormatFriendlyDate(const Time& time) {
return TimeFormat(formatter.get(), time);
}
-string16 TimeDurationFormat(const TimeDelta& time,
- const DurationFormatWidth width) {
+bool TimeDurationFormat(const TimeDelta time,
+ const DurationFormatWidth width,
+ string16* out) {
+ DCHECK(out);
UErrorCode status = U_ZERO_ERROR;
const int total_minutes = static_cast<int>(time.InSecondsF() / 60 + 0.5);
- int hours = total_minutes / 60;
- int minutes = total_minutes % 60;
+ const int hours = total_minutes / 60;
+ const int minutes = total_minutes % 60;
UMeasureFormatWidth u_width = DurationWidthToMeasureWidth(width);
+ // TODO(derat): Delete the |status| checks and LOG(ERROR) calls throughout
+ // this function once the cause of http://crbug.com/677043 is tracked down.
const icu::Measure measures[] = {
icu::Measure(hours, icu::MeasureUnit::createHour(status), status),
icu::Measure(minutes, icu::MeasureUnit::createMinute(status), status)};
+ if (U_FAILURE(status)) {
+ LOG(ERROR) << "Creating MeasureUnit or Measure for " << hours << "h"
+ << minutes << "m failed: " << u_errorName(status);
+ return false;
+ }
+
icu::MeasureFormat measure_format(icu::Locale::getDefault(), u_width, status);
+ if (U_FAILURE(status)) {
+ LOG(ERROR) << "Creating MeasureFormat for "
+ << icu::Locale::getDefault().getName()
+ << " failed: " << u_errorName(status);
+ return false;
+ }
+
icu::UnicodeString formatted;
icu::FieldPosition ignore(icu::FieldPosition::DONT_CARE);
measure_format.formatMeasures(measures, 2, formatted, ignore, status);
- return base::string16(formatted.getBuffer(), formatted.length());
+ if (U_FAILURE(status)) {
+ LOG(ERROR) << "formatMeasures failed: " << u_errorName(status);
+ return false;
+ }
+
+ *out = base::string16(formatted.getBuffer(), formatted.length());
+ return true;
+}
+
+bool TimeDurationFormatWithSeconds(const TimeDelta time,
+ const DurationFormatWidth width,
+ string16* out) {
+ DCHECK(out);
+ UErrorCode status = U_ZERO_ERROR;
+ const int64_t total_seconds = static_cast<int>(time.InSecondsF() + 0.5);
+ const int hours = total_seconds / 3600;
+ const int minutes = (total_seconds - hours * 3600) / 60;
+ const int seconds = total_seconds % 60;
+ UMeasureFormatWidth u_width = DurationWidthToMeasureWidth(width);
+
+ const icu::Measure measures[] = {
+ icu::Measure(hours, icu::MeasureUnit::createHour(status), status),
+ icu::Measure(minutes, icu::MeasureUnit::createMinute(status), status),
+ icu::Measure(seconds, icu::MeasureUnit::createSecond(status), status)};
+ icu::MeasureFormat measure_format(icu::Locale::getDefault(), u_width, status);
+ icu::UnicodeString formatted;
+ icu::FieldPosition ignore(icu::FieldPosition::DONT_CARE);
+ measure_format.formatMeasures(measures, 3, formatted, ignore, status);
+ *out = base::string16(formatted.getBuffer(), formatted.length());
+ return U_SUCCESS(status) == TRUE;
}
HourClockType GetHourClockType() {
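The formatting path above reduces to a plain integer decomposition of the rounded duration; here is a standalone sketch in standard C++ (names invented for this example) with the worked case 15 h 42 min 30 s = 56550 s.

// Sketch of the hour/minute/second split used by TimeDurationFormatWithSeconds.
#include <cassert>
#include <cstdint>

struct HMS { int hours, minutes, seconds; };

HMS SplitSeconds(int64_t total_seconds) {
  HMS out;
  out.hours = static_cast<int>(total_seconds / 3600);
  out.minutes = static_cast<int>((total_seconds % 3600) / 60);  // same as (t - h*3600)/60
  out.seconds = static_cast<int>(total_seconds % 60);
  return out;
}

int main() {
  HMS hms = SplitSeconds(15 * 3600 + 42 * 60 + 30);  // 56550 seconds
  assert(hms.hours == 15 && hms.minutes == 42 && hms.seconds == 30);
  return 0;
}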
diff --git a/chromium/base/i18n/time_formatting.h b/chromium/base/i18n/time_formatting.h
index 761dd98a5d4..f79473da0a1 100644
--- a/chromium/base/i18n/time_formatting.h
+++ b/chromium/base/i18n/time_formatting.h
@@ -8,6 +8,7 @@
#ifndef BASE_I18N_TIME_FORMATTING_H_
#define BASE_I18N_TIME_FORMATTING_H_
+#include "base/compiler_specific.h"
#include "base/i18n/base_i18n_export.h"
#include "base/strings/string16.h"
@@ -76,9 +77,31 @@ BASE_I18N_EXPORT string16 TimeFormatFriendlyDateAndTime(const Time& time);
BASE_I18N_EXPORT string16 TimeFormatFriendlyDate(const Time& time);
// Formats a time duration of hours and minutes into various formats, e.g.,
-// "3:07" or "3 hours, 7 minutes". See DurationFormatWidth for details.
-BASE_I18N_EXPORT string16 TimeDurationFormat(const TimeDelta& time,
- const DurationFormatWidth width);
+// "3:07" or "3 hours, 7 minutes", and returns true on success. See
+// DurationFormatWidth for details.
+//
+// Please don't use width = DURATION_WIDTH_NUMERIC when the time duration
+// can possibly be larger than 24h, as the hour value will be cut below 24
+// after formatting.
+// TODO(chengx): fix function output when width = DURATION_WIDTH_NUMERIC
+// (http://crbug.com/675791)
+BASE_I18N_EXPORT bool TimeDurationFormat(const TimeDelta time,
+ const DurationFormatWidth width,
+ string16* out) WARN_UNUSED_RESULT;
+
+// Formats a time duration of hours, minutes and seconds into various formats,
+// e.g., "3:07:30" or "3 hours, 7 minutes, 30 seconds", and returns true on
+// success. See DurationFormatWidth for details.
+//
+// Please don't use width = DURATION_WIDTH_NUMERIC when the time duration
+// can possibly be larger than 24h, as the hour value will be cut below 24
+// after formatting.
+// TODO(chengx): fix function output when width = DURATION_WIDTH_NUMERIC
+// (http://crbug.com/675791)
+BASE_I18N_EXPORT bool TimeDurationFormatWithSeconds(
+ const TimeDelta time,
+ const DurationFormatWidth width,
+ string16* out) WARN_UNUSED_RESULT;
// Gets the hour clock type of the current locale. e.g.
// k12HourClock (en-US).
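Since the duration formatters now report failure and are WARN_UNUSED_RESULT, call sites check the boolean instead of consuming a string16 return value. A hedged call-site sketch; the function name and the empty-string fallback are choices of this example.

// New pattern, replacing the old
//   string16 s = TimeDurationFormat(delta, DURATION_WIDTH_WIDE);
#include "base/i18n/time_formatting.h"
#include "base/strings/string16.h"
#include "base/time/time.h"

base::string16 FormatUptime(base::TimeDelta uptime) {
  base::string16 text;
  if (!base::TimeDurationFormat(uptime, base::DURATION_WIDTH_WIDE, &text))
    return base::string16();  // ICU failed; fall back to empty output.
  return text;                // e.g. "15 hours, 42 minutes"
}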
diff --git a/chromium/base/i18n/time_formatting_unittest.cc b/chromium/base/i18n/time_formatting_unittest.cc
index 51a48513aca..ca31cc61961 100644
--- a/chromium/base/i18n/time_formatting_unittest.cc
+++ b/chromium/base/i18n/time_formatting_unittest.cc
@@ -27,7 +27,7 @@ const Time::Exploded kTestDateTimeExploded = {
// Returns difference between the local time and GMT formatted as string.
// This function gets |time| because the difference depends on time,
// see https://en.wikipedia.org/wiki/Daylight_saving_time for details.
-base::string16 GetShortTimeZone(const Time& time) {
+string16 GetShortTimeZone(const Time& time) {
UErrorCode status = U_ZERO_ERROR;
std::unique_ptr<icu::TimeZone> zone(icu::TimeZone::createDefault());
std::unique_ptr<icu::TimeZoneFormat> zone_formatter(
@@ -37,7 +37,30 @@ base::string16 GetShortTimeZone(const Time& time) {
zone_formatter->format(UTZFMT_STYLE_SPECIFIC_SHORT, *zone,
static_cast<UDate>(time.ToDoubleT() * 1000),
name, nullptr);
- return base::string16(name.getBuffer(), name.length());
+ return string16(name.getBuffer(), name.length());
+}
+
+// Calls TimeDurationFormat() with |delta| and |width| and returns the resulting
+// string. On failure, adds a failed expectation and returns an empty string.
+string16 TimeDurationFormatString(const TimeDelta& delta,
+ DurationFormatWidth width) {
+ string16 str;
+ EXPECT_TRUE(TimeDurationFormat(delta, width, &str))
+ << "Failed to format " << delta.ToInternalValue() << " with width "
+ << width;
+ return str;
+}
+
+// Calls TimeDurationFormatWithSeconds() with |delta| and |width| and returns
+// the resulting string. On failure, adds a failed expectation and returns an
+// empty string.
+string16 TimeDurationFormatWithSecondsString(const TimeDelta& delta,
+ DurationFormatWidth width) {
+ string16 str;
+ EXPECT_TRUE(TimeDurationFormatWithSeconds(delta, width, &str))
+ << "Failed to format " << delta.ToInternalValue() << " with width "
+ << width;
+ return str;
}
#if defined(OS_ANDROID)
@@ -228,24 +251,24 @@ TEST(TimeFormattingTest, TimeDurationFormat) {
// US English.
i18n::SetICUDefaultLocale("en_US");
EXPECT_EQ(ASCIIToUTF16("15 hours, 42 minutes"),
- TimeDurationFormat(delta, DURATION_WIDTH_WIDE));
+ TimeDurationFormatString(delta, DURATION_WIDTH_WIDE));
EXPECT_EQ(ASCIIToUTF16("15 hr, 42 min"),
- TimeDurationFormat(delta, DURATION_WIDTH_SHORT));
+ TimeDurationFormatString(delta, DURATION_WIDTH_SHORT));
EXPECT_EQ(ASCIIToUTF16("15h 42m"),
- TimeDurationFormat(delta, DURATION_WIDTH_NARROW));
+ TimeDurationFormatString(delta, DURATION_WIDTH_NARROW));
EXPECT_EQ(ASCIIToUTF16("15:42"),
- TimeDurationFormat(delta, DURATION_WIDTH_NUMERIC));
+ TimeDurationFormatString(delta, DURATION_WIDTH_NUMERIC));
// Danish, with Latin alphabet but different abbreviations and punctuation.
i18n::SetICUDefaultLocale("da");
EXPECT_EQ(ASCIIToUTF16("15 timer og 42 minutter"),
- TimeDurationFormat(delta, DURATION_WIDTH_WIDE));
+ TimeDurationFormatString(delta, DURATION_WIDTH_WIDE));
EXPECT_EQ(ASCIIToUTF16("15 t og 42 min."),
- TimeDurationFormat(delta, DURATION_WIDTH_SHORT));
+ TimeDurationFormatString(delta, DURATION_WIDTH_SHORT));
EXPECT_EQ(ASCIIToUTF16("15 t og 42 min"),
- TimeDurationFormat(delta, DURATION_WIDTH_NARROW));
+ TimeDurationFormatString(delta, DURATION_WIDTH_NARROW));
EXPECT_EQ(ASCIIToUTF16("15.42"),
- TimeDurationFormat(delta, DURATION_WIDTH_NUMERIC));
+ TimeDurationFormatString(delta, DURATION_WIDTH_NUMERIC));
// Persian, with non-Arabic numbers.
i18n::SetICUDefaultLocale("fa");
@@ -259,10 +282,60 @@ TEST(TimeFormattingTest, TimeDurationFormat) {
L"\x6f1\x6f5\x20\x633\x627\x639\x62a\x20\x6f4\x6f2\x20\x62f\x642\x6cc"
L"\x642\x647");
string16 fa_numeric = WideToUTF16(L"\x6f1\x6f5\x3a\x6f4\x6f2");
- EXPECT_EQ(fa_wide, TimeDurationFormat(delta, DURATION_WIDTH_WIDE));
- EXPECT_EQ(fa_short, TimeDurationFormat(delta, DURATION_WIDTH_SHORT));
- EXPECT_EQ(fa_narrow, TimeDurationFormat(delta, DURATION_WIDTH_NARROW));
- EXPECT_EQ(fa_numeric, TimeDurationFormat(delta, DURATION_WIDTH_NUMERIC));
+ EXPECT_EQ(fa_wide, TimeDurationFormatString(delta, DURATION_WIDTH_WIDE));
+ EXPECT_EQ(fa_short, TimeDurationFormatString(delta, DURATION_WIDTH_SHORT));
+ EXPECT_EQ(fa_narrow, TimeDurationFormatString(delta, DURATION_WIDTH_NARROW));
+ EXPECT_EQ(fa_numeric,
+ TimeDurationFormatString(delta, DURATION_WIDTH_NUMERIC));
+}
+
+TEST(TimeFormattingTest, TimeDurationFormatWithSeconds) {
+ test::ScopedRestoreICUDefaultLocale restore_locale;
+
+ // US English.
+ i18n::SetICUDefaultLocale("en_US");
+
+ // Test different formats.
+ TimeDelta delta = TimeDelta::FromSeconds(15 * 3600 + 42 * 60 + 30);
+ EXPECT_EQ(ASCIIToUTF16("15 hours, 42 minutes, 30 seconds"),
+ TimeDurationFormatWithSecondsString(delta, DURATION_WIDTH_WIDE));
+ EXPECT_EQ(ASCIIToUTF16("15 hr, 42 min, 30 sec"),
+ TimeDurationFormatWithSecondsString(delta, DURATION_WIDTH_SHORT));
+ EXPECT_EQ(ASCIIToUTF16("15h 42m 30s"),
+ TimeDurationFormatWithSecondsString(delta, DURATION_WIDTH_NARROW));
+ EXPECT_EQ(ASCIIToUTF16("15:42:30"),
+ TimeDurationFormatWithSecondsString(delta, DURATION_WIDTH_NUMERIC));
+
+ // Test edge case when hour >= 100.
+ delta = TimeDelta::FromSeconds(125 * 3600 + 42 * 60 + 30);
+ EXPECT_EQ(ASCIIToUTF16("125 hours, 42 minutes, 30 seconds"),
+ TimeDurationFormatWithSecondsString(delta, DURATION_WIDTH_WIDE));
+ EXPECT_EQ(ASCIIToUTF16("125 hr, 42 min, 30 sec"),
+ TimeDurationFormatWithSecondsString(delta, DURATION_WIDTH_SHORT));
+ EXPECT_EQ(ASCIIToUTF16("125h 42m 30s"),
+ TimeDurationFormatWithSecondsString(delta, DURATION_WIDTH_NARROW));
+
+ // Test edge case when minute = 0.
+ delta = TimeDelta::FromSeconds(15 * 3600 + 0 * 60 + 30);
+ EXPECT_EQ(ASCIIToUTF16("15 hours, 0 minutes, 30 seconds"),
+ TimeDurationFormatWithSecondsString(delta, DURATION_WIDTH_WIDE));
+ EXPECT_EQ(ASCIIToUTF16("15 hr, 0 min, 30 sec"),
+ TimeDurationFormatWithSecondsString(delta, DURATION_WIDTH_SHORT));
+ EXPECT_EQ(ASCIIToUTF16("15h 0m 30s"),
+ TimeDurationFormatWithSecondsString(delta, DURATION_WIDTH_NARROW));
+ EXPECT_EQ(ASCIIToUTF16("15:00:30"),
+ TimeDurationFormatWithSecondsString(delta, DURATION_WIDTH_NUMERIC));
+
+ // Test edge case when second = 0.
+ delta = TimeDelta::FromSeconds(15 * 3600 + 42 * 60 + 0);
+ EXPECT_EQ(ASCIIToUTF16("15 hours, 42 minutes, 0 seconds"),
+ TimeDurationFormatWithSecondsString(delta, DURATION_WIDTH_WIDE));
+ EXPECT_EQ(ASCIIToUTF16("15 hr, 42 min, 0 sec"),
+ TimeDurationFormatWithSecondsString(delta, DURATION_WIDTH_SHORT));
+ EXPECT_EQ(ASCIIToUTF16("15h 42m 0s"),
+ TimeDurationFormatWithSecondsString(delta, DURATION_WIDTH_NARROW));
+ EXPECT_EQ(ASCIIToUTF16("15:42:00"),
+ TimeDurationFormatWithSecondsString(delta, DURATION_WIDTH_NUMERIC));
}
} // namespace
diff --git a/chromium/base/id_map.h b/chromium/base/id_map.h
index 8702982f172..8545de84084 100644
--- a/chromium/base/id_map.h
+++ b/chromium/base/id_map.h
@@ -17,13 +17,6 @@
#include "base/macros.h"
#include "base/sequence_checker.h"
-// Ownership semantics - own pointer means the pointer is deleted in Remove()
-// & during destruction
-enum IDMapOwnershipSemantics {
- IDMapExternalPointer,
- IDMapOwnPointer
-};
-
// This object maintains a list of IDs that can be quickly converted to
// pointers to objects. It is implemented as a hash table, optimized for
// relatively small data sets (in the common case, there will be exactly one
@@ -32,19 +25,16 @@ enum IDMapOwnershipSemantics {
// Items can be inserted into the container with arbitrary ID, but the caller
// must ensure they are unique. Inserting IDs and relying on automatically
// generated ones is not allowed because they can collide.
-//
-// This class does not have a virtual destructor, do not inherit from it when
-// ownership semantics are set to own because pointers will leak.
-template <typename T,
- IDMapOwnershipSemantics OS = IDMapExternalPointer,
- typename K = int32_t>
-class IDMap {
+
+// The map's value type (the V param) can be any dereferenceable type, such as a
+// raw pointer or smart pointer
+template <typename V, typename K = int32_t>
+class IDMap final {
public:
using KeyType = K;
private:
- using V = typename std::
- conditional<OS == IDMapExternalPointer, T*, std::unique_ptr<T>>::type;
+ using T = typename std::remove_reference<decltype(*V())>::type;
using HashTable = base::hash_map<KeyType, V>;
public:
@@ -68,30 +58,13 @@ class IDMap {
void set_check_on_null_data(bool value) { check_on_null_data_ = value; }
// Adds a view with an automatically generated unique ID. See AddWithID.
- // (This unique_ptr<> variant will not compile in IDMapExternalPointer mode.)
- KeyType Add(std::unique_ptr<T> data) {
- return AddInternal(std::move(data));
- }
+ KeyType Add(V data) { return AddInternal(std::move(data)); }
// Adds a new data member with the specified ID. The ID must not be in
// the list. The caller either must generate all unique IDs itself and use
// this function, or allow this object to generate IDs and call Add. These
// two methods may not be mixed, or duplicate IDs may be generated.
- // (This unique_ptr<> variant will not compile in IDMapExternalPointer mode.)
- void AddWithID(std::unique_ptr<T> data, KeyType id) {
- AddWithIDInternal(std::move(data), id);
- }
-
- // http://crbug.com/647091: Raw pointer Add()s in IDMapOwnPointer mode are
- // deprecated. Users of IDMapOwnPointer should transition to the unique_ptr
- // variant above, and the following methods should only be used in
- // IDMapExternalPointer mode.
- KeyType Add(T* data) {
- return AddInternal(V(data));
- }
- void AddWithID(T* data, KeyType id) {
- AddWithIDInternal(V(data), id);
- }
+ void AddWithID(V data, KeyType id) { AddWithIDInternal(std::move(data), id); }
void Remove(KeyType id) {
DCHECK(sequence_checker_.CalledOnValidSequence());
@@ -161,9 +134,7 @@ class IDMap {
template<class ReturnType>
class Iterator {
public:
- Iterator(IDMap<T, OS, K>* map)
- : map_(map),
- iter_(map_->data_.begin()) {
+ Iterator(IDMap<V, K>* map) : map_(map), iter_(map_->data_.begin()) {
Init();
}
@@ -227,7 +198,7 @@ class IDMap {
}
}
- IDMap<T, OS, K>* map_;
+ IDMap<V, K>* map_;
typename HashTable::const_iterator iter_;
};
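With the ownership enum gone, ownership is now expressed entirely by the value type. A short migration sketch follows (the Widget type is invented for illustration); the same pattern appears in the updated unit tests below.

// Before: IDMap<Widget>                   held external pointers,
//         IDMap<Widget, IDMapOwnPointer>  owned them.
// After:  the value type says it all.
#include <memory>
#include "base/id_map.h"
#include "base/memory/ptr_util.h"

struct Widget {};

void IdMapMigrationExample() {
  Widget on_stack;
  IDMap<Widget*> weak_map;                    // does not own its values
  int32_t weak_id = weak_map.Add(&on_stack);

  IDMap<std::unique_ptr<Widget>> owning_map;  // deletes values on Remove()/Clear()
  int32_t owned_id = owning_map.Add(base::MakeUnique<Widget>());
  (void)weak_id;
  (void)owned_id;
}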
diff --git a/chromium/base/id_map_unittest.cc b/chromium/base/id_map_unittest.cc
index a3f0808915d..42949bb5b95 100644
--- a/chromium/base/id_map_unittest.cc
+++ b/chromium/base/id_map_unittest.cc
@@ -6,6 +6,9 @@
#include <stdint.h>
+#include <memory>
+
+#include "base/memory/ptr_util.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace {
@@ -23,7 +26,7 @@ class DestructorCounter {
};
TEST(IDMapTest, Basic) {
- IDMap<TestObject> map;
+ IDMap<TestObject*> map;
EXPECT_TRUE(map.IsEmpty());
EXPECT_EQ(0U, map.size());
@@ -62,7 +65,7 @@ TEST(IDMapTest, Basic) {
}
TEST(IDMapTest, IteratorRemainsValidWhenRemovingCurrentElement) {
- IDMap<TestObject> map;
+ IDMap<TestObject*> map;
TestObject obj1;
TestObject obj2;
@@ -73,7 +76,7 @@ TEST(IDMapTest, IteratorRemainsValidWhenRemovingCurrentElement) {
map.Add(&obj3);
{
- IDMap<TestObject>::const_iterator iter(&map);
+ IDMap<TestObject*>::const_iterator iter(&map);
EXPECT_EQ(1, map.iteration_depth());
@@ -95,7 +98,7 @@ TEST(IDMapTest, IteratorRemainsValidWhenRemovingCurrentElement) {
}
TEST(IDMapTest, IteratorRemainsValidWhenRemovingOtherElements) {
- IDMap<TestObject> map;
+ IDMap<TestObject*> map;
const int kCount = 5;
TestObject obj[kCount];
@@ -107,16 +110,16 @@ TEST(IDMapTest, IteratorRemainsValidWhenRemovingOtherElements) {
int32_t ids_in_iteration_order[kCount];
const TestObject* objs_in_iteration_order[kCount];
int counter = 0;
- for (IDMap<TestObject>::const_iterator iter(&map);
- !iter.IsAtEnd(); iter.Advance()) {
+ for (IDMap<TestObject*>::const_iterator iter(&map); !iter.IsAtEnd();
+ iter.Advance()) {
ids_in_iteration_order[counter] = iter.GetCurrentKey();
objs_in_iteration_order[counter] = iter.GetCurrentValue();
counter++;
}
counter = 0;
- for (IDMap<TestObject>::const_iterator iter(&map);
- !iter.IsAtEnd(); iter.Advance()) {
+ for (IDMap<TestObject*>::const_iterator iter(&map); !iter.IsAtEnd();
+ iter.Advance()) {
EXPECT_EQ(1, map.iteration_depth());
switch (counter) {
@@ -147,7 +150,7 @@ TEST(IDMapTest, IteratorRemainsValidWhenRemovingOtherElements) {
}
TEST(IDMapTest, CopyIterator) {
- IDMap<TestObject> map;
+ IDMap<TestObject*> map;
TestObject obj1;
TestObject obj2;
@@ -160,12 +163,12 @@ TEST(IDMapTest, CopyIterator) {
EXPECT_EQ(0, map.iteration_depth());
{
- IDMap<TestObject>::const_iterator iter1(&map);
+ IDMap<TestObject*>::const_iterator iter1(&map);
EXPECT_EQ(1, map.iteration_depth());
// Make sure that copying the iterator correctly increments
// map's iteration depth.
- IDMap<TestObject>::const_iterator iter2(iter1);
+ IDMap<TestObject*>::const_iterator iter2(iter1);
EXPECT_EQ(2, map.iteration_depth());
}
@@ -175,7 +178,7 @@ TEST(IDMapTest, CopyIterator) {
}
TEST(IDMapTest, AssignIterator) {
- IDMap<TestObject> map;
+ IDMap<TestObject*> map;
TestObject obj1;
TestObject obj2;
@@ -188,10 +191,10 @@ TEST(IDMapTest, AssignIterator) {
EXPECT_EQ(0, map.iteration_depth());
{
- IDMap<TestObject>::const_iterator iter1(&map);
+ IDMap<TestObject*>::const_iterator iter1(&map);
EXPECT_EQ(1, map.iteration_depth());
- IDMap<TestObject>::const_iterator iter2(&map);
+ IDMap<TestObject*>::const_iterator iter2(&map);
EXPECT_EQ(2, map.iteration_depth());
// Make sure that assigning the iterator correctly updates
@@ -205,7 +208,7 @@ TEST(IDMapTest, AssignIterator) {
}
TEST(IDMapTest, IteratorRemainsValidWhenClearing) {
- IDMap<TestObject> map;
+ IDMap<TestObject*> map;
const int kCount = 5;
TestObject obj[kCount];
@@ -217,16 +220,16 @@ TEST(IDMapTest, IteratorRemainsValidWhenClearing) {
int32_t ids_in_iteration_order[kCount];
const TestObject* objs_in_iteration_order[kCount];
int counter = 0;
- for (IDMap<TestObject>::const_iterator iter(&map);
- !iter.IsAtEnd(); iter.Advance()) {
+ for (IDMap<TestObject*>::const_iterator iter(&map); !iter.IsAtEnd();
+ iter.Advance()) {
ids_in_iteration_order[counter] = iter.GetCurrentKey();
objs_in_iteration_order[counter] = iter.GetCurrentValue();
counter++;
}
counter = 0;
- for (IDMap<TestObject>::const_iterator iter(&map);
- !iter.IsAtEnd(); iter.Advance()) {
+ for (IDMap<TestObject*>::const_iterator iter(&map); !iter.IsAtEnd();
+ iter.Advance()) {
switch (counter) {
case 0:
EXPECT_EQ(ids_in_iteration_order[0], iter.GetCurrentKey());
@@ -258,18 +261,17 @@ TEST(IDMapTest, OwningPointersDeletesThemOnRemove) {
int map_external_ids[kCount];
int owned_del_count = 0;
- DestructorCounter* owned_obj[kCount];
int map_owned_ids[kCount];
- IDMap<DestructorCounter> map_external;
- IDMap<DestructorCounter, IDMapOwnPointer> map_owned;
+ IDMap<DestructorCounter*> map_external;
+ IDMap<std::unique_ptr<DestructorCounter>> map_owned;
for (int i = 0; i < kCount; ++i) {
external_obj[i] = new DestructorCounter(&external_del_count);
map_external_ids[i] = map_external.Add(external_obj[i]);
- owned_obj[i] = new DestructorCounter(&owned_del_count);
- map_owned_ids[i] = map_owned.Add(owned_obj[i]);
+ map_owned_ids[i] =
+ map_owned.Add(base::MakeUnique<DestructorCounter>(&owned_del_count));
}
for (int i = 0; i < kCount; ++i) {
@@ -295,17 +297,15 @@ TEST(IDMapTest, OwningPointersDeletesThemOnClear) {
DestructorCounter* external_obj[kCount];
int owned_del_count = 0;
- DestructorCounter* owned_obj[kCount];
- IDMap<DestructorCounter> map_external;
- IDMap<DestructorCounter, IDMapOwnPointer> map_owned;
+ IDMap<DestructorCounter*> map_external;
+ IDMap<std::unique_ptr<DestructorCounter>> map_owned;
for (int i = 0; i < kCount; ++i) {
external_obj[i] = new DestructorCounter(&external_del_count);
map_external.Add(external_obj[i]);
- owned_obj[i] = new DestructorCounter(&owned_del_count);
- map_owned.Add(owned_obj[i]);
+ map_owned.Add(base::MakeUnique<DestructorCounter>(&owned_del_count));
}
EXPECT_EQ(external_del_count, 0);
@@ -332,18 +332,16 @@ TEST(IDMapTest, OwningPointersDeletesThemOnDestruct) {
DestructorCounter* external_obj[kCount];
int owned_del_count = 0;
- DestructorCounter* owned_obj[kCount];
{
- IDMap<DestructorCounter> map_external;
- IDMap<DestructorCounter, IDMapOwnPointer> map_owned;
+ IDMap<DestructorCounter*> map_external;
+ IDMap<std::unique_ptr<DestructorCounter>> map_owned;
for (int i = 0; i < kCount; ++i) {
external_obj[i] = new DestructorCounter(&external_del_count);
map_external.Add(external_obj[i]);
- owned_obj[i] = new DestructorCounter(&owned_del_count);
- map_owned.Add(owned_obj[i]);
+ map_owned.Add(base::MakeUnique<DestructorCounter>(&owned_del_count));
}
}
@@ -358,14 +356,14 @@ TEST(IDMapTest, OwningPointersDeletesThemOnDestruct) {
}
TEST(IDMapTest, Int64KeyType) {
- IDMap<TestObject, IDMapExternalPointer, int64_t> map;
+ IDMap<TestObject*, int64_t> map;
TestObject obj1;
const int64_t kId1 = 999999999999999999;
map.AddWithID(&obj1, kId1);
EXPECT_EQ(&obj1, map.Lookup(kId1));
- IDMap<TestObject, IDMapExternalPointer, int64_t>::const_iterator iter(&map);
+ IDMap<TestObject*, int64_t>::const_iterator iter(&map);
ASSERT_FALSE(iter.IsAtEnd());
EXPECT_EQ(kId1, iter.GetCurrentKey());
EXPECT_EQ(&obj1, iter.GetCurrentValue());
diff --git a/chromium/base/json/json_parser.cc b/chromium/base/json/json_parser.cc
index cd427da9ea2..e85c3d256c4 100644
--- a/chromium/base/json/json_parser.cc
+++ b/chromium/base/json/json_parser.cc
@@ -24,7 +24,9 @@ namespace internal {
namespace {
-const int kStackMaxDepth = 100;
+// Chosen to support 99.9% of documents found in the wild late 2016.
+// http://crbug.com/673263
+const int kStackMaxDepth = 200;
const int32_t kExtendedASCIIStart = 0x80;
@@ -39,7 +41,7 @@ class DictionaryHiddenRootValue : public DictionaryValue {
DictionaryHiddenRootValue(std::unique_ptr<std::string> json,
std::unique_ptr<Value> root)
: json_(std::move(json)) {
- DCHECK(root->IsType(Value::TYPE_DICTIONARY));
+ DCHECK(root->IsType(Value::Type::DICTIONARY));
DictionaryValue::Swap(static_cast<DictionaryValue*>(root.get()));
}
@@ -91,7 +93,7 @@ class ListHiddenRootValue : public ListValue {
ListHiddenRootValue(std::unique_ptr<std::string> json,
std::unique_ptr<Value> root)
: json_(std::move(json)) {
- DCHECK(root->IsType(Value::TYPE_LIST));
+ DCHECK(root->IsType(Value::Type::LIST));
ListValue::Swap(static_cast<ListValue*>(root.get()));
}
@@ -140,7 +142,7 @@ class ListHiddenRootValue : public ListValue {
class JSONStringValue : public Value {
public:
explicit JSONStringValue(StringPiece piece)
- : Value(TYPE_STRING), string_piece_(piece) {}
+ : Value(Type::STRING), string_piece_(piece) {}
// Overridden from Value:
bool GetAsString(std::string* out_value) const override {
@@ -151,11 +153,15 @@ class JSONStringValue : public Value {
*out_value = UTF8ToUTF16(string_piece_);
return true;
}
+ bool GetAsString(StringPiece* out_value) const override {
+ *out_value = string_piece_;
+ return true;
+ }
Value* DeepCopy() const override { return new StringValue(string_piece_); }
bool Equals(const Value* other) const override {
std::string other_string;
- return other->IsType(TYPE_STRING) && other->GetAsString(&other_string) &&
- StringPiece(other_string) == string_piece_;
+ return other->IsType(Type::STRING) && other->GetAsString(&other_string) &&
+ StringPiece(other_string) == string_piece_;
}
private:
@@ -255,15 +261,15 @@ std::unique_ptr<Value> JSONParser::Parse(StringPiece input) {
// Dictionaries and lists can contain JSONStringValues, so wrap them in a
// hidden root.
if (!(options_ & JSON_DETACHABLE_CHILDREN)) {
- if (root->IsType(Value::TYPE_DICTIONARY)) {
+ if (root->IsType(Value::Type::DICTIONARY)) {
return MakeUnique<DictionaryHiddenRootValue>(std::move(input_copy),
std::move(root));
}
- if (root->IsType(Value::TYPE_LIST)) {
+ if (root->IsType(Value::Type::LIST)) {
return MakeUnique<ListHiddenRootValue>(std::move(input_copy),
std::move(root));
}
- if (root->IsType(Value::TYPE_STRING)) {
+ if (root->IsType(Value::Type::STRING)) {
// A string type could be a JSONStringValue, but because there's no
// corresponding HiddenRootValue, the memory will be lost. Deep copy to
// preserve it.
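The new GetAsString(StringPiece*) override gives callers a view of the parsed string without copying it. A hedged sketch of that read path (the helper name and the newline handling are this example's inventions):

// Sketch: prefer the zero-copy StringPiece accessor where a view suffices.
#include <string>
#include "base/strings/string_piece.h"
#include "base/values.h"

std::string FirstLineOfString(const base::Value& value) {
  base::StringPiece piece;
  if (value.GetAsString(&piece))  // no copy of the underlying characters
    return piece.substr(0, piece.find('\n')).as_string();
  return std::string();  // not a string value
}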
diff --git a/chromium/base/json/json_parser_unittest.cc b/chromium/base/json/json_parser_unittest.cc
index 6023288925f..d004c480cf6 100644
--- a/chromium/base/json/json_parser_unittest.cc
+++ b/chromium/base/json/json_parser_unittest.cc
@@ -124,7 +124,7 @@ TEST_F(JSONParserTest, ConsumeLiterals) {
TestLastThree(parser.get());
ASSERT_TRUE(value.get());
- EXPECT_TRUE(value->IsType(Value::TYPE_NULL));
+ EXPECT_TRUE(value->IsType(Value::Type::NONE));
}
TEST_F(JSONParserTest, ConsumeNumbers) {
@@ -244,14 +244,14 @@ TEST_F(JSONParserTest, ErrorMessages) {
EXPECT_EQ(JSONReader::JSON_UNEXPECTED_DATA_AFTER_ROOT, error_code);
std::string nested_json;
- for (int i = 0; i < 101; ++i) {
+ for (int i = 0; i < 201; ++i) {
nested_json.insert(nested_json.begin(), '[');
nested_json.append(1, ']');
}
root = JSONReader::ReadAndReturnError(nested_json, JSON_PARSE_RFC,
&error_code, &error_message);
EXPECT_FALSE(root.get());
- EXPECT_EQ(JSONParser::FormatErrorMessage(1, 100, JSONReader::kTooMuchNesting),
+ EXPECT_EQ(JSONParser::FormatErrorMessage(1, 200, JSONReader::kTooMuchNesting),
error_message);
EXPECT_EQ(JSONReader::JSON_TOO_MUCH_NESTING, error_code);
diff --git a/chromium/base/json/json_reader_unittest.cc b/chromium/base/json/json_reader_unittest.cc
index 2d6f8f37f0b..1b00b1d9e91 100644
--- a/chromium/base/json/json_reader_unittest.cc
+++ b/chromium/base/json/json_reader_unittest.cc
@@ -26,7 +26,7 @@ TEST(JSONReaderTest, Reading) {
// some whitespace checking
std::unique_ptr<Value> root = JSONReader().ReadToValue(" null ");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_NULL));
+ EXPECT_TRUE(root->IsType(Value::Type::NONE));
}
{
@@ -38,23 +38,23 @@ TEST(JSONReaderTest, Reading) {
// Simple bool
std::unique_ptr<Value> root = JSONReader().ReadToValue("true ");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_BOOLEAN));
+ EXPECT_TRUE(root->IsType(Value::Type::BOOLEAN));
}
{
// Embedded comment
std::unique_ptr<Value> root = JSONReader().ReadToValue("/* comment */null");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_NULL));
+ EXPECT_TRUE(root->IsType(Value::Type::NONE));
root = JSONReader().ReadToValue("40 /* comment */");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
+ EXPECT_TRUE(root->IsType(Value::Type::INTEGER));
root = JSONReader().ReadToValue("true // comment");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_BOOLEAN));
+ EXPECT_TRUE(root->IsType(Value::Type::BOOLEAN));
root = JSONReader().ReadToValue("/* comment */\"sample string\"");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+ EXPECT_TRUE(root->IsType(Value::Type::STRING));
std::string value;
EXPECT_TRUE(root->GetAsString(&value));
EXPECT_EQ("sample string", value);
@@ -72,7 +72,7 @@ TEST(JSONReaderTest, Reading) {
EXPECT_EQ(3u, list->GetSize());
root = JSONReader().ReadToValue("/* comment **/42");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
+ EXPECT_TRUE(root->IsType(Value::Type::INTEGER));
EXPECT_TRUE(root->GetAsInteger(&int_val));
EXPECT_EQ(42, int_val);
root = JSONReader().ReadToValue(
@@ -80,7 +80,7 @@ TEST(JSONReaderTest, Reading) {
"// */ 43\n"
"44");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
+ EXPECT_TRUE(root->IsType(Value::Type::INTEGER));
EXPECT_TRUE(root->GetAsInteger(&int_val));
EXPECT_EQ(44, int_val);
}
@@ -89,7 +89,7 @@ TEST(JSONReaderTest, Reading) {
// Test number formats
std::unique_ptr<Value> root = JSONReader().ReadToValue("43");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
+ EXPECT_TRUE(root->IsType(Value::Type::INTEGER));
int int_val = 0;
EXPECT_TRUE(root->GetAsInteger(&int_val));
EXPECT_EQ(43, int_val);
@@ -107,7 +107,7 @@ TEST(JSONReaderTest, Reading) {
// clause).
std::unique_ptr<Value> root = JSONReader().ReadToValue("0");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_INTEGER));
+ EXPECT_TRUE(root->IsType(Value::Type::INTEGER));
int int_val = 1;
EXPECT_TRUE(root->GetAsInteger(&int_val));
EXPECT_EQ(0, int_val);
@@ -119,13 +119,13 @@ TEST(JSONReaderTest, Reading) {
std::unique_ptr<Value> root = JSONReader().ReadToValue("2147483648");
ASSERT_TRUE(root);
double double_val;
- EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+ EXPECT_TRUE(root->IsType(Value::Type::DOUBLE));
double_val = 0.0;
EXPECT_TRUE(root->GetAsDouble(&double_val));
EXPECT_DOUBLE_EQ(2147483648.0, double_val);
root = JSONReader().ReadToValue("-2147483649");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+ EXPECT_TRUE(root->IsType(Value::Type::DOUBLE));
double_val = 0.0;
EXPECT_TRUE(root->GetAsDouble(&double_val));
EXPECT_DOUBLE_EQ(-2147483649.0, double_val);
@@ -135,42 +135,42 @@ TEST(JSONReaderTest, Reading) {
// Parse a double
std::unique_ptr<Value> root = JSONReader().ReadToValue("43.1");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+ EXPECT_TRUE(root->IsType(Value::Type::DOUBLE));
double double_val = 0.0;
EXPECT_TRUE(root->GetAsDouble(&double_val));
EXPECT_DOUBLE_EQ(43.1, double_val);
root = JSONReader().ReadToValue("4.3e-1");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+ EXPECT_TRUE(root->IsType(Value::Type::DOUBLE));
double_val = 0.0;
EXPECT_TRUE(root->GetAsDouble(&double_val));
EXPECT_DOUBLE_EQ(.43, double_val);
root = JSONReader().ReadToValue("2.1e0");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+ EXPECT_TRUE(root->IsType(Value::Type::DOUBLE));
double_val = 0.0;
EXPECT_TRUE(root->GetAsDouble(&double_val));
EXPECT_DOUBLE_EQ(2.1, double_val);
root = JSONReader().ReadToValue("2.1e+0001");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+ EXPECT_TRUE(root->IsType(Value::Type::DOUBLE));
double_val = 0.0;
EXPECT_TRUE(root->GetAsDouble(&double_val));
EXPECT_DOUBLE_EQ(21.0, double_val);
root = JSONReader().ReadToValue("0.01");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+ EXPECT_TRUE(root->IsType(Value::Type::DOUBLE));
double_val = 0.0;
EXPECT_TRUE(root->GetAsDouble(&double_val));
EXPECT_DOUBLE_EQ(0.01, double_val);
root = JSONReader().ReadToValue("1.00");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_DOUBLE));
+ EXPECT_TRUE(root->IsType(Value::Type::DOUBLE));
double_val = 0.0;
EXPECT_TRUE(root->GetAsDouble(&double_val));
EXPECT_DOUBLE_EQ(1.0, double_val);
@@ -210,7 +210,7 @@ TEST(JSONReaderTest, Reading) {
// Test string parser
std::unique_ptr<Value> root = JSONReader().ReadToValue("\"hello world\"");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+ EXPECT_TRUE(root->IsType(Value::Type::STRING));
std::string str_val;
EXPECT_TRUE(root->GetAsString(&str_val));
EXPECT_EQ("hello world", str_val);
@@ -220,7 +220,7 @@ TEST(JSONReaderTest, Reading) {
// Empty string
std::unique_ptr<Value> root = JSONReader().ReadToValue("\"\"");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+ EXPECT_TRUE(root->IsType(Value::Type::STRING));
std::string str_val;
EXPECT_TRUE(root->GetAsString(&str_val));
EXPECT_EQ("", str_val);
@@ -231,7 +231,7 @@ TEST(JSONReaderTest, Reading) {
std::unique_ptr<Value> root =
JSONReader().ReadToValue("\" \\\"\\\\\\/\\b\\f\\n\\r\\t\\v\"");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+ EXPECT_TRUE(root->IsType(Value::Type::STRING));
std::string str_val;
EXPECT_TRUE(root->GetAsString(&str_val));
EXPECT_EQ(" \"\\/\b\f\n\r\t\v", str_val);
@@ -242,7 +242,7 @@ TEST(JSONReaderTest, Reading) {
std::unique_ptr<Value> root =
JSONReader().ReadToValue("\"\\x41\\x00\\u1234\"");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+ EXPECT_TRUE(root->IsType(Value::Type::STRING));
std::string str_val;
EXPECT_TRUE(root->GetAsString(&str_val));
EXPECT_EQ(std::wstring(L"A\0\x1234", 3), UTF8ToWide(str_val));
@@ -316,7 +316,7 @@ TEST(JSONReaderTest, Reading) {
EXPECT_EQ(1U, list->GetSize());
Value* tmp_value = nullptr;
ASSERT_TRUE(list->Get(0, &tmp_value));
- EXPECT_TRUE(tmp_value->IsType(Value::TYPE_BOOLEAN));
+ EXPECT_TRUE(tmp_value->IsType(Value::Type::BOOLEAN));
bool bool_value = false;
EXPECT_TRUE(tmp_value->GetAsBoolean(&bool_value));
EXPECT_TRUE(bool_value);
@@ -345,7 +345,7 @@ TEST(JSONReaderTest, Reading) {
EXPECT_DOUBLE_EQ(9.87654321, double_val);
Value* null_val = nullptr;
ASSERT_TRUE(dict_val->Get("null", &null_val));
- EXPECT_TRUE(null_val->IsType(Value::TYPE_NULL));
+ EXPECT_TRUE(null_val->IsType(Value::Type::NONE));
std::string str_val;
EXPECT_TRUE(dict_val->GetString("S", &str_val));
EXPECT_EQ("str", str_val);
@@ -483,7 +483,7 @@ TEST(JSONReaderTest, Reading) {
std::unique_ptr<Value> root =
JSONReader().ReadToValue("\"\xe7\xbd\x91\xe9\xa1\xb5\"");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+ EXPECT_TRUE(root->IsType(Value::Type::STRING));
std::string str_val;
EXPECT_TRUE(root->GetAsString(&str_val));
EXPECT_EQ(L"\x7f51\x9875", UTF8ToWide(str_val));
@@ -507,7 +507,7 @@ TEST(JSONReaderTest, Reading) {
// Test utf16 encoded strings.
std::unique_ptr<Value> root = JSONReader().ReadToValue("\"\\u20ac3,14\"");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+ EXPECT_TRUE(root->IsType(Value::Type::STRING));
std::string str_val;
EXPECT_TRUE(root->GetAsString(&str_val));
EXPECT_EQ(
@@ -517,7 +517,7 @@ TEST(JSONReaderTest, Reading) {
root = JSONReader().ReadToValue("\"\\ud83d\\udca9\\ud83d\\udc6c\"");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::TYPE_STRING));
+ EXPECT_TRUE(root->IsType(Value::Type::STRING));
str_val.clear();
EXPECT_TRUE(root->GetAsString(&str_val));
EXPECT_EQ("\xf0\x9f\x92\xa9\xf0\x9f\x91\xac", str_val);
@@ -545,7 +545,7 @@ TEST(JSONReaderTest, Reading) {
{
// Test literal root objects.
std::unique_ptr<Value> root = JSONReader::Read("null");
- EXPECT_TRUE(root->IsType(Value::TYPE_NULL));
+ EXPECT_TRUE(root->IsType(Value::Type::NONE));
root = JSONReader::Read("true");
ASSERT_TRUE(root);
@@ -579,7 +579,7 @@ TEST(JSONReaderTest, ReadFromFile) {
JSONReader reader;
std::unique_ptr<Value> root(reader.ReadToValue(input));
ASSERT_TRUE(root) << reader.GetErrorMessage();
- EXPECT_TRUE(root->IsType(Value::TYPE_DICTIONARY));
+ EXPECT_TRUE(root->IsType(Value::Type::DICTIONARY));
}
// Tests that the root of a JSON object can be deleted safely while its
diff --git a/chromium/base/json/json_value_converter.h b/chromium/base/json/json_value_converter.h
index 187c4c44db7..68ebfa23de6 100644
--- a/chromium/base/json/json_value_converter.h
+++ b/chromium/base/json/json_value_converter.h
@@ -14,7 +14,7 @@
#include "base/base_export.h"
#include "base/logging.h"
#include "base/macros.h"
-#include "base/memory/scoped_vector.h"
+#include "base/memory/ptr_util.h"
#include "base/strings/string16.h"
#include "base/strings/string_piece.h"
#include "base/values.h"
@@ -65,9 +65,9 @@
// }
// };
//
-// For repeated field, we just assume ScopedVector for its container
-// and you can put RegisterRepeatedInt or some other types. Use
-// RegisterRepeatedMessage for nested repeated fields.
+// For repeated field, we just assume std::vector<std::unique_ptr<ElementType>>
+// for its container and you can put RegisterRepeatedInt or some other types.
+// Use RegisterRepeatedMessage for nested repeated fields.
//
// Sometimes JSON format uses string representations for other types such
// like enum, timestamp, or URL. You can use RegisterCustomField method
@@ -247,12 +247,13 @@ class NestedValueConverter : public ValueConverter<NestedType> {
};
template <typename Element>
-class RepeatedValueConverter : public ValueConverter<ScopedVector<Element> > {
+class RepeatedValueConverter
+ : public ValueConverter<std::vector<std::unique_ptr<Element>>> {
public:
RepeatedValueConverter() {}
bool Convert(const base::Value& value,
- ScopedVector<Element>* field) const override {
+ std::vector<std::unique_ptr<Element>>* field) const override {
const base::ListValue* list = NULL;
if (!value.GetAsList(&list)) {
// The field is not a list.
@@ -267,7 +268,7 @@ class RepeatedValueConverter : public ValueConverter<ScopedVector<Element> > {
std::unique_ptr<Element> e(new Element);
if (basic_converter_.Convert(*element, e.get())) {
- field->push_back(e.release());
+ field->push_back(std::move(e));
} else {
DVLOG(1) << "failure at " << i << "-th element";
return false;
@@ -283,12 +284,12 @@ class RepeatedValueConverter : public ValueConverter<ScopedVector<Element> > {
template <typename NestedType>
class RepeatedMessageConverter
- : public ValueConverter<ScopedVector<NestedType> > {
+ : public ValueConverter<std::vector<std::unique_ptr<NestedType>>> {
public:
RepeatedMessageConverter() {}
bool Convert(const base::Value& value,
- ScopedVector<NestedType>* field) const override {
+ std::vector<std::unique_ptr<NestedType>>* field) const override {
const base::ListValue* list = NULL;
if (!value.GetAsList(&list))
return false;
@@ -301,7 +302,7 @@ class RepeatedMessageConverter
std::unique_ptr<NestedType> nested(new NestedType);
if (converter_.Convert(*element, nested.get())) {
- field->push_back(nested.release());
+ field->push_back(std::move(nested));
} else {
DVLOG(1) << "failure at " << i << "-th element";
return false;
@@ -317,7 +318,7 @@ class RepeatedMessageConverter
template <typename NestedType>
class RepeatedCustomValueConverter
- : public ValueConverter<ScopedVector<NestedType> > {
+ : public ValueConverter<std::vector<std::unique_ptr<NestedType>>> {
public:
typedef bool(*ConvertFunc)(const base::Value* value, NestedType* field);
@@ -325,7 +326,7 @@ class RepeatedCustomValueConverter
: convert_func_(convert_func) {}
bool Convert(const base::Value& value,
- ScopedVector<NestedType>* field) const override {
+ std::vector<std::unique_ptr<NestedType>>* field) const override {
const base::ListValue* list = NULL;
if (!value.GetAsList(&list))
return false;
@@ -338,7 +339,7 @@ class RepeatedCustomValueConverter
std::unique_ptr<NestedType> nested(new NestedType);
if ((*convert_func_)(element, nested.get())) {
- field->push_back(nested.release());
+ field->push_back(std::move(nested));
} else {
DVLOG(1) << "failure at " << i << "-th element";
return false;
@@ -364,41 +365,42 @@ class JSONValueConverter {
void RegisterIntField(const std::string& field_name,
int StructType::* field) {
- fields_.push_back(new internal::FieldConverter<StructType, int>(
+ fields_.push_back(MakeUnique<internal::FieldConverter<StructType, int>>(
field_name, field, new internal::BasicValueConverter<int>));
}
void RegisterStringField(const std::string& field_name,
std::string StructType::* field) {
- fields_.push_back(new internal::FieldConverter<StructType, std::string>(
- field_name, field, new internal::BasicValueConverter<std::string>));
+ fields_.push_back(
+ MakeUnique<internal::FieldConverter<StructType, std::string>>(
+ field_name, field, new internal::BasicValueConverter<std::string>));
}
void RegisterStringField(const std::string& field_name,
string16 StructType::* field) {
- fields_.push_back(new internal::FieldConverter<StructType, string16>(
- field_name, field, new internal::BasicValueConverter<string16>));
+ fields_.push_back(
+ MakeUnique<internal::FieldConverter<StructType, string16>>(
+ field_name, field, new internal::BasicValueConverter<string16>));
}
void RegisterBoolField(const std::string& field_name,
bool StructType::* field) {
- fields_.push_back(new internal::FieldConverter<StructType, bool>(
+ fields_.push_back(MakeUnique<internal::FieldConverter<StructType, bool>>(
field_name, field, new internal::BasicValueConverter<bool>));
}
void RegisterDoubleField(const std::string& field_name,
double StructType::* field) {
- fields_.push_back(new internal::FieldConverter<StructType, double>(
+ fields_.push_back(MakeUnique<internal::FieldConverter<StructType, double>>(
field_name, field, new internal::BasicValueConverter<double>));
}
template <class NestedType>
void RegisterNestedField(
const std::string& field_name, NestedType StructType::* field) {
- fields_.push_back(new internal::FieldConverter<StructType, NestedType>(
- field_name,
- field,
- new internal::NestedValueConverter<NestedType>));
+ fields_.push_back(
+ MakeUnique<internal::FieldConverter<StructType, NestedType>>(
+ field_name, field, new internal::NestedValueConverter<NestedType>));
}
template <typename FieldType>
@@ -406,10 +408,10 @@ class JSONValueConverter {
const std::string& field_name,
FieldType StructType::* field,
bool (*convert_func)(const StringPiece&, FieldType*)) {
- fields_.push_back(new internal::FieldConverter<StructType, FieldType>(
- field_name,
- field,
- new internal::CustomFieldConverter<FieldType>(convert_func)));
+ fields_.push_back(
+ MakeUnique<internal::FieldConverter<StructType, FieldType>>(
+ field_name, field,
+ new internal::CustomFieldConverter<FieldType>(convert_func)));
}
template <typename FieldType>
@@ -417,71 +419,76 @@ class JSONValueConverter {
const std::string& field_name,
FieldType StructType::* field,
bool (*convert_func)(const base::Value*, FieldType*)) {
- fields_.push_back(new internal::FieldConverter<StructType, FieldType>(
- field_name,
- field,
- new internal::ValueFieldConverter<FieldType>(convert_func)));
+ fields_.push_back(
+ MakeUnique<internal::FieldConverter<StructType, FieldType>>(
+ field_name, field,
+ new internal::ValueFieldConverter<FieldType>(convert_func)));
}
- void RegisterRepeatedInt(const std::string& field_name,
- ScopedVector<int> StructType::* field) {
+ void RegisterRepeatedInt(
+ const std::string& field_name,
+ std::vector<std::unique_ptr<int>> StructType::*field) {
fields_.push_back(
- new internal::FieldConverter<StructType, ScopedVector<int> >(
+ MakeUnique<internal::FieldConverter<StructType,
+ std::vector<std::unique_ptr<int>>>>(
field_name, field, new internal::RepeatedValueConverter<int>));
}
- void RegisterRepeatedString(const std::string& field_name,
- ScopedVector<std::string> StructType::* field) {
+ void RegisterRepeatedString(
+ const std::string& field_name,
+ std::vector<std::unique_ptr<std::string>> StructType::*field) {
fields_.push_back(
- new internal::FieldConverter<StructType, ScopedVector<std::string> >(
- field_name,
- field,
+ MakeUnique<internal::FieldConverter<
+ StructType, std::vector<std::unique_ptr<std::string>>>>(
+ field_name, field,
new internal::RepeatedValueConverter<std::string>));
}
- void RegisterRepeatedString(const std::string& field_name,
- ScopedVector<string16> StructType::* field) {
- fields_.push_back(
- new internal::FieldConverter<StructType, ScopedVector<string16> >(
- field_name,
- field,
- new internal::RepeatedValueConverter<string16>));
+ void RegisterRepeatedString(
+ const std::string& field_name,
+ std::vector<std::unique_ptr<string16>> StructType::*field) {
+ fields_.push_back(MakeUnique<internal::FieldConverter<
+ StructType, std::vector<std::unique_ptr<string16>>>>(
+ field_name, field, new internal::RepeatedValueConverter<string16>));
}
- void RegisterRepeatedDouble(const std::string& field_name,
- ScopedVector<double> StructType::* field) {
- fields_.push_back(
- new internal::FieldConverter<StructType, ScopedVector<double> >(
- field_name, field, new internal::RepeatedValueConverter<double>));
+ void RegisterRepeatedDouble(
+ const std::string& field_name,
+ std::vector<std::unique_ptr<double>> StructType::*field) {
+ fields_.push_back(MakeUnique<internal::FieldConverter<
+ StructType, std::vector<std::unique_ptr<double>>>>(
+ field_name, field, new internal::RepeatedValueConverter<double>));
}
- void RegisterRepeatedBool(const std::string& field_name,
- ScopedVector<bool> StructType::* field) {
- fields_.push_back(
- new internal::FieldConverter<StructType, ScopedVector<bool> >(
- field_name, field, new internal::RepeatedValueConverter<bool>));
+ void RegisterRepeatedBool(
+ const std::string& field_name,
+ std::vector<std::unique_ptr<bool>> StructType::*field) {
+ fields_.push_back(MakeUnique<internal::FieldConverter<
+ StructType, std::vector<std::unique_ptr<bool>>>>(
+ field_name, field, new internal::RepeatedValueConverter<bool>));
}
template <class NestedType>
void RegisterRepeatedCustomValue(
const std::string& field_name,
- ScopedVector<NestedType> StructType::* field,
+ std::vector<std::unique_ptr<NestedType>> StructType::*field,
bool (*convert_func)(const base::Value*, NestedType*)) {
fields_.push_back(
- new internal::FieldConverter<StructType, ScopedVector<NestedType> >(
- field_name,
- field,
+ MakeUnique<internal::FieldConverter<
+ StructType, std::vector<std::unique_ptr<NestedType>>>>(
+ field_name, field,
new internal::RepeatedCustomValueConverter<NestedType>(
convert_func)));
}
template <class NestedType>
- void RegisterRepeatedMessage(const std::string& field_name,
- ScopedVector<NestedType> StructType::* field) {
+ void RegisterRepeatedMessage(
+ const std::string& field_name,
+ std::vector<std::unique_ptr<NestedType>> StructType::*field) {
fields_.push_back(
- new internal::FieldConverter<StructType, ScopedVector<NestedType> >(
- field_name,
- field,
+ MakeUnique<internal::FieldConverter<
+ StructType, std::vector<std::unique_ptr<NestedType>>>>(
+ field_name, field,
new internal::RepeatedMessageConverter<NestedType>));
}
@@ -492,7 +499,7 @@ class JSONValueConverter {
for (size_t i = 0; i < fields_.size(); ++i) {
const internal::FieldConverterBase<StructType>* field_converter =
- fields_[i];
+ fields_[i].get();
const base::Value* field = NULL;
if (dictionary_value->Get(field_converter->field_path(), &field)) {
if (!field_converter->ConvertField(*field, output)) {
@@ -505,7 +512,8 @@ class JSONValueConverter {
}
private:
- ScopedVector<internal::FieldConverterBase<StructType> > fields_;
+ std::vector<std::unique_ptr<internal::FieldConverterBase<StructType>>>
+ fields_;
DISALLOW_COPY_AND_ASSIGN(JSONValueConverter);
};
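A condensed registration sketch for the new repeated-field container; the Report struct and its fields are invented for illustration, but the registration calls are the ones declared above.

// Repeated JSON fields now land in std::vector<std::unique_ptr<T>>.
#include <memory>
#include <string>
#include <vector>
#include "base/json/json_value_converter.h"

struct Report {
  std::string name;
  std::vector<std::unique_ptr<int>> samples;  // was ScopedVector<int>

  static void RegisterJSONConverter(base::JSONValueConverter<Report>* converter) {
    converter->RegisterStringField("name", &Report::name);
    converter->RegisterRepeatedInt("samples", &Report::samples);
  }
};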
diff --git a/chromium/base/json/json_value_converter_unittest.cc b/chromium/base/json/json_value_converter_unittest.cc
index 56ade24ac3a..6a603d3a92a 100644
--- a/chromium/base/json/json_value_converter_unittest.cc
+++ b/chromium/base/json/json_value_converter_unittest.cc
@@ -9,7 +9,6 @@
#include <vector>
#include "base/json/json_reader.h"
-#include "base/memory/scoped_vector.h"
#include "base/strings/string_piece.h"
#include "base/values.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -27,8 +26,8 @@ struct SimpleMessage {
bool baz;
bool bstruct;
SimpleEnum simple_enum;
- ScopedVector<int> ints;
- ScopedVector<std::string> string_values;
+ std::vector<std::unique_ptr<int>> ints;
+ std::vector<std::unique_ptr<std::string>> string_values;
SimpleMessage() : foo(0), baz(false), bstruct(false), simple_enum(FOO) {}
static bool ParseSimpleEnum(const StringPiece& value, SimpleEnum* field) {
@@ -80,7 +79,7 @@ struct SimpleMessage {
struct NestedMessage {
double foo;
SimpleMessage child;
- ScopedVector<SimpleMessage> children;
+ std::vector<std::unique_ptr<SimpleMessage>> children;
NestedMessage() : foo(0) {}
@@ -163,7 +162,7 @@ TEST(JSONValueConverterTest, ParseNestedMessage) {
EXPECT_EQ("value_2", *message.child.string_values[1]);
EXPECT_EQ(2, static_cast<int>(message.children.size()));
- const SimpleMessage* first_child = message.children[0];
+ const SimpleMessage* first_child = message.children[0].get();
ASSERT_TRUE(first_child);
EXPECT_EQ(2, first_child->foo);
EXPECT_EQ("foobar", first_child->bar);
@@ -172,7 +171,7 @@ TEST(JSONValueConverterTest, ParseNestedMessage) {
ASSERT_EQ(1U, first_child->string_values.size());
EXPECT_EQ("value_1", *first_child->string_values[0]);
- const SimpleMessage* second_child = message.children[1];
+ const SimpleMessage* second_child = message.children[1].get();
ASSERT_TRUE(second_child);
EXPECT_EQ(3, second_child->foo);
EXPECT_EQ("barbaz", second_child->bar);
diff --git a/chromium/base/json/json_value_serializer_unittest.cc b/chromium/base/json/json_value_serializer_unittest.cc
index 43ddc9c4574..e835700a1a5 100644
--- a/chromium/base/json/json_value_serializer_unittest.cc
+++ b/chromium/base/json/json_value_serializer_unittest.cc
@@ -224,7 +224,7 @@ TEST(JSONValueSerializerTest, Roundtrip) {
Value* null_value = nullptr;
ASSERT_TRUE(root_dict->Get("null", &null_value));
ASSERT_TRUE(null_value);
- ASSERT_TRUE(null_value->IsType(Value::TYPE_NULL));
+ ASSERT_TRUE(null_value->IsType(Value::Type::NONE));
bool bool_value = false;
ASSERT_TRUE(root_dict->GetBoolean("bool", &bool_value));
@@ -417,7 +417,7 @@ TEST_F(JSONFileValueSerializerTest, Roundtrip) {
Value* null_value = nullptr;
ASSERT_TRUE(root_dict->Get("null", &null_value));
ASSERT_TRUE(null_value);
- ASSERT_TRUE(null_value->IsType(Value::TYPE_NULL));
+ ASSERT_TRUE(null_value->IsType(Value::Type::NONE));
bool bool_value = false;
ASSERT_TRUE(root_dict->GetBoolean("bool", &bool_value));
diff --git a/chromium/base/json/json_writer.cc b/chromium/base/json/json_writer.cc
index 0b658eed59d..07b9d5091c8 100644
--- a/chromium/base/json/json_writer.cc
+++ b/chromium/base/json/json_writer.cc
@@ -57,12 +57,12 @@ JSONWriter::JSONWriter(int options, std::string* json)
bool JSONWriter::BuildJSONString(const Value& node, size_t depth) {
switch (node.GetType()) {
- case Value::TYPE_NULL: {
+ case Value::Type::NONE: {
json_string_->append("null");
return true;
}
- case Value::TYPE_BOOLEAN: {
+ case Value::Type::BOOLEAN: {
bool value;
bool result = node.GetAsBoolean(&value);
DCHECK(result);
@@ -70,7 +70,7 @@ bool JSONWriter::BuildJSONString(const Value& node, size_t depth) {
return result;
}
- case Value::TYPE_INTEGER: {
+ case Value::Type::INTEGER: {
int value;
bool result = node.GetAsInteger(&value);
DCHECK(result);
@@ -78,7 +78,7 @@ bool JSONWriter::BuildJSONString(const Value& node, size_t depth) {
return result;
}
- case Value::TYPE_DOUBLE: {
+ case Value::Type::DOUBLE: {
double value;
bool result = node.GetAsDouble(&value);
DCHECK(result);
@@ -110,7 +110,7 @@ bool JSONWriter::BuildJSONString(const Value& node, size_t depth) {
return result;
}
- case Value::TYPE_STRING: {
+ case Value::Type::STRING: {
std::string value;
bool result = node.GetAsString(&value);
DCHECK(result);
@@ -118,7 +118,7 @@ bool JSONWriter::BuildJSONString(const Value& node, size_t depth) {
return result;
}
- case Value::TYPE_LIST: {
+ case Value::Type::LIST: {
json_string_->push_back('[');
if (pretty_print_)
json_string_->push_back(' ');
@@ -128,7 +128,7 @@ bool JSONWriter::BuildJSONString(const Value& node, size_t depth) {
bool result = node.GetAsList(&list);
DCHECK(result);
for (const auto& value : *list) {
- if (omit_binary_values_ && value->GetType() == Value::TYPE_BINARY)
+ if (omit_binary_values_ && value->GetType() == Value::Type::BINARY)
continue;
if (first_value_has_been_output) {
@@ -149,7 +149,7 @@ bool JSONWriter::BuildJSONString(const Value& node, size_t depth) {
return result;
}
- case Value::TYPE_DICTIONARY: {
+ case Value::Type::DICTIONARY: {
json_string_->push_back('{');
if (pretty_print_)
json_string_->append(kPrettyPrintLineEnding);
@@ -161,7 +161,7 @@ bool JSONWriter::BuildJSONString(const Value& node, size_t depth) {
for (DictionaryValue::Iterator itr(*dict); !itr.IsAtEnd();
itr.Advance()) {
if (omit_binary_values_ &&
- itr.value().GetType() == Value::TYPE_BINARY) {
+ itr.value().GetType() == Value::Type::BINARY) {
continue;
}
@@ -194,7 +194,7 @@ bool JSONWriter::BuildJSONString(const Value& node, size_t depth) {
return result;
}
- case Value::TYPE_BINARY:
+ case Value::Type::BINARY:
// Successful only if we're allowed to omit it.
DLOG_IF(ERROR, !omit_binary_values_) << "Cannot serialize binary value.";
return omit_binary_values_;
diff --git a/chromium/base/logging.cc b/chromium/base/logging.cc
index e0ca19933e7..cea6edae4a3 100644
--- a/chromium/base/logging.cc
+++ b/chromium/base/logging.cc
@@ -7,12 +7,12 @@
#include <limits.h>
#include <stdint.h>
+#include "base/debug/activity_tracker.h"
#include "base/macros.h"
#include "build/build_config.h"
#if defined(OS_WIN)
#include <io.h>
-#include <windows.h>
typedef HANDLE FileHandle;
typedef HANDLE MutexHandle;
// Windows warns on using write(). It prefers _write().
@@ -342,6 +342,11 @@ void CloseLogFileUnlocked() {
} // namespace
+// This is never instantiated, it's just used for EAT_STREAM_PARAMETERS to have
+// an object of the correct type on the LHS of the unused part of the ternary
+// operator.
+std::ostream* g_swallow_stream;
+
LoggingSettings::LoggingSettings()
: logging_dest(LOG_DEFAULT),
log_file(nullptr),
@@ -722,6 +727,12 @@ LogMessage::~LogMessage() {
}
if (severity_ == LOG_FATAL) {
+ // Write the log message to the global activity tracker, if running.
+ base::debug::GlobalActivityTracker* tracker =
+ base::debug::GlobalActivityTracker::Get();
+ if (tracker)
+ tracker->RecordLogMessage(str_newline);
+
// Ensure the first characters of the string are on the stack so they
// are contained in minidumps for diagnostic purposes.
char str_stack[1024];
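The comment introducing g_swallow_stream above explains why a bare ostream pointer beats a LogMessage temporary on the dead arm of the ternary. Below is a self-contained model of that swallow-stream trick in standard C++; the names and the simplified macro are assumptions of this sketch, not Chromium's actual definitions.

// The ':' arm names a stream but is never evaluated, so everything streamed
// into the macro, including side effects, compiles away.
#include <ostream>

static std::ostream* g_sketch_swallow_stream = nullptr;  // never dereferenced

struct Voidify {
  // operator& binds more loosely than <<, so it swallows the whole chain.
  void operator&(std::ostream&) {}
};

#define EAT_STREAM_PARAMETERS_SKETCH \
  true ? (void)0 : Voidify() & (*g_sketch_swallow_stream)

int main() {
  int evaluations = 0;
  EAT_STREAM_PARAMETERS_SKETCH << "ignored " << ++evaluations;
  return evaluations;  // still 0: the streamed expression was never evaluated
}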
diff --git a/chromium/base/logging.h b/chromium/base/logging.h
index 714545c02bb..5a43957b7b6 100644
--- a/chromium/base/logging.h
+++ b/chromium/base/logging.h
@@ -15,6 +15,7 @@
#include <utility>
#include "base/base_export.h"
+#include "base/compiler_specific.h"
#include "base/debug/debugger.h"
#include "base/macros.h"
#include "base/template_util.h"
@@ -426,9 +427,23 @@ const LogSeverity LOG_0 = LOG_ERROR;
#define PLOG_IF(severity, condition) \
LAZY_STREAM(PLOG_STREAM(severity), LOG_IS_ON(severity) && (condition))
-// The actual stream used isn't important.
-#define EAT_STREAM_PARAMETERS \
- true ? (void) 0 : ::logging::LogMessageVoidify() & LOG_STREAM(FATAL)
+BASE_EXPORT extern std::ostream* g_swallow_stream;
+
+// Note that g_swallow_stream is used instead of an arbitrary LOG() stream to
+// avoid the creation of an object with a non-trivial destructor (LogMessage).
+// On MSVC x86 (checked on 2015 Update 3), this causes a few additional
+// pointless instructions to be emitted even at full optimization level, even
+// though the : arm of the ternary operator is clearly never executed. Using a
+// simpler object to be &'d with Voidify() avoids these extra instructions.
+// Using a simpler POD object with a templated operator<< also works to avoid
+// these instructions. However, this causes warnings on statically defined
+// implementations of operator<<(std::ostream, ...) in some .cc files, because
+// they become defined-but-unreferenced functions. A reinterpret_cast of 0 to an
+// ostream* also is not suitable, because some compilers warn of undefined
+// behavior.
+#define EAT_STREAM_PARAMETERS \
+ true ? (void)0 \
+ : ::logging::LogMessageVoidify() & (*::logging::g_swallow_stream)
// Captures the result of a CHECK_EQ (for example) and facilitates testing as a
// boolean.
@@ -445,6 +460,15 @@ class CheckOpResult {
std::string* message_;
};
+// Crashes in the fastest, simplest possible way with no attempt at logging.
+#if defined(COMPILER_GCC)
+#define IMMEDIATE_CRASH() __builtin_trap()
+#elif defined(COMPILER_MSVC)
+#define IMMEDIATE_CRASH() __debugbreak()
+#else
+#error Port
+#endif
+
// CHECK dies with a fatal error if condition is not true. It is *not*
// controlled by NDEBUG, so the check will be executed regardless of
// compilation mode.
@@ -454,20 +478,14 @@ class CheckOpResult {
#if defined(OFFICIAL_BUILD) && defined(NDEBUG)
-// Make all CHECK functions discard their log strings to reduce code
-// bloat, and improve performance, for official release builds.
-
-#if defined(COMPILER_GCC) || __clang__
-#define LOGGING_CRASH() __builtin_trap()
-#else
-#define LOGGING_CRASH() ((void)(*(volatile char*)0 = 0))
-#endif
-
+// Make all CHECK functions discard their log strings to reduce code bloat, and
+// improve performance, for official release builds.
+//
// This is not calling BreakDebugger since this is called frequently, and
// calling an out-of-line function instead of a noreturn inline macro prevents
// compiler optimizations.
-#define CHECK(condition) \
- !(condition) ? LOGGING_CRASH() : EAT_STREAM_PARAMETERS
+#define CHECK(condition) \
+ UNLIKELY(!(condition)) ? IMMEDIATE_CRASH() : EAT_STREAM_PARAMETERS
#define PCHECK(condition) CHECK(condition)
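A standalone sketch of the official-build CHECK shape introduced above: a branch-prediction hint plus a compiler trap intrinsic, so each failing CHECK becomes a distinct, tiny trap site with no logging code or string data. Names (MY_CHECK, MY_IMMEDIATE_CRASH, MY_UNLIKELY) are hypothetical, and the real macro uses a ternary so it can also swallow << operands; this sketch only shows the crash path.

#include <cstdio>

#if defined(_MSC_VER)
#include <intrin.h>
#define MY_IMMEDIATE_CRASH() __debugbreak()
#define MY_UNLIKELY(x) (x)
#else
#define MY_IMMEDIATE_CRASH() __builtin_trap()
#define MY_UNLIKELY(x) __builtin_expect(!!(x), 0)
#endif

// Failing path: a single trap instruction (int3/ud2). Passing path: falls
// through with no out-of-line call.
#define MY_CHECK(condition)        \
  do {                             \
    if (MY_UNLIKELY(!(condition))) \
      MY_IMMEDIATE_CRASH();        \
  } while (0)

int main(int argc, char**) {
  MY_CHECK(argc >= 1);           // Passes: execution continues.
  std::puts("still alive");
  MY_CHECK(argc == 12345678);    // Almost certainly fails: traps right here.
  return 0;
}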
@@ -527,12 +545,26 @@ class CheckOpResult {
// it uses the definition for operator<<, with a few special cases below.
template <typename T>
inline typename std::enable_if<
- base::internal::SupportsOstreamOperator<const T&>::value,
+ base::internal::SupportsOstreamOperator<const T&>::value &&
+ !std::is_function<typename std::remove_pointer<T>::type>::value,
void>::type
MakeCheckOpValueString(std::ostream* os, const T& v) {
(*os) << v;
}
+// Provide an overload for functions and function pointers. Function pointers
+// don't implicitly convert to void* but do implicitly convert to bool, so
+// without this function pointers are always printed as 1 or 0. (MSVC isn't
+// standards-conforming here and converts function pointers to regular
+// pointers, so this is a no-op for MSVC.)
+template <typename T>
+inline typename std::enable_if<
+ std::is_function<typename std::remove_pointer<T>::type>::value,
+ void>::type
+MakeCheckOpValueString(std::ostream* os, const T& v) {
+ (*os) << reinterpret_cast<const void*>(v);
+}
+
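A tiny standalone demonstration of the problem the overload above fixes: streaming a function pointer normally selects operator<<(bool), so every function pointer prints as "1", whereas casting to const void* prints the actual address. (MyFunction is a hypothetical example; as the comment notes, MSVC already prints the address for the plain form.)

#include <iostream>

void MyFunction() {}

int main() {
  void (*fp)() = &MyFunction;
  std::cout << fp << "\n";                                 // Prints "1".
  std::cout << reinterpret_cast<const void*>(fp) << "\n";  // Prints an address.
  return 0;
}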
// We need overloads for enums that don't support operator<<.
// (i.e. scoped enums where no operator<< overload was declared).
template <typename T>
@@ -682,6 +714,10 @@ const LogSeverity LOG_DCHECK = LOG_INFO;
// whether DCHECKs are enabled; this is so that we don't get unused
// variable warnings if the only use of a variable is in a DCHECK.
// This behavior is different from DLOG_IF et al.
+//
+// Note that the definition of the DCHECK macros depends on whether or not
+// DCHECK_IS_ON() is true. When DCHECK_IS_ON() is false, the macros use
+// EAT_STREAM_PARAMETERS to avoid expressions that would create temporaries.
#if defined(_PREFAST_) && defined(OS_WIN)
// See comments on the previous use of __analysis_assume.
@@ -698,13 +734,21 @@ const LogSeverity LOG_DCHECK = LOG_INFO;
#else // _PREFAST_
-#define DCHECK(condition) \
- LAZY_STREAM(LOG_STREAM(DCHECK), DCHECK_IS_ON() ? !(condition) : false) \
- << "Check failed: " #condition ". "
+#if DCHECK_IS_ON()
-#define DPCHECK(condition) \
- LAZY_STREAM(PLOG_STREAM(DCHECK), DCHECK_IS_ON() ? !(condition) : false) \
+#define DCHECK(condition) \
+ LAZY_STREAM(LOG_STREAM(DCHECK), !(condition)) \
<< "Check failed: " #condition ". "
+#define DPCHECK(condition) \
+ LAZY_STREAM(PLOG_STREAM(DCHECK), !(condition)) \
+ << "Check failed: " #condition ". "
+
+#else // DCHECK_IS_ON()
+
+#define DCHECK(condition) EAT_STREAM_PARAMETERS << !(condition)
+#define DPCHECK(condition) EAT_STREAM_PARAMETERS << !(condition)
+
+#endif // DCHECK_IS_ON()
#endif // _PREFAST_
@@ -714,6 +758,8 @@ const LogSeverity LOG_DCHECK = LOG_INFO;
// macro is used in an 'if' clause such as:
// if (a == 1)
// DCHECK_EQ(2, a);
+#if DCHECK_IS_ON()
+
#define DCHECK_OP(name, op, val1, val2) \
switch (0) case 0: default: \
if (::logging::CheckOpResult true_if_passed = \
@@ -725,6 +771,25 @@ const LogSeverity LOG_DCHECK = LOG_INFO;
::logging::LogMessage(__FILE__, __LINE__, ::logging::LOG_DCHECK, \
true_if_passed.message()).stream()
+#else // DCHECK_IS_ON()
+
+// When DCHECKs aren't enabled, DCHECK_OP still needs to reference operator<<
+// overloads for |val1| and |val2| to avoid potential compiler warnings about
+// unused functions. For the same reason, it also compares |val1| and |val2|
+// using |op|.
+//
+// Note that the contract of DCHECK_EQ, etc is that arguments are only evaluated
+// once. Even though |val1| and |val2| appear twice in this version of the macro
+// expansion, this is OK, since the expression is never actually evaluated.
+#define DCHECK_OP(name, op, val1, val2) \
+ EAT_STREAM_PARAMETERS << (::logging::MakeCheckOpValueString( \
+ ::logging::g_swallow_stream, val1), \
+ ::logging::MakeCheckOpValueString( \
+ ::logging::g_swallow_stream, val2), \
+ (val1)op(val2))
+
+#endif // DCHECK_IS_ON()
+
// Equality/Inequality checks - compare two values, and log a
// LOG_DCHECK message including the two values when the result is not
// as expected. The values must have operator<<(ostream, ...)
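A standalone sketch isolating the "reference but never evaluate" trick used by the DCHECK-off DCHECK_OP above: the comma expression mentions the stream helper, both values, and the comparison (so their operator<< overloads stay referenced and no unused warnings fire), yet nothing runs at runtime because the swallow macro's condition is the constant true. All names here are hypothetical.

#include <iostream>

namespace sketch {
class Voidify {
 public:
  void operator&(std::ostream&) {}
};
std::ostream* g_null_stream = nullptr;

template <typename T>
void StreamValue(std::ostream* os, const T& v) {
  (*os) << v;  // Keeps operator<< for T referenced.
}
}  // namespace sketch

#define EAT_STREAM \
  true ? (void)0 : sketch::Voidify() & (*sketch::g_null_stream)

#define MY_DCHECK_EQ_OFF(val1, val2)                               \
  EAT_STREAM << (sketch::StreamValue(sketch::g_null_stream, val1), \
                 sketch::StreamValue(sketch::g_null_stream, val2), \
                 (val1) == (val2))

int NextId() {
  static int id = 0;
  std::cout << "side effect\n";
  return ++id;
}

int main() {
  // Compiles (and instantiates StreamValue<int>), but NextId() is never
  // called and nothing is printed: the whole argument sits in the
  // never-taken arm of the ternary, so even the doubled mention of the
  // arguments costs nothing at runtime.
  MY_DCHECK_EQ_OFF(NextId(), 1);
  return 0;
}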
diff --git a/chromium/base/logging_unittest.cc b/chromium/base/logging_unittest.cc
index 8a20c54fb4c..9fe718c8799 100644
--- a/chromium/base/logging_unittest.cc
+++ b/chromium/base/logging_unittest.cc
@@ -9,6 +9,11 @@
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
+#if defined(OS_WIN)
+#include <excpt.h>
+#include <windows.h>
+#endif // OS_WIN
+
namespace logging {
namespace {
@@ -190,6 +195,59 @@ TEST_F(LoggingTest, CheckStreamsAreLazy) {
#endif
+#if defined(OFFICIAL_BUILD) && defined(OS_WIN)
+NOINLINE void CheckContainingFunc(int death_location) {
+ CHECK(death_location != 1);
+ CHECK(death_location != 2);
+ CHECK(death_location != 3);
+}
+
+int GetCheckExceptionData(EXCEPTION_POINTERS* p, DWORD* code, void** addr) {
+ *code = p->ExceptionRecord->ExceptionCode;
+ *addr = p->ExceptionRecord->ExceptionAddress;
+ return EXCEPTION_EXECUTE_HANDLER;
+}
+
+TEST_F(LoggingTest, CheckCausesDistinctBreakpoints) {
+ DWORD code1 = 0;
+ DWORD code2 = 0;
+ DWORD code3 = 0;
+ void* addr1 = nullptr;
+ void* addr2 = nullptr;
+ void* addr3 = nullptr;
+
+ // Record the exception code and addresses.
+ __try {
+ CheckContainingFunc(1);
+ } __except (
+ GetCheckExceptionData(GetExceptionInformation(), &code1, &addr1)) {
+ }
+
+ __try {
+ CheckContainingFunc(2);
+ } __except (
+ GetCheckExceptionData(GetExceptionInformation(), &code2, &addr2)) {
+ }
+
+ __try {
+ CheckContainingFunc(3);
+ } __except (
+ GetCheckExceptionData(GetExceptionInformation(), &code3, &addr3)) {
+ }
+
+ // Ensure that the exception codes are correct (in particular, breakpoints,
+ // not access violations).
+ EXPECT_EQ(STATUS_BREAKPOINT, code1);
+ EXPECT_EQ(STATUS_BREAKPOINT, code2);
+ EXPECT_EQ(STATUS_BREAKPOINT, code3);
+
+ // Ensure that none of the CHECKs are colocated.
+ EXPECT_NE(addr1, addr2);
+ EXPECT_NE(addr1, addr3);
+ EXPECT_NE(addr2, addr3);
+}
+#endif // OFFICIAL_BUILD && OS_WIN
+
TEST_F(LoggingTest, DebugLoggingReleaseBehavior) {
#if !defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)
int debug_only_variable = 1;
@@ -217,6 +275,14 @@ TEST_F(LoggingTest, DcheckStreamsAreLazy) {
#endif
}
+void DcheckEmptyFunction1() {
+ // Provide a body so that Release builds do not cause the compiler to
+ // optimize DcheckEmptyFunction1 and DcheckEmptyFunction2 as a single
+ // function, which breaks the Dcheck tests below.
+ LOG(INFO) << "DcheckEmptyFunction1";
+}
+void DcheckEmptyFunction2() {}
+
TEST_F(LoggingTest, Dcheck) {
#if defined(NDEBUG) && !defined(DCHECK_ALWAYS_ON)
// Release build.
@@ -258,6 +324,31 @@ TEST_F(LoggingTest, Dcheck) {
EXPECT_EQ(0, log_sink_call_count);
DCHECK_EQ(Animal::DOG, Animal::CAT);
EXPECT_EQ(DCHECK_IS_ON() ? 1 : 0, log_sink_call_count);
+
+ // Test DCHECK on functions and function pointers.
+ log_sink_call_count = 0;
+ struct MemberFunctions {
+ void MemberFunction1() {
+ // See the comment in DcheckEmptyFunction1().
+ LOG(INFO) << "Do not merge with MemberFunction2.";
+ }
+ void MemberFunction2() {}
+ };
+ void (MemberFunctions::*mp1)() = &MemberFunctions::MemberFunction1;
+ void (MemberFunctions::*mp2)() = &MemberFunctions::MemberFunction2;
+ void (*fp1)() = DcheckEmptyFunction1;
+ void (*fp2)() = DcheckEmptyFunction2;
+ void (*fp3)() = DcheckEmptyFunction1;
+ DCHECK_EQ(fp1, fp3);
+ EXPECT_EQ(0, log_sink_call_count);
+ DCHECK_EQ(mp1, &MemberFunctions::MemberFunction1);
+ EXPECT_EQ(0, log_sink_call_count);
+ DCHECK_EQ(mp2, &MemberFunctions::MemberFunction2);
+ EXPECT_EQ(0, log_sink_call_count);
+ DCHECK_EQ(fp1, fp2);
+ EXPECT_EQ(DCHECK_IS_ON() ? 1 : 0, log_sink_call_count);
+ DCHECK_EQ(mp2, &MemberFunctions::MemberFunction1);
+ EXPECT_EQ(DCHECK_IS_ON() ? 2 : 0, log_sink_call_count);
}
TEST_F(LoggingTest, DcheckReleaseBehavior) {
diff --git a/chromium/base/mac/scoped_authorizationref.h b/chromium/base/mac/scoped_authorizationref.h
index 03cde86140e..b83f8dfb357 100644
--- a/chromium/base/mac/scoped_authorizationref.h
+++ b/chromium/base/mac/scoped_authorizationref.h
@@ -11,7 +11,7 @@
#include "base/macros.h"
// ScopedAuthorizationRef maintains ownership of an AuthorizationRef. It is
-// patterned after the scoped_ptr interface.
+// patterned after the unique_ptr interface.
namespace base {
namespace mac {
diff --git a/chromium/base/memory/discardable_memory_allocator.cc b/chromium/base/memory/discardable_memory_allocator.cc
index 002a3ba5e47..ee288ff4b20 100644
--- a/chromium/base/memory/discardable_memory_allocator.cc
+++ b/chromium/base/memory/discardable_memory_allocator.cc
@@ -16,12 +16,7 @@ DiscardableMemoryAllocator* g_allocator = nullptr;
// static
void DiscardableMemoryAllocator::SetInstance(
DiscardableMemoryAllocator* allocator) {
- DCHECK(allocator);
-
- // Make sure this function is only called once before the first call
- // to GetInstance().
- DCHECK(!g_allocator);
-
+ DCHECK(!allocator || !g_allocator);
g_allocator = allocator;
}
diff --git a/chromium/base/memory/linked_ptr.h b/chromium/base/memory/linked_ptr.h
index 649dc10db7b..68512864b25 100644
--- a/chromium/base/memory/linked_ptr.h
+++ b/chromium/base/memory/linked_ptr.h
@@ -69,7 +69,7 @@ class linked_ptr_internal {
mutable linked_ptr_internal const* next_;
};
-// TODO(http://crbug.com/556939): DEPRECATED: Use scoped_ptr instead (now that
+// TODO(http://crbug.com/556939): DEPRECATED: Use unique_ptr instead (now that
// we have support for moveable types inside STL containers).
template <typename T>
class linked_ptr {
diff --git a/chromium/base/memory/memory_pressure_monitor.h b/chromium/base/memory/memory_pressure_monitor.h
index 033379cdb8d..e48244b433c 100644
--- a/chromium/base/memory/memory_pressure_monitor.h
+++ b/chromium/base/memory/memory_pressure_monitor.h
@@ -35,7 +35,7 @@ class BASE_EXPORT MemoryPressureMonitor {
static void RecordMemoryPressure(MemoryPressureLevel level, int ticks);
// Returns the currently observed memory pressure.
- virtual MemoryPressureLevel GetCurrentPressureLevel() const = 0;
+ virtual MemoryPressureLevel GetCurrentPressureLevel() = 0;
// Sets a notification callback. The default callback invokes
// base::MemoryPressureListener::NotifyMemoryPressure.
diff --git a/chromium/base/memory/memory_pressure_monitor_chromeos.cc b/chromium/base/memory/memory_pressure_monitor_chromeos.cc
index b90075d903e..05fcc9e1511 100644
--- a/chromium/base/memory/memory_pressure_monitor_chromeos.cc
+++ b/chromium/base/memory/memory_pressure_monitor_chromeos.cc
@@ -134,7 +134,7 @@ void MemoryPressureMonitor::ScheduleEarlyCheck() {
}
MemoryPressureListener::MemoryPressureLevel
-MemoryPressureMonitor::GetCurrentPressureLevel() const {
+MemoryPressureMonitor::GetCurrentPressureLevel() {
return current_memory_pressure_level_;
}
diff --git a/chromium/base/memory/memory_pressure_monitor_chromeos.h b/chromium/base/memory/memory_pressure_monitor_chromeos.h
index 88dccc9df8e..563ba85081f 100644
--- a/chromium/base/memory/memory_pressure_monitor_chromeos.h
+++ b/chromium/base/memory/memory_pressure_monitor_chromeos.h
@@ -58,7 +58,7 @@ class BASE_EXPORT MemoryPressureMonitor : public base::MemoryPressureMonitor {
void ScheduleEarlyCheck();
// Get the current memory pressure level.
- MemoryPressureListener::MemoryPressureLevel GetCurrentPressureLevel() const
+ MemoryPressureListener::MemoryPressureLevel GetCurrentPressureLevel()
override;
void SetDispatchCallback(const DispatchCallback& callback) override;
diff --git a/chromium/base/memory/memory_pressure_monitor_mac.cc b/chromium/base/memory/memory_pressure_monitor_mac.cc
index 5ea381fd3bd..391589021bd 100644
--- a/chromium/base/memory/memory_pressure_monitor_mac.cc
+++ b/chromium/base/memory/memory_pressure_monitor_mac.cc
@@ -18,6 +18,10 @@
DISPATCH_EXPORT const struct dispatch_source_type_s
_dispatch_source_type_memorypressure;
+namespace {
+static const int kUMATickSize = 5;
+} // namespace
+
namespace base {
namespace mac {
@@ -44,58 +48,77 @@ MemoryPressureMonitor::MemoryPressureMonitor()
dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0))),
dispatch_callback_(
base::Bind(&MemoryPressureListener::NotifyMemoryPressure)),
- last_pressure_change_(CFAbsoluteTimeGetCurrent()),
+ last_statistic_report_(CFAbsoluteTimeGetCurrent()),
+ last_pressure_level_(MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE),
reporting_error_(0) {
- last_pressure_level_ = GetCurrentPressureLevel();
- dispatch_source_set_event_handler(memory_level_event_source_, ^{
- OnMemoryPressureChanged(memory_level_event_source_.get(),
- dispatch_callback_);
- });
- dispatch_resume(memory_level_event_source_);
+ if (memory_level_event_source_.get() != nullptr) {
+ dispatch_source_set_event_handler(memory_level_event_source_, ^{
+ OnMemoryPressureChanged(memory_level_event_source_.get(),
+ dispatch_callback_);
+ });
+ dispatch_resume(memory_level_event_source_);
+ }
}
MemoryPressureMonitor::~MemoryPressureMonitor() {
- dispatch_source_cancel(memory_level_event_source_);
+ if (memory_level_event_source_.get() != nullptr)
+ dispatch_source_cancel(memory_level_event_source_);
}
MemoryPressureListener::MemoryPressureLevel
-MemoryPressureMonitor::GetCurrentPressureLevel() const {
+MemoryPressureMonitor::GetCurrentPressureLevel() {
int mac_memory_pressure;
size_t length = sizeof(int);
sysctlbyname("kern.memorystatus_vm_pressure_level", &mac_memory_pressure,
&length, nullptr, 0);
- return MemoryPressureLevelForMacMemoryPressure(mac_memory_pressure);
+ MemoryPressureListener::MemoryPressureLevel memory_pressure_level =
+ MemoryPressureLevelForMacMemoryPressure(mac_memory_pressure);
+ bool pressure_level_changed = false;
+ if (last_pressure_level_ != memory_pressure_level) {
+ pressure_level_changed = true;
+ }
+ SendStatisticsIfNecessary(pressure_level_changed);
+ last_pressure_level_ = memory_pressure_level;
+ return memory_pressure_level;
}
+
void MemoryPressureMonitor::OnMemoryPressureChanged(
dispatch_source_s* event_source,
const MemoryPressureMonitor::DispatchCallback& dispatch_callback) {
int mac_memory_pressure = dispatch_source_get_data(event_source);
MemoryPressureListener::MemoryPressureLevel memory_pressure_level =
MemoryPressureLevelForMacMemoryPressure(mac_memory_pressure);
- CFTimeInterval now = CFAbsoluteTimeGetCurrent();
- CFTimeInterval since_last_change = now - last_pressure_change_;
- last_pressure_change_ = now;
-
- double ticks_to_report;
- reporting_error_ =
- modf(since_last_change + reporting_error_, &ticks_to_report);
-
- // Sierra fails to call the handler when pressure returns to normal,
- // which would skew our data. For example, if pressure went to 'warn'
- // at T0, back to 'normal' at T1, then to 'critical' at T10, we would
- // report 10 ticks of 'warn' instead of 1 tick of 'warn' and 9 ticks
- // of 'normal'.
- // This is rdar://29114314
- if (mac::IsAtMostOS10_11())
- RecordMemoryPressure(last_pressure_level_,
- static_cast<int>(ticks_to_report));
-
+ bool pressure_level_changed = false;
+ if (last_pressure_level_ != memory_pressure_level) {
+ pressure_level_changed = true;
+ }
+ SendStatisticsIfNecessary(pressure_level_changed);
last_pressure_level_ = memory_pressure_level;
if (memory_pressure_level !=
MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE)
dispatch_callback.Run(memory_pressure_level);
}
+void MemoryPressureMonitor::SendStatisticsIfNecessary(
+ bool pressure_level_changed) {
+ CFTimeInterval now = CFAbsoluteTimeGetCurrent();
+ CFTimeInterval since_last_report = now - last_statistic_report_;
+ last_statistic_report_ = now;
+
+ double accumulated_time = since_last_report + reporting_error_;
+ int ticks_to_report = static_cast<int>(accumulated_time / kUMATickSize);
+ reporting_error_ = std::fmod(accumulated_time, kUMATickSize);
+
+  // Round up on a pressure-level change to ensure we capture it.
+ if (pressure_level_changed && ticks_to_report < 1) {
+ ticks_to_report = 1;
+ reporting_error_ = 0;
+ }
+
+ if (ticks_to_report >= 1)
+ RecordMemoryPressure(last_pressure_level_, ticks_to_report);
+}
+
void MemoryPressureMonitor::SetDispatchCallback(
const DispatchCallback& callback) {
dispatch_callback_ = callback;
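A standalone sketch of the tick accounting that SendStatisticsIfNecessary above performs: elapsed time is converted into whole 5-second UMA ticks, the fractional remainder is carried into the next call, and a pressure-level change always forces at least one tick. (Hypothetical free function; the real code keeps this state in member variables.)

#include <cmath>
#include <cstdio>

constexpr double kTickSeconds = 5.0;

// Returns the number of ticks to report and updates |carry_seconds|.
int TicksToReport(double seconds_since_last_report,
                  bool pressure_level_changed,
                  double* carry_seconds) {
  double accumulated = seconds_since_last_report + *carry_seconds;
  int ticks = static_cast<int>(accumulated / kTickSeconds);
  *carry_seconds = std::fmod(accumulated, kTickSeconds);
  if (pressure_level_changed && ticks < 1) {
    ticks = 1;           // Round up so the change is captured...
    *carry_seconds = 0;  // ...without double-counting the partial tick later.
  }
  return ticks;
}

int main() {
  double carry = 0;
  std::printf("%d\n", TicksToReport(7.0, false, &carry));  // 1 tick, carry 2s.
  std::printf("%d\n", TicksToReport(2.0, false, &carry));  // 0 ticks, carry 4s.
  std::printf("%d\n", TicksToReport(0.5, true, &carry));   // Forced 1 tick.
  return 0;
}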
diff --git a/chromium/base/memory/memory_pressure_monitor_mac.h b/chromium/base/memory/memory_pressure_monitor_mac.h
index 9f89b1661a6..9118632bd66 100644
--- a/chromium/base/memory/memory_pressure_monitor_mac.h
+++ b/chromium/base/memory/memory_pressure_monitor_mac.h
@@ -27,7 +27,7 @@ class BASE_EXPORT MemoryPressureMonitor : public base::MemoryPressureMonitor {
~MemoryPressureMonitor() override;
// Returns the currently-observed memory pressure.
- MemoryPressureLevel GetCurrentPressureLevel() const override;
+ MemoryPressureLevel GetCurrentPressureLevel() override;
void SetDispatchCallback(const DispatchCallback& callback) override;
@@ -38,12 +38,13 @@ class BASE_EXPORT MemoryPressureMonitor : public base::MemoryPressureMonitor {
MemoryPressureLevelForMacMemoryPressure(int mac_memory_pressure);
void OnMemoryPressureChanged(dispatch_source_s* event_source,
const DispatchCallback& dispatch_callback);
+ void SendStatisticsIfNecessary(bool pressure_level_changed);
ScopedDispatchObject<dispatch_source_t> memory_level_event_source_;
DispatchCallback dispatch_callback_;
- CFTimeInterval last_pressure_change_;
+ CFTimeInterval last_statistic_report_;
MemoryPressureLevel last_pressure_level_;
diff --git a/chromium/base/memory/memory_pressure_monitor_win.cc b/chromium/base/memory/memory_pressure_monitor_win.cc
index cad1fcb8d05..3effe2cc2cd 100644
--- a/chromium/base/memory/memory_pressure_monitor_win.cc
+++ b/chromium/base/memory/memory_pressure_monitor_win.cc
@@ -90,7 +90,7 @@ void MemoryPressureMonitor::CheckMemoryPressureSoon() {
}
MemoryPressureListener::MemoryPressureLevel
-MemoryPressureMonitor::GetCurrentPressureLevel() const {
+MemoryPressureMonitor::GetCurrentPressureLevel() {
return current_memory_pressure_level_;
}
diff --git a/chromium/base/memory/memory_pressure_monitor_win.h b/chromium/base/memory/memory_pressure_monitor_win.h
index b52a2d27bfa..a65c191a494 100644
--- a/chromium/base/memory/memory_pressure_monitor_win.h
+++ b/chromium/base/memory/memory_pressure_monitor_win.h
@@ -62,7 +62,7 @@ class BASE_EXPORT MemoryPressureMonitor : public base::MemoryPressureMonitor {
void CheckMemoryPressureSoon();
// Get the current memory pressure level. This can be called from any thread.
- MemoryPressureLevel GetCurrentPressureLevel() const override;
+ MemoryPressureLevel GetCurrentPressureLevel() override;
void SetDispatchCallback(const DispatchCallback& callback) override;
// Returns the moderate pressure level free memory threshold, in MB.
diff --git a/chromium/base/memory/ref_counted.h b/chromium/base/memory/ref_counted.h
index 960c8a28453..784a1788a81 100644
--- a/chromium/base/memory/ref_counted.h
+++ b/chromium/base/memory/ref_counted.h
@@ -224,6 +224,9 @@ class RefCountedData
//
// class MyFoo : public RefCounted<MyFoo> {
// ...
+// private:
+// friend class RefCounted<MyFoo>; // Allow destruction by RefCounted<>.
+// ~MyFoo(); // Destructor must be private/protected.
// };
//
// void some_function() {
diff --git a/chromium/base/memory/ref_counted_delete_on_message_loop.h b/chromium/base/memory/ref_counted_delete_on_message_loop.h
deleted file mode 100644
index eac1add50fe..00000000000
--- a/chromium/base/memory/ref_counted_delete_on_message_loop.h
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MEMORY_REF_COUNTED_DELETE_ON_MESSAGE_LOOP_H_
-#define BASE_MEMORY_REF_COUNTED_DELETE_ON_MESSAGE_LOOP_H_
-
-#include "base/location.h"
-#include "base/logging.h"
-#include "base/macros.h"
-#include "base/memory/ref_counted.h"
-#include "base/single_thread_task_runner.h"
-
-namespace base {
-
-// RefCountedDeleteOnMessageLoop is similar to RefCountedThreadSafe, and ensures
-// that the object will be deleted on a specified message loop.
-//
-// Sample usage:
-// class Foo : public RefCountedDeleteOnMessageLoop<Foo> {
-//
-// Foo(scoped_refptr<SingleThreadTaskRunner> loop)
-// : RefCountedDeleteOnMessageLoop<Foo>(std::move(loop)) {}
-// ...
-// private:
-// friend class RefCountedDeleteOnMessageLoop<Foo>;
-// friend class DeleteHelper<Foo>;
-//
-// ~Foo();
-// };
-
-// TODO(skyostil): Rename this to RefCountedDeleteOnTaskRunner.
-template <class T>
-class RefCountedDeleteOnMessageLoop : public subtle::RefCountedThreadSafeBase {
- public:
- // A SingleThreadTaskRunner for the current thread can be acquired by calling
- // ThreadTaskRunnerHandle::Get().
- RefCountedDeleteOnMessageLoop(
- scoped_refptr<SingleThreadTaskRunner> task_runner)
- : task_runner_(std::move(task_runner)) {
- DCHECK(task_runner_);
- }
-
- void AddRef() const {
- subtle::RefCountedThreadSafeBase::AddRef();
- }
-
- void Release() const {
- if (subtle::RefCountedThreadSafeBase::Release())
- DestructOnMessageLoop();
- }
-
- protected:
- friend class DeleteHelper<RefCountedDeleteOnMessageLoop>;
- ~RefCountedDeleteOnMessageLoop() {}
-
- void DestructOnMessageLoop() const {
- const T* t = static_cast<const T*>(this);
- if (task_runner_->BelongsToCurrentThread())
- delete t;
- else
- task_runner_->DeleteSoon(FROM_HERE, t);
- }
-
- scoped_refptr<SingleThreadTaskRunner> task_runner_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(RefCountedDeleteOnMessageLoop);
-};
-
-} // namespace base
-
-#endif // BASE_MEMORY_REF_COUNTED_DELETE_ON_MESSAGE_LOOP_H_
diff --git a/chromium/base/memory/ref_counted_delete_on_sequence.h b/chromium/base/memory/ref_counted_delete_on_sequence.h
new file mode 100644
index 00000000000..a7600f9f854
--- /dev/null
+++ b/chromium/base/memory/ref_counted_delete_on_sequence.h
@@ -0,0 +1,70 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_REF_COUNTED_DELETE_ON_SEQUENCE_H_
+#define BASE_MEMORY_REF_COUNTED_DELETE_ON_SEQUENCE_H_
+
+#include <utility>
+
+#include "base/location.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/sequenced_task_runner.h"
+
+namespace base {
+
+// RefCountedDeleteOnSequence is similar to RefCountedThreadSafe, and ensures
+// that the object will be deleted on a specified sequence.
+//
+// Sample usage:
+// class Foo : public RefCountedDeleteOnSequence<Foo> {
+//
+// Foo(scoped_refptr<SequencedTaskRunner> task_runner)
+// : RefCountedDeleteOnSequence<Foo>(std::move(task_runner)) {}
+// ...
+// private:
+// friend class RefCountedDeleteOnSequence<Foo>;
+// friend class DeleteHelper<Foo>;
+//
+// ~Foo();
+// };
+template <class T>
+class RefCountedDeleteOnSequence : public subtle::RefCountedThreadSafeBase {
+ public:
+ // A SequencedTaskRunner for the current sequence can be acquired by calling
+ // SequencedTaskRunnerHandle::Get().
+ RefCountedDeleteOnSequence(scoped_refptr<SequencedTaskRunner> task_runner)
+ : task_runner_(std::move(task_runner)) {
+ DCHECK(task_runner_);
+ }
+
+ void AddRef() const { subtle::RefCountedThreadSafeBase::AddRef(); }
+
+ void Release() const {
+ if (subtle::RefCountedThreadSafeBase::Release())
+ DestructOnSequence();
+ }
+
+ protected:
+ friend class DeleteHelper<RefCountedDeleteOnSequence>;
+ ~RefCountedDeleteOnSequence() = default;
+
+ private:
+ void DestructOnSequence() const {
+ const T* t = static_cast<const T*>(this);
+ if (task_runner_->RunsTasksOnCurrentThread())
+ delete t;
+ else
+ task_runner_->DeleteSoon(FROM_HERE, t);
+ }
+
+ const scoped_refptr<SequencedTaskRunner> task_runner_;
+
+ DISALLOW_COPY_AND_ASSIGN(RefCountedDeleteOnSequence);
+};
+
+} // namespace base
+
+#endif // BASE_MEMORY_REF_COUNTED_DELETE_ON_SEQUENCE_H_
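A short usage sketch for the new header (hypothetical class Logger, assuming the Chromium base APIs referenced by this patch): no matter which thread or sequence drops the last reference, the destructor runs on the sequence captured at construction, because Release() falls back to task_runner_->DeleteSoon() when called elsewhere.

#include <string>

#include "base/memory/ref_counted_delete_on_sequence.h"
#include "base/sequenced_task_runner_helpers.h"
#include "base/threading/sequenced_task_runner_handle.h"

class Logger : public base::RefCountedDeleteOnSequence<Logger> {
 public:
  Logger()
      : base::RefCountedDeleteOnSequence<Logger>(
            base::SequencedTaskRunnerHandle::Get()) {}

  void Log(const std::string& msg) { /* ... */ }

 private:
  friend class base::RefCountedDeleteOnSequence<Logger>;
  friend class base::DeleteHelper<Logger>;
  ~Logger() = default;  // Always runs on the constructing sequence.
};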
diff --git a/chromium/base/memory/scoped_vector_unittest.cc b/chromium/base/memory/scoped_vector_unittest.cc
index ea3dcdc4858..916dab9a15c 100644
--- a/chromium/base/memory/scoped_vector_unittest.cc
+++ b/chromium/base/memory/scoped_vector_unittest.cc
@@ -322,7 +322,7 @@ TEST(ScopedVectorTest, InsertRange) {
EXPECT_EQ(LC_CONSTRUCTED, it->life_cycle_state());
}
-// Assertions for push_back(scoped_ptr).
+// Assertions for push_back(unique_ptr).
TEST(ScopedVectorTest, PushBackScopedPtr) {
int delete_counter = 0;
std::unique_ptr<DeleteCounter> elem(new DeleteCounter(&delete_counter));
diff --git a/chromium/base/memory/shared_memory.h b/chromium/base/memory/shared_memory.h
index f68c861647c..3c68e90a031 100644
--- a/chromium/base/memory/shared_memory.h
+++ b/chromium/base/memory/shared_memory.h
@@ -34,7 +34,10 @@ class FilePath;
// Options for creating a shared memory object.
struct BASE_EXPORT SharedMemoryCreateOptions {
-#if !(defined(OS_MACOSX) && !defined(OS_IOS))
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ // The type of OS primitive that should back the SharedMemory object.
+ SharedMemoryHandle::Type type = SharedMemoryHandle::MACH;
+#else
// DEPRECATED (crbug.com/345734):
// If NULL, the object is anonymous. This pointer is owned by the caller
// and must live through the call to Create().
@@ -46,7 +49,7 @@ struct BASE_EXPORT SharedMemoryCreateOptions {
// shared memory must not exist. This flag is meaningless unless
// name_deprecated is non-NULL.
bool open_existing_deprecated = false;
-#endif // !(defined(OS_MACOSX) && !defined(OS_IOS))
+#endif // defined(OS_MACOSX) && !defined(OS_IOS)
// Size of the shared memory object to be created.
// When opening an existing object, this has no effect.
@@ -101,7 +104,7 @@ class BASE_EXPORT SharedMemory {
// The caller is responsible for destroying the duplicated OS primitive.
static SharedMemoryHandle DuplicateHandle(const SharedMemoryHandle& handle);
-#if defined(OS_POSIX) && !(defined(OS_MACOSX) && !defined(OS_IOS))
+#if defined(OS_POSIX)
// This method requires that the SharedMemoryHandle is backed by a POSIX fd.
static int GetFdFromSharedMemoryHandle(const SharedMemoryHandle& handle);
#endif
@@ -255,13 +258,18 @@ class BASE_EXPORT SharedMemory {
private:
#if defined(OS_POSIX) && !defined(OS_NACL) && !defined(OS_ANDROID) && \
!(defined(OS_MACOSX) && !defined(OS_IOS))
- bool PrepareMapFile(ScopedFILE fp, ScopedFD readonly);
bool FilePathForMemoryName(const std::string& mem_name, FilePath* path);
#endif
+
enum ShareMode {
SHARE_READONLY,
SHARE_CURRENT_MODE,
};
+
+#if defined(OS_MACOSX)
+ bool Share(SharedMemoryHandle* new_handle, ShareMode share_mode);
+#endif
+
bool ShareToProcessCommon(ProcessHandle process,
SharedMemoryHandle* new_handle,
bool close_self,
@@ -276,6 +284,12 @@ class BASE_EXPORT SharedMemory {
#elif defined(OS_MACOSX) && !defined(OS_IOS)
// The OS primitive that backs the shared memory region.
SharedMemoryHandle shm_;
+
+ // The mechanism by which the memory is mapped. Only valid if |memory_| is not
+ // |nullptr|.
+ SharedMemoryHandle::Type mapped_memory_mechanism_;
+
+ int readonly_mapped_file_;
#elif defined(OS_POSIX)
int mapped_file_;
int readonly_mapped_file_;
diff --git a/chromium/base/memory/shared_memory_handle.h b/chromium/base/memory/shared_memory_handle.h
index c3fd7ae34bf..dc33eeafa11 100644
--- a/chromium/base/memory/shared_memory_handle.h
+++ b/chromium/base/memory/shared_memory_handle.h
@@ -15,6 +15,7 @@
#elif defined(OS_MACOSX) && !defined(OS_IOS)
#include <mach/mach.h>
#include "base/base_export.h"
+#include "base/file_descriptor_posix.h"
#include "base/macros.h"
#include "base/process/process_handle.h"
#elif defined(OS_POSIX)
@@ -83,9 +84,25 @@ class BASE_EXPORT SharedMemoryHandle {
#else
class BASE_EXPORT SharedMemoryHandle {
public:
+ enum Type {
+ // The SharedMemoryHandle is backed by a POSIX fd.
+ POSIX,
+ // The SharedMemoryHandle is backed by the Mach primitive "memory object".
+ MACH,
+ };
+
// The default constructor returns an invalid SharedMemoryHandle.
SharedMemoryHandle();
+ // Constructs a SharedMemoryHandle backed by the components of a
+ // FileDescriptor. The newly created instance has the same ownership semantics
+ // as base::FileDescriptor. This typically means that the SharedMemoryHandle
+ // takes ownership of the |fd| if |auto_close| is true. Unfortunately, it's
+ // common for existing code to make shallow copies of SharedMemoryHandle, and
+ // the one that is finally passed into a base::SharedMemory is the one that
+ // "consumes" the fd.
+ explicit SharedMemoryHandle(const base::FileDescriptor& file_descriptor);
+
// Makes a Mach-based SharedMemoryHandle of the given size. On error,
// subsequent calls to IsValid() return false.
explicit SharedMemoryHandle(mach_vm_size_t size);
@@ -120,7 +137,7 @@ class BASE_EXPORT SharedMemoryHandle {
mach_port_t GetMemoryObject() const;
// Returns false on a failure to determine the size. On success, populates the
- // output variable |size|. Returns 0 if the handle is invalid.
+ // output variable |size|.
bool GetSize(size_t* size) const;
// The SharedMemoryHandle must be valid.
@@ -136,24 +153,36 @@ class BASE_EXPORT SharedMemoryHandle {
bool OwnershipPassesToIPC() const;
private:
+ friend class SharedMemory;
+
// Shared code between copy constructor and operator=.
void CopyRelevantData(const SharedMemoryHandle& handle);
- mach_port_t memory_object_ = MACH_PORT_NULL;
+ Type type_;
- // The size of the shared memory region when |type_| is MACH. Only
- // relevant if |memory_object_| is not |MACH_PORT_NULL|.
- mach_vm_size_t size_ = 0;
+ // Each instance of a SharedMemoryHandle is backed either by a POSIX fd or a
+ // mach port. |type_| determines the backing member.
+ union {
+ FileDescriptor file_descriptor_;
- // The pid of the process in which |memory_object_| is usable. Only
- // relevant if |memory_object_| is not |MACH_PORT_NULL|.
- base::ProcessId pid_ = 0;
+ struct {
+ mach_port_t memory_object_;
- // Whether passing this object as a parameter to an IPC message passes
- // ownership of |memory_object_| to the IPC stack. This is meant to mimic
- // the behavior of the |auto_close| parameter of FileDescriptor.
- // Defaults to |false|.
- bool ownership_passes_to_ipc_ = false;
+ // The size of the shared memory region when |type_| is MACH. Only
+ // relevant if |memory_object_| is not |MACH_PORT_NULL|.
+ mach_vm_size_t size_;
+
+ // The pid of the process in which |memory_object_| is usable. Only
+ // relevant if |memory_object_| is not |MACH_PORT_NULL|.
+ base::ProcessId pid_;
+
+ // Whether passing this object as a parameter to an IPC message passes
+ // ownership of |memory_object_| to the IPC stack. This is meant to mimic
+ // the behavior of the |auto_close| parameter of FileDescriptor.
+ // Defaults to |false|.
+ bool ownership_passes_to_ipc_;
+ };
+ };
};
#endif
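The header change above turns the Mac SharedMemoryHandle into a small tagged union. A simplified, standalone sketch of the same pattern (hypothetical Handle type): an enum discriminant selects which union member is live, and every accessor switches on it before touching a member. The real class additionally tracks size, pid, and IPC ownership in the Mach arm.

#include <cstdint>

class Handle {
 public:
  enum Type { POSIX, MACH };

  static Handle FromFd(int fd) {
    Handle h;
    h.type_ = POSIX;
    h.fd_ = fd;
    return h;
  }
  static Handle FromPort(uint32_t port) {
    Handle h;
    h.type_ = MACH;
    h.port_ = port;
    return h;
  }

  bool IsValid() const {
    switch (type_) {
      case POSIX:
        return fd_ >= 0;   // Only meaningful when |type_| == POSIX.
      case MACH:
        return port_ != 0; // Only meaningful when |type_| == MACH.
    }
    return false;
  }

 private:
  Handle() : type_(MACH), port_(0) {}

  Type type_;
  union {
    int fd_;         // Live when |type_| == POSIX.
    uint32_t port_;  // Live when |type_| == MACH.
  };
};

int main() {
  return Handle::FromFd(3).IsValid() ? 0 : 1;
}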
diff --git a/chromium/base/memory/shared_memory_handle_mac.cc b/chromium/base/memory/shared_memory_handle_mac.cc
index ad470bea816..9dfd3c1aeac 100644
--- a/chromium/base/memory/shared_memory_handle_mac.cc
+++ b/chromium/base/memory/shared_memory_handle_mac.cc
@@ -10,13 +10,20 @@
#include <unistd.h>
#include "base/mac/mac_util.h"
+#include "base/mac/mach_logging.h"
#include "base/posix/eintr_wrapper.h"
namespace base {
-SharedMemoryHandle::SharedMemoryHandle() {}
+SharedMemoryHandle::SharedMemoryHandle()
+ : type_(MACH), memory_object_(MACH_PORT_NULL) {}
+
+SharedMemoryHandle::SharedMemoryHandle(
+ const base::FileDescriptor& file_descriptor)
+ : type_(POSIX), file_descriptor_(file_descriptor) {}
SharedMemoryHandle::SharedMemoryHandle(mach_vm_size_t size) {
+ type_ = MACH;
mach_port_t named_right;
kern_return_t kr = mach_make_memory_entry_64(
mach_task_self(),
@@ -39,7 +46,8 @@ SharedMemoryHandle::SharedMemoryHandle(mach_vm_size_t size) {
SharedMemoryHandle::SharedMemoryHandle(mach_port_t memory_object,
mach_vm_size_t size,
base::ProcessId pid)
- : memory_object_(memory_object),
+ : type_(MACH),
+ memory_object_(memory_object),
size_(size),
pid_(pid),
ownership_passes_to_ipc_(false) {}
@@ -53,29 +61,50 @@ SharedMemoryHandle& SharedMemoryHandle::operator=(
if (this == &handle)
return *this;
+ type_ = handle.type_;
CopyRelevantData(handle);
return *this;
}
SharedMemoryHandle SharedMemoryHandle::Duplicate() const {
- if (!IsValid())
- return SharedMemoryHandle(MACH_PORT_NULL, 0, 0);
-
- // Increment the ref count.
- kern_return_t kr = mach_port_mod_refs(mach_task_self(), memory_object_,
- MACH_PORT_RIGHT_SEND, 1);
- DCHECK_EQ(kr, KERN_SUCCESS);
- SharedMemoryHandle handle(*this);
- handle.SetOwnershipPassesToIPC(true);
- return handle;
+ switch (type_) {
+ case POSIX: {
+ if (!IsValid())
+ return SharedMemoryHandle();
+ int duped_fd = HANDLE_EINTR(dup(file_descriptor_.fd));
+ if (duped_fd < 0)
+ return SharedMemoryHandle();
+ return SharedMemoryHandle(FileDescriptor(duped_fd, true));
+ }
+ case MACH: {
+ if (!IsValid())
+ return SharedMemoryHandle(MACH_PORT_NULL, 0, 0);
+
+ // Increment the ref count.
+ kern_return_t kr = mach_port_mod_refs(mach_task_self(), memory_object_,
+ MACH_PORT_RIGHT_SEND, 1);
+ DCHECK_EQ(kr, KERN_SUCCESS);
+ SharedMemoryHandle handle(*this);
+ handle.SetOwnershipPassesToIPC(true);
+ return handle;
+ }
+ }
}
bool SharedMemoryHandle::operator==(const SharedMemoryHandle& handle) const {
if (!IsValid() && !handle.IsValid())
return true;
- return memory_object_ == handle.memory_object_ && size_ == handle.size_ &&
- pid_ == handle.pid_;
+ if (type_ != handle.type_)
+ return false;
+
+ switch (type_) {
+ case POSIX:
+ return file_descriptor_.fd == handle.file_descriptor_.fd;
+ case MACH:
+ return memory_object_ == handle.memory_object_ && size_ == handle.size_ &&
+ pid_ == handle.pid_;
+ }
}
bool SharedMemoryHandle::operator!=(const SharedMemoryHandle& handle) const {
@@ -83,10 +112,16 @@ bool SharedMemoryHandle::operator!=(const SharedMemoryHandle& handle) const {
}
bool SharedMemoryHandle::IsValid() const {
- return memory_object_ != MACH_PORT_NULL;
+ switch (type_) {
+ case POSIX:
+ return file_descriptor_.fd >= 0;
+ case MACH:
+ return memory_object_ != MACH_PORT_NULL;
+ }
}
mach_port_t SharedMemoryHandle::GetMemoryObject() const {
+ DCHECK_EQ(type_, MACH);
return memory_object_;
}
@@ -96,8 +131,19 @@ bool SharedMemoryHandle::GetSize(size_t* size) const {
return true;
}
- *size = size_;
- return true;
+ switch (type_) {
+ case SharedMemoryHandle::POSIX:
+ struct stat st;
+ if (fstat(file_descriptor_.fd, &st) != 0)
+ return false;
+ if (st.st_size < 0)
+ return false;
+ *size = st.st_size;
+ return true;
+ case SharedMemoryHandle::MACH:
+ *size = size_;
+ return true;
+ }
}
bool SharedMemoryHandle::MapAt(off_t offset,
@@ -105,42 +151,69 @@ bool SharedMemoryHandle::MapAt(off_t offset,
void** memory,
bool read_only) {
DCHECK(IsValid());
- DCHECK_EQ(pid_, GetCurrentProcId());
- kern_return_t kr = mach_vm_map(
- mach_task_self(),
- reinterpret_cast<mach_vm_address_t*>(memory), // Output parameter
- bytes,
- 0, // Alignment mask
- VM_FLAGS_ANYWHERE, memory_object_, offset,
- FALSE, // Copy
- VM_PROT_READ | (read_only ? 0 : VM_PROT_WRITE), // Current protection
- VM_PROT_WRITE | VM_PROT_READ | VM_PROT_IS_MASK, // Maximum protection
- VM_INHERIT_NONE);
- return kr == KERN_SUCCESS;
+ switch (type_) {
+ case SharedMemoryHandle::POSIX:
+ *memory = mmap(nullptr, bytes, PROT_READ | (read_only ? 0 : PROT_WRITE),
+ MAP_SHARED, file_descriptor_.fd, offset);
+ return *memory != MAP_FAILED;
+ case SharedMemoryHandle::MACH:
+ DCHECK_EQ(pid_, GetCurrentProcId());
+ kern_return_t kr = mach_vm_map(
+ mach_task_self(),
+ reinterpret_cast<mach_vm_address_t*>(memory), // Output parameter
+ bytes,
+ 0, // Alignment mask
+ VM_FLAGS_ANYWHERE,
+ memory_object_,
+ offset,
+ FALSE, // Copy
+ VM_PROT_READ | (read_only ? 0 : VM_PROT_WRITE), // Current protection
+ VM_PROT_WRITE | VM_PROT_READ | VM_PROT_IS_MASK, // Maximum protection
+ VM_INHERIT_NONE);
+ return kr == KERN_SUCCESS;
+ }
}
void SharedMemoryHandle::Close() const {
if (!IsValid())
return;
- kern_return_t kr = mach_port_deallocate(mach_task_self(), memory_object_);
- if (kr != KERN_SUCCESS)
- DPLOG(ERROR) << "Error deallocating mach port: " << kr;
+ switch (type_) {
+ case POSIX:
+ if (IGNORE_EINTR(close(file_descriptor_.fd)) < 0)
+ DPLOG(ERROR) << "Error closing fd";
+ break;
+ case MACH:
+ kern_return_t kr = mach_port_deallocate(mach_task_self(), memory_object_);
+ if (kr != KERN_SUCCESS)
+ MACH_DLOG(ERROR, kr) << "Error deallocating mach port";
+ break;
+ }
}
void SharedMemoryHandle::SetOwnershipPassesToIPC(bool ownership_passes) {
+ DCHECK_EQ(type_, MACH);
ownership_passes_to_ipc_ = ownership_passes;
}
bool SharedMemoryHandle::OwnershipPassesToIPC() const {
+ DCHECK_EQ(type_, MACH);
return ownership_passes_to_ipc_;
}
void SharedMemoryHandle::CopyRelevantData(const SharedMemoryHandle& handle) {
- memory_object_ = handle.memory_object_;
- size_ = handle.size_;
- pid_ = handle.pid_;
- ownership_passes_to_ipc_ = handle.ownership_passes_to_ipc_;
+ type_ = handle.type_;
+ switch (type_) {
+ case POSIX:
+ file_descriptor_ = handle.file_descriptor_;
+ break;
+ case MACH:
+ memory_object_ = handle.memory_object_;
+ size_ = handle.size_;
+ pid_ = handle.pid_;
+ ownership_passes_to_ipc_ = handle.ownership_passes_to_ipc_;
+ break;
+ }
}
} // namespace base
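A minimal POSIX-only sketch of the fd-backed branches added above to GetSize() and MapAt(): the size comes from fstat(), and the mapping comes from mmap() with write access dropped when |read_only| is set. Helper names are hypothetical and error handling is reduced to bool returns.

#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <cstddef>

bool GetFdSize(int fd, size_t* size) {
  struct stat st;
  if (fstat(fd, &st) != 0 || st.st_size < 0)
    return false;
  *size = static_cast<size_t>(st.st_size);
  return true;
}

bool MapFdAt(int fd, off_t offset, size_t bytes, bool read_only,
             void** memory) {
  *memory = mmap(nullptr, bytes, PROT_READ | (read_only ? 0 : PROT_WRITE),
                 MAP_SHARED, fd, offset);
  return *memory != MAP_FAILED;
}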
diff --git a/chromium/base/memory/shared_memory_helper.cc b/chromium/base/memory/shared_memory_helper.cc
new file mode 100644
index 00000000000..7fbfb7afad5
--- /dev/null
+++ b/chromium/base/memory/shared_memory_helper.cc
@@ -0,0 +1,98 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory_helper.h"
+
+#include "base/threading/thread_restrictions.h"
+
+namespace base {
+
+struct ScopedPathUnlinkerTraits {
+ static const FilePath* InvalidValue() { return nullptr; }
+
+ static void Free(const FilePath* path) {
+ if (unlink(path->value().c_str()))
+ PLOG(WARNING) << "unlink";
+ }
+};
+
+// Unlinks the FilePath when the object is destroyed.
+using ScopedPathUnlinker =
+ ScopedGeneric<const FilePath*, ScopedPathUnlinkerTraits>;
+
+#if !defined(OS_ANDROID)
+bool CreateAnonymousSharedMemory(const SharedMemoryCreateOptions& options,
+ ScopedFILE* fp,
+ ScopedFD* readonly_fd,
+ FilePath* path) {
+#if !(defined(OS_MACOSX) && !defined(OS_IOS))
+  // It doesn't make sense to have an open-existing private piece of shmem.
+  DCHECK(!options.open_existing_deprecated);
+#endif  // !(defined(OS_MACOSX) && !defined(OS_IOS))
+ // Q: Why not use the shm_open() etc. APIs?
+ // A: Because they're limited to 4mb on OS X. FFFFFFFUUUUUUUUUUU
+ FilePath directory;
+ ScopedPathUnlinker path_unlinker;
+ if (!GetShmemTempDir(options.executable, &directory))
+ return false;
+
+ fp->reset(base::CreateAndOpenTemporaryFileInDir(directory, path));
+
+ if (!*fp)
+ return false;
+
+ // Deleting the file prevents anyone else from mapping it in (making it
+ // private), and prevents the need for cleanup (once the last fd is
+ // closed, it is truly freed).
+ path_unlinker.reset(path);
+
+ if (options.share_read_only) {
+ // Also open as readonly so that we can ShareReadOnlyToProcess.
+ readonly_fd->reset(HANDLE_EINTR(open(path->value().c_str(), O_RDONLY)));
+ if (!readonly_fd->is_valid()) {
+ DPLOG(ERROR) << "open(\"" << path->value() << "\", O_RDONLY) failed";
+ fp->reset();
+ return false;
+ }
+ }
+ return true;
+}
+
+bool PrepareMapFile(ScopedFILE fp, ScopedFD readonly_fd, int* mapped_file,
+ int* readonly_mapped_file) {
+ DCHECK_EQ(-1, *mapped_file);
+ DCHECK_EQ(-1, *readonly_mapped_file);
+ if (fp == NULL)
+ return false;
+
+ // This function theoretically can block on the disk, but realistically
+ // the temporary files we create will just go into the buffer cache
+ // and be deleted before they ever make it out to disk.
+ base::ThreadRestrictions::ScopedAllowIO allow_io;
+
+ if (readonly_fd.is_valid()) {
+ struct stat st = {};
+ if (fstat(fileno(fp.get()), &st))
+ NOTREACHED();
+
+ struct stat readonly_st = {};
+ if (fstat(readonly_fd.get(), &readonly_st))
+ NOTREACHED();
+ if (st.st_dev != readonly_st.st_dev || st.st_ino != readonly_st.st_ino) {
+ LOG(ERROR) << "writable and read-only inodes don't match; bailing";
+ return false;
+ }
+ }
+
+ *mapped_file = HANDLE_EINTR(dup(fileno(fp.get())));
+ if (*mapped_file == -1) {
+ NOTREACHED() << "Call to dup failed, errno=" << errno;
+ }
+ *readonly_mapped_file = readonly_fd.release();
+
+ return true;
+}
+#endif // !defined(OS_ANDROID)
+
+} // namespace base
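A standalone sketch of the two POSIX idioms the helper above relies on: (1) unlinking a freshly created temp file so only already-open descriptors can reach it, and (2) comparing st_dev/st_ino to confirm that the writable and read-only descriptors refer to the same inode. Helper names are hypothetical; mkstemp stands in for CreateAndOpenTemporaryFileInDir.

#include <cstdlib>
#include <sys/stat.h>
#include <unistd.h>

// Creates an "anonymous" file: once unlinked it has no name, and the storage
// is freed when the last descriptor is closed.
int CreateAnonymousFd() {
  char path[] = "/tmp/shm_sketch_XXXXXX";
  int fd = mkstemp(path);
  if (fd >= 0)
    unlink(path);  // Nobody else can open or map it from now on.
  return fd;
}

// True if both descriptors name the same underlying inode.
bool SameInode(int fd_a, int fd_b) {
  struct stat a, b;
  if (fstat(fd_a, &a) != 0 || fstat(fd_b, &b) != 0)
    return false;
  return a.st_dev == b.st_dev && a.st_ino == b.st_ino;
}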
diff --git a/chromium/base/memory/shared_memory_helper.h b/chromium/base/memory/shared_memory_helper.h
new file mode 100644
index 00000000000..b515828c08c
--- /dev/null
+++ b/chromium/base/memory/shared_memory_helper.h
@@ -0,0 +1,33 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_SHARED_MEMORY_HELPER_H_
+#define BASE_MEMORY_SHARED_MEMORY_HELPER_H_
+
+#include "base/memory/shared_memory.h"
+
+#include <fcntl.h>
+
+namespace base {
+
+#if !defined(OS_ANDROID)
+// Makes a temporary file, fdopens it, and then unlinks it. |fp| is populated
+// with the fdopened FILE. |readonly_fd| is populated with the opened fd if
+// options.share_read_only is true. |path| is populated with the location of
+// the file before it was unlinked.
+// Returns false if there's an unhandled failure.
+bool CreateAnonymousSharedMemory(const SharedMemoryCreateOptions& options,
+ ScopedFILE* fp,
+ ScopedFD* readonly_fd,
+ FilePath* path);
+
+// Takes the outputs of CreateAnonymousSharedMemory and maps them properly to
+// |mapped_file| or |readonly_mapped_file|, depending on which one is populated.
+bool PrepareMapFile(ScopedFILE fp, ScopedFD readonly_fd, int* mapped_file,
+ int* readonly_mapped_file);
+#endif
+
+} // namespace base
+
+#endif // BASE_MEMORY_SHARED_MEMORY_HELPER_H_
diff --git a/chromium/base/memory/shared_memory_mac.cc b/chromium/base/memory/shared_memory_mac.cc
index a8f09555d96..d376daa579e 100644
--- a/chromium/base/memory/shared_memory_mac.cc
+++ b/chromium/base/memory/shared_memory_mac.cc
@@ -4,22 +4,33 @@
#include "base/memory/shared_memory.h"
+#include <errno.h>
#include <mach/mach_vm.h>
+#include <stddef.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <unistd.h>
#include "base/files/file_util.h"
#include "base/files/scoped_file.h"
#include "base/logging.h"
-#include "base/mac/foundation_util.h"
#include "base/mac/mac_util.h"
#include "base/mac/scoped_mach_vm.h"
+#include "base/memory/shared_memory_helper.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/histogram_macros.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/posix/safe_strerror.h"
#include "base/process/process_metrics.h"
-#include "base/profiler/scoped_tracker.h"
#include "base/scoped_generic.h"
#include "base/strings/utf_string_conversions.h"
+#include "base/threading/thread_restrictions.h"
#include "build/build_config.h"
+#if defined(OS_MACOSX)
+#include "base/mac/foundation_util.h"
+#endif // OS_MACOSX
+
namespace base {
namespace {
@@ -67,13 +78,21 @@ bool MakeMachSharedMemoryHandleReadOnly(SharedMemoryHandle* new_handle,
return true;
}
+
} // namespace
SharedMemory::SharedMemory()
- : mapped_size_(0), memory_(NULL), read_only_(false), requested_size_(0) {}
+ : mapped_memory_mechanism_(SharedMemoryHandle::MACH),
+ readonly_mapped_file_(-1),
+ mapped_size_(0),
+ memory_(NULL),
+ read_only_(false),
+ requested_size_(0) {}
SharedMemory::SharedMemory(const SharedMemoryHandle& handle, bool read_only)
: shm_(handle),
+ mapped_memory_mechanism_(SharedMemoryHandle::POSIX),
+ readonly_mapped_file_(-1),
mapped_size_(0),
memory_(NULL),
read_only_(read_only),
@@ -101,8 +120,7 @@ void SharedMemory::CloseHandle(const SharedMemoryHandle& handle) {
// static
size_t SharedMemory::GetHandleLimit() {
- // This should be effectively unlimited on OS X.
- return 10000;
+ return GetMaxFds();
}
// static
@@ -111,6 +129,12 @@ SharedMemoryHandle SharedMemory::DuplicateHandle(
return handle.Duplicate();
}
+// static
+int SharedMemory::GetFdFromSharedMemoryHandle(
+ const SharedMemoryHandle& handle) {
+ return handle.file_descriptor_.fd;
+}
+
bool SharedMemory::CreateAndMapAnonymous(size_t size) {
return CreateAnonymous(size) && Map(size);
}
@@ -125,20 +149,53 @@ bool SharedMemory::GetSizeFromSharedMemoryHandle(
// Chromium mostly only uses the unique/private shmem as specified by
// "name == L"". The exception is in the StatsTable.
bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
- // TODO(erikchen): Remove ScopedTracker below once http://crbug.com/466437
- // is fixed.
- tracked_objects::ScopedTracker tracking_profile1(
- FROM_HERE_WITH_EXPLICIT_FUNCTION(
- "466437 SharedMemory::Create::Start"));
DCHECK(!shm_.IsValid());
if (options.size == 0) return false;
if (options.size > static_cast<size_t>(std::numeric_limits<int>::max()))
return false;
- shm_ = SharedMemoryHandle(options.size);
+ if (options.type == SharedMemoryHandle::MACH) {
+ shm_ = SharedMemoryHandle(options.size);
+ requested_size_ = options.size;
+ return shm_.IsValid();
+ }
+
+ // This function theoretically can block on the disk. Both profiling of real
+ // users and local instrumentation shows that this is a real problem.
+ // https://code.google.com/p/chromium/issues/detail?id=466437
+ base::ThreadRestrictions::ScopedAllowIO allow_io;
+
+ ScopedFILE fp;
+ ScopedFD readonly_fd;
+
+ FilePath path;
+ bool result = CreateAnonymousSharedMemory(options, &fp, &readonly_fd, &path);
+ if (!result)
+ return false;
+
+ if (!fp) {
+ PLOG(ERROR) << "Creating shared memory in " << path.value() << " failed";
+ return false;
+ }
+
+ // Get current size.
+ struct stat stat;
+ if (fstat(fileno(fp.get()), &stat) != 0)
+ return false;
+ const size_t current_size = stat.st_size;
+ if (current_size != options.size) {
+ if (HANDLE_EINTR(ftruncate(fileno(fp.get()), options.size)) != 0)
+ return false;
+ }
requested_size_ = options.size;
- return shm_.IsValid();
+
+ int mapped_file = -1;
+ result = PrepareMapFile(std::move(fp), std::move(readonly_fd), &mapped_file,
+ &readonly_mapped_file_);
+
+ shm_ = SharedMemoryHandle(FileDescriptor(mapped_file, false));
+ return result;
}
bool SharedMemory::MapAt(off_t offset, size_t bytes) {
@@ -154,6 +211,7 @@ bool SharedMemory::MapAt(off_t offset, size_t bytes) {
mapped_size_ = bytes;
DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(memory_) &
(SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
+ mapped_memory_mechanism_ = shm_.type_;
} else {
memory_ = NULL;
}
@@ -165,16 +223,30 @@ bool SharedMemory::Unmap() {
if (memory_ == NULL)
return false;
- mach_vm_deallocate(mach_task_self(),
- reinterpret_cast<mach_vm_address_t>(memory_),
- mapped_size_);
+ switch (mapped_memory_mechanism_) {
+ case SharedMemoryHandle::POSIX:
+ munmap(memory_, mapped_size_);
+ break;
+ case SharedMemoryHandle::MACH:
+ mach_vm_deallocate(mach_task_self(),
+ reinterpret_cast<mach_vm_address_t>(memory_),
+ mapped_size_);
+ break;
+ }
+
memory_ = NULL;
mapped_size_ = 0;
return true;
}
SharedMemoryHandle SharedMemory::handle() const {
- return shm_;
+ switch (shm_.type_) {
+ case SharedMemoryHandle::POSIX:
+ return SharedMemoryHandle(
+ FileDescriptor(shm_.file_descriptor_.fd, false));
+ case SharedMemoryHandle::MACH:
+ return shm_;
+ }
}
SharedMemoryHandle SharedMemory::TakeHandle() {
@@ -186,33 +258,70 @@ SharedMemoryHandle SharedMemory::TakeHandle() {
void SharedMemory::Close() {
shm_.Close();
shm_ = SharedMemoryHandle();
+ if (shm_.type_ == SharedMemoryHandle::POSIX) {
+ if (readonly_mapped_file_ > 0) {
+ if (IGNORE_EINTR(close(readonly_mapped_file_)) < 0)
+ PLOG(ERROR) << "close";
+ readonly_mapped_file_ = -1;
+ }
+ }
}
-bool SharedMemory::ShareToProcessCommon(ProcessHandle process,
- SharedMemoryHandle* new_handle,
- bool close_self,
- ShareMode share_mode) {
- DCHECK(shm_.IsValid());
+bool SharedMemory::Share(SharedMemoryHandle* new_handle, ShareMode share_mode) {
+ if (shm_.type_ == SharedMemoryHandle::MACH) {
+ DCHECK(shm_.IsValid());
+
+ bool success = false;
+ switch (share_mode) {
+ case SHARE_CURRENT_MODE:
+ *new_handle = shm_.Duplicate();
+ success = true;
+ break;
+ case SHARE_READONLY:
+ success = MakeMachSharedMemoryHandleReadOnly(new_handle, shm_, memory_);
+ break;
+ }
+
+ if (success)
+ new_handle->SetOwnershipPassesToIPC(true);
+
+ return success;
+ }
- bool success = false;
+ int handle_to_dup = -1;
switch (share_mode) {
case SHARE_CURRENT_MODE:
- *new_handle = shm_.Duplicate();
- success = true;
+ handle_to_dup = shm_.file_descriptor_.fd;
break;
case SHARE_READONLY:
- success = MakeMachSharedMemoryHandleReadOnly(new_handle, shm_, memory_);
+ // We could imagine re-opening the file from /dev/fd, but that can't make
+ // it readonly on Mac: https://codereview.chromium.org/27265002/#msg10
+ CHECK_GE(readonly_mapped_file_, 0);
+ handle_to_dup = readonly_mapped_file_;
break;
}
- if (success)
- new_handle->SetOwnershipPassesToIPC(true);
+ const int new_fd = HANDLE_EINTR(dup(handle_to_dup));
+ if (new_fd < 0) {
+ DPLOG(ERROR) << "dup() failed.";
+ return false;
+ }
+
+ new_handle->file_descriptor_.fd = new_fd;
+ new_handle->type_ = SharedMemoryHandle::POSIX;
+
+ return true;
+}
+bool SharedMemory::ShareToProcessCommon(ProcessHandle process,
+ SharedMemoryHandle* new_handle,
+ bool close_self,
+ ShareMode share_mode) {
+ bool success = Share(new_handle, share_mode);
if (close_self) {
Unmap();
Close();
}
-
return success;
}
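A sketch of the sizing step Create() now performs above for POSIX-type handles: check the backing file's current size with fstat() and call ftruncate() only when it differs from the requested size. The helper name is hypothetical, and the HANDLE_EINTR retry wrapper is omitted for brevity.

#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <cstddef>

bool EnsureFileSize(int fd, size_t requested_size) {
  struct stat st;
  if (fstat(fd, &st) != 0)
    return false;
  if (static_cast<size_t>(st.st_size) == requested_size)
    return true;  // Already the right size; nothing to do.
  return ftruncate(fd, static_cast<off_t>(requested_size)) == 0;
}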
diff --git a/chromium/base/memory/shared_memory_posix.cc b/chromium/base/memory/shared_memory_posix.cc
index 783bdfce86c..3a18faa83dd 100644
--- a/chromium/base/memory/shared_memory_posix.cc
+++ b/chromium/base/memory/shared_memory_posix.cc
@@ -14,12 +14,13 @@
#include "base/files/file_util.h"
#include "base/files/scoped_file.h"
#include "base/logging.h"
+#include "base/memory/shared_memory_helper.h"
#include "base/posix/eintr_wrapper.h"
#include "base/posix/safe_strerror.h"
#include "base/process/process_metrics.h"
-#include "base/profiler/scoped_tracker.h"
#include "base/scoped_generic.h"
#include "base/strings/utf_string_conversions.h"
+#include "base/threading/thread_restrictions.h"
#include "build/build_config.h"
#if defined(OS_ANDROID)
@@ -29,77 +30,6 @@
namespace base {
-namespace {
-
-struct ScopedPathUnlinkerTraits {
- static FilePath* InvalidValue() { return nullptr; }
-
- static void Free(FilePath* path) {
- // TODO(erikchen): Remove ScopedTracker below once http://crbug.com/466437
- // is fixed.
- tracked_objects::ScopedTracker tracking_profile(
- FROM_HERE_WITH_EXPLICIT_FUNCTION(
- "466437 SharedMemory::Create::Unlink"));
- if (unlink(path->value().c_str()))
- PLOG(WARNING) << "unlink";
- }
-};
-
-// Unlinks the FilePath when the object is destroyed.
-typedef ScopedGeneric<FilePath*, ScopedPathUnlinkerTraits> ScopedPathUnlinker;
-
-#if !defined(OS_ANDROID)
-// Makes a temporary file, fdopens it, and then unlinks it. |fp| is populated
-// with the fdopened FILE. |readonly_fd| is populated with the opened fd if
-// options.share_read_only is true. |path| is populated with the location of
-// the file before it was unlinked.
-// Returns false if there's an unhandled failure.
-bool CreateAnonymousSharedMemory(const SharedMemoryCreateOptions& options,
- ScopedFILE* fp,
- ScopedFD* readonly_fd,
- FilePath* path) {
- // It doesn't make sense to have a open-existing private piece of shmem
- DCHECK(!options.open_existing_deprecated);
- // Q: Why not use the shm_open() etc. APIs?
- // A: Because they're limited to 4mb on OS X. FFFFFFFUUUUUUUUUUU
- FilePath directory;
- ScopedPathUnlinker path_unlinker;
- if (GetShmemTempDir(options.executable, &directory)) {
- // TODO(erikchen): Remove ScopedTracker below once http://crbug.com/466437
- // is fixed.
- tracked_objects::ScopedTracker tracking_profile(
- FROM_HERE_WITH_EXPLICIT_FUNCTION(
- "466437 SharedMemory::Create::OpenTemporaryFile"));
- fp->reset(base::CreateAndOpenTemporaryFileInDir(directory, path));
-
- // Deleting the file prevents anyone else from mapping it in (making it
- // private), and prevents the need for cleanup (once the last fd is
- // closed, it is truly freed).
- if (*fp)
- path_unlinker.reset(path);
- }
-
- if (*fp) {
- if (options.share_read_only) {
- // TODO(erikchen): Remove ScopedTracker below once
- // http://crbug.com/466437 is fixed.
- tracked_objects::ScopedTracker tracking_profile(
- FROM_HERE_WITH_EXPLICIT_FUNCTION(
- "466437 SharedMemory::Create::OpenReadonly"));
- // Also open as readonly so that we can ShareReadOnlyToProcess.
- readonly_fd->reset(HANDLE_EINTR(open(path->value().c_str(), O_RDONLY)));
- if (!readonly_fd->is_valid()) {
- DPLOG(ERROR) << "open(\"" << path->value() << "\", O_RDONLY) failed";
- fp->reset();
- return false;
- }
- }
- }
- return true;
-}
-#endif // !defined(OS_ANDROID)
-}
-
SharedMemory::SharedMemory()
: mapped_file_(-1),
readonly_mapped_file_(-1),
@@ -185,11 +115,6 @@ bool SharedMemory::GetSizeFromSharedMemoryHandle(
// In case we want to delete it later, it may be useful to save the value
// of mem_filename after FilePathForMemoryName().
bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
- // TODO(erikchen): Remove ScopedTracker below once http://crbug.com/466437
- // is fixed.
- tracked_objects::ScopedTracker tracking_profile1(
- FROM_HERE_WITH_EXPLICIT_FUNCTION(
- "466437 SharedMemory::Create::Start"));
DCHECK_EQ(-1, mapped_file_);
if (options.size == 0) return false;
@@ -292,7 +217,8 @@ bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
return false;
}
- return PrepareMapFile(std::move(fp), std::move(readonly_fd));
+ return PrepareMapFile(std::move(fp), std::move(readonly_fd), &mapped_file_,
+ &readonly_mapped_file_);
}
// Our current implementation of shmem is with mmap()ing of files.
@@ -324,7 +250,8 @@ bool SharedMemory::Open(const std::string& name, bool read_only) {
DPLOG(ERROR) << "open(\"" << path.value() << "\", O_RDONLY) failed";
return false;
}
- return PrepareMapFile(std::move(fp), std::move(readonly_fd));
+ return PrepareMapFile(std::move(fp), std::move(readonly_fd), &mapped_file_,
+ &readonly_mapped_file_);
}
#endif // !defined(OS_ANDROID)
@@ -401,44 +328,6 @@ void SharedMemory::Close() {
}
#if !defined(OS_ANDROID)
-bool SharedMemory::PrepareMapFile(ScopedFILE fp, ScopedFD readonly_fd) {
- DCHECK_EQ(-1, mapped_file_);
- DCHECK_EQ(-1, readonly_mapped_file_);
- if (fp == NULL)
- return false;
-
- // This function theoretically can block on the disk, but realistically
- // the temporary files we create will just go into the buffer cache
- // and be deleted before they ever make it out to disk.
- base::ThreadRestrictions::ScopedAllowIO allow_io;
-
- struct stat st = {};
- if (fstat(fileno(fp.get()), &st))
- NOTREACHED();
- if (readonly_fd.is_valid()) {
- struct stat readonly_st = {};
- if (fstat(readonly_fd.get(), &readonly_st))
- NOTREACHED();
- if (st.st_dev != readonly_st.st_dev || st.st_ino != readonly_st.st_ino) {
- LOG(ERROR) << "writable and read-only inodes don't match; bailing";
- return false;
- }
- }
-
- mapped_file_ = HANDLE_EINTR(dup(fileno(fp.get())));
- if (mapped_file_ == -1) {
- if (errno == EMFILE) {
- LOG(WARNING) << "Shared memory creation failed; out of file descriptors";
- return false;
- } else {
- NOTREACHED() << "Call to dup failed, errno=" << errno;
- }
- }
- readonly_mapped_file_ = readonly_fd.release();
-
- return true;
-}
-
// For the given shmem named |mem_name|, return a filename to mmap()
// (and possibly create). Modifies |filename|. Return false on
// error, or true if we are happy.
diff --git a/chromium/base/memory/shared_memory_unittest.cc b/chromium/base/memory/shared_memory_unittest.cc
index f29865c21ae..19dedccb476 100644
--- a/chromium/base/memory/shared_memory_unittest.cc
+++ b/chromium/base/memory/shared_memory_unittest.cc
@@ -316,8 +316,6 @@ TEST(SharedMemoryTest, AnonymousPrivate) {
}
}
-// The Mach functionality is tested in shared_memory_mac_unittest.cc.
-#if !(defined(OS_MACOSX) && !defined(OS_IOS))
TEST(SharedMemoryTest, ShareReadOnly) {
StringPiece contents = "Hello World";
@@ -325,6 +323,10 @@ TEST(SharedMemoryTest, ShareReadOnly) {
SharedMemoryCreateOptions options;
options.size = contents.size();
options.share_read_only = true;
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ // The Mach functionality is tested in shared_memory_mac_unittest.cc.
+ options.type = SharedMemoryHandle::POSIX;
+#endif
ASSERT_TRUE(writable_shmem.Create(options));
ASSERT_TRUE(writable_shmem.Map(options.size));
memcpy(writable_shmem.memory(), contents.data(), contents.size());
@@ -400,7 +402,6 @@ TEST(SharedMemoryTest, ShareReadOnly) {
#error Unexpected platform; write a test that tries to make 'handle' writable.
#endif // defined(OS_POSIX) || defined(OS_WIN)
}
-#endif // !(defined(OS_MACOSX) && !defined(OS_IOS))
TEST(SharedMemoryTest, ShareToSelf) {
StringPiece contents = "Hello World";
@@ -474,7 +475,7 @@ TEST(SharedMemoryTest, MapTwice) {
EXPECT_EQ(old_address, memory.memory());
}
-#if defined(OS_POSIX) && !(defined(OS_MACOSX) && !defined(OS_IOS))
+#if defined(OS_POSIX)
// This test is not applicable for iOS (crbug.com/399384).
#if !defined(OS_IOS)
// Create a shared memory object, mmap it, and mprotect it to PROT_EXEC.
@@ -485,6 +486,10 @@ TEST(SharedMemoryTest, AnonymousExecutable) {
SharedMemoryCreateOptions options;
options.size = kTestSize;
options.executable = true;
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ // The Mach functionality is tested in shared_memory_mac_unittest.cc.
+ options.type = SharedMemoryHandle::POSIX;
+#endif
EXPECT_TRUE(shared_memory.Create(options));
EXPECT_TRUE(shared_memory.Map(shared_memory.requested_size()));
@@ -518,6 +523,10 @@ TEST(SharedMemoryTest, FilePermissionsAnonymous) {
SharedMemory shared_memory;
SharedMemoryCreateOptions options;
options.size = kTestSize;
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ // The Mach functionality is tested in shared_memory_mac_unittest.cc.
+ options.type = SharedMemoryHandle::POSIX;
+#endif
// Set a file mode creation mask that gives all permissions.
ScopedUmaskSetter permissive_mask(S_IWGRP | S_IWOTH);
@@ -540,6 +549,10 @@ TEST(SharedMemoryTest, FilePermissionsNamed) {
SharedMemory shared_memory;
SharedMemoryCreateOptions options;
options.size = kTestSize;
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ // The Mach functionality is tested in shared_memory_mac_unittest.cc.
+ options.type = SharedMemoryHandle::POSIX;
+#endif
// Set a file mode creation mask that gives all permissions.
ScopedUmaskSetter permissive_mask(S_IWGRP | S_IWOTH);
@@ -556,7 +569,7 @@ TEST(SharedMemoryTest, FilePermissionsNamed) {
}
#endif // !defined(OS_ANDROID)
-#endif // defined(OS_POSIX) && !(defined(OS_MACOSX) && !defined(OS_IOS))
+#endif // defined(OS_POSIX)
// Map() will return addresses which are aligned to the platform page size; this
// varies from platform to platform though. Since we'd like to advertise a
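For reference, the option pattern these tests now share, pulled out as a hedged standalone sketch (the helper name is made up; SharedMemoryHandle::POSIX is only meaningful on macOS builds):

#include <stddef.h>

#include "base/memory/shared_memory.h"
#include "build/build_config.h"

// Hypothetical helper: create a segment that can later be shared read-only,
// forcing the POSIX backend on macOS so the Mach path (covered separately in
// shared_memory_mac_unittest.cc) is not exercised here.
bool CreatePosixBackedSegment(base::SharedMemory* shmem, size_t size) {
  base::SharedMemoryCreateOptions options;
  options.size = size;
  options.share_read_only = true;
#if defined(OS_MACOSX) && !defined(OS_IOS)
  options.type = base::SharedMemoryHandle::POSIX;
#endif
  return shmem->Create(options) && shmem->Map(size);
}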
diff --git a/chromium/base/message_loop/message_loop.cc b/chromium/base/message_loop/message_loop.cc
index 1581f6cfb4b..2212941db9a 100644
--- a/chromium/base/message_loop/message_loop.cc
+++ b/chromium/base/message_loop/message_loop.cc
@@ -357,12 +357,20 @@ std::string MessageLoop::GetThreadName() const {
void MessageLoop::SetTaskRunner(
scoped_refptr<SingleThreadTaskRunner> task_runner) {
DCHECK_EQ(this, current());
+ DCHECK(task_runner);
DCHECK(task_runner->BelongsToCurrentThread());
DCHECK(!unbound_task_runner_);
task_runner_ = std::move(task_runner);
SetThreadTaskRunnerHandle();
}
+void MessageLoop::ClearTaskRunnerForTesting() {
+ DCHECK_EQ(this, current());
+ DCHECK(!unbound_task_runner_);
+ task_runner_ = nullptr;
+ thread_task_runner_handle_.reset();
+}
+
void MessageLoop::SetThreadTaskRunnerHandle() {
DCHECK_EQ(this, current());
// Clear the previous thread task runner first, because only one can exist at
diff --git a/chromium/base/message_loop/message_loop.h b/chromium/base/message_loop/message_loop.h
index ac7a3035636..91a7b1d3326 100644
--- a/chromium/base/message_loop/message_loop.h
+++ b/chromium/base/message_loop/message_loop.h
@@ -234,6 +234,10 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate {
// thread to which the message loop is bound.
void SetTaskRunner(scoped_refptr<SingleThreadTaskRunner> task_runner);
+ // Clears task_runner() and the ThreadTaskRunnerHandle for the target thread.
+ // Must be called on the thread to which the message loop is bound.
+ void ClearTaskRunnerForTesting();
+
// Enables or disables the recursive task processing. This happens in the case
// of recursive message loops. Some unwanted message loops may occur when
// using common controls or printer functions. By default, recursive task
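A hedged sketch of how a test might use the new hook together with SetTaskRunner(); the helper and the injected task runner are illustrative, not part of the patch:

#include <utility>

#include "base/memory/ref_counted.h"
#include "base/message_loop/message_loop.h"
#include "base/single_thread_task_runner.h"

// Hypothetical test helper: drop the task runner the MessageLoop published
// for this thread and install a replacement. ClearTaskRunnerForTesting()
// resets both task_runner() and the ThreadTaskRunnerHandle, which is what
// allows the subsequent SetTaskRunner() call to succeed.
void SwapTaskRunnerForTest(
    base::MessageLoop* loop,
    scoped_refptr<base::SingleThreadTaskRunner> replacement) {
  loop->ClearTaskRunnerForTesting();
  loop->SetTaskRunner(std::move(replacement));  // Must belong to this thread.
}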
diff --git a/chromium/base/message_loop/message_loop_unittest.cc b/chromium/base/message_loop/message_loop_unittest.cc
index 97b7da77640..263442839f7 100644
--- a/chromium/base/message_loop/message_loop_unittest.cc
+++ b/chromium/base/message_loop/message_loop_unittest.cc
@@ -614,9 +614,6 @@ RUN_MESSAGE_LOOP_TESTS(UI, &TypeUIMessagePumpFactory);
RUN_MESSAGE_LOOP_TESTS(IO, &TypeIOMessagePumpFactory);
#if defined(OS_WIN)
-// Additional set of tests for GPU version of UI message loop.
-RUN_MESSAGE_LOOP_TESTS(GPU, &MessagePumpForGpu::CreateMessagePumpForGpu);
-
TEST(MessageLoopTest, PostDelayedTask_SharedTimer_SubPump) {
RunTest_PostDelayedTask_SharedTimer_SubPump();
}
diff --git a/chromium/base/message_loop/message_pump_default.cc b/chromium/base/message_loop/message_pump_default.cc
index 3449aec8605..cf68270c56d 100644
--- a/chromium/base/message_loop/message_pump_default.cc
+++ b/chromium/base/message_loop/message_pump_default.cc
@@ -4,8 +4,6 @@
#include "base/message_loop/message_pump_default.h"
-#include <algorithm>
-
#include "base/logging.h"
#include "base/threading/thread_restrictions.h"
#include "build/build_config.h"
@@ -54,38 +52,11 @@ void MessagePumpDefault::Run(Delegate* delegate) {
if (delayed_work_time_.is_null()) {
event_.Wait();
} else {
- TimeDelta delay = delayed_work_time_ - TimeTicks::Now();
- if (delay > TimeDelta()) {
-#if defined(OS_WIN)
- // TODO(stanisc): crbug.com/623223: Consider moving the OS_WIN specific
- // logic into TimedWait implementation in waitable_event_win.cc.
-
- // crbug.com/487724: on Windows, waiting for less than 1 ms results in
- // returning from TimedWait promptly and spinning
- // MessagePumpDefault::Run loop for up to 1 ms - until it is time to
- // run a delayed task. |min_delay| is the minimum possible wait to
- // to avoid the spinning.
- constexpr TimeDelta min_delay = TimeDelta::FromMilliseconds(1);
- do {
- delay = std::max(delay, min_delay);
- if (event_.TimedWait(delay))
- break;
-
- // TimedWait can time out earlier than the specified |delay| on
- // Windows. It doesn't make sense to run the outer loop in that case
- // because there isn't going to be any new work. It is less overhead
- // to just go back to wait.
- // In practice this inner wait loop might have up to 3 iterations.
- delay = delayed_work_time_ - TimeTicks::Now();
- } while (delay > TimeDelta());
-#else
- event_.TimedWait(delay);
-#endif
- } else {
- // It looks like delayed_work_time_ indicates a time in the past, so we
- // need to call DoDelayedWork now.
- delayed_work_time_ = TimeTicks();
- }
+ // No need to handle an already expired |delayed_work_time_| in any special
+ // way. When |delayed_work_time_| is in the past, TimedWaitUntil returns
+ // promptly and |delayed_work_time_| will be re-initialized on the next
+ // DoDelayedWork call, which has to be called in order to get here again.
+ event_.TimedWaitUntil(delayed_work_time_);
}
// Since event_ is auto-reset, we don't need to do anything special here
// other than service each delegate method.
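A minimal sketch of the simplified wait logic, assuming (as the new comment does) that WaitableEvent::TimedWaitUntil() takes an absolute TimeTicks deadline and returns promptly when that deadline has already passed; the function name is illustrative:

#include "base/synchronization/waitable_event.h"
#include "base/time/time.h"

// Sketch: block until the event is signaled or the absolute deadline passes.
// A deadline already in the past simply makes TimedWaitUntil() return
// promptly, so expired |delayed_work_time| values need no special handling.
void WaitForWorkOrDeadline(base::WaitableEvent* event,
                           base::TimeTicks delayed_work_time) {
  if (delayed_work_time.is_null())
    event->Wait();  // No timer pending: wait until signaled.
  else
    event->TimedWaitUntil(delayed_work_time);
}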
diff --git a/chromium/base/message_loop/message_pump_glib.h b/chromium/base/message_loop/message_pump_glib.h
index a2b54d8542c..d79dba55a2a 100644
--- a/chromium/base/message_loop/message_pump_glib.h
+++ b/chromium/base/message_loop/message_pump_glib.h
@@ -69,7 +69,7 @@ class BASE_EXPORT MessagePumpGlib : public MessagePump {
// Dispatch() will be called.
int wakeup_pipe_read_;
int wakeup_pipe_write_;
- // Use a scoped_ptr to avoid needing the definition of GPollFD in the header.
+ // Use a unique_ptr to avoid needing the definition of GPollFD in the header.
std::unique_ptr<GPollFD> wakeup_gpollfd_;
DISALLOW_COPY_AND_ASSIGN(MessagePumpGlib);
diff --git a/chromium/base/message_loop/message_pump_win.cc b/chromium/base/message_loop/message_pump_win.cc
index f1ec727e7c6..30638df789e 100644
--- a/chromium/base/message_loop/message_pump_win.cc
+++ b/chromium/base/message_loop/message_pump_win.cc
@@ -400,143 +400,6 @@ bool MessagePumpForUI::ProcessPumpReplacementMessage() {
}
//-----------------------------------------------------------------------------
-// MessagePumpForGpu public:
-
-MessagePumpForGpu::MessagePumpForGpu() {
- event_.Set(CreateEvent(nullptr, FALSE, FALSE, nullptr));
-}
-
-MessagePumpForGpu::~MessagePumpForGpu() = default;
-
-// static
-void MessagePumpForGpu::InitFactory() {
- bool init_result = MessageLoop::InitMessagePumpForUIFactory(
- &MessagePumpForGpu::CreateMessagePumpForGpu);
- DCHECK(init_result);
-}
-
-// static
-std::unique_ptr<MessagePump> MessagePumpForGpu::CreateMessagePumpForGpu() {
- return WrapUnique<MessagePump>(new MessagePumpForGpu);
-}
-
-void MessagePumpForGpu::ScheduleWork() {
- if (InterlockedExchange(&work_state_, HAVE_WORK) != READY)
- return; // Someone else continued the pumping.
-
- // TODO(stanisc): crbug.com/596190: Preserve for crash dump analysis.
- // Remove this when the bug is fixed.
- last_set_event_timeticks_ = TimeTicks::Now();
-
- // Make sure the MessagePump does some work for us.
- SetEvent(event_.Get());
-}
-
-void MessagePumpForGpu::ScheduleDelayedWork(
- const TimeTicks& delayed_work_time) {
- // We know that we can't be blocked right now since this method can only be
- // called on the same thread as Run, so we only need to update our record of
- // how long to sleep when we do sleep.
- delayed_work_time_ = delayed_work_time;
-}
-
-//-----------------------------------------------------------------------------
-// MessagePumpForGpu private:
-
-void MessagePumpForGpu::DoRunLoop() {
- while (!state_->should_quit) {
- // Indicate that the loop is handling the work.
- // If there is a race condition between switching to WORKING state here and
- // the producer thread setting the HAVE_WORK state after exiting the wait,
- // the event might remain in the signalled state. That might be less than
- // optimal but wouldn't result in failing to handle the work.
- InterlockedExchange(&work_state_, WORKING);
-
- bool more_work_is_plausible = ProcessNextMessage();
- if (state_->should_quit)
- break;
-
- more_work_is_plausible |= state_->delegate->DoWork();
- if (state_->should_quit)
- break;
-
- more_work_is_plausible |=
- state_->delegate->DoDelayedWork(&delayed_work_time_);
- if (state_->should_quit)
- break;
-
- if (more_work_is_plausible)
- continue;
-
- more_work_is_plausible = state_->delegate->DoIdleWork();
- if (state_->should_quit)
- break;
-
- if (more_work_is_plausible)
- continue;
-
- // Switch that working state to READY to indicate that the loop is
- // waiting for accepting new work if it is still in WORKING state and hasn't
- // been signalled. Otherwise if it is in HAVE_WORK state skip the wait
- // and proceed to handing the work.
- if (InterlockedCompareExchange(&work_state_, READY, WORKING) == HAVE_WORK)
- continue; // Skip wait, more work was requested.
-
- WaitForWork(); // Wait (sleep) until we have work to do again.
- }
-}
-
-void MessagePumpForGpu::WaitForWork() {
- // Wait until a message is available, up to the time needed by the timer
- // manager to fire the next set of timers.
- int delay;
-
- // The while loop handles the situation where on Windows 7 and later versions
- // MsgWaitForMultipleObjectsEx might time out slightly earlier (less than one
- // ms) than the specified |delay|. In that situation it is more optimal to
- // just wait again rather than waste a DoRunLoop cycle.
- while ((delay = GetCurrentDelay()) != 0) {
- if (delay < 0) // Negative value means no timers waiting.
- delay = INFINITE;
-
- // TODO(stanisc): crbug.com/596190: Preserve for crash dump analysis.
- // Remove this when the bug is fixed.
- TimeTicks wait_for_work_timeticks = TimeTicks::Now();
- debug::Alias(&wait_for_work_timeticks);
- debug::Alias(&delay);
-
- HANDLE handle = event_.Get();
- DWORD result =
- MsgWaitForMultipleObjectsEx(1, &handle, delay, QS_ALLINPUT, 0);
- DCHECK_NE(WAIT_FAILED, result) << GetLastError();
- if (result != WAIT_TIMEOUT) {
- // Either work or message available.
- return;
- }
- }
-}
-
-bool MessagePumpForGpu::ProcessNextMessage() {
- MSG msg;
- if (!PeekMessage(&msg, nullptr, 0, 0, PM_REMOVE))
- return false;
-
- if (msg.message == WM_QUIT) {
- UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem",
- RECEIVED_WM_QUIT_ERROR, MESSAGE_LOOP_PROBLEM_MAX);
- // WM_QUIT messages shouldn't be received by any threads in the GPU
- // process. If they are, just ignore them instead of causing threads to
- // exit prematurely.
- return true;
- }
-
- TranslateMessage(&msg);
- DispatchMessage(&msg);
-
- return true;
-}
-
-//-----------------------------------------------------------------------------
// MessagePumpForIO public:
MessagePumpForIO::IOContext::IOContext() {
diff --git a/chromium/base/message_loop/message_pump_win.h b/chromium/base/message_loop/message_pump_win.h
index f6683e7c547..f8a8557014b 100644
--- a/chromium/base/message_loop/message_pump_win.h
+++ b/chromium/base/message_loop/message_pump_win.h
@@ -140,39 +140,6 @@ class BASE_EXPORT MessagePumpForUI : public MessagePumpWin {
};
//-----------------------------------------------------------------------------
-// MessagePumpForGpu is a simplified version of UI message pump that is
-// optimized for the GPU process. Unlike MessagePumpForUI it doesn't have a
-// hidden window and doesn't handle a situation where a native message pump
-// might take over message processing.
-//
-class BASE_EXPORT MessagePumpForGpu : public MessagePumpWin {
- public:
- MessagePumpForGpu();
- ~MessagePumpForGpu() override;
-
- // Factory methods.
- static void InitFactory();
- static std::unique_ptr<MessagePump> CreateMessagePumpForGpu();
-
- // MessagePump methods:
- void ScheduleWork() override;
- void ScheduleDelayedWork(const TimeTicks& delayed_work_time) override;
-
- private:
- // MessagePumpWin methods:
- void DoRunLoop() override;
-
- void WaitForWork();
- bool ProcessNextMessage();
-
- win::ScopedHandle event_;
-
- // Used to help diagnose hangs.
- // TODO(stanisc): crbug.com/596190: Remove these once the bug is fixed.
- TimeTicks last_set_event_timeticks_;
-};
-
-//-----------------------------------------------------------------------------
// MessagePumpForIO extends MessagePumpWin with methods that are particular to a
// MessageLoop instantiated with TYPE_IO. This version of MessagePump does not
// deal with Windows messages, and instead has a Run loop based on Completion
diff --git a/chromium/base/metrics/field_trial.cc b/chromium/base/metrics/field_trial.cc
index c3c353dd67c..a4f40cd45e6 100644
--- a/chromium/base/metrics/field_trial.cc
+++ b/chromium/base/metrics/field_trial.cc
@@ -10,10 +10,8 @@
#include "base/base_switches.h"
#include "base/build_time.h"
#include "base/command_line.h"
-#include "base/debug/alias.h"
-#include "base/feature_list.h"
#include "base/logging.h"
-#include "base/pickle.h"
+#include "base/metrics/field_trial_param_associator.h"
#include "base/process/memory.h"
#include "base/rand_util.h"
#include "base/strings/string_number_conversions.h"
@@ -21,6 +19,17 @@
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
+// On systems that use the zygote process to spawn child processes, we must
+// retrieve the correct fd using the mapping in GlobalDescriptors.
+#if defined(OS_POSIX) && !defined(OS_NACL) && !defined(OS_MACOSX) && \
+ !defined(OS_ANDROID)
+#define POSIX_WITH_ZYGOTE 1
+#endif
+
+#if defined(POSIX_WITH_ZYGOTE) || defined(OS_MACOSX)
+#include "base/posix/global_descriptors.h"
+#endif
+
namespace base {
namespace {
@@ -38,54 +47,58 @@ const char kActivationMarker = '*';
// for now while the implementation is fleshed out (e.g. data format, single
// shared memory segment). See https://codereview.chromium.org/2365273004/ and
// crbug.com/653874
-const bool kUseSharedMemoryForFieldTrials = false;
+// The browser is the only process that has write access to the shared memory.
+// This is safe from race conditions because MakeIterable is a release operation
+// and GetNextOfType is an acquire operation, so memory writes before
+// MakeIterable happen before memory reads after GetNextOfType.
+const bool kUseSharedMemoryForFieldTrials = true;
// Constants for the field trial allocator.
const char kAllocatorName[] = "FieldTrialAllocator";
-const uint32_t kFieldTrialType = 0xABA17E13 + 2; // SHA1(FieldTrialEntry) v2
-
-// We allocate 64 KiB to hold all the field trial data. This should be enough,
-// as currently we use ~8KiB for the field trials, and ~10KiB for experiment
-// parameters (as of 9/11/2016). This also doesn't allocate all 64 KiB at once
-// -- the pages only get mapped to physical memory when they are touched. If the
-// size of the allocated field trials does get larger than 64 KiB, then we will
-// drop some field trials in child processes, leading to an inconsistent view
-// between browser and child processes and possibly causing crashes (see
-// crbug.com/661617).
+
+// We allocate 128 KiB to hold all the field trial data. This should be enough,
+// as most people use 3 - 25 KiB for field trials (as of 11/25/2016).
+// This also doesn't allocate all 128 KiB at once -- the pages only get mapped
+// to physical memory when they are touched. If the size of the allocated field
+// trials does get larger than 128 KiB, then we will drop some field trials in
+// child processes, leading to an inconsistent view between browser and child
+// processes and possibly causing crashes (see crbug.com/661617).
#if !defined(OS_NACL)
-const size_t kFieldTrialAllocationSize = 64 << 10; // 64 KiB
+const size_t kFieldTrialAllocationSize = 128 << 10; // 128 KiB
#endif
-// We create one FieldTrialEntry per field trial in shared memory, via
-// AddToAllocatorWhileLocked. The FieldTrialEntry is followed by a base::Pickle
-// object that we unpickle and read from. Any changes to this structure requires
-// a bump in kFieldTrialType id defined above.
-struct FieldTrialEntry {
- // Whether or not this field trial is activated. This is really just a boolean
- // but marked as a uint32_t for portability reasons.
- uint32_t activated;
-
- // Size of the pickled structure, NOT the total size of this entry.
- uint32_t size;
-
- // Calling this is only valid when the entry is initialized. That is, it
- // resides in shared memory and has a pickle containing the trial name and
- // group name following it.
- bool GetTrialAndGroupName(StringPiece* trial_name,
- StringPiece* group_name) const {
- char* src = reinterpret_cast<char*>(const_cast<FieldTrialEntry*>(this)) +
- sizeof(FieldTrialEntry);
-
- Pickle pickle(src, size);
- PickleIterator pickle_iter(pickle);
-
- if (!pickle_iter.ReadStringPiece(trial_name))
- return false;
- if (!pickle_iter.ReadStringPiece(group_name))
+// Writes out string1 and then string2 to pickle.
+bool WriteStringPair(Pickle* pickle,
+ const StringPiece& string1,
+ const StringPiece& string2) {
+ if (!pickle->WriteString(string1))
+ return false;
+ if (!pickle->WriteString(string2))
+ return false;
+ return true;
+}
+
+// Writes out the field trial's contents (via trial_state) to the pickle. The
+// format of the pickle looks like:
+// TrialName, GroupName, ParamKey1, ParamValue1, ParamKey2, ParamValue2, ...
+// If there are no parameters, then it just ends at GroupName.
+bool PickleFieldTrial(const FieldTrial::State& trial_state, Pickle* pickle) {
+ if (!WriteStringPair(pickle, trial_state.trial_name, trial_state.group_name))
+ return false;
+
+ // Get field trial params.
+ std::map<std::string, std::string> params;
+ FieldTrialParamAssociator::GetInstance()->GetFieldTrialParamsWithoutFallback(
+ trial_state.trial_name.as_string(), trial_state.group_name.as_string(),
+ &params);
+
+ // Write params to pickle.
+ for (const auto& param : params) {
+ if (!WriteStringPair(pickle, param.first, param.second))
return false;
- return true;
}
-};
+ return true;
+}
// Creates a time value based on |year|, |month| and |day_of_month| parameters.
Time CreateTimeFromParams(int year, int month, int day_of_month) {
@@ -167,12 +180,24 @@ bool ParseFieldTrialsString(const std::string& trials_string,
trials_string_piece.substr(name_end + 1, group_name_end - name_end - 1);
next_item = group_name_end + 1;
- entries->push_back(entry);
+ entries->push_back(std::move(entry));
}
return true;
}
-void AddForceFieldTrialsFlag(CommandLine* cmd_line) {
+void AddFeatureAndFieldTrialFlags(const char* enable_features_switch,
+ const char* disable_features_switch,
+ CommandLine* cmd_line) {
+ std::string enabled_features;
+ std::string disabled_features;
+ FeatureList::GetInstance()->GetFeatureOverrides(&enabled_features,
+ &disabled_features);
+
+ if (!enabled_features.empty())
+ cmd_line->AppendSwitchASCII(enable_features_switch, enabled_features);
+ if (!disabled_features.empty())
+ cmd_line->AppendSwitchASCII(disable_features_switch, disabled_features);
+
std::string field_trial_states;
FieldTrialList::AllStatesToString(&field_trial_states);
if (!field_trial_states.empty()) {
@@ -182,17 +207,26 @@ void AddForceFieldTrialsFlag(CommandLine* cmd_line) {
}
#if defined(OS_WIN)
-HANDLE CreateReadOnlyHandle(SharedPersistentMemoryAllocator* allocator) {
+HANDLE CreateReadOnlyHandle(FieldTrialList::FieldTrialAllocator* allocator) {
HANDLE src = allocator->shared_memory()->handle().GetHandle();
ProcessHandle process = GetCurrentProcess();
DWORD access = SECTION_MAP_READ | SECTION_QUERY;
HANDLE dst;
if (!::DuplicateHandle(process, src, process, &dst, access, true, 0))
- return nullptr;
+ return kInvalidPlatformFile;
return dst;
}
#endif
+#if defined(POSIX_WITH_ZYGOTE) || defined(OS_MACOSX)
+int CreateReadOnlyHandle(FieldTrialList::FieldTrialAllocator* allocator) {
+ SharedMemoryHandle new_handle;
+ allocator->shared_memory()->ShareReadOnlyToProcess(GetCurrentProcessHandle(),
+ &new_handle);
+ return SharedMemory::GetFdFromSharedMemoryHandle(new_handle);
+}
+#endif
+
} // namespace
// statics
@@ -214,6 +248,49 @@ FieldTrial::State::State(const State& other) = default;
FieldTrial::State::~State() {}
+bool FieldTrial::FieldTrialEntry::GetTrialAndGroupName(
+ StringPiece* trial_name,
+ StringPiece* group_name) const {
+ PickleIterator iter = GetPickleIterator();
+ return ReadStringPair(&iter, trial_name, group_name);
+}
+
+bool FieldTrial::FieldTrialEntry::GetParams(
+ std::map<std::string, std::string>* params) const {
+ PickleIterator iter = GetPickleIterator();
+ StringPiece tmp;
+ // Skip reading trial and group name.
+ if (!ReadStringPair(&iter, &tmp, &tmp))
+ return false;
+
+ while (true) {
+ StringPiece key;
+ StringPiece value;
+ if (!ReadStringPair(&iter, &key, &value))
+ return key.empty(); // Non-empty is bad: got one of a pair.
+ (*params)[key.as_string()] = value.as_string();
+ }
+}
+
+PickleIterator FieldTrial::FieldTrialEntry::GetPickleIterator() const {
+ const char* src =
+ reinterpret_cast<const char*>(this) + sizeof(FieldTrialEntry);
+
+ Pickle pickle(src, pickle_size);
+ return PickleIterator(pickle);
+}
+
+bool FieldTrial::FieldTrialEntry::ReadStringPair(
+ PickleIterator* iter,
+ StringPiece* trial_name,
+ StringPiece* group_name) const {
+ if (!iter->ReadStringPiece(trial_name))
+ return false;
+ if (!iter->ReadStringPiece(group_name))
+ return false;
+ return true;
+}
+
void FieldTrial::Disable() {
DCHECK(!group_reported_);
enable_field_trial_ = false;
@@ -325,7 +402,7 @@ FieldTrial::FieldTrial(const std::string& trial_name,
forced_(false),
group_reported_(false),
trial_registered_(false),
- ref_(SharedPersistentMemoryAllocator::kReferenceNull) {
+ ref_(FieldTrialList::FieldTrialAllocator::kReferenceNull) {
DCHECK_GT(total_probability, 0);
DCHECK(!trial_name_.empty());
DCHECK(!default_group_name_.empty());
@@ -362,7 +439,7 @@ void FieldTrial::FinalizeGroupChoiceImpl(bool is_locked) {
SetGroupChoice(default_group_name_, kDefaultGroupNumber);
// Add the field trial to shared memory.
- if (kUseSharedMemoryForFieldTrials)
+ if (kUseSharedMemoryForFieldTrials && trial_registered_)
FieldTrialList::OnGroupFinalized(is_locked, this);
}
@@ -625,6 +702,36 @@ void FieldTrialList::GetActiveFieldTrialGroupsFromString(
}
// static
+void FieldTrialList::GetInitiallyActiveFieldTrials(
+ const base::CommandLine& command_line,
+ FieldTrial::ActiveGroups* active_groups) {
+ DCHECK(global_->create_trials_from_command_line_called_);
+
+ if (!global_->field_trial_allocator_) {
+ GetActiveFieldTrialGroupsFromString(
+ command_line.GetSwitchValueASCII(switches::kForceFieldTrials),
+ active_groups);
+ return;
+ }
+
+ FieldTrialAllocator* allocator = global_->field_trial_allocator_.get();
+ FieldTrialAllocator::Iterator mem_iter(allocator);
+ const FieldTrial::FieldTrialEntry* entry;
+ while ((entry = mem_iter.GetNextOfObject<FieldTrial::FieldTrialEntry>()) !=
+ nullptr) {
+ StringPiece trial_name;
+ StringPiece group_name;
+ if (subtle::NoBarrier_Load(&entry->activated) &&
+ entry->GetTrialAndGroupName(&trial_name, &group_name)) {
+ FieldTrial::ActiveGroup group;
+ group.trial_name = trial_name.as_string();
+ group.group_name = group_name.as_string();
+ active_groups->push_back(group);
+ }
+ }
+}
+
+// static
bool FieldTrialList::CreateTrialsFromString(
const std::string& trials_string,
const std::set<std::string>& ignored_trial_names) {
@@ -659,29 +766,26 @@ bool FieldTrialList::CreateTrialsFromString(
// static
void FieldTrialList::CreateTrialsFromCommandLine(
const CommandLine& cmd_line,
- const char* field_trial_handle_switch) {
- DCHECK(global_);
+ const char* field_trial_handle_switch,
+ int fd_key) {
+ global_->create_trials_from_command_line_called_ = true;
-#if defined(OS_WIN) && !defined(OS_NACL)
+#if defined(OS_WIN)
if (cmd_line.HasSwitch(field_trial_handle_switch)) {
- std::string arg = cmd_line.GetSwitchValueASCII(field_trial_handle_switch);
- size_t token = arg.find(",");
- int field_trial_handle = std::stoi(arg.substr(0, token));
- size_t field_trial_length = std::stoi(arg.substr(token + 1, arg.length()));
-
- HANDLE handle = reinterpret_cast<HANDLE>(field_trial_handle);
- SharedMemoryHandle shm_handle =
- SharedMemoryHandle(handle, GetCurrentProcId());
-
- // Gets deleted when it gets out of scope, but that's OK because we need it
- // only for the duration of this method.
- std::unique_ptr<SharedMemory> shm(new SharedMemory(shm_handle, true));
- if (!shm.get()->Map(field_trial_length))
- TerminateBecauseOutOfMemory(field_trial_length);
-
- bool result = FieldTrialList::CreateTrialsFromSharedMemory(std::move(shm));
+ std::string handle_switch =
+ cmd_line.GetSwitchValueASCII(field_trial_handle_switch);
+ bool result = CreateTrialsFromHandleSwitch(handle_switch);
+ DCHECK(result);
+ }
+#endif
+
+#if defined(POSIX_WITH_ZYGOTE) || defined(OS_MACOSX)
+ // On POSIX, we check if the handle is valid by seeing if the browser process
+ // sent over the switch (we don't care about the value). Invalid handles
+ // occur in some browser tests which don't initialize the allocator.
+ if (cmd_line.HasSwitch(field_trial_handle_switch)) {
+ bool result = CreateTrialsFromDescriptor(fd_key);
DCHECK(result);
- return;
}
#endif
@@ -693,6 +797,49 @@ void FieldTrialList::CreateTrialsFromCommandLine(
}
}
+// static
+void FieldTrialList::CreateFeaturesFromCommandLine(
+ const base::CommandLine& command_line,
+ const char* enable_features_switch,
+ const char* disable_features_switch,
+ FeatureList* feature_list) {
+ // Fallback to command line if not using shared memory.
+ if (!kUseSharedMemoryForFieldTrials ||
+ !global_->field_trial_allocator_.get()) {
+ return feature_list->InitializeFromCommandLine(
+ command_line.GetSwitchValueASCII(enable_features_switch),
+ command_line.GetSwitchValueASCII(disable_features_switch));
+ }
+
+ feature_list->InitializeFromSharedMemory(
+ global_->field_trial_allocator_.get());
+}
+
+#if defined(POSIX_WITH_ZYGOTE) || defined(OS_MACOSX)
+// static
+bool FieldTrialList::CreateTrialsFromDescriptor(int fd_key) {
+ if (!kUseSharedMemoryForFieldTrials)
+ return false;
+
+ if (fd_key == -1)
+ return false;
+
+ int fd = GlobalDescriptors::GetInstance()->MaybeGet(fd_key);
+ if (fd == -1)
+ return false;
+
+#if defined(POSIX_WITH_ZYGOTE)
+ SharedMemoryHandle shm_handle(fd, true);
+#elif defined(OS_MACOSX)
+ SharedMemoryHandle shm_handle(FileDescriptor(fd, true));
+#endif
+
+ bool result = FieldTrialList::CreateTrialsFromSharedMemoryHandle(shm_handle);
+ DCHECK(result);
+ return true;
+}
+#endif
+
#if defined(OS_WIN)
// static
void FieldTrialList::AppendFieldTrialHandleIfNeeded(
@@ -707,46 +854,77 @@ void FieldTrialList::AppendFieldTrialHandleIfNeeded(
}
#endif
+#if defined(OS_POSIX) && !defined(OS_NACL)
+// static
+int FieldTrialList::GetFieldTrialHandle() {
+ if (global_ && kUseSharedMemoryForFieldTrials) {
+ InstantiateFieldTrialAllocatorIfNeeded();
+ // We check for an invalid handle where this gets called.
+ return global_->readonly_allocator_handle_;
+ }
+ return kInvalidPlatformFile;
+}
+#endif
+
// static
void FieldTrialList::CopyFieldTrialStateToFlags(
const char* field_trial_handle_switch,
+ const char* enable_features_switch,
+ const char* disable_features_switch,
CommandLine* cmd_line) {
// TODO(lawrencewu): Ideally, having the global would be guaranteed. However,
// content browser tests currently don't create a FieldTrialList because they
// don't run ChromeBrowserMainParts code where it's done for Chrome.
- if (!global_)
+ // Some tests depend on the enable and disable features flag switch, though,
+ // so we can still add those even though AllStatesToString() will be a no-op.
+ if (!global_) {
+ AddFeatureAndFieldTrialFlags(enable_features_switch,
+ disable_features_switch, cmd_line);
return;
+ }
-#if defined(OS_WIN)
+#if defined(OS_WIN) || defined(POSIX_WITH_ZYGOTE) || defined(OS_MACOSX)
// Use shared memory to pass the state if the feature is enabled, otherwise
// fallback to passing it via the command line as a string.
if (kUseSharedMemoryForFieldTrials) {
InstantiateFieldTrialAllocatorIfNeeded();
// If the readonly handle didn't get duplicated properly, then fallback to
// original behavior.
- if (!global_->readonly_allocator_handle_) {
- AddForceFieldTrialsFlag(cmd_line);
+ if (global_->readonly_allocator_handle_ == kInvalidPlatformFile) {
+ AddFeatureAndFieldTrialFlags(enable_features_switch,
+ disable_features_switch, cmd_line);
return;
}
- // HANDLE is just typedef'd to void *. We basically cast the handle into an
- // int (uintptr_t, to be exact), stringify the int, and pass it as a
- // command-line flag. The child process will do the reverse conversions to
- // retrieve the handle. See http://stackoverflow.com/a/153077
+ global_->field_trial_allocator_->UpdateTrackingHistograms();
+
+#if defined(OS_WIN)
+ // We need to pass a named anonymous handle to shared memory over the
+ // command line on Windows, since the child doesn't know which of the
+ // handles it inherited it should open.
+ // PlatformFile is typedef'd to HANDLE which is typedef'd to void *. We
+ // basically cast the handle into an int (uintptr_t, to be exact), stringify
+ // the int, and pass it as a command-line flag. The child process will do
+ // the reverse conversions to retrieve the handle. See
+ // http://stackoverflow.com/a/153077
auto uintptr_handle =
reinterpret_cast<uintptr_t>(global_->readonly_allocator_handle_);
- size_t field_trial_length =
- global_->field_trial_allocator_->shared_memory()->mapped_size();
- std::string field_trial_handle = std::to_string(uintptr_handle) + "," +
- std::to_string(field_trial_length);
-
+ std::string field_trial_handle = std::to_string(uintptr_handle);
cmd_line->AppendSwitchASCII(field_trial_handle_switch, field_trial_handle);
- global_->field_trial_allocator_->UpdateTrackingHistograms();
+#elif defined(POSIX_WITH_ZYGOTE) || defined(OS_MACOSX)
+ // On POSIX, we dup the fd into a fixed fd kFieldTrialDescriptor, so we
+ // don't have to pass over the handle (it's not even the right handle
+ // anyway). But some browser tests don't create the allocator, so we need
+ // to be able to distinguish valid and invalid handles. We do that by just
+ // checking that the flag is set with a dummy value.
+ cmd_line->AppendSwitchASCII(field_trial_handle_switch, "1");
+#endif
return;
}
#endif
- AddForceFieldTrialsFlag(cmd_line);
+ AddFeatureAndFieldTrialFlags(enable_features_switch, disable_features_switch,
+ cmd_line);
}
// static
@@ -794,10 +972,12 @@ void FieldTrialList::OnGroupFinalized(bool is_locked, FieldTrial* field_trial) {
if (!global_)
return;
if (is_locked) {
- AddToAllocatorWhileLocked(field_trial);
+ AddToAllocatorWhileLocked(global_->field_trial_allocator_.get(),
+ field_trial);
} else {
AutoLock auto_lock(global_->lock_);
- AddToAllocatorWhileLocked(field_trial);
+ AddToAllocatorWhileLocked(global_->field_trial_allocator_.get(),
+ field_trial);
}
}
@@ -833,20 +1013,167 @@ size_t FieldTrialList::GetFieldTrialCount() {
}
// static
+bool FieldTrialList::GetParamsFromSharedMemory(
+ FieldTrial* field_trial,
+ std::map<std::string, std::string>* params) {
+ DCHECK(global_);
+ // If the field trial allocator is not set up yet, then there are several
+ // cases:
+ // - We are in the browser process and the allocator has not been set up
+ // yet. If we got here, then we couldn't find the params in
+ // FieldTrialParamAssociator, so it's definitely not here. Return false.
+ // - Using shared memory for field trials is not enabled. If we got here,
+ // then there's nothing in shared memory. Return false.
+ // - We are in the child process and the allocator has not been set up yet.
+ // If this is the case, then you are calling this too early. The field trial
+ // allocator should get set up very early in the lifecycle. Try to see if
+ // you can call it after it's been set up.
+ AutoLock auto_lock(global_->lock_);
+ if (!global_->field_trial_allocator_)
+ return false;
+
+ // If ref_ isn't set, then the field trial data can't be in shared memory.
+ if (!field_trial->ref_)
+ return false;
+
+ const FieldTrial::FieldTrialEntry* entry =
+ global_->field_trial_allocator_->GetAsObject<FieldTrial::FieldTrialEntry>(
+ field_trial->ref_);
+
+ size_t allocated_size =
+ global_->field_trial_allocator_->GetAllocSize(field_trial->ref_);
+ size_t actual_size = sizeof(FieldTrial::FieldTrialEntry) + entry->pickle_size;
+ if (allocated_size < actual_size)
+ return false;
+
+ return entry->GetParams(params);
+}
+
+// static
+void FieldTrialList::ClearParamsFromSharedMemoryForTesting() {
+ if (!global_)
+ return;
+
+ AutoLock auto_lock(global_->lock_);
+ if (!global_->field_trial_allocator_)
+ return;
+
+ // To clear the params, we iterate through every item in the allocator, copy
+ // just the trial and group name into a newly-allocated segment and then clear
+ // the existing item.
+ FieldTrialAllocator* allocator = global_->field_trial_allocator_.get();
+ FieldTrialAllocator::Iterator mem_iter(allocator);
+
+ // List of refs to eventually be made iterable. We can't make them iterable
+ // inside the loop, since the iteration would then never terminate.
+ std::vector<FieldTrial::FieldTrialRef> new_refs;
+
+ FieldTrial::FieldTrialRef prev_ref;
+ while ((prev_ref = mem_iter.GetNextOfType<FieldTrial::FieldTrialEntry>()) !=
+ FieldTrialAllocator::kReferenceNull) {
+ // Get the existing field trial entry in shared memory.
+ const FieldTrial::FieldTrialEntry* prev_entry =
+ allocator->GetAsObject<FieldTrial::FieldTrialEntry>(prev_ref);
+ StringPiece trial_name;
+ StringPiece group_name;
+ if (!prev_entry->GetTrialAndGroupName(&trial_name, &group_name))
+ continue;
+
+ // Write a new entry, minus the params.
+ Pickle pickle;
+ pickle.WriteString(trial_name);
+ pickle.WriteString(group_name);
+ size_t total_size = sizeof(FieldTrial::FieldTrialEntry) + pickle.size();
+ FieldTrial::FieldTrialEntry* new_entry =
+ allocator->AllocateObject<FieldTrial::FieldTrialEntry>(total_size);
+ subtle::NoBarrier_Store(&new_entry->activated,
+ subtle::NoBarrier_Load(&prev_entry->activated));
+ new_entry->pickle_size = pickle.size();
+
+ // TODO(lawrencewu): Modify base::Pickle to be able to write over a section
+ // in memory, so we can avoid this memcpy.
+ char* dst = reinterpret_cast<char*>(new_entry) +
+ sizeof(FieldTrial::FieldTrialEntry);
+ memcpy(dst, pickle.data(), pickle.size());
+
+ // Update the ref on the field trial and add it to the list to be made
+ // iterable.
+ FieldTrial::FieldTrialRef new_ref = allocator->GetAsReference(new_entry);
+ FieldTrial* trial = global_->PreLockedFind(trial_name.as_string());
+ trial->ref_ = new_ref;
+ new_refs.push_back(new_ref);
+
+ // Mark the existing entry as unused.
+ allocator->ChangeType(prev_ref, 0,
+ FieldTrial::FieldTrialEntry::kPersistentTypeId);
+ }
+
+ for (const auto& ref : new_refs) {
+ allocator->MakeIterable(ref);
+ }
+}
+
+// static
+void FieldTrialList::DumpAllFieldTrialsToPersistentAllocator(
+ PersistentMemoryAllocator* allocator) {
+ if (!global_)
+ return;
+ AutoLock auto_lock(global_->lock_);
+ for (const auto& registered : global_->registered_) {
+ AddToAllocatorWhileLocked(allocator, registered.second);
+ }
+}
+
+// static
+std::vector<const FieldTrial::FieldTrialEntry*>
+FieldTrialList::GetAllFieldTrialsFromPersistentAllocator(
+ PersistentMemoryAllocator const& allocator) {
+ std::vector<const FieldTrial::FieldTrialEntry*> entries;
+ FieldTrialAllocator::Iterator iter(&allocator);
+ const FieldTrial::FieldTrialEntry* entry;
+ while ((entry = iter.GetNextOfObject<FieldTrial::FieldTrialEntry>()) !=
+ nullptr) {
+ entries.push_back(entry);
+ }
+ return entries;
+}
+
+#if defined(OS_WIN)
+// static
+bool FieldTrialList::CreateTrialsFromHandleSwitch(
+ const std::string& handle_switch) {
+ int field_trial_handle = std::stoi(handle_switch);
+ HANDLE handle = reinterpret_cast<HANDLE>(field_trial_handle);
+ SharedMemoryHandle shm_handle(handle, GetCurrentProcId());
+ return FieldTrialList::CreateTrialsFromSharedMemoryHandle(shm_handle);
+}
+#endif
+
+#if !defined(OS_NACL)
+// static
+bool FieldTrialList::CreateTrialsFromSharedMemoryHandle(
+ SharedMemoryHandle shm_handle) {
+ // shm gets deleted when it goes out of scope, but that's OK because we need
+ // it only for the duration of this method.
+ std::unique_ptr<SharedMemory> shm(new SharedMemory(shm_handle, true));
+ if (!shm.get()->Map(kFieldTrialAllocationSize))
+ TerminateBecauseOutOfMemory(kFieldTrialAllocationSize);
+
+ return FieldTrialList::CreateTrialsFromSharedMemory(std::move(shm));
+}
+#endif
+
+// static
bool FieldTrialList::CreateTrialsFromSharedMemory(
std::unique_ptr<SharedMemory> shm) {
- global_->field_trial_allocator_.reset(new SharedPersistentMemoryAllocator(
- std::move(shm), 0, kAllocatorName, true));
- SharedPersistentMemoryAllocator* shalloc =
- global_->field_trial_allocator_.get();
- PersistentMemoryAllocator::Iterator mem_iter(shalloc);
-
- SharedPersistentMemoryAllocator::Reference ref;
- while ((ref = mem_iter.GetNextOfType(kFieldTrialType)) !=
- SharedPersistentMemoryAllocator::kReferenceNull) {
- const FieldTrialEntry* entry =
- shalloc->GetAsObject<const FieldTrialEntry>(ref, kFieldTrialType);
-
+ global_->field_trial_allocator_.reset(
+ new FieldTrialAllocator(std::move(shm), 0, kAllocatorName, true));
+ FieldTrialAllocator* shalloc = global_->field_trial_allocator_.get();
+ FieldTrialAllocator::Iterator mem_iter(shalloc);
+
+ const FieldTrial::FieldTrialEntry* entry;
+ while ((entry = mem_iter.GetNextOfObject<FieldTrial::FieldTrialEntry>()) !=
+ nullptr) {
StringPiece trial_name;
StringPiece group_name;
if (!entry->GetTrialAndGroupName(&trial_name, &group_name))
@@ -857,26 +1184,8 @@ bool FieldTrialList::CreateTrialsFromSharedMemory(
FieldTrial* trial =
CreateFieldTrial(trial_name.as_string(), group_name.as_string());
- // If we failed to create the field trial, crash with debug info.
- // TODO(665129): Remove this when the crash is resolved.
- if (!trial) {
- std::string trial_name_string = trial_name.as_string();
- std::string group_name_string = group_name.as_string();
- FieldTrial* existing_field_trial =
- FieldTrialList::Find(trial_name_string);
- if (existing_field_trial)
- debug::Alias(existing_field_trial->group_name_internal().c_str());
- debug::Alias(trial_name_string.c_str());
- debug::Alias(group_name_string.c_str());
- CHECK(!trial_name_string.empty());
- CHECK(!group_name_string.empty());
- CHECK_EQ(existing_field_trial->group_name_internal(),
- group_name.as_string());
- return false;
- }
-
- trial->ref_ = ref;
- if (entry->activated) {
+ trial->ref_ = mem_iter.GetAsReference(entry);
+ if (subtle::NoBarrier_Load(&entry->activated)) {
// Call |group()| to mark the trial as "used" and notify observers, if
// any. This is useful to ensure that field trials created in child
// processes are properly reported in crash reports.
@@ -896,20 +1205,35 @@ void FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded() {
if (global_->field_trial_allocator_ != nullptr)
return;
+ SharedMemoryCreateOptions options;
+ options.size = kFieldTrialAllocationSize;
+ options.share_read_only = true;
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ options.type = SharedMemoryHandle::POSIX;
+#endif
+
std::unique_ptr<SharedMemory> shm(new SharedMemory());
- if (!shm->CreateAndMapAnonymous(kFieldTrialAllocationSize))
+ if (!shm->Create(options))
+ TerminateBecauseOutOfMemory(kFieldTrialAllocationSize);
+
+ if (!shm->Map(kFieldTrialAllocationSize))
TerminateBecauseOutOfMemory(kFieldTrialAllocationSize);
- global_->field_trial_allocator_.reset(new SharedPersistentMemoryAllocator(
- std::move(shm), 0, kAllocatorName, false));
+ global_->field_trial_allocator_.reset(
+ new FieldTrialAllocator(std::move(shm), 0, kAllocatorName, false));
global_->field_trial_allocator_->CreateTrackingHistograms(kAllocatorName);
// Add all existing field trials.
for (const auto& registered : global_->registered_) {
- AddToAllocatorWhileLocked(registered.second);
+ AddToAllocatorWhileLocked(global_->field_trial_allocator_.get(),
+ registered.second);
}
-#if defined(OS_WIN)
+ // Add all existing features.
+ FeatureList::GetInstance()->AddFeaturesToAllocator(
+ global_->field_trial_allocator_.get());
+
+#if defined(OS_WIN) || defined(POSIX_WITH_ZYGOTE) || defined(OS_MACOSX)
// Set |readonly_allocator_handle_| so we can pass it to be inherited and
// via the command line.
global_->readonly_allocator_handle_ =
@@ -919,10 +1243,9 @@ void FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded() {
#endif
// static
-void FieldTrialList::AddToAllocatorWhileLocked(FieldTrial* field_trial) {
- SharedPersistentMemoryAllocator* allocator =
- global_->field_trial_allocator_.get();
-
+void FieldTrialList::AddToAllocatorWhileLocked(
+ PersistentMemoryAllocator* allocator,
+ FieldTrial* field_trial) {
// Don't do anything if the allocator hasn't been instantiated yet.
if (allocator == nullptr)
return;
@@ -942,23 +1265,28 @@ void FieldTrialList::AddToAllocatorWhileLocked(FieldTrial* field_trial) {
return;
Pickle pickle;
- pickle.WriteString(trial_state.trial_name);
- pickle.WriteString(trial_state.group_name);
+ if (!PickleFieldTrial(trial_state, &pickle)) {
+ NOTREACHED();
+ return;
+ }
- size_t total_size = sizeof(FieldTrialEntry) + pickle.size();
- SharedPersistentMemoryAllocator::Reference ref =
- allocator->Allocate(total_size, kFieldTrialType);
- if (ref == SharedPersistentMemoryAllocator::kReferenceNull)
+ size_t total_size = sizeof(FieldTrial::FieldTrialEntry) + pickle.size();
+ FieldTrial::FieldTrialRef ref = allocator->Allocate(
+ total_size, FieldTrial::FieldTrialEntry::kPersistentTypeId);
+ if (ref == FieldTrialAllocator::kReferenceNull) {
+ NOTREACHED();
return;
+ }
- FieldTrialEntry* entry =
- allocator->GetAsObject<FieldTrialEntry>(ref, kFieldTrialType);
- entry->activated = trial_state.activated;
- entry->size = pickle.size();
+ FieldTrial::FieldTrialEntry* entry =
+ allocator->GetAsObject<FieldTrial::FieldTrialEntry>(ref);
+ subtle::NoBarrier_Store(&entry->activated, trial_state.activated);
+ entry->pickle_size = pickle.size();
// TODO(lawrencewu): Modify base::Pickle to be able to write over a section in
// memory, so we can avoid this memcpy.
- char* dst = reinterpret_cast<char*>(entry) + sizeof(FieldTrialEntry);
+ char* dst =
+ reinterpret_cast<char*>(entry) + sizeof(FieldTrial::FieldTrialEntry);
memcpy(dst, pickle.data(), pickle.size());
allocator->MakeIterable(ref);
@@ -968,25 +1296,25 @@ void FieldTrialList::AddToAllocatorWhileLocked(FieldTrial* field_trial) {
// static
void FieldTrialList::ActivateFieldTrialEntryWhileLocked(
FieldTrial* field_trial) {
- SharedPersistentMemoryAllocator* allocator =
- global_->field_trial_allocator_.get();
+ FieldTrialAllocator* allocator = global_->field_trial_allocator_.get();
// Check if we're in the child process and return early if so.
if (allocator && allocator->IsReadonly())
return;
- SharedPersistentMemoryAllocator::Reference ref = field_trial->ref_;
- if (ref == SharedPersistentMemoryAllocator::kReferenceNull) {
+ FieldTrial::FieldTrialRef ref = field_trial->ref_;
+ if (ref == FieldTrialAllocator::kReferenceNull) {
// It's fine to do this even if the allocator hasn't been instantiated
// yet -- it'll just return early.
- AddToAllocatorWhileLocked(field_trial);
+ AddToAllocatorWhileLocked(global_->field_trial_allocator_.get(),
+ field_trial);
} else {
// It's also okay to do this even though the callee doesn't have a lock --
// the only thing that happens on a stale read here is a slight performance
// hit from the child re-synchronizing activation state.
- FieldTrialEntry* entry =
- allocator->GetAsObject<FieldTrialEntry>(ref, kFieldTrialType);
- entry->activated = true;
+ FieldTrial::FieldTrialEntry* entry =
+ allocator->GetAsObject<FieldTrial::FieldTrialEntry>(ref);
+ subtle::NoBarrier_Store(&entry->activated, 1);
}
}
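To make the pickle layout used throughout field_trial.cc concrete (PickleFieldTrial() writes TrialName, GroupName, then alternating parameter key/value strings, and FieldTrialEntry::GetParams() reads them back), here is a hedged round-trip sketch with made-up trial and parameter names:

#include <map>
#include <string>

#include "base/pickle.h"

// Round-trip sketch: write a trial the way PickleFieldTrial() does and read
// it back the way FieldTrialEntry::GetParams() does. All names are made up.
std::map<std::string, std::string> PickleRoundTripExample() {
  base::Pickle pickle;
  pickle.WriteString("MyTrial");    // TrialName
  pickle.WriteString("GroupA");     // GroupName
  pickle.WriteString("max_items");  // ParamKey1
  pickle.WriteString("25");         // ParamValue1

  base::PickleIterator iter(pickle);
  std::string trial, group, key, value;
  std::map<std::string, std::string> params;
  if (!iter.ReadString(&trial) || !iter.ReadString(&group))
    return params;
  while (iter.ReadString(&key) && iter.ReadString(&value))
    params[key] = value;  // Each key/value pair is one field trial parameter.
  return params;
}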
diff --git a/chromium/base/metrics/field_trial.h b/chromium/base/metrics/field_trial.h
index 7fd067ad74c..5c5240c42af 100644
--- a/chromium/base/metrics/field_trial.h
+++ b/chromium/base/metrics/field_trial.h
@@ -63,14 +63,18 @@
#include <string>
#include <vector>
+#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/command_line.h"
+#include "base/feature_list.h"
+#include "base/files/file.h"
#include "base/gtest_prod_util.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/memory/shared_memory.h"
#include "base/metrics/persistent_memory_allocator.h"
#include "base/observer_list_threadsafe.h"
+#include "base/pickle.h"
#include "base/process/launch.h"
#include "base/strings/string_piece.h"
#include "base/synchronization/lock.h"
@@ -84,6 +88,9 @@ class BASE_EXPORT FieldTrial : public RefCounted<FieldTrial> {
public:
typedef int Probability; // Probability type for being selected in a trial.
+ // TODO(665129): Make private again after crash has been resolved.
+ typedef SharedPersistentMemoryAllocator::Reference FieldTrialRef;
+
// Specifies the persistence of the field trial group choice.
enum RandomizationType {
// One time randomized trials will persist the group choice between
@@ -128,6 +135,48 @@ class BASE_EXPORT FieldTrial : public RefCounted<FieldTrial> {
~State();
};
+ // We create one FieldTrialEntry per field trial in shared memory, via
+ // AddToAllocatorWhileLocked. The FieldTrialEntry is followed by a
+ // base::Pickle object that we unpickle and read from.
+ struct BASE_EXPORT FieldTrialEntry {
+ // SHA1(FieldTrialEntry): Increment this if structure changes!
+ static constexpr uint32_t kPersistentTypeId = 0xABA17E13 + 2;
+
+ // Expected size for 32/64-bit check.
+ static constexpr size_t kExpectedInstanceSize = 8;
+
+ // Whether or not this field trial is activated. This is really just a
+ // boolean but using a 32 bit value for portability reasons. It should be
+ // accessed via NoBarrier_Load()/NoBarrier_Store() to prevent the compiler
+ // from doing unexpected optimizations because it thinks that only one
+ // thread is accessing the memory location.
+ subtle::Atomic32 activated;
+
+ // Size of the pickled structure, NOT the total size of this entry.
+ uint32_t pickle_size;
+
+ // Calling this is only valid when the entry is initialized. That is, it
+ // resides in shared memory and has a pickle containing the trial name and
+ // group name following it.
+ bool GetTrialAndGroupName(StringPiece* trial_name,
+ StringPiece* group_name) const;
+
+ // Calling this is only valid when the entry is initialized as well. Reads
+ // the parameters following the trial and group name and stores them as
+ // key-value mappings in |params|.
+ bool GetParams(std::map<std::string, std::string>* params) const;
+
+ private:
+ // Returns an iterator over the data containing names and params.
+ PickleIterator GetPickleIterator() const;
+
+ // Takes the iterator and writes out the first two items into |trial_name|
+ // and |group_name|.
+ bool ReadStringPair(PickleIterator* iter,
+ StringPiece* trial_name,
+ StringPiece* group_name) const;
+ };
+
typedef std::vector<ActiveGroup> ActiveGroups;
// A return value to indicate that a given instance has not yet had a group
@@ -218,6 +267,9 @@ class BASE_EXPORT FieldTrial : public RefCounted<FieldTrial> {
FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, SetForcedChangeDefault_NonDefault);
FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, FloatBoundariesGiveEqualGroupSizes);
FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, DoesNotSurpassTotalProbability);
+ FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest,
+ DoNotAddSimulatedFieldTrialsToAllocator);
+ FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, ClearParamsFromSharedMemory);
friend class base::FieldTrialList;
@@ -322,7 +374,7 @@ class BASE_EXPORT FieldTrial : public RefCounted<FieldTrial> {
bool trial_registered_;
// Reference to related field trial struct and data in shared memory.
- SharedPersistentMemoryAllocator::Reference ref_;
+ FieldTrialRef ref_;
// When benchmarking is enabled, field trials all revert to the 'default'
// group.
@@ -337,6 +389,8 @@ class BASE_EXPORT FieldTrial : public RefCounted<FieldTrial> {
// Only one instance of this class exists.
class BASE_EXPORT FieldTrialList {
public:
+ typedef SharedPersistentMemoryAllocator FieldTrialAllocator;
+
// Year that is guaranteed to not be expired when instantiating a field trial
// via |FactoryGetFieldTrial()|. Set to two years from the build date.
static int kNoExpirationYear;
@@ -466,6 +520,14 @@ class BASE_EXPORT FieldTrialList {
const std::string& trials_string,
FieldTrial::ActiveGroups* active_groups);
+ // Returns the field trials that were active when the process was
+ // created. Either parses the field trial string or the shared memory
+ // holding field trial information.
+ // Must be called only after a call to CreateTrialsFromCommandLine().
+ static void GetInitiallyActiveFieldTrials(
+ const base::CommandLine& command_line,
+ FieldTrial::ActiveGroups* active_groups);
+
// Use a state string (re: StatesToString()) to augment the current list of
// field trials to include the supplied trials, and using a 100% probability
// for each trial, force them to have the same group string. This is commonly
@@ -481,16 +543,23 @@ class BASE_EXPORT FieldTrialList {
// Achieves the same thing as CreateTrialsFromString, except wraps the logic
// by taking in the trials from the command line, either via shared memory
- // handle or command line argument.
- // If using shared memory to pass around the list of field trials, then
- // expects |field_trial_handle_switch| command line argument to
- // contain the shared memory handle.
- // If not, then create the trials as before (using the kForceFieldTrials
- // switch). Needs the |field_trial_handle_switch| argument to be passed in
- // since base/ can't depend on content/.
- static void CreateTrialsFromCommandLine(
- const base::CommandLine& cmd_line,
- const char* field_trial_handle_switch);
+ // handle or command line argument. A bit of a misnomer since on POSIX we
+ // simply get the trials from opening |fd_key| if using shared memory. On
+ // Windows, we expect the |cmd_line| switch for |field_trial_handle_switch| to
+ // contain the shared memory handle that contains the field trial allocator.
+ // We need the |field_trial_handle_switch| and |fd_key| arguments to be passed
+ // in since base/ can't depend on content/.
+ static void CreateTrialsFromCommandLine(const base::CommandLine& cmd_line,
+ const char* field_trial_handle_switch,
+ int fd_key);
+
+ // Creates base::Feature overrides from the command line by first trying to
+ // use shared memory and then falling back to the command line if it fails.
+ static void CreateFeaturesFromCommandLine(
+ const base::CommandLine& command_line,
+ const char* enable_features_switch,
+ const char* disable_features_switch,
+ FeatureList* feature_list);
#if defined(OS_WIN)
// On Windows, we need to explicitly pass down any handles to be inherited.
@@ -498,6 +567,11 @@ class BASE_EXPORT FieldTrialList {
// list of handles to be inherited.
static void AppendFieldTrialHandleIfNeeded(
base::HandlesToInheritVector* handles);
+#elif defined(OS_POSIX)
+ // On POSIX, we also need to explicitly pass down this file descriptor that
+ // should be shared with the child process. Returns kInvalidPlatformFile if no
+ // handle exists or the handle was not initialized properly.
+ static PlatformFile GetFieldTrialHandle();
#endif
// Adds a switch to the command line containing the field trial state as a
@@ -506,6 +580,8 @@ class BASE_EXPORT FieldTrialList {
// Needs the |field_trial_handle_switch| argument to be passed in since base/
// can't depend on content/.
static void CopyFieldTrialStateToFlags(const char* field_trial_handle_switch,
+ const char* enable_features_switch,
+ const char* disable_features_switch,
base::CommandLine* cmd_line);
// Create a FieldTrial with the given |name| and using 100% probability for
@@ -535,10 +611,54 @@ class BASE_EXPORT FieldTrialList {
// Return the number of active field trials.
static size_t GetFieldTrialCount();
+ // Gets the parameters for |field_trial| from shared memory and stores them in
+ // |params|. This is only exposed for use by FieldTrialParamAssociator and
+ // shouldn't be used by anything else.
+ static bool GetParamsFromSharedMemory(
+ FieldTrial* field_trial,
+ std::map<std::string, std::string>* params);
+
+ // Clears all the params in the allocator.
+ static void ClearParamsFromSharedMemoryForTesting();
+
+ // Dumps field trial state to an allocator so that it can be analyzed after a
+ // crash.
+ static void DumpAllFieldTrialsToPersistentAllocator(
+ PersistentMemoryAllocator* allocator);
+
+ // Retrieves field trial state from an allocator so that it can be analyzed
+ // after a crash. The pointers in the returned vector are into the persistent
+ // memory segment and so are only valid as long as the allocator is valid.
+ static std::vector<const FieldTrial::FieldTrialEntry*>
+ GetAllFieldTrialsFromPersistentAllocator(
+ PersistentMemoryAllocator const& allocator);
+
private:
// Allow tests to access our innards for testing purposes.
FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, InstantiateAllocator);
FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, AddTrialsToAllocator);
+ FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest,
+ DoNotAddSimulatedFieldTrialsToAllocator);
+ FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, AssociateFieldTrialParams);
+ FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, ClearParamsFromSharedMemory);
+
+#if defined(OS_WIN)
+ // Takes in |handle_switch| from the command line which represents the shared
+ // memory handle for field trials, parses it, and creates the field trials.
+ // Returns true on success, false on failure.
+ static bool CreateTrialsFromHandleSwitch(const std::string& handle_switch);
+#endif
+
+#if defined(OS_POSIX) && !defined(OS_NACL) && !defined(OS_ANDROID)
+ // On POSIX systems that use the zygote, we look up the fd that backs the
+ // shared memory segment containing the field trials via an fd key in
+ // GlobalDescriptors. Returns true on success, false on failure.
+ static bool CreateTrialsFromDescriptor(int fd_key);
+#endif
+
+ // Takes an unmapped SharedMemoryHandle, creates a SharedMemory object from it
+ // and maps it with the correct size.
+ static bool CreateTrialsFromSharedMemoryHandle(SharedMemoryHandle shm_handle);
// Expects a mapped piece of shared memory |shm| that was created from the
// browser process's field_trial_allocator and shared via the command line.
@@ -555,7 +675,8 @@ class BASE_EXPORT FieldTrialList {
// Adds the field trial to the allocator. Caller must hold a lock before
// calling this.
- static void AddToAllocatorWhileLocked(FieldTrial* field_trial);
+ static void AddToAllocatorWhileLocked(PersistentMemoryAllocator* allocator,
+ FieldTrial* field_trial);
// Activate the corresponding field trial entry struct in shared memory.
static void ActivateFieldTrialEntryWhileLocked(FieldTrial* field_trial);
@@ -584,8 +705,8 @@ class BASE_EXPORT FieldTrialList {
// FieldTrialList is created after that.
static bool used_without_global_;
- // Lock for access to registered_.
- base::Lock lock_;
+ // Lock for access to registered_ and field_trial_allocator_.
+ Lock lock_;
RegistrationMap registered_;
std::map<std::string, std::string> seen_states_;
@@ -601,15 +722,15 @@ class BASE_EXPORT FieldTrialList {
// browser and child processes, but readonly in the child.
// In the future, we may want to move this to a more generic place if we want
// to start passing more data other than field trials.
- std::unique_ptr<SharedPersistentMemoryAllocator> field_trial_allocator_ =
- nullptr;
+ std::unique_ptr<FieldTrialAllocator> field_trial_allocator_ = nullptr;
-#if defined(OS_WIN)
// Readonly copy of the handle to the allocator. Needs to be a member variable
// because it's needed from both CopyFieldTrialStateToFlags() and
// AppendFieldTrialHandleIfNeeded().
- HANDLE readonly_allocator_handle_ = nullptr;
-#endif
+ PlatformFile readonly_allocator_handle_ = kInvalidPlatformFile;
+
+ // Tracks whether CreateTrialsFromCommandLine() has been called.
+ bool create_trials_from_command_line_called_ = false;
DISALLOW_COPY_AND_ASSIGN(FieldTrialList);
};
diff --git a/chromium/base/metrics/field_trial_param_associator.cc b/chromium/base/metrics/field_trial_param_associator.cc
index 50619ea2bc4..3bac18d6a91 100644
--- a/chromium/base/metrics/field_trial_param_associator.cc
+++ b/chromium/base/metrics/field_trial_param_associator.cc
@@ -36,9 +36,26 @@ bool FieldTrialParamAssociator::AssociateFieldTrialParams(
bool FieldTrialParamAssociator::GetFieldTrialParams(
const std::string& trial_name,
FieldTrialParams* params) {
+ FieldTrial* field_trial = FieldTrialList::Find(trial_name);
+ if (!field_trial)
+ return false;
+
+ // First try the local map, falling back to getting it from shared memory.
+ if (GetFieldTrialParamsWithoutFallback(trial_name, field_trial->group_name(),
+ params)) {
+ return true;
+ }
+
+ // TODO(lawrencewu): add the params to field_trial_params_ for next time.
+ return FieldTrialList::GetParamsFromSharedMemory(field_trial, params);
+}
+
+bool FieldTrialParamAssociator::GetFieldTrialParamsWithoutFallback(
+ const std::string& trial_name,
+ const std::string& group_name,
+ FieldTrialParams* params) {
AutoLock scoped_lock(lock_);
- const std::string group_name = FieldTrialList::FindFullName(trial_name);
const FieldTrialKey key(trial_name, group_name);
if (!ContainsKey(field_trial_params_, key))
return false;
@@ -48,6 +65,14 @@ bool FieldTrialParamAssociator::GetFieldTrialParams(
}
void FieldTrialParamAssociator::ClearAllParamsForTesting() {
+ {
+ AutoLock scoped_lock(lock_);
+ field_trial_params_.clear();
+ }
+ FieldTrialList::ClearParamsFromSharedMemoryForTesting();
+}
+
+void FieldTrialParamAssociator::ClearAllCachedParamsForTesting() {
AutoLock scoped_lock(lock_);
field_trial_params_.clear();
}
diff --git a/chromium/base/metrics/field_trial_param_associator.h b/chromium/base/metrics/field_trial_param_associator.h
index 214e146966e..b19c66661c7 100644
--- a/chromium/base/metrics/field_trial_param_associator.h
+++ b/chromium/base/metrics/field_trial_param_associator.h
@@ -11,6 +11,7 @@
#include "base/base_export.h"
#include "base/memory/singleton.h"
+#include "base/metrics/field_trial.h"
#include "base/synchronization/lock.h"
namespace base {
@@ -33,13 +34,26 @@ class BASE_EXPORT FieldTrialParamAssociator {
const std::string& group_name,
const FieldTrialParams& params);
- // Gets the parameters for a field trial and its chosen group.
+ // Gets the parameters for a field trial and its chosen group. If they are
+ // not found in field_trial_params_, falls back to looking them up in shared
+ // memory.
bool GetFieldTrialParams(const std::string& trial_name,
FieldTrialParams* params);
- // Clears the internal field_trial_params_ mapping.
+ // Gets the parameters for a field trial and its chosen group. Does not fall
+ // back to looking them up in shared memory. This should only be used if you
+ // know for sure the params are in the mapping, e.g. in the browser process,
+ // and even then you should probably just use GetFieldTrialParams().
+ bool GetFieldTrialParamsWithoutFallback(const std::string& trial_name,
+ const std::string& group_name,
+ FieldTrialParams* params);
+
+ // Clears the internal field_trial_params_ mapping, plus removes all params in
+ // shared memory.
void ClearAllParamsForTesting();
+ // Clears the internal field_trial_params_ mapping.
+ void ClearAllCachedParamsForTesting();
+
private:
friend struct DefaultSingletonTraits<FieldTrialParamAssociator>;
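A minimal usage sketch of the fallback-aware lookup declared above (hypothetical caller; ExampleTrialParamLookup() and the trial name "MyTrial" are made-up names, and the param map type matches the std::map<std::string, std::string> used by the unit tests in this patch):

// Sketch only: GetFieldTrialParams() consults the local field_trial_params_
// map first and then falls back to the field-trial shared memory segment via
// FieldTrialList::GetParamsFromSharedMemory().
#include <map>
#include <string>

#include "base/metrics/field_trial_param_associator.h"

void ExampleTrialParamLookup() {
  std::map<std::string, std::string> params;
  base::FieldTrialParamAssociator* associator =
      base::FieldTrialParamAssociator::GetInstance();
  // "MyTrial" is a hypothetical trial name used only for this example.
  if (associator->GetFieldTrialParams("MyTrial", &params)) {
    // |params| now holds the key/value pairs, whether they came from the
    // local map or from shared memory.
  }
}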
diff --git a/chromium/base/metrics/field_trial_unittest.cc b/chromium/base/metrics/field_trial_unittest.cc
index 0ad13a22be7..7dcbf659645 100644
--- a/chromium/base/metrics/field_trial_unittest.cc
+++ b/chromium/base/metrics/field_trial_unittest.cc
@@ -12,12 +12,14 @@
#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "base/message_loop/message_loop.h"
+#include "base/metrics/field_trial_param_associator.h"
#include "base/rand_util.h"
#include "base/run_loop.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/stringprintf.h"
#include "base/test/gtest_util.h"
#include "base/test/mock_entropy_provider.h"
+#include "base/test/scoped_feature_list.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
@@ -1136,6 +1138,7 @@ TEST(FieldTrialDeathTest, OneTimeRandomizedTrialWithoutFieldTrialList) {
"");
}
+#if defined(OS_WIN)
TEST(FieldTrialListTest, TestCopyFieldTrialStateToFlags) {
base::FieldTrialList field_trial_list(
base::MakeUnique<base::MockEntropyProvider>());
@@ -1143,18 +1146,21 @@ TEST(FieldTrialListTest, TestCopyFieldTrialStateToFlags) {
base::FilePath test_file_path = base::FilePath(FILE_PATH_LITERAL("Program"));
base::CommandLine cmd_line = base::CommandLine(test_file_path);
const char field_trial_handle[] = "test-field-trial-handle";
+ const char enable_features_switch[] = "test-enable-features";
+ const char disable_features_switch[] = "test-disable-features";
- base::FieldTrialList::CopyFieldTrialStateToFlags(field_trial_handle,
- &cmd_line);
-#if defined(OS_WIN)
+ base::FieldTrialList::CopyFieldTrialStateToFlags(
+ field_trial_handle, enable_features_switch, disable_features_switch,
+ &cmd_line);
EXPECT_TRUE(cmd_line.HasSwitch(field_trial_handle) ||
cmd_line.HasSwitch(switches::kForceFieldTrials));
-#else
- EXPECT_TRUE(cmd_line.HasSwitch(switches::kForceFieldTrials));
-#endif
}
+#endif
TEST(FieldTrialListTest, InstantiateAllocator) {
+ test::ScopedFeatureList scoped_feature_list;
+ scoped_feature_list.Init();
+
FieldTrialList field_trial_list(nullptr);
FieldTrialList::CreateFieldTrial("Trial1", "Group1");
@@ -1177,6 +1183,9 @@ TEST(FieldTrialListTest, AddTrialsToAllocator) {
// Scoping the first FieldTrialList, as we need another one to test that it
// matches.
{
+ test::ScopedFeatureList scoped_feature_list;
+ scoped_feature_list.Init();
+
FieldTrialList field_trial_list(nullptr);
FieldTrialList::CreateFieldTrial("Trial1", "Group1");
FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded();
@@ -1187,11 +1196,177 @@ TEST(FieldTrialListTest, AddTrialsToAllocator) {
FieldTrialList field_trial_list2(nullptr);
std::unique_ptr<base::SharedMemory> shm(new SharedMemory(handle, true));
- shm.get()->Map(4 << 10); // Hardcoded, equal to kFieldTrialAllocationSize.
+ // 4 KiB is enough to hold only the trials created for this test.
+ shm.get()->Map(4 << 10);
FieldTrialList::CreateTrialsFromSharedMemory(std::move(shm));
std::string check_string;
FieldTrialList::AllStatesToString(&check_string);
EXPECT_EQ(save_string, check_string);
}
+TEST(FieldTrialListTest, DoNotAddSimulatedFieldTrialsToAllocator) {
+ constexpr char kTrialName[] = "trial";
+ base::SharedMemoryHandle handle;
+ {
+ test::ScopedFeatureList scoped_feature_list;
+ scoped_feature_list.Init();
+
+ // Create a simulated trial and a real trial and call group() on them, which
+ // should only add the real trial to the field trial allocator.
+ FieldTrialList field_trial_list(nullptr);
+ FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded();
+
+ // This shouldn't add to the allocator.
+ scoped_refptr<FieldTrial> simulated_trial =
+ FieldTrial::CreateSimulatedFieldTrial(kTrialName, 100, "Simulated",
+ 0.95);
+ simulated_trial->group();
+
+ // This should add to the allocator.
+ FieldTrial* real_trial =
+ FieldTrialList::CreateFieldTrial(kTrialName, "Real");
+ real_trial->group();
+
+ handle = base::SharedMemory::DuplicateHandle(
+ field_trial_list.field_trial_allocator_->shared_memory()->handle());
+ }
+
+ // Check that there's only one entry in the allocator.
+ FieldTrialList field_trial_list2(nullptr);
+ std::unique_ptr<base::SharedMemory> shm(new SharedMemory(handle, true));
+ // 4 KiB is enough to hold only the trials created for this test.
+ shm.get()->Map(4 << 10);
+ FieldTrialList::CreateTrialsFromSharedMemory(std::move(shm));
+ std::string check_string;
+ FieldTrialList::AllStatesToString(&check_string);
+ ASSERT_EQ(check_string.find("Simulated"), std::string::npos);
+}
+
+TEST(FieldTrialListTest, AssociateFieldTrialParams) {
+ test::ScopedFeatureList scoped_feature_list;
+ scoped_feature_list.Init();
+
+ std::string trial_name("Trial1");
+ std::string group_name("Group1");
+
+ // Create a field trial with some params.
+ FieldTrialList field_trial_list(nullptr);
+ FieldTrialList::CreateFieldTrial(trial_name, group_name);
+ std::map<std::string, std::string> params;
+ params["key1"] = "value1";
+ params["key2"] = "value2";
+ FieldTrialParamAssociator::GetInstance()->AssociateFieldTrialParams(
+ trial_name, group_name, params);
+ FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded();
+
+ // Clear all cached params from the associator.
+ FieldTrialParamAssociator::GetInstance()->ClearAllCachedParamsForTesting();
+ // Check that the params have been cleared from the cache.
+ std::map<std::string, std::string> cached_params;
+ FieldTrialParamAssociator::GetInstance()->GetFieldTrialParamsWithoutFallback(
+ trial_name, group_name, &cached_params);
+ EXPECT_EQ(0U, cached_params.size());
+
+ // Check that we fetch the param from shared memory properly.
+ std::map<std::string, std::string> new_params;
+ FieldTrialParamAssociator::GetInstance()->GetFieldTrialParams(trial_name,
+ &new_params);
+ EXPECT_EQ("value1", new_params["key1"]);
+ EXPECT_EQ("value2", new_params["key2"]);
+ EXPECT_EQ(2U, new_params.size());
+}
+
+TEST(FieldTrialListTest, ClearParamsFromSharedMemory) {
+ std::string trial_name("Trial1");
+ std::string group_name("Group1");
+
+ base::SharedMemoryHandle handle;
+ {
+ test::ScopedFeatureList scoped_feature_list;
+ scoped_feature_list.Init();
+
+ // Create a field trial with some params.
+ FieldTrialList field_trial_list(nullptr);
+ FieldTrial* trial =
+ FieldTrialList::CreateFieldTrial(trial_name, group_name);
+ std::map<std::string, std::string> params;
+ params["key1"] = "value1";
+ params["key2"] = "value2";
+ FieldTrialParamAssociator::GetInstance()->AssociateFieldTrialParams(
+ trial_name, group_name, params);
+ FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded();
+
+ // Clear all params from the associator AND shared memory. The allocated
+ // segments should be different.
+ FieldTrial::FieldTrialRef old_ref = trial->ref_;
+ FieldTrialParamAssociator::GetInstance()->ClearAllParamsForTesting();
+ FieldTrial::FieldTrialRef new_ref = trial->ref_;
+ EXPECT_NE(old_ref, new_ref);
+
+ // Check that there are no params associated with the field trial anymore.
+ std::map<std::string, std::string> new_params;
+ FieldTrialParamAssociator::GetInstance()->GetFieldTrialParams(trial_name,
+ &new_params);
+ EXPECT_EQ(0U, new_params.size());
+
+ // Now duplicate the handle so we can easily check that the trial is still
+ // in shared memory via AllStatesToString.
+ handle = base::SharedMemory::DuplicateHandle(
+ field_trial_list.field_trial_allocator_->shared_memory()->handle());
+ }
+
+ // Check that we have the trial.
+ FieldTrialList field_trial_list2(nullptr);
+ std::unique_ptr<base::SharedMemory> shm(new SharedMemory(handle, true));
+ // 4 KiB is enough to hold only the trials created for this test.
+ shm.get()->Map(4 << 10);
+ FieldTrialList::CreateTrialsFromSharedMemory(std::move(shm));
+ std::string check_string;
+ FieldTrialList::AllStatesToString(&check_string);
+ EXPECT_EQ("*Trial1/Group1/", check_string);
+}
+
+TEST(FieldTrialListTest, DumpAndFetchFromSharedMemory) {
+ std::string trial_name("Trial1");
+ std::string group_name("Group1");
+
+ // Create a field trial with some params.
+ FieldTrialList field_trial_list(nullptr);
+ FieldTrialList::CreateFieldTrial(trial_name, group_name);
+ std::map<std::string, std::string> params;
+ params["key1"] = "value1";
+ params["key2"] = "value2";
+ FieldTrialParamAssociator::GetInstance()->AssociateFieldTrialParams(
+ trial_name, group_name, params);
+
+ std::unique_ptr<base::SharedMemory> shm(new SharedMemory());
+ // 4 KiB is enough to hold only the trials created for this test.
+ shm.get()->CreateAndMapAnonymous(4 << 10);
+ // We _could_ use PersistentMemoryAllocator here; this one just takes fewer
+ // parameters.
+ SharedPersistentMemoryAllocator allocator(std::move(shm), 1, "", false);
+
+ // Dump and subsequently retrieve the field trial to |allocator|.
+ FieldTrialList::DumpAllFieldTrialsToPersistentAllocator(&allocator);
+ std::vector<const FieldTrial::FieldTrialEntry*> entries =
+ FieldTrialList::GetAllFieldTrialsFromPersistentAllocator(allocator);
+
+ // Check that we have the entry we put in.
+ EXPECT_EQ(1u, entries.size());
+ const FieldTrial::FieldTrialEntry* entry = entries[0];
+
+ // Check that the trial and group names match.
+ StringPiece shm_trial_name;
+ StringPiece shm_group_name;
+ entry->GetTrialAndGroupName(&shm_trial_name, &shm_group_name);
+ EXPECT_EQ(trial_name, shm_trial_name);
+ EXPECT_EQ(group_name, shm_group_name);
+
+ // Check that the params match.
+ std::map<std::string, std::string> shm_params;
+ entry->GetParams(&shm_params);
+ EXPECT_EQ(2u, shm_params.size());
+ EXPECT_EQ("value1", shm_params["key1"]);
+ EXPECT_EQ("value2", shm_params["key2"]);
+}
+
} // namespace base
diff --git a/chromium/base/metrics/histogram.h b/chromium/base/metrics/histogram.h
index 0f059457aaa..26a3ec84b8c 100644
--- a/chromium/base/metrics/histogram.h
+++ b/chromium/base/metrics/histogram.h
@@ -81,8 +81,11 @@
#include "base/macros.h"
#include "base/metrics/bucket_ranges.h"
#include "base/metrics/histogram_base.h"
+#if defined(OS_CHROMEOS) || defined(OS_IOS) || defined(OS_WIN)
// TODO(asvitkine): Migrate callers to include this directly and remove this.
+// Note: Incrementally migrating platforms as they become clean.
#include "base/metrics/histogram_macros.h"
+#endif
#include "base/metrics/histogram_samples.h"
#include "base/time/time.h"
diff --git a/chromium/base/metrics/histogram_base.cc b/chromium/base/metrics/histogram_base.cc
index 6dd91b25380..671cad24290 100644
--- a/chromium/base/metrics/histogram_base.cc
+++ b/chromium/base/metrics/histogram_base.cc
@@ -118,14 +118,16 @@ void HistogramBase::WriteJSON(std::string* output) const {
root.SetInteger("flags", flags());
root.Set("params", std::move(parameters));
root.Set("buckets", std::move(buckets));
- root.SetInteger("pid", GetCurrentProcId());
+ root.SetInteger("pid", GetUniqueIdForProcess());
serializer.Serialize(root);
}
// static
void HistogramBase::EnableActivityReportHistogram(
const std::string& process_type) {
- DCHECK(!report_histogram_);
+ if (report_histogram_)
+ return;
+
size_t existing = StatisticsRecorder::GetHistogramCount();
if (existing != 0) {
DVLOG(1) << existing
diff --git a/chromium/base/metrics/histogram_macros.h b/chromium/base/metrics/histogram_macros.h
index 9e3caec3a34..78473761dd4 100644
--- a/chromium/base/metrics/histogram_macros.h
+++ b/chromium/base/metrics/histogram_macros.h
@@ -256,6 +256,39 @@
INTERNAL_HISTOGRAM_SPARSE_SLOWLY(name, sample)
//------------------------------------------------------------------------------
+// Histogram instantiation helpers.
+
+// Support a collection of histograms, perhaps one for each entry in an
+// enumeration. This macro manages a block of pointers, adding to a specific
+// one by its index.
+//
+// A typical instantiation looks something like this:
+// STATIC_HISTOGRAM_POINTER_GROUP(
+// GetHistogramNameForIndex(histogram_index),
+// histogram_index, MAXIMUM_HISTOGRAM_INDEX, Add(some_delta),
+// base::Histogram::FactoryGet(
+// GetHistogramNameForIndex(histogram_index),
+// MINIMUM_SAMPLE, MAXIMUM_SAMPLE, BUCKET_COUNT,
+// base::HistogramBase::kUmaTargetedHistogramFlag));
+//
+// Though it seems inefficient to generate the name twice, the first
+// instance will be used only for DCHECK builds and the second will
+// execute only during the first access to the given index, after which
+// the pointer is cached and the name is never needed again.
+#define STATIC_HISTOGRAM_POINTER_GROUP(constant_histogram_name, index, \
+ constant_maximum, \
+ histogram_add_method_invocation, \
+ histogram_factory_get_invocation) \
+ do { \
+ static base::subtle::AtomicWord atomic_histograms[constant_maximum]; \
+ DCHECK_LE(0, index); \
+ DCHECK_LT(index, constant_maximum); \
+ HISTOGRAM_POINTER_USE(&atomic_histograms[index], constant_histogram_name, \
+ histogram_add_method_invocation, \
+ histogram_factory_get_invocation); \
+ } while (0)
+
+//------------------------------------------------------------------------------
// Deprecated histogram macros. Not recommended for current use.
// Legacy name for UMA_HISTOGRAM_COUNTS_1M. Suggest using explicit naming
diff --git a/chromium/base/metrics/histogram_samples.h b/chromium/base/metrics/histogram_samples.h
index e28573fa7e5..93f6d21c8aa 100644
--- a/chromium/base/metrics/histogram_samples.h
+++ b/chromium/base/metrics/histogram_samples.h
@@ -27,6 +27,9 @@ class SampleCountIterator;
class BASE_EXPORT HistogramSamples {
public:
struct Metadata {
+ // Expected size for 32/64-bit check.
+ static constexpr size_t kExpectedInstanceSize = 24;
+
// Initialized when the sample-set is first created with a value provided
// by the caller. It is generally used to identify the sample-set across
// threads and processes, though not necessarily uniquely as it is possible
@@ -55,7 +58,21 @@ class BASE_EXPORT HistogramSamples {
// might mismatch even when no memory corruption has happened.
HistogramBase::AtomicCount redundant_count;
- Metadata() : id(0), sum(0), redundant_count(0) {}
+ // 4 bytes of padding to explicitly extend this structure to a multiple of
+ // 64 bits. This is required to ensure the structure is the same size on
+ // both 32-bit and 64-bit builds.
+ char padding[4];
+ };
+
+ // Because structures held in persistent memory must be POD, there can be no
+ // default constructor to clear the fields. This derived class exists just
+ // to clear them when the object is allocated on the heap.
+ struct LocalMetadata : Metadata {
+ LocalMetadata() {
+ id = 0;
+ sum = 0;
+ redundant_count = 0;
+ }
};
explicit HistogramSamples(uint64_t id);
@@ -102,7 +119,7 @@ class BASE_EXPORT HistogramSamples {
// In order to support histograms shared through an external memory segment,
// meta values may be the local storage or external storage depending on the
// wishes of the derived class.
- Metadata local_meta_;
+ LocalMetadata local_meta_;
Metadata* meta_;
DISALLOW_COPY_AND_ASSIGN(HistogramSamples);
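A stand-alone sketch of the check that kExpectedInstanceSize enables; the 24-byte figure comes from the declaration above, and the assertion mirrors the size check that GetAsObject<>() performs later in this patch:

// Sketch only: a compile-time guard enabled by kExpectedInstanceSize. It
// fails the build on any platform where padding or natural word size changes
// the layout of the structure.
#include "base/metrics/histogram_samples.h"

static_assert(sizeof(base::HistogramSamples::Metadata) ==
                  base::HistogramSamples::Metadata::kExpectedInstanceSize,
              "HistogramSamples::Metadata layout differs between builds");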
diff --git a/chromium/base/metrics/histogram_snapshot_manager.cc b/chromium/base/metrics/histogram_snapshot_manager.cc
index f28009c436f..a774ea6177c 100644
--- a/chromium/base/metrics/histogram_snapshot_manager.cc
+++ b/chromium/base/metrics/histogram_snapshot_manager.cc
@@ -62,7 +62,7 @@ void HistogramSnapshotManager::PrepareSamples(
CHECK(false); // Crash for the bucket order corruption.
// Ensure that compiler keeps around pointers to |histogram| and its
// internal |bucket_ranges_| for any minidumps.
- base::debug::Alias(ranges_ptr);
+ base::debug::Alias(&ranges_ptr);
base::debug::Alias(&ranges_checksum);
base::debug::Alias(&ranges_calc_checksum);
base::debug::Alias(&histogram_name);
diff --git a/chromium/base/metrics/persistent_histogram_allocator.cc b/chromium/base/metrics/persistent_histogram_allocator.cc
index dab2564b10e..67af4a02240 100644
--- a/chromium/base/metrics/persistent_histogram_allocator.cc
+++ b/chromium/base/metrics/persistent_histogram_allocator.cc
@@ -6,6 +6,7 @@
#include <memory>
+#include "base/atomicops.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/files/important_file_writer.h"
@@ -35,11 +36,8 @@ const char kResultHistogram[] = "UMA.CreatePersistentHistogram.Result";
// so that, if the structure of that object changes, stored older versions
// will be safely ignored.
enum : uint32_t {
- kTypeIdHistogram = 0xF1645910 + 2, // SHA1(Histogram) v2
kTypeIdRangesArray = 0xBCEA225A + 1, // SHA1(RangesArray) v1
kTypeIdCountsArray = 0x53215530 + 1, // SHA1(CountsArray) v1
-
- kTypeIdHistogramUnderConstruction = ~kTypeIdHistogram,
};
// The current globally-active persistent allocator for all new histograms.
@@ -47,8 +45,10 @@ enum : uint32_t {
// but that's best since PersistentMemoryAllocator objects (that underlie
// GlobalHistogramAllocator objects) are explicitly forbidden from doing
// anything essential at exit anyway due to the fact that they depend on data
-// managed elsewhere and which could be destructed first.
-GlobalHistogramAllocator* g_allocator = nullptr;
+// managed elsewhere and which could be destructed first. An AtomicWord is
+// used instead of std::atomic because the latter can create global ctors
+// and dtors.
+subtle::AtomicWord g_allocator = 0;
// Take an array of range boundaries and create a proper BucketRanges object
// which is returned to the caller. A return of nullptr indicates that the
@@ -226,6 +226,13 @@ PersistentMemoryAllocator::Reference PersistentSampleMapRecords::CreateNew(
// This data will be held in persistent memory in order for processes to
// locate and use histograms created elsewhere.
struct PersistentHistogramAllocator::PersistentHistogramData {
+ // SHA1(Histogram): Increment this if structure changes!
+ static constexpr uint32_t kPersistentTypeId = 0xF1645910 + 3;
+
+ // Expected size for 32/64-bit check.
+ static constexpr size_t kExpectedInstanceSize =
+ 40 + 2 * HistogramSamples::Metadata::kExpectedInstanceSize;
+
int32_t histogram_type;
int32_t flags;
int32_t minimum;
@@ -240,7 +247,7 @@ struct PersistentHistogramAllocator::PersistentHistogramData {
// Space for the histogram name will be added during the actual allocation
// request. This must be the last field of the structure. A zero-size array
// or a "flexible" array would be preferred but is not (yet) valid C++.
- char name[1];
+ char name[sizeof(uint64_t)]; // Force 64-bit alignment on 32-bit builds.
};
PersistentHistogramAllocator::Iterator::Iterator(
@@ -250,7 +257,7 @@ PersistentHistogramAllocator::Iterator::Iterator(
std::unique_ptr<HistogramBase>
PersistentHistogramAllocator::Iterator::GetNextWithIgnore(Reference ignore) {
PersistentMemoryAllocator::Reference ref;
- while ((ref = memory_iter_.GetNextOfType(kTypeIdHistogram)) != 0) {
+ while ((ref = memory_iter_.GetNextOfType<PersistentHistogramData>()) != 0) {
if (ref != ignore)
return allocator_->GetHistogram(ref);
}
@@ -273,8 +280,7 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::GetHistogram(
// add it to the local list of known histograms (while these may be simple
// references to histograms in other processes).
PersistentHistogramData* histogram_data =
- memory_allocator_->GetAsObject<PersistentHistogramData>(
- ref, kTypeIdHistogram);
+ memory_allocator_->GetAsObject<PersistentHistogramData>(ref);
size_t length = memory_allocator_->GetAllocSize(ref);
// Check that metadata is reasonable: name is NUL terminated and non-empty,
@@ -315,13 +321,9 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
// during the datafill doesn't leave a bad record around that could cause
// confusion by another process trying to read it. It will be corrected
// once histogram construction is complete.
- PersistentMemoryAllocator::Reference histogram_ref =
- memory_allocator_->Allocate(
- offsetof(PersistentHistogramData, name) + name.length() + 1,
- kTypeIdHistogramUnderConstruction);
PersistentHistogramData* histogram_data =
- memory_allocator_->GetAsObject<PersistentHistogramData>(
- histogram_ref, kTypeIdHistogramUnderConstruction);
+ memory_allocator_->AllocateObject<PersistentHistogramData>(
+ offsetof(PersistentHistogramData, name) + name.length() + 1);
if (histogram_data) {
memcpy(histogram_data->name, name.c_str(), name.size() + 1);
histogram_data->histogram_type = histogram_type;
@@ -338,14 +340,15 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
return nullptr;
}
- size_t ranges_bytes = (bucket_count + 1) * sizeof(HistogramBase::Sample);
+ size_t ranges_count = bucket_count + 1;
+ size_t ranges_bytes = ranges_count * sizeof(HistogramBase::Sample);
PersistentMemoryAllocator::Reference counts_ref =
memory_allocator_->Allocate(counts_bytes, kTypeIdCountsArray);
PersistentMemoryAllocator::Reference ranges_ref =
memory_allocator_->Allocate(ranges_bytes, kTypeIdRangesArray);
HistogramBase::Sample* ranges_data =
- memory_allocator_->GetAsObject<HistogramBase::Sample>(
- ranges_ref, kTypeIdRangesArray);
+ memory_allocator_->GetAsArray<HistogramBase::Sample>(
+ ranges_ref, kTypeIdRangesArray, ranges_count);
// Only continue here if all allocations were successful. If they weren't,
// there is no way to free the space but that's not really a problem since
@@ -379,9 +382,9 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
DCHECK(histogram);
DCHECK_NE(0U, histogram_data->samples_metadata.id);
DCHECK_NE(0U, histogram_data->logged_metadata.id);
- memory_allocator_->ChangeType(histogram_ref, kTypeIdHistogram,
- kTypeIdHistogramUnderConstruction);
+ PersistentMemoryAllocator::Reference histogram_ref =
+ memory_allocator_->GetAsReference(histogram_data);
if (ref_ptr != nullptr)
*ref_ptr = histogram_ref;
@@ -403,22 +406,30 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
result = CREATE_HISTOGRAM_ALLOCATOR_ERROR;
}
RecordCreateHistogramResult(result);
- NOTREACHED() << "error=" << result;
+
+ // Crash for failures caused by internal bugs, but not for "full", which
+ // depends on outside code.
+ if (result != CREATE_HISTOGRAM_ALLOCATOR_FULL)
+ NOTREACHED() << memory_allocator_->Name() << ", error=" << result;
return nullptr;
}
void PersistentHistogramAllocator::FinalizeHistogram(Reference ref,
bool registered) {
- // If the created persistent histogram was registered then it needs to
- // be marked as "iterable" in order to be found by other processes.
- if (registered)
+ if (registered) {
+ // If the created persistent histogram was registered then it needs to
+ // be marked as "iterable" in order to be found by other processes. This
+ // happens only after the histogram is fully formed so it's impossible for
+ // code iterating through the allocator to read a partially created record.
memory_allocator_->MakeIterable(ref);
- // If it wasn't registered then a race condition must have caused
- // two to be created. The allocator does not support releasing the
- // acquired memory so just change the type to be empty.
- else
- memory_allocator_->ChangeType(ref, 0, kTypeIdHistogram);
+ } else {
+ // If it wasn't registered then a race condition must have caused two to
+ // be created. The allocator does not support releasing the acquired memory
+ // so just change the type to be empty.
+ memory_allocator_->ChangeType(ref, 0,
+ PersistentHistogramData::kPersistentTypeId);
+ }
}
void PersistentHistogramAllocator::MergeHistogramDeltaToStatisticsRecorder(
@@ -494,7 +505,7 @@ PersistentHistogramAllocator::GetCreateHistogramResultHistogram() {
static bool initialized = false;
if (!initialized) {
initialized = true;
- if (g_allocator) {
+ if (GlobalHistogramAllocator::Get()) {
DVLOG(1) << "Creating the results-histogram inside persistent"
<< " memory can cause future allocations to crash if"
<< " that memory is ever released (for testing).";
@@ -539,8 +550,9 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
PersistentHistogramData histogram_data = *histogram_data_ptr;
HistogramBase::Sample* ranges_data =
- memory_allocator_->GetAsObject<HistogramBase::Sample>(
- histogram_data.ranges_ref, kTypeIdRangesArray);
+ memory_allocator_->GetAsArray<HistogramBase::Sample>(
+ histogram_data.ranges_ref, kTypeIdRangesArray,
+ PersistentMemoryAllocator::kSizeAny);
const uint32_t max_buckets =
std::numeric_limits<uint32_t>::max() / sizeof(HistogramBase::Sample);
@@ -569,8 +581,9 @@ std::unique_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
created_ranges.release());
HistogramBase::AtomicCount* counts_data =
- memory_allocator_->GetAsObject<HistogramBase::AtomicCount>(
- histogram_data.counts_ref, kTypeIdCountsArray);
+ memory_allocator_->GetAsArray<HistogramBase::AtomicCount>(
+ histogram_data.counts_ref, kTypeIdCountsArray,
+ PersistentMemoryAllocator::kSizeAny);
size_t counts_bytes =
CalculateRequiredCountsBytes(histogram_data.bucket_count);
if (!counts_data || counts_bytes == 0 ||
@@ -640,7 +653,7 @@ PersistentHistogramAllocator::GetOrCreateStatisticsRecorderHistogram(
const HistogramBase* histogram) {
// This should never be called on the global histogram allocator as objects
// created there are already within the global statistics recorder.
- DCHECK_NE(g_allocator, this);
+ DCHECK_NE(GlobalHistogramAllocator::Get(), this);
DCHECK(histogram);
HistogramBase* existing =
@@ -809,8 +822,9 @@ void GlobalHistogramAllocator::Set(
// Releasing or changing an allocator is extremely dangerous because it
// likely has histograms stored within it. If the backing memory is
// also released, future accesses to those histograms will seg-fault.
- CHECK(!g_allocator);
- g_allocator = allocator.release();
+ CHECK(!subtle::NoBarrier_Load(&g_allocator));
+ subtle::Release_Store(&g_allocator,
+ reinterpret_cast<uintptr_t>(allocator.release()));
size_t existing = StatisticsRecorder::GetHistogramCount();
DVLOG_IF(1, existing)
@@ -819,13 +833,14 @@ void GlobalHistogramAllocator::Set(
// static
GlobalHistogramAllocator* GlobalHistogramAllocator::Get() {
- return g_allocator;
+ return reinterpret_cast<GlobalHistogramAllocator*>(
+ subtle::Acquire_Load(&g_allocator));
}
// static
std::unique_ptr<GlobalHistogramAllocator>
GlobalHistogramAllocator::ReleaseForTesting() {
- GlobalHistogramAllocator* histogram_allocator = g_allocator;
+ GlobalHistogramAllocator* histogram_allocator = Get();
if (!histogram_allocator)
return nullptr;
PersistentMemoryAllocator* memory_allocator =
@@ -835,13 +850,9 @@ GlobalHistogramAllocator::ReleaseForTesting() {
// Recorder forget about the histograms contained therein; otherwise,
// some operations will try to access them and the released memory.
PersistentMemoryAllocator::Iterator iter(memory_allocator);
- PersistentMemoryAllocator::Reference ref;
- while ((ref = iter.GetNextOfType(kTypeIdHistogram)) != 0) {
- PersistentHistogramData* histogram_data =
- memory_allocator->GetAsObject<PersistentHistogramData>(
- ref, kTypeIdHistogram);
- DCHECK(histogram_data);
- StatisticsRecorder::ForgetHistogramForTesting(histogram_data->name);
+ const PersistentHistogramData* data;
+ while ((data = iter.GetNextOfObject<PersistentHistogramData>()) != nullptr) {
+ StatisticsRecorder::ForgetHistogramForTesting(data->name);
// If a test breaks here then a memory region containing a histogram
// actively used by this code is being released back to the test.
@@ -850,10 +861,10 @@ GlobalHistogramAllocator::ReleaseForTesting() {
// the method GetCreateHistogramResultHistogram() *before* setting
// the (temporary) memory allocator via SetGlobalAllocator() so that
// histogram is instead allocated from the process heap.
- DCHECK_NE(kResultHistogram, histogram_data->name);
+ DCHECK_NE(kResultHistogram, data->name);
}
- g_allocator = nullptr;
+ subtle::Release_Store(&g_allocator, 0);
return WrapUnique(histogram_allocator);
};
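As a hedged illustration of the pattern adopted above (not code from this patch), the AtomicWord publish/lookup idiom in isolation, using only the base/atomicops.h calls that already appear in this hunk; PublishInstance() and GetInstance() are hypothetical names:

// Sketch only: publish-once pointer without a global constructor/destructor.
// base::subtle::AtomicWord is a plain integer type, so zero-initialization at
// load time is enough, whereas a file-scope std::atomic can create global
// ctors and dtors (per the comment above).
#include "base/atomicops.h"

namespace {
base::subtle::AtomicWord g_instance = 0;
}  // namespace

void PublishInstance(void* instance) {
  // Release-store: all writes that prepared |instance| become visible to any
  // thread that later acquire-loads the word.
  base::subtle::Release_Store(&g_instance,
                              reinterpret_cast<uintptr_t>(instance));
}

void* GetInstance() {
  // Acquire-load pairs with the release-store above.
  return reinterpret_cast<void*>(base::subtle::Acquire_Load(&g_instance));
}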
diff --git a/chromium/base/metrics/persistent_histogram_allocator.h b/chromium/base/metrics/persistent_histogram_allocator.h
index 4c36e35c2f4..2eb28dfaf5b 100644
--- a/chromium/base/metrics/persistent_histogram_allocator.h
+++ b/chromium/base/metrics/persistent_histogram_allocator.h
@@ -56,8 +56,8 @@ class BASE_EXPORT PersistentSparseHistogramDataManager {
// Convenience method that gets the object for a given reference so callers
// don't have to also keep their own pointer to the appropriate allocator.
template <typename T>
- T* GetAsObject(PersistentMemoryAllocator::Reference ref, uint32_t type_id) {
- return allocator_->GetAsObject<T>(ref, type_id);
+ T* GetAsObject(PersistentMemoryAllocator::Reference ref) {
+ return allocator_->GetAsObject<T>(ref);
}
private:
@@ -131,8 +131,8 @@ class BASE_EXPORT PersistentSampleMapRecords {
// cleanliness of the interface), a template is defined that will be
// resolved when used inside that file.
template <typename T>
- T* GetAsObject(PersistentMemoryAllocator::Reference ref, uint32_t type_id) {
- return data_manager_->GetAsObject<T>(ref, type_id);
+ T* GetAsObject(PersistentMemoryAllocator::Reference ref) {
+ return data_manager_->GetAsObject<T>(ref);
}
private:
diff --git a/chromium/base/metrics/persistent_memory_allocator.cc b/chromium/base/metrics/persistent_memory_allocator.cc
index 2b3bf17ede2..ba0ef2a19ef 100644
--- a/chromium/base/metrics/persistent_memory_allocator.cc
+++ b/chromium/base/metrics/persistent_memory_allocator.cc
@@ -49,6 +49,11 @@ enum : int {
kFlagFull = 1 << 1
};
+// Errors that are logged in "errors" histogram.
+enum AllocatorError : int {
+ kMemoryIsCorrupt = 1,
+};
+
bool CheckFlag(const volatile std::atomic<uint32_t>* flags, int flag) {
uint32_t loaded_flags = flags->load(std::memory_order_relaxed);
return (loaded_flags & flag) != 0;
@@ -59,8 +64,13 @@ void SetFlag(volatile std::atomic<uint32_t>* flags, int flag) {
for (;;) {
uint32_t new_flags = (loaded_flags & ~flag) | flag;
// In the failure case, the actual "flags" value is stored in loaded_flags.
- if (flags->compare_exchange_weak(loaded_flags, new_flags))
+ // These accesses are "relaxed" because they are completely independent
+ // of all other values.
+ if (flags->compare_exchange_weak(loaded_flags, new_flags,
+ std::memory_order_relaxed,
+ std::memory_order_relaxed)) {
break;
+ }
}
}
@@ -214,7 +224,8 @@ PersistentMemoryAllocator::Iterator::GetNext(uint32_t* type_return) {
// is no need to do another such load when the while-loop restarts. A
// "strong" compare-exchange is used because failing unnecessarily would
// mean repeating some fairly costly validations above.
- if (last_record_.compare_exchange_strong(last, next)) {
+ if (last_record_.compare_exchange_strong(
+ last, next, std::memory_order_acq_rel, std::memory_order_acquire)) {
*type_return = block->type_id.load(std::memory_order_relaxed);
break;
}
@@ -294,7 +305,16 @@ PersistentMemoryAllocator::PersistentMemoryAllocator(Memory memory,
readonly_(readonly),
corrupt_(0),
allocs_histogram_(nullptr),
- used_histogram_(nullptr) {
+ used_histogram_(nullptr),
+ errors_histogram_(nullptr) {
+ // These asserts ensure that the structures are 32/64-bit agnostic and meet
+ // all the requirements of use within the allocator. They access private
+ // definitions and so cannot be moved to the global scope.
+ static_assert(sizeof(PersistentMemoryAllocator::BlockHeader) == 16,
+ "struct is not portable across different natural word widths");
+ static_assert(sizeof(PersistentMemoryAllocator::SharedMetadata) == 56,
+ "struct is not portable across different natural word widths");
+
static_assert(sizeof(BlockHeader) % kAllocAlignment == 0,
"BlockHeader is not a multiple of kAllocAlignment");
static_assert(sizeof(SharedMetadata) % kAllocAlignment == 0,
@@ -360,7 +380,7 @@ PersistentMemoryAllocator::PersistentMemoryAllocator(Memory memory,
if (!name.empty()) {
const size_t name_length = name.length() + 1;
shared_meta()->name = Allocate(name_length, 0);
- char* name_cstr = GetAsObject<char>(shared_meta()->name, 0);
+ char* name_cstr = GetAsArray<char>(shared_meta()->name, 0, name_length);
if (name_cstr)
memcpy(name_cstr, name.data(), name.length());
}
@@ -408,7 +428,8 @@ uint64_t PersistentMemoryAllocator::Id() const {
const char* PersistentMemoryAllocator::Name() const {
Reference name_ref = shared_meta()->name;
- const char* name_cstr = GetAsObject<char>(name_ref, 0);
+ const char* name_cstr =
+ GetAsArray<char>(name_ref, 0, PersistentMemoryAllocator::kSizeAny);
if (!name_cstr)
return "";
@@ -426,16 +447,21 @@ void PersistentMemoryAllocator::CreateTrackingHistograms(
base::StringPiece name) {
if (name.empty() || readonly_)
return;
-
std::string name_string = name.as_string();
+
+ DCHECK(!allocs_histogram_);
+ allocs_histogram_ = Histogram::FactoryGet(
+ "UMA.PersistentAllocator." + name_string + ".Allocs", 1, 10000, 50,
+ HistogramBase::kUmaTargetedHistogramFlag);
+
DCHECK(!used_histogram_);
used_histogram_ = LinearHistogram::FactoryGet(
"UMA.PersistentAllocator." + name_string + ".UsedPct", 1, 101, 21,
HistogramBase::kUmaTargetedHistogramFlag);
- DCHECK(!allocs_histogram_);
- allocs_histogram_ = Histogram::FactoryGet(
- "UMA.PersistentAllocator." + name_string + ".Allocs", 1, 10000, 50,
+ DCHECK(!errors_histogram_);
+ errors_histogram_ = SparseHistogram::FactoryGet(
+ "UMA.PersistentAllocator." + name_string + ".Errors",
HistogramBase::kUmaTargetedHistogramFlag);
}
@@ -444,6 +470,24 @@ size_t PersistentMemoryAllocator::used() const {
mem_size_);
}
+PersistentMemoryAllocator::Reference PersistentMemoryAllocator::GetAsReference(
+ const void* memory,
+ uint32_t type_id) const {
+ uintptr_t address = reinterpret_cast<uintptr_t>(memory);
+ if (address < reinterpret_cast<uintptr_t>(mem_base_))
+ return kReferenceNull;
+
+ uintptr_t offset = address - reinterpret_cast<uintptr_t>(mem_base_);
+ if (offset >= mem_size_ || offset < sizeof(BlockHeader))
+ return kReferenceNull;
+
+ Reference ref = static_cast<Reference>(offset) - sizeof(BlockHeader);
+ if (!GetBlockData(ref, type_id, kSizeAny))
+ return kReferenceNull;
+
+ return ref;
+}
+
size_t PersistentMemoryAllocator::GetAllocSize(Reference ref) const {
const volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
if (!block)
@@ -474,8 +518,12 @@ bool PersistentMemoryAllocator::ChangeType(Reference ref,
return false;
// This is a "strong" exchange because there is no loop that can retry in
- // the wake of spurious failures possible with "weak" exchanges.
- return block->type_id.compare_exchange_strong(from_type_id, to_type_id);
+ // the wake of spurious failures possible with "weak" exchanges. Make this
+ // an "acquire-release" so no memory accesses can be reordered either before
+ // or after since changes based on type could happen on either side.
+ return block->type_id.compare_exchange_strong(from_type_id, to_type_id,
+ std::memory_order_acq_rel,
+ std::memory_order_acquire);
}
PersistentMemoryAllocator::Reference PersistentMemoryAllocator::Allocate(
@@ -554,8 +602,9 @@ PersistentMemoryAllocator::Reference PersistentMemoryAllocator::AllocateImpl(
return kReferenceNull;
}
const uint32_t new_freeptr = freeptr + page_free;
- if (shared_meta()->freeptr.compare_exchange_strong(freeptr,
- new_freeptr)) {
+ if (shared_meta()->freeptr.compare_exchange_strong(
+ freeptr, new_freeptr, std::memory_order_acq_rel,
+ std::memory_order_acquire)) {
block->size = page_free;
block->cookie = kBlockCookieWasted;
}
@@ -578,8 +627,11 @@ PersistentMemoryAllocator::Reference PersistentMemoryAllocator::AllocateImpl(
// while we were processing. A "weak" exchange would be permissible here
// because the code will just loop and try again but the above processing
// is significant so make the extra effort of a "strong" exchange.
- if (!shared_meta()->freeptr.compare_exchange_strong(freeptr, new_freeptr))
+ if (!shared_meta()->freeptr.compare_exchange_strong(
+ freeptr, new_freeptr, std::memory_order_acq_rel,
+ std::memory_order_acquire)) {
continue;
+ }
// Given that all memory was zeroed before ever being given to an instance
// of this class and given that we only allocate in a monotonic fashion
@@ -595,6 +647,10 @@ PersistentMemoryAllocator::Reference PersistentMemoryAllocator::AllocateImpl(
return kReferenceNull;
}
+ // Load information into the block header. There is no "release" of the
+ // data here because this memory can, currently, be seen only by the thread
+ // performing the allocation. When it comes time to share this, the thread
+ // will call MakeIterable() which does the release operation.
block->size = size;
block->cookie = kBlockCookieAllocated;
block->type_id.store(type_id, std::memory_order_relaxed);
@@ -607,7 +663,7 @@ void PersistentMemoryAllocator::GetMemoryInfo(MemoryInfo* meminfo) const {
mem_size_ - shared_meta()->freeptr.load(std::memory_order_relaxed),
(uint32_t)sizeof(BlockHeader));
meminfo->total = mem_size_;
- meminfo->free = IsCorrupt() ? 0 : remaining - sizeof(BlockHeader);
+ meminfo->free = remaining - sizeof(BlockHeader);
}
void PersistentMemoryAllocator::MakeIterable(Reference ref) {
@@ -675,9 +731,15 @@ void PersistentMemoryAllocator::MakeIterable(Reference ref) {
// case, it's safe to discard the constness and modify the local flag and
// maybe even the shared flag if the underlying data isn't actually read-only.
void PersistentMemoryAllocator::SetCorrupt() const {
- LOG(ERROR) << "Corruption detected in shared-memory segment.";
- const_cast<std::atomic<bool>*>(&corrupt_)->store(true,
- std::memory_order_relaxed);
+ if (!corrupt_.load(std::memory_order_relaxed) &&
+ !CheckFlag(
+ const_cast<volatile std::atomic<uint32_t>*>(&shared_meta()->flags),
+ kFlagCorrupt)) {
+ LOG(ERROR) << "Corruption detected in shared-memory segment.";
+ RecordError(kMemoryIsCorrupt);
+ }
+
+ corrupt_.store(true, std::memory_order_relaxed);
if (!readonly_) {
SetFlag(const_cast<volatile std::atomic<uint32_t>*>(&shared_meta()->flags),
kFlagCorrupt);
@@ -707,10 +769,10 @@ PersistentMemoryAllocator::GetBlock(Reference ref, uint32_t type_id,
uint32_t size, bool queue_ok,
bool free_ok) const {
// Validation of parameters.
- if (ref % kAllocAlignment != 0)
- return nullptr;
if (ref < (queue_ok ? kReferenceQueue : sizeof(SharedMetadata)))
return nullptr;
+ if (ref % kAllocAlignment != 0)
+ return nullptr;
size += sizeof(BlockHeader);
if (ref + size > mem_size_)
return nullptr;
@@ -739,6 +801,11 @@ PersistentMemoryAllocator::GetBlock(Reference ref, uint32_t type_id,
return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
}
+void PersistentMemoryAllocator::RecordError(int error) const {
+ if (errors_histogram_)
+ errors_histogram_->Add(error);
+}
+
const volatile void* PersistentMemoryAllocator::GetBlockData(
Reference ref,
uint32_t type_id,
diff --git a/chromium/base/metrics/persistent_memory_allocator.h b/chromium/base/metrics/persistent_memory_allocator.h
index ae5d2d7caf7..9fdef0192a6 100644
--- a/chromium/base/metrics/persistent_memory_allocator.h
+++ b/chromium/base/metrics/persistent_memory_allocator.h
@@ -9,6 +9,7 @@
#include <atomic>
#include <memory>
+#include <type_traits>
#include "base/atomicops.h"
#include "base/base_export.h"
@@ -47,6 +48,51 @@ class SharedMemory;
// Note that memory not in active use is not accessed so it is possible to
// use virtual memory, including memory-mapped files, as backing storage with
// the OS "pinning" new (zeroed) physical RAM pages only as they are needed.
+//
+// OBJECTS: Although the allocator can be used in a "malloc" sense, fetching
+// character arrays and manipulating that memory manually, the better way is
+// generally to use the "Object" methods to create and manage allocations. In
+// this way the sizing, type-checking, and construction are all automatic. For
+// this to work, however, every type of stored object must define two public
+// "constexpr" values, kPersistentTypeId and kExpectedInstanceSize, as such:
+//
+// struct MyPersistentObjectType {
+// // SHA1(MyPersistentObjectType): Increment this if structure changes!
+// static constexpr uint32_t kPersistentTypeId = 0x3E15F6DE + 1;
+//
+// // Expected size for 32/64-bit check. Update this if structure changes!
+// static constexpr size_t kExpectedInstanceSize = 20;
+//
+// ...
+// };
+//
+// kPersistentTypeId: This value is an arbitrary identifier that allows the
+// identification of these objects in the allocator, including the ability
+// to find them via iteration. The number is arbitrary but using the first
+// four bytes of the SHA1 hash of the type name means that there shouldn't
+// be any conflicts with other types that may also be stored in the memory.
+// The fully qualified name (e.g. base::debug::MyPersistentObjectType) could
+// be used to generate the hash if the type name seems common. Use a command
+// like this to get the hash: echo -n "MyPersistentObjectType" | sha1sum
+// If the structure layout changes, ALWAYS increment this number so that
+// newer versions of the code don't try to interpret persistent data written
+// by older versions with a different layout.
+//
+// kExpectedInstanceSize: This value is the hard-coded number that matches
+// what sizeof(T) would return. By providing it explicitly, the allocator can
+// verify that the structure is compatible between both 32-bit and 64-bit
+// versions of the code.
+//
+// Using AllocateObject (and ChangeObject) will zero the memory and then call
+// the default constructor for the object. Given that objects are persistent,
+// no destructor is ever called automatically though a caller can explicitly
+// call DeleteObject to destruct it and change the type to something indicating
+// it is no longer in use.
+//
+// Though persistent memory segments are transferable between programs built
+// for different natural word widths, they CANNOT be exchanged between CPUs
+// of different endianness. Attempts to do so will simply see the existing data
+// as corrupt and refuse to access any of it.
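A hedged usage sketch of the Object workflow described above (not part of the header); it assumes the MyPersistentObjectType example struct from the comment and an allocator obtained elsewhere, and uses only methods declared in this class:

// Sketch only: create, publish, and later rediscover a persistent object.
void ObjectLifecycleExample(base::PersistentMemoryAllocator* allocator) {
  // Allocation zeroes the backing memory and runs the default constructor.
  MyPersistentObjectType* obj =
      allocator->AllocateObject<MyPersistentObjectType>();
  if (!obj)
    return;  // The segment may be full or marked corrupt.

  // ... fill in the object's fields here ...

  // Publishing makes the object visible to iterators, possibly in other
  // processes attached to the same memory segment.
  allocator->MakeIterable(obj);

  // Any process can later walk the segment looking for this type.
  base::PersistentMemoryAllocator::Iterator iter(allocator);
  const MyPersistentObjectType* found;
  while ((found = iter.GetNextOfObject<MyPersistentObjectType>()) != nullptr) {
    // Read-only access; the pointer remains valid for the life of the
    // mapping, even if the object's type is later changed.
  }
}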
class BASE_EXPORT PersistentMemoryAllocator {
public:
typedef uint32_t Reference;
@@ -104,6 +150,18 @@ class BASE_EXPORT PersistentMemoryAllocator {
// calls to GetNext() meaning it's possible to completely miss entries.
Reference GetNextOfType(uint32_t type_match);
+ // As above but works using object type.
+ template <typename T>
+ Reference GetNextOfType() {
+ return GetNextOfType(T::kPersistentTypeId);
+ }
+
+ // As above but works using objects and returns null if not found.
+ template <typename T>
+ const T* GetNextOfObject() {
+ return GetAsObject<T>(GetNextOfType<T>());
+ }
+
// Converts references to objects. This is a convenience method so that
// users of the iterator don't need to also have their own pointer to the
// allocator over which the iterator runs in order to retrieve objects.
@@ -112,8 +170,27 @@ class BASE_EXPORT PersistentMemoryAllocator {
// non-const (external) pointer to the same allocator (or use const_cast
// to remove the qualifier).
template <typename T>
- const T* GetAsObject(Reference ref, uint32_t type_id) const {
- return allocator_->GetAsObject<T>(ref, type_id);
+ const T* GetAsObject(Reference ref) const {
+ return allocator_->GetAsObject<T>(ref);
+ }
+
+ // Similar to GetAsObject() but converts references to arrays of things.
+ template <typename T>
+ const T* GetAsArray(Reference ref, uint32_t type_id, size_t count) const {
+ return allocator_->GetAsArray<T>(ref, type_id, count);
+ }
+
+ // Convert a generic pointer back into a reference. A null reference will
+ // be returned if |memory| is not inside the persistent segment or does not
+ // point to an object of the specified |type_id|.
+ Reference GetAsReference(const void* memory, uint32_t type_id) const {
+ return allocator_->GetAsReference(memory, type_id);
+ }
+
+ // As above but convert an object back into a reference.
+ template <typename T>
+ Reference GetAsReference(const T* obj) const {
+ return allocator_->GetAsReference(obj);
}
private:
@@ -136,11 +213,16 @@ class BASE_EXPORT PersistentMemoryAllocator {
};
enum : Reference {
- kReferenceNull = 0 // A common "null" reference value.
+ // A common "null" reference value.
+ kReferenceNull = 0,
+
+ // A value indicating that the type is in transition. Work is being done
+ // on the contents to prepare it for a new type to come.
+ kReferenceTransitioning = 0xFFFFFFFF,
};
- enum : uint32_t {
- kTypeIdAny = 0 // Match any type-id inside GetAsObject().
+ enum : size_t {
+ kSizeAny = 1 // Constant indicating that any array size is acceptable.
};
// This is the standard file extension (suitable for being passed to the
@@ -204,6 +286,7 @@ class BASE_EXPORT PersistentMemoryAllocator {
// IMPORTANT: Callers must update tools/metrics/histograms/histograms.xml
// with the following histograms:
// UMA.PersistentAllocator.name.Allocs
+ // UMA.PersistentAllocator.name.Errors
// UMA.PersistentAllocator.name.UsedPct
void CreateTrackingHistograms(base::StringPiece name);
@@ -224,18 +307,26 @@ class BASE_EXPORT PersistentMemoryAllocator {
// TIME before accessing it or risk crashing! Once dereferenced, the pointer
// is safe to reuse forever.
//
- // IMPORTANT: If there is any possibility that this allocator will be shared
- // across different CPU architectures (perhaps because it is being persisted
- // to disk), then it is essential that the object be of a fixed size. All
- // fields must be of a defined type that does not change across CPU architec-
- // tures or natural word sizes (i.e. 32/64 bit). Acceptable are char and
- // (u)intXX_t. Unacceptable are int, bool, or wchar_t which are implemen-
- // tation defined with regards to their size.
+ // It is essential that the object be of a fixed size. All fields must be of
+ // a defined type that does not change based on the compiler or the CPU
+ // natural word size. Acceptable are char, float, double, and (u)intXX_t.
+ // Unacceptable are int, bool, and wchar_t, which are implementation-defined
+ // with regard to their size.
//
- // ALSO: Alignment must be consistent. A uint64_t after a uint32_t will pad
+ // Alignment must also be consistent. A uint64_t after a uint32_t will pad
// differently between 32 and 64 bit architectures. Either put the bigger
// elements first, group smaller elements into blocks the size of larger
- // elements, or manually insert padding fields as appropriate.
+ // elements, or manually insert padding fields as appropriate for the
+ // largest architecture, including at the end.
+ //
+ // To protect against mistakes, all objects must have the attribute
+ // |kExpectedInstanceSize| (static constexpr size_t) that is a hard-coded
+ // numerical value -- NNN, not sizeof(T) -- that can be tested. If the
+ // instance size is not fixed, at least one build will fail.
+ //
+ // If the size of a structure changes, the type-ID used to recognize it
+ // should also change so later versions of the code don't try to read
+ // incompatible structures from earlier versions.
//
// NOTE: Though this method will guarantee that an object of the specified
// type can be accessed without going outside the bounds of the memory
@@ -249,17 +340,55 @@ class BASE_EXPORT PersistentMemoryAllocator {
// nature of that keyword to the caller. It can add it back, if necessary,
// based on knowledge of how the allocator is being used.
template <typename T>
- T* GetAsObject(Reference ref, uint32_t type_id) {
- static_assert(!std::is_polymorphic<T>::value, "no polymorphic objects");
- return const_cast<T*>(
- reinterpret_cast<volatile T*>(GetBlockData(ref, type_id, sizeof(T))));
+ T* GetAsObject(Reference ref) {
+ static_assert(std::is_standard_layout<T>::value, "only standard objects");
+ static_assert(!std::is_array<T>::value, "use GetAsArray<>()");
+ static_assert(T::kExpectedInstanceSize == sizeof(T), "inconsistent size");
+ return const_cast<T*>(reinterpret_cast<volatile T*>(
+ GetBlockData(ref, T::kPersistentTypeId, sizeof(T))));
+ }
+ template <typename T>
+ const T* GetAsObject(Reference ref) const {
+ static_assert(std::is_standard_layout<T>::value, "only standard objects");
+ static_assert(!std::is_array<T>::value, "use GetAsArray<>()");
+ static_assert(T::kExpectedInstanceSize == sizeof(T), "inconsistent size");
+ return const_cast<const T*>(reinterpret_cast<const volatile T*>(
+ GetBlockData(ref, T::kPersistentTypeId, sizeof(T))));
}
+
+ // Like GetAsObject but get an array of simple, fixed-size types.
+ //
+ // Use a |count| of the required number of array elements, or kSizeAny.
+ // GetAllocSize() can be used to calculate the upper bound but isn't reliable
+ // because padding can make space for extra elements that were not written.
+ //
+ // Remember that an array of char is a string but may not be NUL terminated.
+ //
+ // There are no compile-time or run-time checks to ensure 32/64-bit size
+ // compatibility when using these accessors. Only use fixed-size types such
+ // as char, float, double, or (u)intXX_t.
+ template <typename T>
+ T* GetAsArray(Reference ref, uint32_t type_id, size_t count) {
+ static_assert(std::is_fundamental<T>::value, "use GetAsObject<>()");
+ return const_cast<T*>(reinterpret_cast<volatile T*>(
+ GetBlockData(ref, type_id, count * sizeof(T))));
+ }
+ template <typename T>
+ const T* GetAsArray(Reference ref, uint32_t type_id, size_t count) const {
+ static_assert(std::is_fundamental<T>::value, "use GetAsObject<>()");
+ return const_cast<const T*>(reinterpret_cast<const volatile T*>(
+ GetBlockData(ref, type_id, count * sizeof(T))));
+ }
+
+ // Get the corresponding reference for an object held in persistent memory.
+ // If the |memory| is not valid or the type does not match, a kReferenceNull
+ // result will be returned.
+ Reference GetAsReference(const void* memory, uint32_t type_id) const;
+
+ // As above but works with objects allocated from persistent memory.
template <typename T>
- const T* GetAsObject(Reference ref, uint32_t type_id) const {
- static_assert(!std::is_polymorphic<T>::value, "no polymorphic objects");
- return const_cast<const T*>(
- reinterpret_cast<const volatile T*>(GetBlockData(
- ref, type_id, sizeof(T))));
+ Reference GetAsReference(const T* obj) const {
+ return GetAsReference(obj, T::kPersistentTypeId);
}
// Get the number of bytes allocated to a block. This is useful when storing
@@ -273,15 +402,105 @@ class BASE_EXPORT PersistentMemoryAllocator {
// even though the memory stays valid and allocated. Changing the type is
// an atomic compare/exchange and so requires knowing the existing value.
// It will return false if the existing type is not what is expected.
+ // Changing the type doesn't mean the data is compatible with the new type.
+ // It will likely be necessary to clear or reconstruct the data before it
+ // can be used. Changing the type WILL NOT invalidate existing pointers to
+ // the data, either in this process or others, so changing the data structure
+ // could have unpredictable results. USE WITH CARE!
uint32_t GetType(Reference ref) const;
bool ChangeType(Reference ref, uint32_t to_type_id, uint32_t from_type_id);
+ // Like ChangeType() but gets the "to" type from the object type, clears
+ // the memory, and constructs a new object of the desired type just as
+ // though it was fresh from AllocateObject<>(). The old type simply ceases
+ // to exist; no destructor is called for it. Calling this will not invalidate
+ // existing pointers to the object, either in this process or others, so
+ // changing the object could have unpredictable results. USE WITH CARE!
+ template <typename T>
+ T* ChangeObject(Reference ref, uint32_t from_type_id) {
+ DCHECK_LE(sizeof(T), GetAllocSize(ref)) << "alloc not big enough for obj";
+ // Make sure the memory is appropriate. This won't be used until after
+ // the type is changed but checking first avoids the possibility of having
+ // to change the type back.
+ void* mem = const_cast<void*>(GetBlockData(ref, 0, sizeof(T)));
+ if (!mem)
+ return nullptr;
+ // Ensure the allocator's internal alignment is sufficient for this object.
+ // This protects against coding errors in the allocator.
+ DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(mem) & (ALIGNOF(T) - 1));
+ // First change the type to "transitioning" so that there is no race
+ // condition with the clearing and construction of the object should
+ // another thread be simultaneously iterating over data. This will
+ // "acquire" the memory so no changes get reordered before it.
+ if (!ChangeType(ref, kReferenceTransitioning, from_type_id))
+ return nullptr;
+ // Clear the memory so that the property of all memory being zero after an
+ // allocation also applies here.
+ memset(mem, 0, GetAllocSize(ref));
+ // Construct an object of the desired type on this memory, just as if
+ // AllocateObject had been called to create it.
+ T* obj = new (mem) T();
+ // Finally change the type to the desired one. This will "release" all of
+ // the changes above and so provide a consistent view to other threads.
+ bool success =
+ ChangeType(ref, T::kPersistentTypeId, kReferenceTransitioning);
+ DCHECK(success);
+ return obj;
+ }
+
// Reserve space in the memory segment of the desired |size| and |type_id|.
// A return value of zero indicates the allocation failed, otherwise the
// returned reference can be used by any process to get a real pointer via
// the GetAsObject() call.
Reference Allocate(size_t size, uint32_t type_id);
+ // Allocate and construct an object in persistent memory. The type must have
+ // both (size_t) kExpectedInstanceSize and (uint32_t) kPersistentTypeId
+ // static constexpr fields that are used to ensure compatibility between
+ // software versions. An optional size parameter can be specified to force
+ // the allocation to be bigger than the size of the object; this is useful
+ // when the last field is actually variable length.
+ template <typename T>
+ T* AllocateObject(size_t size) {
+ if (size < sizeof(T))
+ size = sizeof(T);
+ Reference ref = Allocate(size, T::kPersistentTypeId);
+ void* mem =
+ const_cast<void*>(GetBlockData(ref, T::kPersistentTypeId, size));
+ if (!mem)
+ return nullptr;
+ DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(mem) & (ALIGNOF(T) - 1));
+ return new (mem) T();
+ }
+ template <typename T>
+ T* AllocateObject() {
+ return AllocateObject<T>(sizeof(T));
+ }
+
+ // Deletes an object by destructing it and then changing the type to a
+ // different value (default 0).
+ template <typename T>
+ void DeleteObject(T* obj, uint32_t new_type) {
+ // Get the reference for the object.
+ Reference ref = GetAsReference<T>(obj);
+ // First change the type to "transitioning" so there is no race condition
+ // where another thread could find the object through iteration while it
+ // is being destructed. This will "acquire" the memory so no changes get
+ // reordered before it. It will fail if |ref| is invalid.
+ if (!ChangeType(ref, kReferenceTransitioning, T::kPersistentTypeId))
+ return;
+ // Destruct the object.
+ obj->~T();
+ // Finally change the type to the desired value. This will "release" all
+ // the changes above.
+ bool success = ChangeType(ref, new_type, kReferenceTransitioning);
+ DCHECK(success);
+ }
+ template <typename T>
+ void DeleteObject(T* obj) {
+ DeleteObject<T>(obj, 0);
+ }
+
// Allocated objects can be added to an internal list that can then be
// iterated over by other processes. If an allocated object can be found
// another way, such as by having its reference within a different object
@@ -289,8 +508,15 @@ class BASE_EXPORT PersistentMemoryAllocator {
// succeeds unless corruption is detected; check IsCorrupted() to find out.
// Once an object is made iterable, its position in iteration can never
// change; new iterable objects will always be added after it in the series.
+ // Changing the type does not alter its "iterable" status.
void MakeIterable(Reference ref);
+ // As above but works with an object allocated from persistent memory.
+ template <typename T>
+ void MakeIterable(const T* obj) {
+ MakeIterable(GetAsReference<T>(obj));
+ }
+
// Get the information about the amount of free space in the allocator. The
// amount of free space should be treated as approximate due to extras from
// alignment and metadata. Concurrent allocations from other threads will
@@ -384,11 +610,15 @@ class BASE_EXPORT PersistentMemoryAllocator {
ref, type_id, size));
}
- const bool readonly_; // Indicates access to read-only memory.
- std::atomic<bool> corrupt_; // Local version of "corrupted" flag.
+ // Record an error in the internal histogram.
+ void RecordError(int error) const;
+
+ const bool readonly_; // Indicates access to read-only memory.
+ mutable std::atomic<bool> corrupt_; // Local version of "corrupted" flag.
HistogramBase* allocs_histogram_; // Histogram recording allocs.
HistogramBase* used_histogram_; // Histogram recording used space.
+ HistogramBase* errors_histogram_; // Histogram recording errors.
friend class PersistentMemoryAllocatorTest;
FRIEND_TEST_ALL_PREFIXES(PersistentMemoryAllocatorTest, AllocateAndIterate);
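The typed-object helpers added above (AllocateObject<>, GetAsObject<>, GetAsReference, MakeIterable, DeleteObject) replace the raw Allocate()/type-id calls and are keyed off two static fields on the stored type. A minimal usage sketch, not part of the diff, assuming an already-constructed PersistentMemoryAllocator* named allocator and a hypothetical record type:

    // Hypothetical POD stored in persistent memory. The two constants are the
    // contract that AllocateObject<>() and GetAsObject<>() rely on.
    struct MyRecord {
      static constexpr uint32_t kPersistentTypeId = 0x4D595243;  // arbitrary tag
      static constexpr size_t kExpectedInstanceSize = 16;        // 32/64-bit check
      uint64_t id;
      int32_t value;
      int32_t flags;
    };

    MyRecord* rec = allocator->AllocateObject<MyRecord>();  // zeroed + constructed
    if (rec) {
      rec->id = 1;
      allocator->MakeIterable(rec);  // now visible to Iterator in any process
      PersistentMemoryAllocator::Reference ref = allocator->GetAsReference(rec);
      DCHECK(allocator->GetAsObject<MyRecord>(ref));  // type-checked lookup
      allocator->DeleteObject(rec);  // destructs and retags the block as type 0
    }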
diff --git a/chromium/base/metrics/persistent_memory_allocator_unittest.cc b/chromium/base/metrics/persistent_memory_allocator_unittest.cc
index 977d85fd430..57e8e310749 100644
--- a/chromium/base/metrics/persistent_memory_allocator_unittest.cc
+++ b/chromium/base/metrics/persistent_memory_allocator_unittest.cc
@@ -40,16 +40,20 @@ class PersistentMemoryAllocatorTest : public testing::Test {
uint32_t kAllocAlignment;
struct TestObject1 {
- int onething;
+ static constexpr uint32_t kPersistentTypeId = 1;
+ static constexpr size_t kExpectedInstanceSize = 4 + 1 + 3;
+ int32_t onething;
char oranother;
};
struct TestObject2 {
- int thiis;
- long that;
+ static constexpr uint32_t kPersistentTypeId = 2;
+ static constexpr size_t kExpectedInstanceSize = 8 + 4 + 4 + 8 + 8;
+ int64_t thiis;
+ int32_t that;
float andthe;
- char other;
- double thing;
+ double other;
+ char thing[8];
};
PersistentMemoryAllocatorTest() {
@@ -107,10 +111,12 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
// Validate allocation of test object and make sure it can be referenced
// and all metadata looks correct.
- Reference block1 = allocator_->Allocate(sizeof(TestObject1), 1);
- EXPECT_NE(0U, block1);
- EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject1>(block1, 1));
- EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject2>(block1, 1));
+ TestObject1* obj1 = allocator_->AllocateObject<TestObject1>();
+ ASSERT_TRUE(obj1);
+ Reference block1 = allocator_->GetAsReference(obj1);
+ ASSERT_NE(0U, block1);
+ EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject1>(block1));
+ EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject2>(block1));
EXPECT_LE(sizeof(TestObject1), allocator_->GetAllocSize(block1));
EXPECT_GT(sizeof(TestObject1) + kAllocAlignment,
allocator_->GetAllocSize(block1));
@@ -119,6 +125,18 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
EXPECT_EQ(meminfo0.total, meminfo1.total);
EXPECT_GT(meminfo0.free, meminfo1.free);
+ // Verify that pointers can be turned back into references and that invalid
+ // addresses return null.
+ char* memory1 = allocator_->GetAsArray<char>(block1, 1, 1);
+ ASSERT_TRUE(memory1);
+ EXPECT_EQ(block1, allocator_->GetAsReference(memory1, 0));
+ EXPECT_EQ(block1, allocator_->GetAsReference(memory1, 1));
+ EXPECT_EQ(0U, allocator_->GetAsReference(memory1, 2));
+ EXPECT_EQ(0U, allocator_->GetAsReference(memory1 + 1, 0));
+ EXPECT_EQ(0U, allocator_->GetAsReference(memory1 + 16, 0));
+ EXPECT_EQ(0U, allocator_->GetAsReference(nullptr, 0));
+ EXPECT_EQ(0U, allocator_->GetAsReference(&base_name, 0));
+
// Ensure that the test-object can be made iterable.
PersistentMemoryAllocator::Iterator iter1a(allocator_.get());
EXPECT_EQ(0U, iter1a.GetLast());
@@ -133,10 +151,12 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
// Create second test-object and ensure everything is good and it cannot
// be confused with test-object of another type.
- Reference block2 = allocator_->Allocate(sizeof(TestObject2), 2);
- EXPECT_NE(0U, block2);
- EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject2>(block2, 2));
- EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject2>(block2, 1));
+ TestObject2* obj2 = allocator_->AllocateObject<TestObject2>();
+ ASSERT_TRUE(obj2);
+ Reference block2 = allocator_->GetAsReference(obj2);
+ ASSERT_NE(0U, block2);
+ EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject2>(block2));
+ EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject1>(block2));
EXPECT_LE(sizeof(TestObject2), allocator_->GetAllocSize(block2));
EXPECT_GT(sizeof(TestObject2) + kAllocAlignment,
allocator_->GetAllocSize(block2));
@@ -146,7 +166,7 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
EXPECT_GT(meminfo1.free, meminfo2.free);
// Ensure that second test-object can also be made iterable.
- allocator_->MakeIterable(block2);
+ allocator_->MakeIterable(obj2);
EXPECT_EQ(block2, iter1a.GetNext(&type));
EXPECT_EQ(2U, type);
EXPECT_EQ(block2, iter1a.GetLast());
@@ -200,11 +220,11 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
EXPECT_EQ(1, allocs_samples->GetCount(0));
#endif
- // Check that an objcet's type can be changed.
+ // Check that an object's type can be changed.
EXPECT_EQ(2U, allocator_->GetType(block2));
allocator_->ChangeType(block2, 3, 2);
EXPECT_EQ(3U, allocator_->GetType(block2));
- allocator_->ChangeType(block2, 2, 3);
+ allocator_->ChangeObject<TestObject2>(block2, 3);
EXPECT_EQ(2U, allocator_->GetType(block2));
// Create second allocator (read/write) using the same memory segment.
@@ -221,8 +241,8 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
EXPECT_EQ(block1, iter2.GetNext(&type));
EXPECT_EQ(block2, iter2.GetNext(&type));
EXPECT_EQ(0U, iter2.GetNext(&type));
- EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject1>(block1, 1));
- EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject2>(block2, 2));
+ EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject1>(block1));
+ EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject2>(block2));
// Create a third allocator (read-only) using the same memory segment.
std::unique_ptr<const PersistentMemoryAllocator> allocator3(
@@ -237,13 +257,23 @@ TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
EXPECT_EQ(block1, iter3.GetNext(&type));
EXPECT_EQ(block2, iter3.GetNext(&type));
EXPECT_EQ(0U, iter3.GetNext(&type));
- EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject1>(block1, 1));
- EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject2>(block2, 2));
+ EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject1>(block1));
+ EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject2>(block2));
// Ensure that GetNextOfType works.
PersistentMemoryAllocator::Iterator iter1c(allocator_.get());
- EXPECT_EQ(block2, iter1c.GetNextOfType(2));
+ EXPECT_EQ(block2, iter1c.GetNextOfType<TestObject2>());
EXPECT_EQ(0U, iter1c.GetNextOfType(2));
+
+ // Ensure that GetNextOfObject works.
+ PersistentMemoryAllocator::Iterator iter1d(allocator_.get());
+ EXPECT_EQ(obj2, iter1d.GetNextOfObject<TestObject2>());
+ EXPECT_EQ(nullptr, iter1d.GetNextOfObject<TestObject2>());
+
+ // Ensure that deleting an object works.
+ allocator_->DeleteObject(obj2);
+ PersistentMemoryAllocator::Iterator iter1z(allocator_.get());
+ EXPECT_EQ(nullptr, iter1z.GetNextOfObject<TestObject2>());
}
TEST_F(PersistentMemoryAllocatorTest, PageTest) {
@@ -786,7 +816,8 @@ TEST(FilePersistentMemoryAllocatorTest, AcceptableTest) {
uint32_t type_id;
Reference ref;
while ((ref = iter.GetNext(&type_id)) != 0) {
- const char* data = allocator.GetAsObject<char>(ref, 0);
+ const char* data = allocator.GetAsArray<char>(
+ ref, 0, PersistentMemoryAllocator::kSizeAny);
uint32_t type = allocator.GetType(ref);
size_t size = allocator.GetAllocSize(ref);
// Ensure compiler can't optimize-out above variables.
diff --git a/chromium/base/metrics/persistent_sample_map.cc b/chromium/base/metrics/persistent_sample_map.cc
index 15f83cdb333..131e47b68a0 100644
--- a/chromium/base/metrics/persistent_sample_map.cc
+++ b/chromium/base/metrics/persistent_sample_map.cc
@@ -6,6 +6,7 @@
#include "base/logging.h"
#include "base/memory/ptr_util.h"
+#include "base/metrics/histogram_macros.h"
#include "base/metrics/persistent_histogram_allocator.h"
#include "base/stl_util.h"
@@ -16,6 +17,12 @@ typedef HistogramBase::Sample Sample;
namespace {
+enum NegativeSampleReason {
+ PERSISTENT_SPARSE_HAVE_LOGGED_BUT_NOT_SAMPLE,
+ PERSISTENT_SPARSE_SAMPLE_LESS_THAN_LOGGED,
+ MAX_NEGATIVE_SAMPLE_REASONS
+};
+
// An iterator for going through a PersistentSampleMap. The logic here is
// identical to that of SampleMapIterator but with different data structures.
// Changes here likely need to be duplicated there.
@@ -82,14 +89,17 @@ void PersistentSampleMapIterator::SkipEmptyBuckets() {
// memory allocator. The "id" must be unique across all maps held by an
// allocator or they will get attached to the wrong sample map.
struct SampleRecord {
+ // SHA1(SampleRecord): Increment this if structure changes!
+ static constexpr uint32_t kPersistentTypeId = 0x8FE6A69F + 1;
+
+ // Expected size for 32/64-bit check.
+ static constexpr size_t kExpectedInstanceSize = 16;
+
uint64_t id; // Unique identifier of owner.
Sample value; // The value for which this record holds a count.
Count count; // The count associated with the above value.
};
-// The type-id used to identify sample records inside an allocator.
-const uint32_t kTypeIdSampleRecord = 0x8FE6A69F + 1; // SHA1(SampleRecord) v1
-
} // namespace
PersistentSampleMap::PersistentSampleMap(
@@ -141,15 +151,12 @@ PersistentMemoryAllocator::Reference
PersistentSampleMap::GetNextPersistentRecord(
PersistentMemoryAllocator::Iterator& iterator,
uint64_t* sample_map_id) {
- PersistentMemoryAllocator::Reference ref =
- iterator.GetNextOfType(kTypeIdSampleRecord);
- const SampleRecord* record =
- iterator.GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord);
+ const SampleRecord* record = iterator.GetNextOfObject<SampleRecord>();
if (!record)
return 0;
*sample_map_id = record->id;
- return ref;
+ return iterator.GetAsReference(record);
}
// static
@@ -158,11 +165,7 @@ PersistentSampleMap::CreatePersistentRecord(
PersistentMemoryAllocator* allocator,
uint64_t sample_map_id,
Sample value) {
- PersistentMemoryAllocator::Reference ref =
- allocator->Allocate(sizeof(SampleRecord), kTypeIdSampleRecord);
- SampleRecord* record =
- allocator->GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord);
-
+ SampleRecord* record = allocator->AllocateObject<SampleRecord>();
if (!record) {
NOTREACHED() << "full=" << allocator->IsFull()
<< ", corrupt=" << allocator->IsCorrupt();
@@ -172,6 +175,8 @@ PersistentSampleMap::CreatePersistentRecord(
record->id = sample_map_id;
record->value = value;
record->count = 0;
+
+ PersistentMemoryAllocator::Reference ref = allocator->GetAsReference(record);
allocator->MakeIterable(ref);
return ref;
}
@@ -183,11 +188,39 @@ bool PersistentSampleMap::AddSubtractImpl(SampleCountIterator* iter,
Count count;
for (; !iter->Done(); iter->Next()) {
iter->Get(&min, &max, &count);
+ if (count == 0)
+ continue;
if (min + 1 != max)
return false; // SparseHistogram only supports bucket with size 1.
+#if 0 // TODO(bcwhite) Re-enable efficient version after crbug.com/682680.
*GetOrCreateSampleCountStorage(min) +=
(op == HistogramSamples::ADD) ? count : -count;
+#else
+ if (op == HistogramSamples::ADD) {
+ *GetOrCreateSampleCountStorage(min) += count;
+ } else {
+ // Subtract is used only for determining deltas when reporting, which
+ // means that it's in the "logged" iterator. It should have an active
+ // sample record and thus there is no need to try to create one.
+ NegativeSampleReason reason = MAX_NEGATIVE_SAMPLE_REASONS;
+ Count* bucket = GetSampleCountStorage(min);
+ if (bucket == nullptr) {
+ reason = PERSISTENT_SPARSE_HAVE_LOGGED_BUT_NOT_SAMPLE;
+ } else {
+ if (*bucket < count) {
+ reason = PERSISTENT_SPARSE_SAMPLE_LESS_THAN_LOGGED;
+ *bucket = 0;
+ } else {
+ *bucket -= count;
+ }
+ }
+ if (reason != MAX_NEGATIVE_SAMPLE_REASONS) {
+ UMA_HISTOGRAM_ENUMERATION("UMA.NegativeSamples.Reason", reason,
+ MAX_NEGATIVE_SAMPLE_REASONS);
+ }
+ }
+#endif
}
return true;
}
@@ -253,8 +286,7 @@ Count* PersistentSampleMap::ImportSamples(Sample until_value,
PersistentMemoryAllocator::Reference ref;
PersistentSampleMapRecords* records = GetRecords();
while ((ref = records->GetNext()) != 0) {
- SampleRecord* record =
- records->GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord);
+ SampleRecord* record = records->GetAsObject<SampleRecord>(ref);
if (!record)
continue;
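SampleRecord now follows the same typed-object pattern: it declares kPersistentTypeId and kExpectedInstanceSize, is created via AllocateObject<SampleRecord>(), and is found again with the iterator's GetNextOfObject<>(). A sketch of the lookup side, not part of the diff, assuming an allocator that already holds such records (DoSomethingWith is hypothetical):

    PersistentMemoryAllocator::Iterator it(allocator);
    while (const SampleRecord* record = it.GetNextOfObject<SampleRecord>()) {
      // GetAsReference() converts the typed pointer back into a cross-process
      // Reference, exactly as GetNextPersistentRecord() does above.
      PersistentMemoryAllocator::Reference ref = it.GetAsReference(record);
      DoSomethingWith(ref, record->id, record->value, record->count);
    }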
diff --git a/chromium/base/metrics/persistent_sample_map_unittest.cc b/chromium/base/metrics/persistent_sample_map_unittest.cc
index 8c3ac25dea4..d50ab997b25 100644
--- a/chromium/base/metrics/persistent_sample_map_unittest.cc
+++ b/chromium/base/metrics/persistent_sample_map_unittest.cc
@@ -31,7 +31,7 @@ std::unique_ptr<PersistentHistogramAllocator> DuplicateHistogramAllocator(
TEST(PersistentSampleMapTest, AccumulateTest) {
std::unique_ptr<PersistentHistogramAllocator> allocator =
CreateHistogramAllocator(64 << 10); // 64 KiB
- HistogramSamples::Metadata meta;
+ HistogramSamples::LocalMetadata meta;
PersistentSampleMap samples(1, allocator.get(), &meta);
samples.Accumulate(1, 100);
@@ -48,7 +48,7 @@ TEST(PersistentSampleMapTest, AccumulateTest) {
TEST(PersistentSampleMapTest, Accumulate_LargeValuesDontOverflow) {
std::unique_ptr<PersistentHistogramAllocator> allocator =
CreateHistogramAllocator(64 << 10); // 64 KiB
- HistogramSamples::Metadata meta;
+ HistogramSamples::LocalMetadata meta;
PersistentSampleMap samples(1, allocator.get(), &meta);
samples.Accumulate(250000000, 100);
@@ -65,7 +65,7 @@ TEST(PersistentSampleMapTest, Accumulate_LargeValuesDontOverflow) {
TEST(PersistentSampleMapTest, AddSubtractTest) {
std::unique_ptr<PersistentHistogramAllocator> allocator1 =
CreateHistogramAllocator(64 << 10); // 64 KiB
- HistogramSamples::Metadata meta1;
+ HistogramSamples::LocalMetadata meta1;
PersistentSampleMap samples1(1, allocator1.get(), &meta1);
samples1.Accumulate(1, 100);
samples1.Accumulate(2, 100);
@@ -73,7 +73,7 @@ TEST(PersistentSampleMapTest, AddSubtractTest) {
std::unique_ptr<PersistentHistogramAllocator> allocator2 =
DuplicateHistogramAllocator(allocator1.get());
- HistogramSamples::Metadata meta2;
+ HistogramSamples::LocalMetadata meta2;
PersistentSampleMap samples2(2, allocator2.get(), &meta2);
samples2.Accumulate(1, 200);
samples2.Accumulate(2, 200);
@@ -101,7 +101,7 @@ TEST(PersistentSampleMapTest, AddSubtractTest) {
TEST(PersistentSampleMapTest, PersistenceTest) {
std::unique_ptr<PersistentHistogramAllocator> allocator1 =
CreateHistogramAllocator(64 << 10); // 64 KiB
- HistogramSamples::Metadata meta12;
+ HistogramSamples::LocalMetadata meta12;
PersistentSampleMap samples1(12, allocator1.get(), &meta12);
samples1.Accumulate(1, 100);
samples1.Accumulate(2, 200);
@@ -154,7 +154,7 @@ TEST(PersistentSampleMapTest, PersistenceTest) {
TEST(PersistentSampleMapIteratorTest, IterateTest) {
std::unique_ptr<PersistentHistogramAllocator> allocator =
CreateHistogramAllocator(64 << 10); // 64 KiB
- HistogramSamples::Metadata meta;
+ HistogramSamples::LocalMetadata meta;
PersistentSampleMap samples(1, allocator.get(), &meta);
samples.Accumulate(1, 100);
samples.Accumulate(2, 200);
@@ -192,7 +192,7 @@ TEST(PersistentSampleMapIteratorTest, IterateTest) {
TEST(PersistentSampleMapIteratorTest, SkipEmptyRanges) {
std::unique_ptr<PersistentHistogramAllocator> allocator1 =
CreateHistogramAllocator(64 << 10); // 64 KiB
- HistogramSamples::Metadata meta1;
+ HistogramSamples::LocalMetadata meta1;
PersistentSampleMap samples1(1, allocator1.get(), &meta1);
samples1.Accumulate(5, 1);
samples1.Accumulate(10, 2);
@@ -202,7 +202,7 @@ TEST(PersistentSampleMapIteratorTest, SkipEmptyRanges) {
std::unique_ptr<PersistentHistogramAllocator> allocator2 =
DuplicateHistogramAllocator(allocator1.get());
- HistogramSamples::Metadata meta2;
+ HistogramSamples::LocalMetadata meta2;
PersistentSampleMap samples2(2, allocator2.get(), &meta2);
samples2.Accumulate(5, 1);
samples2.Accumulate(20, 4);
@@ -237,7 +237,7 @@ TEST(PersistentSampleMapIteratorTest, SkipEmptyRanges) {
TEST(PersistentSampleMapIteratorDeathTest, IterateDoneTest) {
std::unique_ptr<PersistentHistogramAllocator> allocator =
CreateHistogramAllocator(64 << 10); // 64 KiB
- HistogramSamples::Metadata meta;
+ HistogramSamples::LocalMetadata meta;
PersistentSampleMap samples(1, allocator.get(), &meta);
std::unique_ptr<SampleCountIterator> it = samples.Iterator();
diff --git a/chromium/base/metrics/sample_vector_unittest.cc b/chromium/base/metrics/sample_vector_unittest.cc
index 0abacad84be..2d77d2376b8 100644
--- a/chromium/base/metrics/sample_vector_unittest.cc
+++ b/chromium/base/metrics/sample_vector_unittest.cc
@@ -116,7 +116,6 @@ TEST(SampleVectorTest, AddSubtractTest) {
EXPECT_EQ(samples1.redundant_count(), samples1.TotalCount());
}
-#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) && GTEST_HAS_DEATH_TEST
TEST(SampleVectorDeathTest, BucketIndexTest) {
// 8 buckets with exponential layout:
// [0, 1) [1, 2) [2, 4) [4, 8) [8, 16) [16, 32) [32, 64) [64, INT_MAX)
@@ -133,9 +132,9 @@ TEST(SampleVectorDeathTest, BucketIndexTest) {
EXPECT_EQ(3, samples.GetCount(65));
// Extreme case.
- EXPECT_DEATH(samples.Accumulate(INT_MIN, 100), "");
- EXPECT_DEATH(samples.Accumulate(-1, 100), "");
- EXPECT_DEATH(samples.Accumulate(INT_MAX, 100), "");
+ EXPECT_DCHECK_DEATH(samples.Accumulate(INT_MIN, 100));
+ EXPECT_DCHECK_DEATH(samples.Accumulate(-1, 100));
+ EXPECT_DCHECK_DEATH(samples.Accumulate(INT_MAX, 100));
// Custom buckets: [1, 5) [5, 10)
// Note, this is not a valid BucketRanges for Histogram because it does not
@@ -155,8 +154,8 @@ TEST(SampleVectorDeathTest, BucketIndexTest) {
EXPECT_EQ(4, samples2.GetCount(5));
// Extreme case.
- EXPECT_DEATH(samples2.Accumulate(0, 100), "");
- EXPECT_DEATH(samples2.Accumulate(10, 100), "");
+ EXPECT_DCHECK_DEATH(samples2.Accumulate(0, 100));
+ EXPECT_DCHECK_DEATH(samples2.Accumulate(10, 100));
}
TEST(SampleVectorDeathTest, AddSubtractBucketNotMatchTest) {
@@ -182,25 +181,22 @@ TEST(SampleVectorDeathTest, AddSubtractBucketNotMatchTest) {
// Extra bucket in the beginning.
samples2.Accumulate(0, 100);
- EXPECT_DEATH(samples1.Add(samples2), "");
- EXPECT_DEATH(samples1.Subtract(samples2), "");
+ EXPECT_DCHECK_DEATH(samples1.Add(samples2));
+ EXPECT_DCHECK_DEATH(samples1.Subtract(samples2));
// Extra bucket in the end.
samples2.Accumulate(0, -100);
samples2.Accumulate(6, 100);
- EXPECT_DEATH(samples1.Add(samples2), "");
- EXPECT_DEATH(samples1.Subtract(samples2), "");
+ EXPECT_DCHECK_DEATH(samples1.Add(samples2));
+ EXPECT_DCHECK_DEATH(samples1.Subtract(samples2));
// Bucket not match: [3, 5) VS [3, 6)
samples2.Accumulate(6, -100);
samples2.Accumulate(3, 100);
- EXPECT_DEATH(samples1.Add(samples2), "");
- EXPECT_DEATH(samples1.Subtract(samples2), "");
+ EXPECT_DCHECK_DEATH(samples1.Add(samples2));
+ EXPECT_DCHECK_DEATH(samples1.Subtract(samples2));
}
-#endif
-// (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) && GTEST_HAS_DEATH_TEST
-
TEST(SampleVectorIteratorTest, IterateTest) {
BucketRanges ranges(5);
ranges.set_range(0, 0);
diff --git a/chromium/base/metrics/statistics_recorder.cc b/chromium/base/metrics/statistics_recorder.cc
index 6b1b0bfdea2..482845045f2 100644
--- a/chromium/base/metrics/statistics_recorder.cc
+++ b/chromium/base/metrics/statistics_recorder.cc
@@ -16,7 +16,6 @@
#include "base/metrics/persistent_histogram_allocator.h"
#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
-#include "base/synchronization/lock.h"
#include "base/values.h"
namespace {
@@ -59,10 +58,10 @@ StatisticsRecorder::HistogramIterator::~HistogramIterator() {}
StatisticsRecorder::HistogramIterator&
StatisticsRecorder::HistogramIterator::operator++() {
const HistogramMap::iterator histograms_end = histograms_->end();
- if (iter_ == histograms_end || lock_ == NULL)
+ if (iter_ == histograms_end)
return *this;
- base::AutoLock auto_lock(*lock_);
+ base::AutoLock auto_lock(lock_.Get());
for (;;) {
++iter_;
@@ -79,13 +78,12 @@ StatisticsRecorder::HistogramIterator::operator++() {
}
StatisticsRecorder::~StatisticsRecorder() {
- DCHECK(lock_);
DCHECK(histograms_);
DCHECK(ranges_);
// Clean out what this object created and then restore what existed before.
Reset();
- base::AutoLock auto_lock(*lock_);
+ base::AutoLock auto_lock(lock_.Get());
histograms_ = existing_histograms_.release();
callbacks_ = existing_callbacks_.release();
ranges_ = existing_ranges_.release();
@@ -110,31 +108,26 @@ void StatisticsRecorder::Initialize() {
// static
bool StatisticsRecorder::IsActive() {
- if (lock_ == NULL)
- return false;
- base::AutoLock auto_lock(*lock_);
- return NULL != histograms_;
+ base::AutoLock auto_lock(lock_.Get());
+ return histograms_ != nullptr;
}
// static
HistogramBase* StatisticsRecorder::RegisterOrDeleteDuplicate(
HistogramBase* histogram) {
- // As per crbug.com/79322 the histograms are intentionally leaked, so we need
- // to annotate them. Because ANNOTATE_LEAKING_OBJECT_PTR may be used only once
- // for an object, the duplicates should not be annotated.
- // Callers are responsible for not calling RegisterOrDeleteDuplicate(ptr)
- // twice if (lock_ == NULL) || (!histograms_).
- if (lock_ == NULL) {
- ANNOTATE_LEAKING_OBJECT_PTR(histogram); // see crbug.com/79322
- return histogram;
- }
-
- HistogramBase* histogram_to_delete = NULL;
- HistogramBase* histogram_to_return = NULL;
+ HistogramBase* histogram_to_delete = nullptr;
+ HistogramBase* histogram_to_return = nullptr;
{
- base::AutoLock auto_lock(*lock_);
- if (histograms_ == NULL) {
+ base::AutoLock auto_lock(lock_.Get());
+ if (!histograms_) {
histogram_to_return = histogram;
+
+ // As per crbug.com/79322 the histograms are intentionally leaked, so we
+ // need to annotate them. Because ANNOTATE_LEAKING_OBJECT_PTR may be used
+ // only once for an object, the duplicates should not be annotated.
+ // Callers are responsible for not calling RegisterOrDeleteDuplicate(ptr)
+ // twice |if (!histograms_)|.
+ ANNOTATE_LEAKING_OBJECT_PTR(histogram); // see crbug.com/79322
} else {
const std::string& name = histogram->histogram_name();
HistogramMap::iterator it = histograms_->find(name);
@@ -175,13 +168,8 @@ const BucketRanges* StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
DCHECK(ranges->HasValidChecksum());
std::unique_ptr<const BucketRanges> ranges_deleter;
- if (lock_ == NULL) {
- ANNOTATE_LEAKING_OBJECT_PTR(ranges);
- return ranges;
- }
-
- base::AutoLock auto_lock(*lock_);
- if (ranges_ == NULL) {
+ base::AutoLock auto_lock(lock_.Get());
+ if (!ranges_) {
ANNOTATE_LEAKING_OBJECT_PTR(ranges);
return ranges;
}
@@ -278,10 +266,8 @@ std::string StatisticsRecorder::ToJSON(const std::string& query) {
// static
void StatisticsRecorder::GetHistograms(Histograms* output) {
- if (lock_ == NULL)
- return;
- base::AutoLock auto_lock(*lock_);
- if (histograms_ == NULL)
+ base::AutoLock auto_lock(lock_.Get());
+ if (!histograms_)
return;
for (const auto& entry : *histograms_) {
@@ -292,10 +278,8 @@ void StatisticsRecorder::GetHistograms(Histograms* output) {
// static
void StatisticsRecorder::GetBucketRanges(
std::vector<const BucketRanges*>* output) {
- if (lock_ == NULL)
- return;
- base::AutoLock auto_lock(*lock_);
- if (ranges_ == NULL)
+ base::AutoLock auto_lock(lock_.Get());
+ if (!ranges_)
return;
for (const auto& entry : *ranges_) {
@@ -312,15 +296,13 @@ HistogramBase* StatisticsRecorder::FindHistogram(base::StringPiece name) {
// will acquire the lock at that time.
ImportGlobalPersistentHistograms();
- if (lock_ == NULL)
- return NULL;
- base::AutoLock auto_lock(*lock_);
- if (histograms_ == NULL)
- return NULL;
+ base::AutoLock auto_lock(lock_.Get());
+ if (!histograms_)
+ return nullptr;
HistogramMap::iterator it = histograms_->find(name);
if (histograms_->end() == it)
- return NULL;
+ return nullptr;
return it->second;
}
@@ -332,7 +314,7 @@ StatisticsRecorder::HistogramIterator StatisticsRecorder::begin(
HistogramMap::iterator iter_begin;
{
- base::AutoLock auto_lock(*lock_);
+ base::AutoLock auto_lock(lock_.Get());
iter_begin = histograms_->begin();
}
return HistogramIterator(iter_begin, include_persistent);
@@ -342,7 +324,7 @@ StatisticsRecorder::HistogramIterator StatisticsRecorder::begin(
StatisticsRecorder::HistogramIterator StatisticsRecorder::end() {
HistogramMap::iterator iter_end;
{
- base::AutoLock auto_lock(*lock_);
+ base::AutoLock auto_lock(lock_.Get());
iter_end = histograms_->end();
}
return HistogramIterator(iter_end, true);
@@ -350,19 +332,18 @@ StatisticsRecorder::HistogramIterator StatisticsRecorder::end() {
// static
void StatisticsRecorder::InitLogOnShutdown() {
- if (lock_ == nullptr)
+ if (!histograms_)
return;
- base::AutoLock auto_lock(*lock_);
+
+ base::AutoLock auto_lock(lock_.Get());
g_statistics_recorder_.Get().InitLogOnShutdownWithoutLock();
}
// static
void StatisticsRecorder::GetSnapshot(const std::string& query,
Histograms* snapshot) {
- if (lock_ == NULL)
- return;
- base::AutoLock auto_lock(*lock_);
- if (histograms_ == NULL)
+ base::AutoLock auto_lock(lock_.Get());
+ if (!histograms_)
return;
for (const auto& entry : *histograms_) {
@@ -376,10 +357,8 @@ bool StatisticsRecorder::SetCallback(
const std::string& name,
const StatisticsRecorder::OnSampleCallback& cb) {
DCHECK(!cb.is_null());
- if (lock_ == NULL)
- return false;
- base::AutoLock auto_lock(*lock_);
- if (histograms_ == NULL)
+ base::AutoLock auto_lock(lock_.Get());
+ if (!histograms_)
return false;
if (ContainsKey(*callbacks_, name))
@@ -395,10 +374,8 @@ bool StatisticsRecorder::SetCallback(
// static
void StatisticsRecorder::ClearCallback(const std::string& name) {
- if (lock_ == NULL)
- return;
- base::AutoLock auto_lock(*lock_);
- if (histograms_ == NULL)
+ base::AutoLock auto_lock(lock_.Get());
+ if (!histograms_)
return;
callbacks_->erase(name);
@@ -412,10 +389,8 @@ void StatisticsRecorder::ClearCallback(const std::string& name) {
// static
StatisticsRecorder::OnSampleCallback StatisticsRecorder::FindCallback(
const std::string& name) {
- if (lock_ == NULL)
- return OnSampleCallback();
- base::AutoLock auto_lock(*lock_);
- if (histograms_ == NULL)
+ base::AutoLock auto_lock(lock_.Get());
+ if (!histograms_)
return OnSampleCallback();
auto callback_iterator = callbacks_->find(name);
@@ -425,10 +400,7 @@ StatisticsRecorder::OnSampleCallback StatisticsRecorder::FindCallback(
// static
size_t StatisticsRecorder::GetHistogramCount() {
- if (!lock_)
- return 0;
-
- base::AutoLock auto_lock(*lock_);
+ base::AutoLock auto_lock(lock_.Get());
if (!histograms_)
return 0;
return histograms_->size();
@@ -449,7 +421,7 @@ StatisticsRecorder::CreateTemporaryForTesting() {
// static
void StatisticsRecorder::UninitializeForTesting() {
// Stop now if it's never been initialized.
- if (lock_ == NULL || histograms_ == NULL)
+ if (!histograms_)
return;
// Get the global instance and destruct it. It's held in static memory so
@@ -465,7 +437,7 @@ void StatisticsRecorder::UninitializeForTesting() {
// static
void StatisticsRecorder::ImportGlobalPersistentHistograms() {
- if (lock_ == NULL)
+ if (!histograms_)
return;
// Import histograms from known persistent storage. Histograms could have
@@ -481,17 +453,7 @@ void StatisticsRecorder::ImportGlobalPersistentHistograms() {
// of main(), and hence it is not thread safe. It initializes globals to
// provide support for all future calls.
StatisticsRecorder::StatisticsRecorder() {
- if (lock_ == NULL) {
- // This will leak on purpose. It's the only way to make sure we won't race
- // against the static uninitialization of the module while one of our
- // static methods relying on the lock get called at an inappropriate time
- // during the termination phase. Since it's a static data member, we will
- // leak one per process, which would be similar to the instance allocated
- // during static initialization and released only on process termination.
- lock_ = new base::Lock;
- }
-
- base::AutoLock auto_lock(*lock_);
+ base::AutoLock auto_lock(lock_.Get());
existing_histograms_.reset(histograms_);
existing_callbacks_.reset(callbacks_);
@@ -513,23 +475,18 @@ void StatisticsRecorder::InitLogOnShutdownWithoutLock() {
// static
void StatisticsRecorder::Reset() {
- // If there's no lock then there is nothing to reset.
- if (!lock_)
- return;
std::unique_ptr<HistogramMap> histograms_deleter;
std::unique_ptr<CallbackMap> callbacks_deleter;
std::unique_ptr<RangesMap> ranges_deleter;
- // We don't delete lock_ on purpose to avoid having to properly protect
- // against it going away after we checked for NULL in the static methods.
{
- base::AutoLock auto_lock(*lock_);
+ base::AutoLock auto_lock(lock_.Get());
histograms_deleter.reset(histograms_);
callbacks_deleter.reset(callbacks_);
ranges_deleter.reset(ranges_);
- histograms_ = NULL;
- callbacks_ = NULL;
- ranges_ = NULL;
+ histograms_ = nullptr;
+ callbacks_ = nullptr;
+ ranges_ = nullptr;
}
// We are going to leak the histograms and the ranges.
}
@@ -543,12 +500,13 @@ void StatisticsRecorder::DumpHistogramsToVlog(void* instance) {
// static
-StatisticsRecorder::HistogramMap* StatisticsRecorder::histograms_ = NULL;
+StatisticsRecorder::HistogramMap* StatisticsRecorder::histograms_ = nullptr;
// static
-StatisticsRecorder::CallbackMap* StatisticsRecorder::callbacks_ = NULL;
+StatisticsRecorder::CallbackMap* StatisticsRecorder::callbacks_ = nullptr;
// static
-StatisticsRecorder::RangesMap* StatisticsRecorder::ranges_ = NULL;
+StatisticsRecorder::RangesMap* StatisticsRecorder::ranges_ = nullptr;
// static
-base::Lock* StatisticsRecorder::lock_ = NULL;
+base::LazyInstance<base::Lock>::Leaky StatisticsRecorder::lock_ =
+ LAZY_INSTANCE_INITIALIZER;
} // namespace base
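The change above removes every |if (lock_ == NULL)| early-return by making the lock a leaky LazyInstance: it is constructed on first use and never destroyed, so the static methods can take it unconditionally. A reduced sketch of the same pattern, not part of the diff, with hypothetical names:

    #include "base/lazy_instance.h"
    #include "base/synchronization/lock.h"

    namespace {
    // Constructed on first Get(); intentionally leaked at shutdown so callers
    // never race against its creation or destruction.
    base::LazyInstance<base::Lock>::Leaky g_map_lock = LAZY_INSTANCE_INITIALIZER;
    }  // namespace

    void TouchSharedMap() {  // hypothetical caller
      base::AutoLock auto_lock(g_map_lock.Get());
      // ... read or mutate the state guarded by the lock ...
    }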
diff --git a/chromium/base/metrics/statistics_recorder.h b/chromium/base/metrics/statistics_recorder.h
index b4dae877099..6deddae0109 100644
--- a/chromium/base/metrics/statistics_recorder.h
+++ b/chromium/base/metrics/statistics_recorder.h
@@ -25,11 +25,11 @@
#include "base/macros.h"
#include "base/metrics/histogram_base.h"
#include "base/strings/string_piece.h"
+#include "base/synchronization/lock.h"
namespace base {
class BucketRanges;
-class Lock;
class BASE_EXPORT StatisticsRecorder {
public:
@@ -230,8 +230,11 @@ class BASE_EXPORT StatisticsRecorder {
static CallbackMap* callbacks_;
static RangesMap* ranges_;
- // Lock protects access to above maps.
- static base::Lock* lock_;
+ // Lock protects access to above maps. This is a LazyInstance to avoid races
+ // when the above methods are used before Initialize(). Previously each method
+ // would do |if (!lock_) return;| which would race with
+ // |lock_ = new Lock;| in StatisticsRecorder(). http://crbug.com/672852.
+ static base::LazyInstance<base::Lock>::Leaky lock_;
DISALLOW_COPY_AND_ASSIGN(StatisticsRecorder);
};
diff --git a/chromium/base/numerics/safe_conversions.h b/chromium/base/numerics/safe_conversions.h
index 4f45caf9c52..b0ec279eb58 100644
--- a/chromium/base/numerics/safe_conversions.h
+++ b/chromium/base/numerics/safe_conversions.h
@@ -8,44 +8,61 @@
#include <stddef.h>
#include <limits>
+#include <ostream>
#include <type_traits>
-#include "base/logging.h"
#include "base/numerics/safe_conversions_impl.h"
namespace base {
+// The following are helper constexpr template functions and classes for safely
+// performing a range of conversions, assignments, and tests:
+//
+// checked_cast<> - Analogous to static_cast<> for numeric types, except
+// that it CHECKs that the specified numeric conversion will not overflow
+// or underflow. A NaN source will always trigger a CHECK.
+// The default CHECK triggers a crash, but the handler can be overridden.
+// saturated_cast<> - Analogous to static_cast<> for numeric types, except
+// that it returns a saturated result when the specified numeric conversion
+// would otherwise overflow or underflow. A NaN source returns 0 by
+// default, but can be overridden to return a different result.
+// strict_cast<> - Analogous to static_cast<> for numeric types, except that
+// it will cause a compile failure if the destination type is not large
+// enough to contain any value in the source type. It performs no runtime
+// checking and thus introduces no runtime overhead.
+// IsValueInRangeForNumericType<>() - A convenience function that returns true
+// if the type supplied to the template parameter can represent the value
+// passed as an argument to the function.
+// IsValueNegative<>() - A convenience function that will accept any arithmetic
+// type as an argument and will return whether the value is less than zero.
+// Unsigned types always return false.
+// SafeUnsignedAbs() - Returns the absolute value of the supplied integer
+// parameter as an unsigned result (thus avoiding an overflow if the value
+// is the signed, two's complement minimum).
+// StrictNumeric<> - A wrapper type that performs assignments and copies via
+// the strict_cast<> template, and can perform valid arithmetic comparisons
+// across any range of arithmetic types. StrictNumeric is the return type
+// for values extracted from a CheckedNumeric class instance. The raw
+// arithmetic value is extracted via static_cast to the underlying type.
+// MakeStrictNum() - Creates a new StrictNumeric from the underlying type of
+// the supplied arithmetic or StrictNumeric type.
+
// Convenience function that returns true if the supplied value is in range
// for the destination type.
template <typename Dst, typename Src>
constexpr bool IsValueInRangeForNumericType(Src value) {
- return internal::DstRangeRelationToSrcRange<Dst>(value) ==
- internal::RANGE_VALID;
-}
-
-// Convenience function for determining if a numeric value is negative without
-// throwing compiler warnings on: unsigned(value) < 0.
-template <typename T>
-constexpr typename std::enable_if<std::numeric_limits<T>::is_signed, bool>::type
-IsValueNegative(T value) {
- static_assert(std::numeric_limits<T>::is_specialized,
- "Argument must be numeric.");
- return value < 0;
+ return internal::DstRangeRelationToSrcRange<Dst>(value).IsValid();
}
-template <typename T>
-constexpr typename std::enable_if<!std::numeric_limits<T>::is_signed,
- bool>::type IsValueNegative(T) {
- static_assert(std::numeric_limits<T>::is_specialized,
- "Argument must be numeric.");
- return false;
-}
-
-// Just fires a CHECK(false). Used for numeric boundary errors.
+// Forces a crash, like a CHECK(false). Used for numeric boundary errors.
struct CheckOnFailure {
template <typename T>
static T HandleFailure() {
- CHECK(false);
+#if defined(__GNUC__) || defined(__clang__)
+ __builtin_trap();
+#else
+ ((void)(*(volatile char*)0 = 0));
+#endif
return T();
}
};
@@ -59,49 +76,62 @@ template <typename Dst,
constexpr Dst checked_cast(Src value) {
// This throws a compile-time error on evaluating the constexpr if it can be
// determined at compile-time as failing, otherwise it will CHECK at runtime.
- return IsValueInRangeForNumericType<Dst>(value)
- ? static_cast<Dst>(value)
+ using SrcType = typename internal::UnderlyingType<Src>::type;
+ return IsValueInRangeForNumericType<Dst, SrcType>(value)
+ ? static_cast<Dst>(static_cast<SrcType>(value))
: CheckHandler::template HandleFailure<Dst>();
}
-// HandleNaN will return 0 in this case.
-struct SaturatedCastNaNBehaviorReturnZero {
- template <typename T>
- static constexpr T HandleFailure() {
- return T();
+// Default boundaries for integral/float: max/infinity, lowest/-infinity, 0/NaN.
+template <typename T>
+struct SaturationDefaultHandler {
+ static constexpr T NaN() {
+ return std::numeric_limits<T>::has_quiet_NaN
+ ? std::numeric_limits<T>::quiet_NaN()
+ : T();
+ }
+ static constexpr T max() { return std::numeric_limits<T>::max(); }
+ static constexpr T Overflow() {
+ return std::numeric_limits<T>::has_infinity
+ ? std::numeric_limits<T>::infinity()
+ : std::numeric_limits<T>::max();
+ }
+ static constexpr T lowest() { return std::numeric_limits<T>::lowest(); }
+ static constexpr T Underflow() {
+ return std::numeric_limits<T>::has_infinity
+ ? std::numeric_limits<T>::infinity() * -1
+ : std::numeric_limits<T>::lowest();
}
};
namespace internal {
-// This wrapper is used for C++11 constexpr support by avoiding the declaration
-// of local variables in the saturated_cast template function.
-template <typename Dst, class NaNHandler, typename Src>
-constexpr Dst saturated_cast_impl(const Src value,
- const RangeConstraint constraint) {
- return constraint == RANGE_VALID
- ? static_cast<Dst>(value)
- : (constraint == RANGE_UNDERFLOW
- ? std::numeric_limits<Dst>::min()
- : (constraint == RANGE_OVERFLOW
- ? std::numeric_limits<Dst>::max()
- : (constraint == RANGE_INVALID
- ? NaNHandler::template HandleFailure<Dst>()
- : (NOTREACHED(), static_cast<Dst>(value)))));
+
+template <typename Dst, template <typename> class S, typename Src>
+constexpr Dst saturated_cast_impl(Src value, RangeCheck constraint) {
+ // For some reason clang generates much better code when the branch is
+ // structured exactly this way, rather than a sequence of checks.
+ return !constraint.IsOverflowFlagSet()
+ ? (!constraint.IsUnderflowFlagSet() ? static_cast<Dst>(value)
+ : S<Dst>::Underflow())
+ // Skip this check for integral Src, which cannot be NaN.
+ : (std::is_integral<Src>::value || !constraint.IsUnderflowFlagSet()
+ ? S<Dst>::Overflow()
+ : S<Dst>::NaN());
}
-} // namespace internal
// saturated_cast<> is analogous to static_cast<> for numeric types, except
-// that the specified numeric conversion will saturate rather than overflow or
-// underflow. NaN assignment to an integral will defer the behavior to a
-// specified class. By default, it will return 0.
+// that the specified numeric conversion will saturate by default rather than
+// overflow or underflow, and NaN assignment to an integral will return 0.
+// All boundary condition behaviors can be overridden with a custom handler.
template <typename Dst,
- class NaNHandler = SaturatedCastNaNBehaviorReturnZero,
+ template <typename>
+ class SaturationHandler = SaturationDefaultHandler,
typename Src>
constexpr Dst saturated_cast(Src value) {
- return std::numeric_limits<Dst>::is_iec559
- ? static_cast<Dst>(value) // Floating point optimization.
- : internal::saturated_cast_impl<Dst, NaNHandler>(
- value, internal::DstRangeRelationToSrcRange<Dst>(value));
+ using SrcType = typename UnderlyingType<Src>::type;
+ return saturated_cast_impl<Dst, SaturationHandler, SrcType>(
+ value,
+ DstRangeRelationToSrcRange<Dst, SaturationHandler, SrcType>(value));
}
// strict_cast<> is analogous to static_cast<> for numeric types, except that
@@ -109,22 +139,40 @@ constexpr Dst saturated_cast(Src value) {
// to contain any value in the source type. It performs no runtime checking.
template <typename Dst, typename Src>
constexpr Dst strict_cast(Src value) {
- static_assert(std::numeric_limits<Src>::is_specialized,
- "Argument must be numeric.");
- static_assert(std::numeric_limits<Dst>::is_specialized,
- "Result must be numeric.");
- static_assert((internal::StaticDstRangeRelationToSrcRange<Dst, Src>::value ==
- internal::NUMERIC_RANGE_CONTAINED),
- "The numeric conversion is out of range for this type. You "
- "should probably use one of the following conversion "
- "mechanisms on the value you want to pass:\n"
- "- base::checked_cast\n"
- "- base::saturated_cast\n"
- "- base::CheckedNumeric");
-
- return static_cast<Dst>(value);
+ using SrcType = typename UnderlyingType<Src>::type;
+ static_assert(UnderlyingType<Src>::is_numeric, "Argument must be numeric.");
+ static_assert(std::is_arithmetic<Dst>::value, "Result must be numeric.");
+
+ // If you got here from a compiler error, it's because you tried to assign
+ // from a source type to a destination type that has insufficient range.
+ // The solution may be to change the destination type you're assigning to,
+ // and use one large enough to represent the source.
+ // Alternatively, you may be better served with the checked_cast<> or
+ // saturated_cast<> template functions for your particular use case.
+ static_assert(StaticDstRangeRelationToSrcRange<Dst, SrcType>::value ==
+ NUMERIC_RANGE_CONTAINED,
+ "The source type is out of range for the destination type. "
+ "Please see strict_cast<> comments for more information.");
+
+ return static_cast<Dst>(static_cast<SrcType>(value));
}
+// Some wrappers to statically check that a type is in range.
+template <typename Dst, typename Src, class Enable = void>
+struct IsNumericRangeContained {
+ static const bool value = false;
+};
+
+template <typename Dst, typename Src>
+struct IsNumericRangeContained<
+ Dst,
+ Src,
+ typename std::enable_if<ArithmeticOrUnderlyingEnum<Dst>::value &&
+ ArithmeticOrUnderlyingEnum<Src>::value>::type> {
+ static const bool value = StaticDstRangeRelationToSrcRange<Dst, Src>::value ==
+ NUMERIC_RANGE_CONTAINED;
+};
+
// StrictNumeric implements compile time range checking between numeric types by
// wrapping assignment operations in a strict_cast. This class is intended to be
// used for function arguments and return types, to ensure the destination type
@@ -138,7 +186,7 @@ constexpr Dst strict_cast(Src value) {
template <typename T>
class StrictNumeric {
public:
- typedef T type;
+ using type = T;
constexpr StrictNumeric() : value_(0) {}
@@ -150,21 +198,74 @@ class StrictNumeric {
// This is not an explicit constructor because we implicitly upgrade regular
// numerics to StrictNumerics to make them easier to use.
template <typename Src>
- constexpr StrictNumeric(Src value)
+ constexpr StrictNumeric(Src value) // NOLINT(runtime/explicit)
: value_(strict_cast<T>(value)) {}
- // The numeric cast operator basically handles all the magic.
- template <typename Dst>
+ // If you got here from a compiler error, it's because you tried to assign
+ // from a source type to a destination type that has insufficient range.
+ // The solution may be to change the destination type you're assigning to,
+ // and use one large enough to represent the source.
+ // If you're assigning from a CheckedNumeric<> class, you may be able to use
+ // the AssignIfValid() member function, specify a narrower destination type to
+ // the member value functions (e.g. val.template ValueOrDie<Dst>()), or use
+ // one of the value helper functions (e.g. ValueOrDieForType<Dst>(val)).
+ // If you've encountered an _ambiguous overload_ you can use a static_cast<>
+ // to explicitly cast the result to the destination type.
+ // If none of that works, you may be better served with the checked_cast<> or
+ // saturated_cast<> template functions for your particular use case.
+ template <typename Dst,
+ typename std::enable_if<
+ IsNumericRangeContained<Dst, T>::value>::type* = nullptr>
constexpr operator Dst() const {
- return strict_cast<Dst>(value_);
+ return static_cast<typename ArithmeticOrUnderlyingEnum<Dst>::type>(value_);
}
private:
const T value_;
};
-// Explicitly make a shorter size_t typedef for convenience.
-typedef StrictNumeric<size_t> SizeT;
+// Convenience wrapper returning a StrictNumeric from the provided arithmetic type.
+template <typename T>
+constexpr StrictNumeric<typename UnderlyingType<T>::type> MakeStrictNum(
+ const T value) {
+ return value;
+}
+
+// Overload the ostream output operator to make logging work nicely.
+template <typename T>
+std::ostream& operator<<(std::ostream& os, const StrictNumeric<T>& value) {
+ os << static_cast<T>(value);
+ return os;
+}
+
+#define STRICT_COMPARISON_OP(NAME, OP) \
+ template <typename L, typename R, \
+ typename std::enable_if< \
+ internal::IsStrictOp<L, R>::value>::type* = nullptr> \
+ constexpr bool operator OP(const L lhs, const R rhs) { \
+ return SafeCompare<NAME, typename UnderlyingType<L>::type, \
+ typename UnderlyingType<R>::type>(lhs, rhs); \
+ }
+
+STRICT_COMPARISON_OP(IsLess, <);
+STRICT_COMPARISON_OP(IsLessOrEqual, <=);
+STRICT_COMPARISON_OP(IsGreater, >);
+STRICT_COMPARISON_OP(IsGreaterOrEqual, >=);
+STRICT_COMPARISON_OP(IsEqual, ==);
+STRICT_COMPARISON_OP(IsNotEqual, !=);
+
+#undef STRICT_COMPARISON_OP
+};
+
+using internal::strict_cast;
+using internal::saturated_cast;
+using internal::SafeUnsignedAbs;
+using internal::StrictNumeric;
+using internal::MakeStrictNum;
+using internal::IsValueNegative;
+
+// Explicitly make a shorter size_t alias for convenience.
+using SizeT = StrictNumeric<size_t>;
} // namespace base
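The rewritten header comment above describes the three cast styles plus StrictNumeric. A short sketch of how they behave, not part of the diff, using only names declared in this header:

    #include "base/numerics/safe_conversions.h"

    void CastSketch(int64_t big, double d, size_t n) {  // hypothetical function
      // CHECKs (crashes) if |big| does not fit in int32_t.
      int32_t a = base::checked_cast<int32_t>(big);

      // Clamps to the int32_t bounds instead of overflowing; NaN becomes 0.
      int32_t b = base::saturated_cast<int32_t>(d);

      // Compiles only because every int32_t fits in int64_t; no runtime check.
      int64_t c = base::strict_cast<int64_t>(a);

      // StrictNumeric<size_t> (base::SizeT) converts implicitly only to types
      // that can hold any size_t; |int x = base::MakeStrictNum(n);| would not
      // compile.
      base::SizeT s = n;
      (void)b; (void)c; (void)s;
    }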
diff --git a/chromium/base/numerics/safe_conversions_impl.h b/chromium/base/numerics/safe_conversions_impl.h
index bdce1675700..24357fd6a57 100644
--- a/chromium/base/numerics/safe_conversions_impl.h
+++ b/chromium/base/numerics/safe_conversions_impl.h
@@ -5,10 +5,8 @@
#ifndef BASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_
#define BASE_NUMERICS_SAFE_CONVERSIONS_IMPL_H_
-#include <limits.h>
#include <stdint.h>
-#include <climits>
#include <limits>
#include <type_traits>
@@ -16,18 +14,68 @@ namespace base {
namespace internal {
// The std library doesn't provide a binary max_exponent for integers, however
-// we can compute one by adding one to the number of non-sign bits. This allows
-// for accurate range comparisons between floating point and integer types.
+// we can compute an analog using std::numeric_limits<>::digits.
template <typename NumericType>
struct MaxExponent {
- static_assert(std::is_arithmetic<NumericType>::value,
- "Argument must be numeric.");
- static const int value = std::numeric_limits<NumericType>::is_iec559
+ static const int value = std::is_floating_point<NumericType>::value
? std::numeric_limits<NumericType>::max_exponent
- : (sizeof(NumericType) * CHAR_BIT + 1 -
- std::numeric_limits<NumericType>::is_signed);
+ : std::numeric_limits<NumericType>::digits + 1;
};
+// The number of bits (including the sign) in an integer. Eliminates sizeof
+// hacks.
+template <typename NumericType>
+struct IntegerBitsPlusSign {
+ static const int value = std::numeric_limits<NumericType>::digits +
+ std::is_signed<NumericType>::value;
+};
+
+// Helper templates for integer manipulations.
+
+template <typename Integer>
+struct PositionOfSignBit {
+ static const size_t value = IntegerBitsPlusSign<Integer>::value - 1;
+};
+
+// Determines if a numeric value is negative without throwing compiler
+// warnings on: unsigned(value) < 0.
+template <typename T,
+ typename std::enable_if<std::is_signed<T>::value>::type* = nullptr>
+constexpr bool IsValueNegative(T value) {
+ static_assert(std::is_arithmetic<T>::value, "Argument must be numeric.");
+ return value < 0;
+}
+
+template <typename T,
+ typename std::enable_if<!std::is_signed<T>::value>::type* = nullptr>
+constexpr bool IsValueNegative(T) {
+ static_assert(std::is_arithmetic<T>::value, "Argument must be numeric.");
+ return false;
+}
+
+// This performs a fast negation, returning a signed value. It works on unsigned
+// arguments, but probably doesn't do what you want for any unsigned value
+// larger than max / 2 + 1 (i.e. signed min cast to unsigned).
+template <typename T>
+constexpr typename std::make_signed<T>::type ConditionalNegate(
+ T x,
+ bool is_negative) {
+ static_assert(std::is_integral<T>::value, "Type must be integral");
+ using SignedT = typename std::make_signed<T>::type;
+ using UnsignedT = typename std::make_unsigned<T>::type;
+ return static_cast<SignedT>(
+ (static_cast<UnsignedT>(x) ^ -SignedT(is_negative)) + is_negative);
+}
+
+// This performs a safe, absolute value via unsigned overflow.
+template <typename T>
+constexpr typename std::make_unsigned<T>::type SafeUnsignedAbs(T value) {
+ static_assert(std::is_integral<T>::value, "Type must be integral");
+ using UnsignedT = typename std::make_unsigned<T>::type;
+ return IsValueNegative(value) ? 0 - static_cast<UnsignedT>(value)
+ : static_cast<UnsignedT>(value);
+}
+
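ConditionalNegate() and SafeUnsignedAbs() above rely on well-defined unsigned wraparound rather than signed negation. A couple of worked values, not part of the diff, assuming 32-bit int, two's complement, and evaluation inside namespace base::internal where the helpers live:

    // No signed-overflow UB, unlike std::abs(INT_MIN): the negation happens in
    // unsigned arithmetic, so the result wraps to 2147483648u.
    static_assert(SafeUnsignedAbs(-2147483647 - 1) == 2147483648u, "");
    // With is_negative == true the value is negated; with false it is returned
    // unchanged (as a signed type).
    static_assert(ConditionalNegate(5u, true) == -5, "");
    static_assert(ConditionalNegate(5u, false) == 5, "");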
enum IntegerRepresentation {
INTEGER_REPRESENTATION_UNSIGNED,
INTEGER_REPRESENTATION_SIGNED
@@ -35,7 +83,7 @@ enum IntegerRepresentation {
// A range for a given numeric Src type is contained for a given numeric Dst
// type if both numeric_limits<Src>::max() <= numeric_limits<Dst>::max() and
-// numeric_limits<Src>::min() >= numeric_limits<Dst>::min() are true.
+// numeric_limits<Src>::lowest() >= numeric_limits<Dst>::lowest() are true.
// We implement this as template specializations rather than simple static
// comparisons to ensure type correctness in our comparisons.
enum NumericRangeRepresentation {
@@ -46,16 +94,14 @@ enum NumericRangeRepresentation {
// Helper templates to statically determine if our destination type can contain
// maximum and minimum values represented by the source type.
-template <
- typename Dst,
- typename Src,
- IntegerRepresentation DstSign = std::numeric_limits<Dst>::is_signed
- ? INTEGER_REPRESENTATION_SIGNED
- : INTEGER_REPRESENTATION_UNSIGNED,
- IntegerRepresentation SrcSign =
- std::numeric_limits<Src>::is_signed
- ? INTEGER_REPRESENTATION_SIGNED
- : INTEGER_REPRESENTATION_UNSIGNED >
+template <typename Dst,
+ typename Src,
+ IntegerRepresentation DstSign = std::is_signed<Dst>::value
+ ? INTEGER_REPRESENTATION_SIGNED
+ : INTEGER_REPRESENTATION_UNSIGNED,
+ IntegerRepresentation SrcSign = std::is_signed<Src>::value
+ ? INTEGER_REPRESENTATION_SIGNED
+ : INTEGER_REPRESENTATION_UNSIGNED>
struct StaticDstRangeRelationToSrcRange;
// Same sign: Dst is guaranteed to contain Src only if its range is equal or
@@ -90,29 +136,33 @@ struct StaticDstRangeRelationToSrcRange<Dst,
static const NumericRangeRepresentation value = NUMERIC_RANGE_NOT_CONTAINED;
};
-enum RangeConstraint {
- RANGE_VALID = 0x0, // Value can be represented by the destination type.
- RANGE_UNDERFLOW = 0x1, // Value would overflow.
- RANGE_OVERFLOW = 0x2, // Value would underflow.
- RANGE_INVALID = RANGE_UNDERFLOW | RANGE_OVERFLOW // Invalid (i.e. NaN).
-};
-
-// Helper function for coercing an int back to a RangeContraint.
-constexpr RangeConstraint GetRangeConstraint(int integer_range_constraint) {
- // TODO(jschuh): Once we get full C++14 support we want this
- // assert(integer_range_constraint >= RANGE_VALID &&
- // integer_range_constraint <= RANGE_INVALID)
- return static_cast<RangeConstraint>(integer_range_constraint);
-}
+// This class wraps the range constraints as separate booleans so the compiler
+// can identify constants and eliminate unused code paths.
+class RangeCheck {
+ public:
+ constexpr RangeCheck(bool is_in_lower_bound, bool is_in_upper_bound)
+ : is_underflow_(!is_in_lower_bound), is_overflow_(!is_in_upper_bound) {}
+ constexpr RangeCheck() : is_underflow_(0), is_overflow_(0) {}
+ constexpr bool IsValid() const { return !is_overflow_ && !is_underflow_; }
+ constexpr bool IsInvalid() const { return is_overflow_ && is_underflow_; }
+ constexpr bool IsOverflow() const { return is_overflow_ && !is_underflow_; }
+ constexpr bool IsUnderflow() const { return !is_overflow_ && is_underflow_; }
+ constexpr bool IsOverflowFlagSet() const { return is_overflow_; }
+ constexpr bool IsUnderflowFlagSet() const { return is_underflow_; }
+ constexpr bool operator==(const RangeCheck rhs) const {
+ return is_underflow_ == rhs.is_underflow_ &&
+ is_overflow_ == rhs.is_overflow_;
+ }
+ constexpr bool operator!=(const RangeCheck rhs) const {
+ return !(*this == rhs);
+ }
-// This function creates a RangeConstraint from an upper and lower bound
-// check by taking advantage of the fact that only NaN can be out of range in
-// both directions at once.
-constexpr inline RangeConstraint GetRangeConstraint(bool is_in_upper_bound,
- bool is_in_lower_bound) {
- return GetRangeConstraint((is_in_upper_bound ? 0 : RANGE_OVERFLOW) |
- (is_in_lower_bound ? 0 : RANGE_UNDERFLOW));
-}
+ private:
+ // Do not change the order of these member variables. The integral conversion
+ // optimization depends on this exact order.
+ const bool is_underflow_;
+ const bool is_overflow_;
+};
// The following helper template addresses a corner case in range checks for
// conversion from a floating-point type to an integral type of smaller range
@@ -135,131 +185,547 @@ constexpr inline RangeConstraint GetRangeConstraint(bool is_in_upper_bound,
// To fix this bug we manually truncate the maximum value when the destination
// type is an integral of larger precision than the source floating-point type,
// such that the resulting maximum is represented exactly as a floating point.
-template <typename Dst, typename Src>
+template <typename Dst, typename Src, template <typename> class Bounds>
struct NarrowingRange {
- typedef typename std::numeric_limits<Src> SrcLimits;
- typedef typename std::numeric_limits<Dst> DstLimits;
- // The following logic avoids warnings where the max function is
- // instantiated with invalid values for a bit shift (even though
- // such a function can never be called).
- static const int shift = (MaxExponent<Src>::value > MaxExponent<Dst>::value &&
- SrcLimits::digits < DstLimits::digits &&
- SrcLimits::is_iec559 &&
- DstLimits::is_integer)
- ? (DstLimits::digits - SrcLimits::digits)
- : 0;
-
- static constexpr Dst max() {
- // We use UINTMAX_C below to avoid compiler warnings about shifting floating
- // points. Since it's a compile time calculation, it shouldn't have any
- // performance impact.
- return DstLimits::max() - static_cast<Dst>((UINTMAX_C(1) << shift) - 1);
+ using SrcLimits = std::numeric_limits<Src>;
+ using DstLimits = typename std::numeric_limits<Dst>;
+
+ // Computes the mask required to make an accurate comparison between types.
+ static const int kShift =
+ (MaxExponent<Src>::value > MaxExponent<Dst>::value &&
+ SrcLimits::digits < DstLimits::digits)
+ ? (DstLimits::digits - SrcLimits::digits)
+ : 0;
+ template <
+ typename T,
+ typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+
+ // Masks out the integer bits that are beyond the precision of the
+ // intermediate type used for comparison.
+ static constexpr T Adjust(T value) {
+ static_assert(std::is_same<T, Dst>::value, "");
+ static_assert(kShift < DstLimits::digits, "");
+ return static_cast<T>(
+ ConditionalNegate(SafeUnsignedAbs(value) & ~((T(1) << kShift) - T(1)),
+ IsValueNegative(value)));
}
- static constexpr Dst min() {
- return std::numeric_limits<Dst>::is_iec559 ? -DstLimits::max()
- : DstLimits::min();
+ template <typename T,
+ typename std::enable_if<std::is_floating_point<T>::value>::type* =
+ nullptr>
+ static constexpr T Adjust(T value) {
+ static_assert(std::is_same<T, Dst>::value, "");
+ static_assert(kShift == 0, "");
+ return value;
}
+
+ static constexpr Dst max() { return Adjust(Bounds<Dst>::max()); }
+ static constexpr Dst lowest() { return Adjust(Bounds<Dst>::lowest()); }
};
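// A minimal illustrative sketch, assuming it sits in base::internal next to
// the helpers above: for a double source and an int64_t destination,
// 63 - 53 = 10 low-order bits are masked off so that max() round-trips
// through double exactly.
static_assert(
    NarrowingRange<int64_t, double, std::numeric_limits>::kShift == 10, "");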
-template <
- typename Dst,
- typename Src,
- IntegerRepresentation DstSign = std::numeric_limits<Dst>::is_signed
- ? INTEGER_REPRESENTATION_SIGNED
- : INTEGER_REPRESENTATION_UNSIGNED,
- IntegerRepresentation SrcSign = std::numeric_limits<Src>::is_signed
- ? INTEGER_REPRESENTATION_SIGNED
- : INTEGER_REPRESENTATION_UNSIGNED,
- NumericRangeRepresentation DstRange =
- StaticDstRangeRelationToSrcRange<Dst, Src>::value >
+template <typename Dst,
+ typename Src,
+ template <typename> class Bounds,
+ IntegerRepresentation DstSign = std::is_signed<Dst>::value
+ ? INTEGER_REPRESENTATION_SIGNED
+ : INTEGER_REPRESENTATION_UNSIGNED,
+ IntegerRepresentation SrcSign = std::is_signed<Src>::value
+ ? INTEGER_REPRESENTATION_SIGNED
+ : INTEGER_REPRESENTATION_UNSIGNED,
+ NumericRangeRepresentation DstRange =
+ StaticDstRangeRelationToSrcRange<Dst, Src>::value>
struct DstRangeRelationToSrcRangeImpl;
// The following templates are for ranges that must be verified at runtime. We
// split them into checks based on signedness to avoid confusing casts and
// compiler warnings on signed and unsigned comparisons.
-// Dst range is statically determined to contain Src: Nothing to check.
+// Dst statically contains the Src range for standard limits, so a runtime
+// check is only needed when custom Bounds narrow the destination range.
template <typename Dst,
typename Src,
+ template <typename> class Bounds,
IntegerRepresentation DstSign,
IntegerRepresentation SrcSign>
struct DstRangeRelationToSrcRangeImpl<Dst,
Src,
+ Bounds,
DstSign,
SrcSign,
NUMERIC_RANGE_CONTAINED> {
- static constexpr RangeConstraint Check(Src value) { return RANGE_VALID; }
+ static constexpr RangeCheck Check(Src value) {
+ using SrcLimits = std::numeric_limits<Src>;
+ using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+ return RangeCheck(
+ static_cast<Dst>(SrcLimits::lowest()) >= DstLimits::lowest() ||
+ static_cast<Dst>(value) >= DstLimits::lowest(),
+ static_cast<Dst>(SrcLimits::max()) <= DstLimits::max() ||
+ static_cast<Dst>(value) <= DstLimits::max());
+ }
};
// Signed to signed narrowing: Both the upper and lower boundaries may be
-// exceeded.
-template <typename Dst, typename Src>
+// exceeded for standard limits.
+template <typename Dst, typename Src, template <typename> class Bounds>
struct DstRangeRelationToSrcRangeImpl<Dst,
Src,
+ Bounds,
INTEGER_REPRESENTATION_SIGNED,
INTEGER_REPRESENTATION_SIGNED,
NUMERIC_RANGE_NOT_CONTAINED> {
- static constexpr RangeConstraint Check(Src value) {
- return GetRangeConstraint((value <= NarrowingRange<Dst, Src>::max()),
- (value >= NarrowingRange<Dst, Src>::min()));
+ static constexpr RangeCheck Check(Src value) {
+ using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+ return RangeCheck(value >= DstLimits::lowest(), value <= DstLimits::max());
}
};
-// Unsigned to unsigned narrowing: Only the upper boundary can be exceeded.
-template <typename Dst, typename Src>
+// Unsigned to unsigned narrowing: Only the upper bound can be exceeded for
+// standard limits.
+template <typename Dst, typename Src, template <typename> class Bounds>
struct DstRangeRelationToSrcRangeImpl<Dst,
Src,
+ Bounds,
INTEGER_REPRESENTATION_UNSIGNED,
INTEGER_REPRESENTATION_UNSIGNED,
NUMERIC_RANGE_NOT_CONTAINED> {
- static constexpr RangeConstraint Check(Src value) {
- return GetRangeConstraint(value <= NarrowingRange<Dst, Src>::max(), true);
+ static constexpr RangeCheck Check(Src value) {
+ using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+ return RangeCheck(
+ DstLimits::lowest() == Dst(0) || value >= DstLimits::lowest(),
+ value <= DstLimits::max());
}
};
-// Unsigned to signed: The upper boundary may be exceeded.
-template <typename Dst, typename Src>
+// Unsigned to signed: Only the upper bound can be exceeded for standard limits.
+template <typename Dst, typename Src, template <typename> class Bounds>
struct DstRangeRelationToSrcRangeImpl<Dst,
Src,
+ Bounds,
INTEGER_REPRESENTATION_SIGNED,
INTEGER_REPRESENTATION_UNSIGNED,
NUMERIC_RANGE_NOT_CONTAINED> {
- static constexpr RangeConstraint Check(Src value) {
- return sizeof(Dst) > sizeof(Src)
- ? RANGE_VALID
- : GetRangeConstraint(
- value <= static_cast<Src>(NarrowingRange<Dst, Src>::max()),
- true);
+ static constexpr RangeCheck Check(Src value) {
+ using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+ using Promotion = decltype(Src() + Dst());
+ return RangeCheck(DstLimits::lowest() <= Dst(0) ||
+ static_cast<Promotion>(value) >=
+ static_cast<Promotion>(DstLimits::lowest()),
+ static_cast<Promotion>(value) <=
+ static_cast<Promotion>(DstLimits::max()));
}
};
// Signed to unsigned: The upper boundary may be exceeded for a narrower Dst,
-// and any negative value exceeds the lower boundary.
-template <typename Dst, typename Src>
+// and any negative value exceeds the lower boundary for standard limits.
+template <typename Dst, typename Src, template <typename> class Bounds>
struct DstRangeRelationToSrcRangeImpl<Dst,
Src,
+ Bounds,
INTEGER_REPRESENTATION_UNSIGNED,
INTEGER_REPRESENTATION_SIGNED,
NUMERIC_RANGE_NOT_CONTAINED> {
- static constexpr RangeConstraint Check(Src value) {
- return (MaxExponent<Dst>::value >= MaxExponent<Src>::value)
- ? GetRangeConstraint(true, value >= static_cast<Src>(0))
- : GetRangeConstraint(
- value <= static_cast<Src>(NarrowingRange<Dst, Src>::max()),
- value >= static_cast<Src>(0));
+ static constexpr RangeCheck Check(Src value) {
+ using SrcLimits = std::numeric_limits<Src>;
+ using DstLimits = NarrowingRange<Dst, Src, Bounds>;
+ using Promotion = decltype(Src() + Dst());
+ return RangeCheck(
+ value >= Src(0) && (DstLimits::lowest() == 0 ||
+ static_cast<Dst>(value) >= DstLimits::lowest()),
+ static_cast<Promotion>(SrcLimits::max()) <=
+ static_cast<Promotion>(DstLimits::max()) ||
+ static_cast<Promotion>(value) <=
+ static_cast<Promotion>(DstLimits::max()));
}
};
-template <typename Dst, typename Src>
-constexpr RangeConstraint DstRangeRelationToSrcRange(Src value) {
- static_assert(std::numeric_limits<Src>::is_specialized,
- "Argument must be numeric.");
- static_assert(std::numeric_limits<Dst>::is_specialized,
- "Result must be numeric.");
- return DstRangeRelationToSrcRangeImpl<Dst, Src>::Check(value);
+template <typename Dst,
+ template <typename> class Bounds = std::numeric_limits,
+ typename Src>
+constexpr RangeCheck DstRangeRelationToSrcRange(Src value) {
+ static_assert(std::is_arithmetic<Src>::value, "Argument must be numeric.");
+ static_assert(std::is_arithmetic<Dst>::value, "Result must be numeric.");
+ static_assert(Bounds<Dst>::lowest() < Bounds<Dst>::max(), "");
+ return DstRangeRelationToSrcRangeImpl<Dst, Src, Bounds>::Check(value);
}
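// Minimal illustrative sketches, assuming base::internal scope; the checks
// fold to constants for constant arguments.
static_assert(DstRangeRelationToSrcRange<int8_t>(300).IsOverflow(), "");
static_assert(DstRangeRelationToSrcRange<uint8_t>(-1).IsUnderflow(), "");
static_assert(DstRangeRelationToSrcRange<int32_t>(1.0).IsValid(), "");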
+// Integer promotion templates used by the portable checked integer arithmetic.
+template <size_t Size, bool IsSigned>
+struct IntegerForDigitsAndSign;
+
+#define INTEGER_FOR_DIGITS_AND_SIGN(I) \
+ template <> \
+ struct IntegerForDigitsAndSign<IntegerBitsPlusSign<I>::value, \
+ std::is_signed<I>::value> { \
+ using type = I; \
+ }
+
+INTEGER_FOR_DIGITS_AND_SIGN(int8_t);
+INTEGER_FOR_DIGITS_AND_SIGN(uint8_t);
+INTEGER_FOR_DIGITS_AND_SIGN(int16_t);
+INTEGER_FOR_DIGITS_AND_SIGN(uint16_t);
+INTEGER_FOR_DIGITS_AND_SIGN(int32_t);
+INTEGER_FOR_DIGITS_AND_SIGN(uint32_t);
+INTEGER_FOR_DIGITS_AND_SIGN(int64_t);
+INTEGER_FOR_DIGITS_AND_SIGN(uint64_t);
+#undef INTEGER_FOR_DIGITS_AND_SIGN
+
+// WARNING: We have no IntegerForDigitsAndSign<128, *>. If we ever add one to
+// support 128-bit math, then the promotion templates below will need to be
+// updated (or more likely replaced with a decltype expression).
+static_assert(IntegerBitsPlusSign<intmax_t>::value == 64,
+ "Max integer size not supported for this toolchain.");
+
+template <typename Integer, bool IsSigned = std::is_signed<Integer>::value>
+struct TwiceWiderInteger {
+ using type =
+ typename IntegerForDigitsAndSign<IntegerBitsPlusSign<Integer>::value * 2,
+ IsSigned>::type;
+};
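// Illustrative sketch (assumes base::internal scope): the digits-plus-sign key
// recovers the fixed-width types, and doubling it widens them.
static_assert(
    std::is_same<IntegerForDigitsAndSign<32, true>::type, int32_t>::value, "");
static_assert(
    std::is_same<TwiceWiderInteger<uint16_t>::type, uint32_t>::value, "");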
+
+enum ArithmeticPromotionCategory {
+ LEFT_PROMOTION, // Use the type of the left-hand argument.
+ RIGHT_PROMOTION // Use the type of the right-hand argument.
+};
+
+// Determines the type that can represent the largest positive value.
+template <typename Lhs,
+ typename Rhs,
+ ArithmeticPromotionCategory Promotion =
+ (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value)
+ ? LEFT_PROMOTION
+ : RIGHT_PROMOTION>
+struct MaxExponentPromotion;
+
+template <typename Lhs, typename Rhs>
+struct MaxExponentPromotion<Lhs, Rhs, LEFT_PROMOTION> {
+ using type = Lhs;
+};
+
+template <typename Lhs, typename Rhs>
+struct MaxExponentPromotion<Lhs, Rhs, RIGHT_PROMOTION> {
+ using type = Rhs;
+};
+
+// Determines the type that can represent the lowest arithmetic value.
+template <typename Lhs,
+ typename Rhs,
+ ArithmeticPromotionCategory Promotion =
+ std::is_signed<Lhs>::value
+ ? (std::is_signed<Rhs>::value
+ ? (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value
+ ? LEFT_PROMOTION
+ : RIGHT_PROMOTION)
+ : LEFT_PROMOTION)
+ : (std::is_signed<Rhs>::value
+ ? RIGHT_PROMOTION
+ : (MaxExponent<Lhs>::value < MaxExponent<Rhs>::value
+ ? LEFT_PROMOTION
+ : RIGHT_PROMOTION))>
+struct LowestValuePromotion;
+
+template <typename Lhs, typename Rhs>
+struct LowestValuePromotion<Lhs, Rhs, LEFT_PROMOTION> {
+ using type = Lhs;
+};
+
+template <typename Lhs, typename Rhs>
+struct LowestValuePromotion<Lhs, Rhs, RIGHT_PROMOTION> {
+ using type = Rhs;
+};
+
+// Determines the type that is best able to represent an arithmetic result.
+template <
+ typename Lhs,
+ typename Rhs = Lhs,
+ bool is_intmax_type =
+ std::is_integral<typename MaxExponentPromotion<Lhs, Rhs>::type>::value&&
+ IntegerBitsPlusSign<typename MaxExponentPromotion<Lhs, Rhs>::type>::
+ value == IntegerBitsPlusSign<intmax_t>::value,
+ bool is_max_exponent =
+ StaticDstRangeRelationToSrcRange<
+ typename MaxExponentPromotion<Lhs, Rhs>::type,
+ Lhs>::value ==
+ NUMERIC_RANGE_CONTAINED&& StaticDstRangeRelationToSrcRange<
+ typename MaxExponentPromotion<Lhs, Rhs>::type,
+ Rhs>::value == NUMERIC_RANGE_CONTAINED>
+struct BigEnoughPromotion;
+
+// The side with the max exponent is big enough.
+template <typename Lhs, typename Rhs, bool is_intmax_type>
+struct BigEnoughPromotion<Lhs, Rhs, is_intmax_type, true> {
+ using type = typename MaxExponentPromotion<Lhs, Rhs>::type;
+ static const bool is_contained = true;
+};
+
+// We can use a twice wider type to fit.
+template <typename Lhs, typename Rhs>
+struct BigEnoughPromotion<Lhs, Rhs, false, false> {
+ using type =
+ typename TwiceWiderInteger<typename MaxExponentPromotion<Lhs, Rhs>::type,
+ std::is_signed<Lhs>::value ||
+ std::is_signed<Rhs>::value>::type;
+ static const bool is_contained = true;
+};
+
+// No type is large enough.
+template <typename Lhs, typename Rhs>
+struct BigEnoughPromotion<Lhs, Rhs, true, false> {
+ using type = typename MaxExponentPromotion<Lhs, Rhs>::type;
+ static const bool is_contained = false;
+};
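// Illustrative sketch (assumes base::internal scope): a same-width
// signed/unsigned mix promotes to the next wider signed type, while a 64-bit
// mix cannot be contained by any supported type.
static_assert(
    std::is_same<BigEnoughPromotion<int32_t, uint32_t>::type, int64_t>::value,
    "");
static_assert(!BigEnoughPromotion<int64_t, uint64_t>::is_contained, "");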
+
+// We can statically check if operations on the provided types can wrap, so we
+// can skip the checked operations if they're not needed. So, for an integer we
+// care if the destination type preserves the sign and is twice the width of
+// the source.
+template <typename T, typename Lhs, typename Rhs = Lhs>
+struct IsIntegerArithmeticSafe {
+ static const bool value =
+ !std::is_floating_point<T>::value &&
+ !std::is_floating_point<Lhs>::value &&
+ !std::is_floating_point<Rhs>::value &&
+ std::is_signed<T>::value >= std::is_signed<Lhs>::value &&
+ IntegerBitsPlusSign<T>::value >= (2 * IntegerBitsPlusSign<Lhs>::value) &&
+ std::is_signed<T>::value >= std::is_signed<Rhs>::value &&
+ IntegerBitsPlusSign<T>::value >= (2 * IntegerBitsPlusSign<Rhs>::value);
+};
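// Illustrative sketch (assumes base::internal scope): 64-bit signed math can
// hold any sum or product of 32-bit operands, so those checks can be skipped.
static_assert(IsIntegerArithmeticSafe<int64_t, int32_t, uint32_t>::value, "");
static_assert(!IsIntegerArithmeticSafe<int64_t, int64_t, int64_t>::value, "");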
+
+// Promotes to a type that can represent any possible result of a binary
+// arithmetic operation with the source types.
+template <typename Lhs,
+ typename Rhs,
+ bool is_promotion_possible = IsIntegerArithmeticSafe<
+ typename std::conditional<std::is_signed<Lhs>::value ||
+ std::is_signed<Rhs>::value,
+ intmax_t,
+ uintmax_t>::type,
+ typename MaxExponentPromotion<Lhs, Rhs>::type>::value>
+struct FastIntegerArithmeticPromotion;
+
+template <typename Lhs, typename Rhs>
+struct FastIntegerArithmeticPromotion<Lhs, Rhs, true> {
+ using type =
+ typename TwiceWiderInteger<typename MaxExponentPromotion<Lhs, Rhs>::type,
+ std::is_signed<Lhs>::value ||
+ std::is_signed<Rhs>::value>::type;
+ static_assert(IsIntegerArithmeticSafe<type, Lhs, Rhs>::value, "");
+ static const bool is_contained = true;
+};
+
+template <typename Lhs, typename Rhs>
+struct FastIntegerArithmeticPromotion<Lhs, Rhs, false> {
+ using type = typename BigEnoughPromotion<Lhs, Rhs>::type;
+ static const bool is_contained = false;
+};
+
+// This works around libstdc++ 4.6 lacking std::underlying_type in
+// <type_traits>.
+#if defined(__GLIBCXX__)
+#define PRIV_GLIBCXX_4_7_0 20120322
+#define PRIV_GLIBCXX_4_5_4 20120702
+#define PRIV_GLIBCXX_4_6_4 20121127
+#if (__GLIBCXX__ < PRIV_GLIBCXX_4_7_0 || __GLIBCXX__ == PRIV_GLIBCXX_4_5_4 || \
+ __GLIBCXX__ == PRIV_GLIBCXX_4_6_4)
+#define PRIV_USE_FALLBACKS_FOR_OLD_GLIBCXX
+#undef PRIV_GLIBCXX_4_7_0
+#undef PRIV_GLIBCXX_4_5_4
+#undef PRIV_GLIBCXX_4_6_4
+#endif
+#endif
+
+// Extracts the underlying type from an enum.
+template <typename T, bool is_enum = std::is_enum<T>::value>
+struct ArithmeticOrUnderlyingEnum;
+
+template <typename T>
+struct ArithmeticOrUnderlyingEnum<T, true> {
+#if defined(PRIV_USE_FALLBACKS_FOR_OLD_GLIBCXX)
+ using type = __underlying_type(T);
+#else
+ using type = typename std::underlying_type<T>::type;
+#endif
+ static const bool value = std::is_arithmetic<type>::value;
+};
+
+#if defined(PRIV_USE_FALLBACKS_FOR_OLD_GLIBCXX)
+#undef PRIV_USE_FALLBACKS_FOR_OLD_GLIBCXX
+#endif
+
+template <typename T>
+struct ArithmeticOrUnderlyingEnum<T, false> {
+ using type = T;
+ static const bool value = std::is_arithmetic<type>::value;
+};
+
+// The following are helper templates used in the CheckedNumeric class.
+template <typename T>
+class CheckedNumeric;
+
+template <typename T>
+class StrictNumeric;
+
+// Used to treat CheckedNumeric and arithmetic underlying types the same.
+template <typename T>
+struct UnderlyingType {
+ using type = typename ArithmeticOrUnderlyingEnum<T>::type;
+ static const bool is_numeric = std::is_arithmetic<type>::value;
+ static const bool is_checked = false;
+ static const bool is_strict = false;
+};
+
+template <typename T>
+struct UnderlyingType<CheckedNumeric<T>> {
+ using type = T;
+ static const bool is_numeric = true;
+ static const bool is_checked = true;
+ static const bool is_strict = false;
+};
+
+template <typename T>
+struct UnderlyingType<StrictNumeric<T>> {
+ using type = T;
+ static const bool is_numeric = true;
+ static const bool is_checked = false;
+ static const bool is_strict = true;
+};
+
+template <typename L, typename R>
+struct IsCheckedOp {
+ static const bool value =
+ UnderlyingType<L>::is_numeric && UnderlyingType<R>::is_numeric &&
+ (UnderlyingType<L>::is_checked || UnderlyingType<R>::is_checked);
+};
+
+template <typename L, typename R>
+struct IsStrictOp {
+ static const bool value =
+ UnderlyingType<L>::is_numeric && UnderlyingType<R>::is_numeric &&
+ (UnderlyingType<L>::is_strict || UnderlyingType<R>::is_strict);
+};
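// Illustrative sketch (assumes base::internal scope): UnderlyingType strips
// the wrapper, and IsCheckedOp fires when either operand is checked.
static_assert(
    std::is_same<UnderlyingType<CheckedNumeric<int>>::type, int>::value, "");
static_assert(IsCheckedOp<CheckedNumeric<int>, uint32_t>::value, "");
static_assert(!IsCheckedOp<float, uint32_t>::value, "");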
+
+template <typename L, typename R>
+constexpr bool IsLessImpl(const L lhs,
+ const R rhs,
+ const RangeCheck l_range,
+ const RangeCheck r_range) {
+ return l_range.IsUnderflow() || r_range.IsOverflow() ||
+ (l_range == r_range &&
+ static_cast<decltype(lhs + rhs)>(lhs) <
+ static_cast<decltype(lhs + rhs)>(rhs));
+}
+
+template <typename L, typename R>
+struct IsLess {
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ static constexpr bool Test(const L lhs, const R rhs) {
+ return IsLessImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
+ DstRangeRelationToSrcRange<L>(rhs));
+ }
+};
+
+template <typename L, typename R>
+constexpr bool IsLessOrEqualImpl(const L lhs,
+ const R rhs,
+ const RangeCheck l_range,
+ const RangeCheck r_range) {
+ return l_range.IsUnderflow() || r_range.IsOverflow() ||
+ (l_range == r_range &&
+ static_cast<decltype(lhs + rhs)>(lhs) <=
+ static_cast<decltype(lhs + rhs)>(rhs));
+}
+
+template <typename L, typename R>
+struct IsLessOrEqual {
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ static constexpr bool Test(const L lhs, const R rhs) {
+ return IsLessOrEqualImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
+ DstRangeRelationToSrcRange<L>(rhs));
+ }
+};
+
+template <typename L, typename R>
+constexpr bool IsGreaterImpl(const L lhs,
+ const R rhs,
+ const RangeCheck l_range,
+ const RangeCheck r_range) {
+ return l_range.IsOverflow() || r_range.IsUnderflow() ||
+ (l_range == r_range &&
+ static_cast<decltype(lhs + rhs)>(lhs) >
+ static_cast<decltype(lhs + rhs)>(rhs));
+}
+
+template <typename L, typename R>
+struct IsGreater {
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ static constexpr bool Test(const L lhs, const R rhs) {
+ return IsGreaterImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
+ DstRangeRelationToSrcRange<L>(rhs));
+ }
+};
+
+template <typename L, typename R>
+constexpr bool IsGreaterOrEqualImpl(const L lhs,
+ const R rhs,
+ const RangeCheck l_range,
+ const RangeCheck r_range) {
+ return l_range.IsOverflow() || r_range.IsUnderflow() ||
+ (l_range == r_range &&
+ static_cast<decltype(lhs + rhs)>(lhs) >=
+ static_cast<decltype(lhs + rhs)>(rhs));
+}
+
+template <typename L, typename R>
+struct IsGreaterOrEqual {
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ static constexpr bool Test(const L lhs, const R rhs) {
+ return IsGreaterOrEqualImpl(lhs, rhs, DstRangeRelationToSrcRange<R>(lhs),
+ DstRangeRelationToSrcRange<L>(rhs));
+ }
+};
+
+template <typename L, typename R>
+struct IsEqual {
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ static constexpr bool Test(const L lhs, const R rhs) {
+ return DstRangeRelationToSrcRange<R>(lhs) ==
+ DstRangeRelationToSrcRange<L>(rhs) &&
+ static_cast<decltype(lhs + rhs)>(lhs) ==
+ static_cast<decltype(lhs + rhs)>(rhs);
+ }
+};
+
+template <typename L, typename R>
+struct IsNotEqual {
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ static constexpr bool Test(const L lhs, const R rhs) {
+ return DstRangeRelationToSrcRange<R>(lhs) !=
+ DstRangeRelationToSrcRange<L>(rhs) ||
+ static_cast<decltype(lhs + rhs)>(lhs) !=
+ static_cast<decltype(lhs + rhs)>(rhs);
+ }
+};
+
+// SafeCompare promotes both operands to a single type big enough for either
+// range when one exists, and otherwise defers to the range-checked Test()
+// helpers above.
+template <template <typename, typename> class C, typename L, typename R>
+constexpr bool SafeCompare(const L lhs, const R rhs) {
+ static_assert(std::is_arithmetic<L>::value && std::is_arithmetic<R>::value,
+ "Types must be numeric.");
+ using Promotion = BigEnoughPromotion<L, R>;
+ using BigType = typename Promotion::type;
+ return Promotion::is_contained
+ // Force to a larger type for speed if both are contained.
+ ? C<BigType, BigType>::Test(
+ static_cast<BigType>(static_cast<L>(lhs)),
+ static_cast<BigType>(static_cast<R>(rhs)))
+ // Let the template functions figure it out for mixed types.
+ : C<L, R>::Test(lhs, rhs);
+};
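// Illustrative sketch (assumes base::internal scope): the promotion makes
// mixed-sign comparisons mathematically correct, unlike a raw (-1 < 1u),
// which is false after the usual arithmetic conversions.
static_assert(SafeCompare<IsLess>(-1, 1u), "");
static_assert(SafeCompare<IsGreaterOrEqual>(1u, -1), "");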
+
} // namespace internal
} // namespace base
diff --git a/chromium/base/numerics/safe_math.h b/chromium/base/numerics/safe_math.h
index 511eb23f787..f5007db39c4 100644
--- a/chromium/base/numerics/safe_math.h
+++ b/chromium/base/numerics/safe_math.h
@@ -10,143 +10,257 @@
#include <limits>
#include <type_traits>
-#include "base/logging.h"
#include "base/numerics/safe_math_impl.h"
namespace base {
-
namespace internal {
-// CheckedNumeric implements all the logic and operators for detecting integer
+// CheckedNumeric<> implements all the logic and operators for detecting integer
// boundary conditions such as overflow, underflow, and invalid conversions.
// The CheckedNumeric type implicitly converts from floating point and integer
// data types, and contains overloads for basic arithmetic operations (i.e.: +,
-// -, *, /, %).
+// -, *, / for all types and %, <<, >>, &, |, ^ for integers). Type promotions
+// are a slightly modified version of the standard C arithmetic rules with the
+// two differences being that there is no default promotion to int and bitwise
+// logical operations always return an unsigned of the wider type.
+//
+// You may also use one of the variadic convenience functions, which accept
+// standard arithmetic or CheckedNumeric types, perform arithmetic operations,
+// and return a CheckedNumeric result. The supported functions are:
+// CheckAdd() - Addition.
+// CheckSub() - Subtraction.
+// CheckMul() - Multiplication.
+// CheckDiv() - Division.
+//  CheckMod() - Modulus (integer only).
+// CheckLsh() - Left integer shift (integer only).
+// CheckRsh() - Right integer shift (integer only).
+// CheckAnd() - Bitwise AND (integer only with unsigned result).
+// CheckOr() - Bitwise OR (integer only with unsigned result).
+// CheckXor() - Bitwise XOR (integer only with unsigned result).
+// CheckMax() - Maximum of supplied arguments.
+// CheckMin() - Minimum of supplied arguments.
+//
+// The unary negation, increment, and decrement operators are supported, along
+// with the following unary arithmetic methods, which return a new
+// CheckedNumeric as a result of the operation:
+// Abs() - Absolute value.
+// UnsignedAbs() - Absolute value as an equal-width unsigned underlying type
+// (valid for only integral types).
+// Max() - Returns whichever is greater of the current instance or argument.
+// The underlying return type is whichever has the greatest magnitude.
+//  Min() - Returns whichever is lower of the current instance or argument.
+//      The underlying return type is whichever can represent the lowest
+//      number in the smallest width (e.g. int8_t over unsigned, int over
+//      int8_t, and float over int).
//
// The following methods convert from CheckedNumeric to standard numeric values:
-// IsValid() - Returns true if the underlying numeric value is valid (i.e. has
-// has not wrapped and is not the result of an invalid conversion).
-// ValueOrDie() - Returns the underlying value. If the state is not valid this
-// call will crash on a CHECK.
-// ValueOrDefault() - Returns the current value, or the supplied default if the
-// state is not valid.
-// ValueFloating() - Returns the underlying floating point value (valid only
-// only for floating point CheckedNumeric types).
+// AssignIfValid() - Assigns the underlying value to the supplied destination
+// pointer if the value is currently valid and within the range
+// supported by the destination type. Returns true on success.
+// ****************************************************************************
+// * WARNING: All of the following functions return a StrictNumeric, which *
+// * is valid for comparison and assignment operations, but will trigger a *
+// * compile failure on attempts to assign to a type of insufficient range. *
+// ****************************************************************************
+//  IsValid() - Returns true if the underlying numeric value is valid (i.e. it
+//      has not wrapped and is not the result of an invalid conversion).
+// ValueOrDie() - Returns the underlying value. If the state is not valid this
+// call will crash on a CHECK.
+// ValueOrDefault() - Returns the current value, or the supplied default if the
+// state is not valid (will not trigger a CHECK).
+//
+// The following wrapper functions can be used to avoid the template
+// disambiguator syntax when converting a destination type.
+// IsValidForType<>() in place of: a.template IsValid<Dst>()
+//  ValueOrDieForType<>() in place of: a.template ValueOrDie<Dst>()
+//  ValueOrDefaultForType<>() in place of: a.template ValueOrDefault<Dst>(default)
+//
+// The following are general utility methods that are useful for converting
+// between arithmetic types and CheckedNumeric types:
+// CheckedNumeric::Cast<Dst>() - Instance method returning a CheckedNumeric
+// derived from casting the current instance to a CheckedNumeric of
+// the supplied destination type.
+// MakeCheckedNum() - Creates a new CheckedNumeric from the underlying type of
+// the supplied arithmetic, CheckedNumeric, or StrictNumeric type.
//
-// Bitwise operations are explicitly not supported, because correct
-// handling of some cases (e.g. sign manipulation) is ambiguous. Comparison
-// operations are explicitly not supported because they could result in a crash
-// on a CHECK condition. You should use patterns like the following for these
-// operations:
-// Bitwise operation:
-// CheckedNumeric<int> checked_int = untrusted_input_value;
-// int x = checked_int.ValueOrDefault(0) | kFlagValues;
-// Comparison:
+// Comparison operations are explicitly not supported because they could result
+// in a crash on an unexpected CHECK condition. You should use patterns like the
+// following for comparisons:
// CheckedNumeric<size_t> checked_size = untrusted_input_value;
//    checked_size += HEADER_LENGTH;
// if (checked_size.IsValid() && checked_size.ValueOrDie() < buffer_size)
// Do stuff...
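//
// A minimal end-to-end sketch (illustrative only; kHeaderSize and the
// untrusted_* names are hypothetical):
//   CheckedNumeric<uint32_t> size =
//       CheckMul(untrusted_bytes_per_row, untrusted_height);
//   size += kHeaderSize;
//   uint32_t total;
//   if (!size.AssignIfValid(&total))
//     return false;  // Some step overflowed; no undefined behavior occurred.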
+
template <typename T>
class CheckedNumeric {
static_assert(std::is_arithmetic<T>::value,
"CheckedNumeric<T>: T must be a numeric type.");
public:
- typedef T type;
+ using type = T;
- CheckedNumeric() {}
+ constexpr CheckedNumeric() {}
// Copy constructor.
template <typename Src>
- CheckedNumeric(const CheckedNumeric<Src>& rhs)
- : state_(rhs.ValueUnsafe(), rhs.IsValid()) {}
+ constexpr CheckedNumeric(const CheckedNumeric<Src>& rhs)
+ : state_(rhs.state_.value(), rhs.IsValid()) {}
template <typename Src>
- CheckedNumeric(Src value, bool is_valid) : state_(value, is_valid) {}
+ friend class CheckedNumeric;
// This is not an explicit constructor because we implicitly upgrade regular
// numerics to CheckedNumerics to make them easier to use.
template <typename Src>
- CheckedNumeric(Src value) // NOLINT(runtime/explicit)
+ constexpr CheckedNumeric(Src value) // NOLINT(runtime/explicit)
: state_(value) {
- static_assert(std::numeric_limits<Src>::is_specialized,
- "Argument must be numeric.");
+ static_assert(std::is_arithmetic<Src>::value, "Argument must be numeric.");
}
// This is not an explicit constructor because we want a seamless conversion
// from StrictNumeric types.
template <typename Src>
- CheckedNumeric(StrictNumeric<Src> value) // NOLINT(runtime/explicit)
- : state_(static_cast<Src>(value)) {
+ constexpr CheckedNumeric(
+ StrictNumeric<Src> value) // NOLINT(runtime/explicit)
+ : state_(static_cast<Src>(value)) {}
+
+ // IsValid() - The public API to test if a CheckedNumeric is currently valid.
+ // A range checked destination type can be supplied using the Dst template
+ // parameter.
+ template <typename Dst = T>
+ constexpr bool IsValid() const {
+ return state_.is_valid() &&
+ IsValueInRangeForNumericType<Dst>(state_.value());
}
- // IsValid() is the public API to test if a CheckedNumeric is currently valid.
- bool IsValid() const { return state_.is_valid(); }
+ // AssignIfValid(Dst) - Assigns the underlying value if it is currently valid
+ // and is within the range supported by the destination type. Returns true if
+ // successful and false otherwise.
+ template <typename Dst>
+ constexpr bool AssignIfValid(Dst* result) const {
+ return IsValid<Dst>() ? ((*result = static_cast<Dst>(state_.value())), true)
+ : false;
+ }
- // ValueOrDie() The primary accessor for the underlying value. If the current
- // state is not valid it will CHECK and crash.
- T ValueOrDie() const {
- CHECK(IsValid());
- return state_.value();
+ // ValueOrDie() - The primary accessor for the underlying value. If the
+ // current state is not valid it will CHECK and crash.
+ // A range checked destination type can be supplied using the Dst template
+ // parameter, which will trigger a CHECK if the value is not in bounds for
+ // the destination.
+ // The CHECK behavior can be overridden by supplying a handler as a
+ // template parameter, for test code, etc. However, the handler cannot access
+ // the underlying value, and it is not available through other means.
+ template <typename Dst = T, class CheckHandler = CheckOnFailure>
+ constexpr StrictNumeric<Dst> ValueOrDie() const {
+ return IsValid<Dst>() ? static_cast<Dst>(state_.value())
+ : CheckHandler::template HandleFailure<Dst>();
}
- // ValueOrDefault(T default_value) A convenience method that returns the
+ // ValueOrDefault(T default_value) - A convenience method that returns the
// current value if the state is valid, and the supplied default_value for
// any other state.
- T ValueOrDefault(T default_value) const {
- return IsValid() ? state_.value() : default_value;
+ // A range checked destination type can be supplied using the Dst template
+ // parameter. WARNING: This function may fail to compile or CHECK at runtime
+ // if the supplied default_value is not within range of the destination type.
+ template <typename Dst = T, typename Src>
+ constexpr StrictNumeric<Dst> ValueOrDefault(const Src default_value) const {
+ return IsValid<Dst>() ? static_cast<Dst>(state_.value())
+ : checked_cast<Dst>(default_value);
}
- // ValueFloating() - Since floating point values include their validity state,
- // we provide an easy method for extracting them directly, without a risk of
- // crashing on a CHECK.
- T ValueFloating() const {
- static_assert(std::numeric_limits<T>::is_iec559, "Argument must be float.");
- return CheckedNumeric<T>::cast(*this).ValueUnsafe();
+ // Returns a checked numeric of the specified type, cast from the current
+ // CheckedNumeric. If the current state is invalid or the destination cannot
+ // represent the result then the returned CheckedNumeric will be invalid.
+ template <typename Dst>
+ constexpr CheckedNumeric<typename UnderlyingType<Dst>::type> Cast() const {
+ return *this;
}
- // ValueUnsafe() - DO NOT USE THIS IN EXTERNAL CODE - It is public right now
- // for tests and to avoid a big matrix of friend operator overloads. But the
- // values it returns are unintuitive and likely to change in the future.
- // Returns: the raw numeric value, regardless of the current state.
- T ValueUnsafe() const { return state_.value(); }
+ // This friend method is available solely for providing more detailed logging
+  // in the tests. Do not implement it in production code, because the
+ // underlying values may change at any time.
+ template <typename U>
+ friend U GetNumericValueForTest(const CheckedNumeric<U>& src);
// Prototypes for the supported arithmetic operator overloads.
template <typename Src>
- CheckedNumeric& operator+=(Src rhs);
+ CheckedNumeric& operator+=(const Src rhs);
+ template <typename Src>
+ CheckedNumeric& operator-=(const Src rhs);
+ template <typename Src>
+ CheckedNumeric& operator*=(const Src rhs);
template <typename Src>
- CheckedNumeric& operator-=(Src rhs);
+ CheckedNumeric& operator/=(const Src rhs);
template <typename Src>
- CheckedNumeric& operator*=(Src rhs);
+ CheckedNumeric& operator%=(const Src rhs);
template <typename Src>
- CheckedNumeric& operator/=(Src rhs);
+ CheckedNumeric& operator<<=(const Src rhs);
template <typename Src>
- CheckedNumeric& operator%=(Src rhs);
+ CheckedNumeric& operator>>=(const Src rhs);
template <typename Src>
- CheckedNumeric& operator<<=(Src rhs);
+ CheckedNumeric& operator&=(const Src rhs);
template <typename Src>
- CheckedNumeric& operator>>=(Src rhs);
-
- CheckedNumeric operator-() const {
- // Negation is always valid for floating point.
- T value = 0;
- bool is_valid = (std::numeric_limits<T>::is_iec559 || IsValid()) &&
- CheckedNeg(state_.value(), &value);
- return CheckedNumeric<T>(value, is_valid);
+ CheckedNumeric& operator|=(const Src rhs);
+ template <typename Src>
+ CheckedNumeric& operator^=(const Src rhs);
+
+ constexpr CheckedNumeric operator-() const {
+ return CheckedNumeric<T>(
+ NegateWrapper(state_.value()),
+ IsValid() &&
+ (!std::is_signed<T>::value || std::is_floating_point<T>::value ||
+ NegateWrapper(state_.value()) !=
+ std::numeric_limits<T>::lowest()));
+ }
+
+ constexpr CheckedNumeric operator~() const {
+ return CheckedNumeric<decltype(InvertWrapper(T()))>(
+ InvertWrapper(state_.value()), IsValid());
+ }
+
+ constexpr CheckedNumeric Abs() const {
+ return CheckedNumeric<T>(
+ AbsWrapper(state_.value()),
+ IsValid() &&
+ (!std::is_signed<T>::value || std::is_floating_point<T>::value ||
+ AbsWrapper(state_.value()) != std::numeric_limits<T>::lowest()));
}
- CheckedNumeric Abs() const {
- // Absolute value is always valid for floating point.
- T value = 0;
- bool is_valid = (std::numeric_limits<T>::is_iec559 || IsValid()) &&
- CheckedAbs(state_.value(), &value);
- return CheckedNumeric<T>(value, is_valid);
+ template <typename U>
+ constexpr CheckedNumeric<typename MathWrapper<CheckedMaxOp, T, U>::type> Max(
+ const U rhs) const {
+ using R = typename UnderlyingType<U>::type;
+ using result_type = typename MathWrapper<CheckedMaxOp, T, U>::type;
+ // TODO(jschuh): This can be converted to the MathOp version and remain
+ // constexpr once we have C++14 support.
+ return CheckedNumeric<result_type>(
+ static_cast<result_type>(
+ IsGreater<T, R>::Test(state_.value(), Wrapper<U>::value(rhs))
+ ? state_.value()
+ : Wrapper<U>::value(rhs)),
+ state_.is_valid() && Wrapper<U>::is_valid(rhs));
+ }
+
+ template <typename U>
+ constexpr CheckedNumeric<typename MathWrapper<CheckedMinOp, T, U>::type> Min(
+ const U rhs) const {
+ using R = typename UnderlyingType<U>::type;
+ using result_type = typename MathWrapper<CheckedMinOp, T, U>::type;
+ // TODO(jschuh): This can be converted to the MathOp version and remain
+ // constexpr once we have C++14 support.
+ return CheckedNumeric<result_type>(
+ static_cast<result_type>(
+ IsLess<T, R>::Test(state_.value(), Wrapper<U>::value(rhs))
+ ? state_.value()
+ : Wrapper<U>::value(rhs)),
+ state_.is_valid() && Wrapper<U>::is_valid(rhs));
}
// This function is available only for integral types. It returns an unsigned
// integer of the same width as the source type, containing the absolute value
// of the source, and properly handling signed min.
- CheckedNumeric<typename UnsignedOrFloatForSize<T>::type> UnsignedAbs() const {
+ constexpr CheckedNumeric<typename UnsignedOrFloatForSize<T>::type>
+ UnsignedAbs() const {
return CheckedNumeric<typename UnsignedOrFloatForSize<T>::type>(
SafeUnsignedAbs(state_.value()), state_.is_valid());
}
@@ -173,90 +287,221 @@ class CheckedNumeric {
return value;
}
- // These static methods behave like a convenience cast operator targeting
- // the desired CheckedNumeric type. As an optimization, a reference is
- // returned when Src is the same type as T.
+ // These perform the actual math operations on the CheckedNumerics.
+ // Binary arithmetic operations.
+ template <template <typename, typename, typename> class M,
+ typename L,
+ typename R>
+ static CheckedNumeric MathOp(const L lhs, const R rhs) {
+ using Math = typename MathWrapper<M, L, R>::math;
+ T result = 0;
+ bool is_valid =
+ Wrapper<L>::is_valid(lhs) && Wrapper<R>::is_valid(rhs) &&
+ Math::Do(Wrapper<L>::value(lhs), Wrapper<R>::value(rhs), &result);
+ return CheckedNumeric<T>(result, is_valid);
+ };
+
+ // Assignment arithmetic operations.
+ template <template <typename, typename, typename> class M, typename R>
+ CheckedNumeric& MathOp(const R rhs) {
+ using Math = typename MathWrapper<M, T, R>::math;
+ T result = 0; // Using T as the destination saves a range check.
+ bool is_valid = state_.is_valid() && Wrapper<R>::is_valid(rhs) &&
+ Math::Do(state_.value(), Wrapper<R>::value(rhs), &result);
+ *this = CheckedNumeric<T>(result, is_valid);
+ return *this;
+ };
+
+ private:
+ CheckedNumericState<T> state_;
+
template <typename Src>
- static CheckedNumeric<T> cast(
- Src u,
- typename std::enable_if<std::numeric_limits<Src>::is_specialized,
- int>::type = 0) {
- return u;
- }
+ constexpr CheckedNumeric(Src value, bool is_valid)
+ : state_(value, is_valid) {}
+ // These wrappers allow us to handle state the same way for both
+ // CheckedNumeric and POD arithmetic types.
template <typename Src>
- static CheckedNumeric<T> cast(
- const CheckedNumeric<Src>& u,
- typename std::enable_if<!std::is_same<Src, T>::value, int>::type = 0) {
- return u;
- }
+ struct Wrapper {
+ static constexpr bool is_valid(Src) { return true; }
+ static constexpr Src value(Src value) { return value; }
+ };
- static const CheckedNumeric<T>& cast(const CheckedNumeric<T>& u) { return u; }
+ template <typename Src>
+ struct Wrapper<CheckedNumeric<Src>> {
+ static constexpr bool is_valid(const CheckedNumeric<Src> v) {
+ return v.IsValid();
+ }
+ static constexpr Src value(const CheckedNumeric<Src> v) {
+ return v.state_.value();
+ }
+ };
- private:
- CheckedNumericState<T> state_;
+ template <typename Src>
+ struct Wrapper<StrictNumeric<Src>> {
+ static constexpr bool is_valid(const StrictNumeric<Src>) { return true; }
+ static constexpr Src value(const StrictNumeric<Src> v) {
+ return static_cast<Src>(v);
+ }
+ };
};
-// This is the boilerplate for the standard arithmetic operator overloads. A
-// macro isn't the prettiest solution, but it beats rewriting these five times.
-// Some details worth noting are:
-// * We apply the standard arithmetic promotions.
-// * We skip range checks for floating points.
-// * We skip range checks for destination integers with sufficient range.
-// TODO(jschuh): extract these out into templates.
-#define BASE_NUMERIC_ARITHMETIC_OPERATORS(NAME, OP, COMPOUND_OP, PROMOTION) \
- /* Binary arithmetic operator for CheckedNumerics of the same type. */ \
- template <typename L, typename R> \
- CheckedNumeric<typename ArithmeticPromotion<PROMOTION, L, R>::type> \
- operator OP(const CheckedNumeric<L>& lhs, const CheckedNumeric<R>& rhs) { \
- using P = typename ArithmeticPromotion<PROMOTION, L, R>::type; \
- if (!rhs.IsValid() || !lhs.IsValid()) \
- return CheckedNumeric<P>(0, false); \
- /* Floating point always takes the fast path */ \
- if (std::is_floating_point<L>::value || std::is_floating_point<R>::value) \
- return CheckedNumeric<P>(lhs.ValueUnsafe() OP rhs.ValueUnsafe()); \
- P result = 0; \
- bool is_valid = \
- Checked##NAME(lhs.ValueUnsafe(), rhs.ValueUnsafe(), &result); \
- return CheckedNumeric<P>(result, is_valid); \
- } \
- /* Assignment arithmetic operator implementation from CheckedNumeric. */ \
- template <typename L> \
- template <typename R> \
- CheckedNumeric<L>& CheckedNumeric<L>::operator COMPOUND_OP(R rhs) { \
- *this = *this OP rhs; \
- return *this; \
- } \
- /* Binary arithmetic operator for left CheckedNumeric and right numeric. */ \
- template <typename L, typename R, \
- typename std::enable_if<std::is_arithmetic<R>::value>::type* = \
- nullptr> \
- CheckedNumeric<typename ArithmeticPromotion<PROMOTION, L, R>::type> \
- operator OP(const CheckedNumeric<L>& lhs, R rhs) { \
- return lhs OP CheckedNumeric<R>(rhs); \
- } \
- /* Binary arithmetic operator for left numeric and right CheckedNumeric. */ \
- template <typename L, typename R, \
- typename std::enable_if<std::is_arithmetic<L>::value>::type* = \
- nullptr> \
- CheckedNumeric<typename ArithmeticPromotion<PROMOTION, L, R>::type> \
- operator OP(L lhs, const CheckedNumeric<R>& rhs) { \
- return CheckedNumeric<L>(lhs) OP rhs; \
- }
+// Convenience functions to avoid the ugly template disambiguator syntax.
+template <typename Dst, typename Src>
+constexpr bool IsValidForType(const CheckedNumeric<Src> value) {
+ return value.template IsValid<Dst>();
+}
+
+template <typename Dst, typename Src>
+constexpr StrictNumeric<Dst> ValueOrDieForType(
+ const CheckedNumeric<Src> value) {
+ return value.template ValueOrDie<Dst>();
+}
+
+template <typename Dst, typename Src, typename Default>
+constexpr StrictNumeric<Dst> ValueOrDefaultForType(
+ const CheckedNumeric<Src> value,
+ const Default default_value) {
+ return value.template ValueOrDefault<Dst>(default_value);
+}
+
+// These variadic templates work out the return types.
+// TODO(jschuh): Rip all this out once we have C++14 non-trailing auto support.
+template <template <typename, typename, typename> class M,
+ typename L,
+ typename R,
+ typename... Args>
+struct ResultType;
+
+template <template <typename, typename, typename> class M,
+ typename L,
+ typename R>
+struct ResultType<M, L, R> {
+ using type = typename MathWrapper<M, L, R>::type;
+};
-BASE_NUMERIC_ARITHMETIC_OPERATORS(Add, +, +=, MAX_EXPONENT_PROMOTION)
-BASE_NUMERIC_ARITHMETIC_OPERATORS(Sub, -, -=, MAX_EXPONENT_PROMOTION)
-BASE_NUMERIC_ARITHMETIC_OPERATORS(Mul, *, *=, MAX_EXPONENT_PROMOTION)
-BASE_NUMERIC_ARITHMETIC_OPERATORS(Div, /, /=, MAX_EXPONENT_PROMOTION)
-BASE_NUMERIC_ARITHMETIC_OPERATORS(Mod, %, %=, MAX_EXPONENT_PROMOTION)
-BASE_NUMERIC_ARITHMETIC_OPERATORS(LeftShift, <<, <<=, LEFT_PROMOTION)
-BASE_NUMERIC_ARITHMETIC_OPERATORS(RightShift, >>, >>=, LEFT_PROMOTION)
+template <template <typename, typename, typename> class M,
+ typename L,
+ typename R,
+ typename... Args>
+struct ResultType {
+ using type =
+ typename ResultType<M, typename ResultType<M, L, R>::type, Args...>::type;
+};
+// Convenience wrapper to return a new CheckedNumeric from the provided
+// arithmetic or CheckedNumeric type.
+template <typename T>
+constexpr CheckedNumeric<typename UnderlyingType<T>::type> MakeCheckedNum(
+ const T value) {
+ return value;
+}
+
+// These implement the variadic wrapper for the math operations.
+template <template <typename, typename, typename> class M,
+ typename L,
+ typename R>
+CheckedNumeric<typename MathWrapper<M, L, R>::type> ChkMathOp(const L lhs,
+ const R rhs) {
+ using Math = typename MathWrapper<M, L, R>::math;
+ return CheckedNumeric<typename Math::result_type>::template MathOp<M>(lhs,
+ rhs);
+}
+
+// General purpose wrapper template for arithmetic operations.
+template <template <typename, typename, typename> class M,
+ typename L,
+ typename R,
+ typename... Args>
+CheckedNumeric<typename ResultType<M, L, R, Args...>::type>
+ChkMathOp(const L lhs, const R rhs, const Args... args) {
+ auto tmp = ChkMathOp<M>(lhs, rhs);
+ return tmp.IsValid() ? ChkMathOp<M>(tmp, args...)
+ : decltype(ChkMathOp<M>(tmp, args...))(tmp);
+};
+
+// The following macros are just boilerplate for the standard arithmetic
+// operator overloads and variadic function templates. A macro isn't the nicest
+// solution, but it beats rewriting these over and over again.
+#define BASE_NUMERIC_ARITHMETIC_VARIADIC(NAME) \
+ template <typename L, typename R, typename... Args> \
+ CheckedNumeric<typename ResultType<Checked##NAME##Op, L, R, Args...>::type> \
+ Check##NAME(const L lhs, const R rhs, const Args... args) { \
+ return ChkMathOp<Checked##NAME##Op, L, R, Args...>(lhs, rhs, args...); \
+ }
+
+#define BASE_NUMERIC_ARITHMETIC_OPERATORS(NAME, OP, COMPOUND_OP) \
+ /* Binary arithmetic operator for all CheckedNumeric operations. */ \
+ template <typename L, typename R, \
+ typename std::enable_if<IsCheckedOp<L, R>::value>::type* = \
+ nullptr> \
+ CheckedNumeric<typename MathWrapper<Checked##NAME##Op, L, R>::type> \
+ operator OP(const L lhs, const R rhs) { \
+ return decltype(lhs OP rhs)::template MathOp<Checked##NAME##Op>(lhs, rhs); \
+ } \
+ /* Assignment arithmetic operator implementation from CheckedNumeric. */ \
+ template <typename L> \
+ template <typename R> \
+ CheckedNumeric<L>& CheckedNumeric<L>::operator COMPOUND_OP(const R rhs) { \
+ return MathOp<Checked##NAME##Op>(rhs); \
+ } \
+ /* Variadic arithmetic functions that return CheckedNumeric. */ \
+ BASE_NUMERIC_ARITHMETIC_VARIADIC(NAME)
+
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Add, +, +=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Sub, -, -=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Mul, *, *=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Div, /, /=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Mod, %, %=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Lsh, <<, <<=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Rsh, >>, >>=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(And, &, &=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Or, |, |=)
+BASE_NUMERIC_ARITHMETIC_OPERATORS(Xor, ^, ^=)
+BASE_NUMERIC_ARITHMETIC_VARIADIC(Max)
+BASE_NUMERIC_ARITHMETIC_VARIADIC(Min)
+
+#undef BASE_NUMERIC_ARITHMETIC_VARIADIC
#undef BASE_NUMERIC_ARITHMETIC_OPERATORS
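// Illustrative sketch of the generated overloads (names are hypothetical):
//   CheckedNumeric<int32_t> value = std::numeric_limits<int32_t>::max();
//   value += 1;                              // Now invalid; the wrap was caught.
//   int32_t safe = value.ValueOrDefault(0);  // Yields 0 instead of CHECKing.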
+// These are some extra StrictNumeric operators to support simple pointer
+// arithmetic with our result types. Since wrapping on a pointer is always
+// bad, we trigger the CHECK condition here.
+template <typename L, typename R>
+L* operator+(L* lhs, const StrictNumeric<R> rhs) {
+ uintptr_t result = CheckAdd(reinterpret_cast<uintptr_t>(lhs),
+ CheckMul(sizeof(L), static_cast<R>(rhs)))
+ .template ValueOrDie<uintptr_t>();
+ return reinterpret_cast<L*>(result);
+}
+
+template <typename L, typename R>
+L* operator-(L* lhs, const StrictNumeric<R> rhs) {
+ uintptr_t result = CheckSub(reinterpret_cast<uintptr_t>(lhs),
+ CheckMul(sizeof(L), static_cast<R>(rhs)))
+ .template ValueOrDie<uintptr_t>();
+ return reinterpret_cast<L*>(result);
+}
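// Illustrative sketch (buffer and count names are hypothetical): advancing a
// pointer by an untrusted count CHECKs on wraparound rather than yielding a
// bogus pointer.
//   const uint8_t* end =
//       buffer_start + StrictNumeric<size_t>(untrusted_element_count);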
+
} // namespace internal
using internal::CheckedNumeric;
+using internal::IsValidForType;
+using internal::ValueOrDieForType;
+using internal::ValueOrDefaultForType;
+using internal::MakeCheckedNum;
+using internal::CheckMax;
+using internal::CheckMin;
+using internal::CheckAdd;
+using internal::CheckSub;
+using internal::CheckMul;
+using internal::CheckDiv;
+using internal::CheckMod;
+using internal::CheckLsh;
+using internal::CheckRsh;
+using internal::CheckAnd;
+using internal::CheckOr;
+using internal::CheckXor;
} // namespace base
diff --git a/chromium/base/numerics/safe_math_impl.h b/chromium/base/numerics/safe_math_impl.h
index 71fc278b297..a224f692dd5 100644
--- a/chromium/base/numerics/safe_math_impl.h
+++ b/chromium/base/numerics/safe_math_impl.h
@@ -23,444 +23,247 @@ namespace internal {
// but it may not be fast. This code could be split based on
// platform/architecture and replaced with potentially faster implementations.
-// Integer promotion templates used by the portable checked integer arithmetic.
-template <size_t Size, bool IsSigned>
-struct IntegerForSizeAndSign;
-template <>
-struct IntegerForSizeAndSign<1, true> {
- typedef int8_t type;
-};
-template <>
-struct IntegerForSizeAndSign<1, false> {
- typedef uint8_t type;
-};
-template <>
-struct IntegerForSizeAndSign<2, true> {
- typedef int16_t type;
-};
-template <>
-struct IntegerForSizeAndSign<2, false> {
- typedef uint16_t type;
-};
-template <>
-struct IntegerForSizeAndSign<4, true> {
- typedef int32_t type;
-};
-template <>
-struct IntegerForSizeAndSign<4, false> {
- typedef uint32_t type;
-};
-template <>
-struct IntegerForSizeAndSign<8, true> {
- typedef int64_t type;
-};
-template <>
-struct IntegerForSizeAndSign<8, false> {
- typedef uint64_t type;
-};
-
-// WARNING: We have no IntegerForSizeAndSign<16, *>. If we ever add one to
-// support 128-bit math, then the ArithmeticPromotion template below will need
-// to be updated (or more likely replaced with a decltype expression).
-
-template <typename Integer>
-struct UnsignedIntegerForSize {
- typedef typename std::enable_if<
- std::numeric_limits<Integer>::is_integer,
- typename IntegerForSizeAndSign<sizeof(Integer), false>::type>::type type;
-};
-
-template <typename Integer>
-struct SignedIntegerForSize {
- typedef typename std::enable_if<
- std::numeric_limits<Integer>::is_integer,
- typename IntegerForSizeAndSign<sizeof(Integer), true>::type>::type type;
-};
-
-template <typename Integer>
-struct TwiceWiderInteger {
- typedef typename std::enable_if<
- std::numeric_limits<Integer>::is_integer,
- typename IntegerForSizeAndSign<
- sizeof(Integer) * 2,
- std::numeric_limits<Integer>::is_signed>::type>::type type;
-};
-
-template <typename Integer>
-struct PositionOfSignBit {
- static const typename std::enable_if<std::numeric_limits<Integer>::is_integer,
- size_t>::type value =
- CHAR_BIT * sizeof(Integer) - 1;
-};
-
// This is used for UnsignedAbs, where we need to support floating-point
// template instantiations even though we don't actually support the operations.
// However, there is no corresponding implementation of e.g. SafeUnsignedAbs,
// so the float versions will not compile.
template <typename Numeric,
- bool IsInteger = std::numeric_limits<Numeric>::is_integer,
- bool IsFloat = std::numeric_limits<Numeric>::is_iec559>
+ bool IsInteger = std::is_integral<Numeric>::value,
+ bool IsFloat = std::is_floating_point<Numeric>::value>
struct UnsignedOrFloatForSize;
template <typename Numeric>
struct UnsignedOrFloatForSize<Numeric, true, false> {
- typedef typename UnsignedIntegerForSize<Numeric>::type type;
+ using type = typename std::make_unsigned<Numeric>::type;
};
template <typename Numeric>
struct UnsignedOrFloatForSize<Numeric, false, true> {
- typedef Numeric type;
-};
-
-// Helper templates for integer manipulations.
-
-template <typename T>
-constexpr bool HasSignBit(T x) {
- // Cast to unsigned since right shift on signed is undefined.
- return !!(static_cast<typename UnsignedIntegerForSize<T>::type>(x) >>
- PositionOfSignBit<T>::value);
-}
-
-// This wrapper undoes the standard integer promotions.
-template <typename T>
-constexpr T BinaryComplement(T x) {
- return static_cast<T>(~x);
-}
-
-// Return if a numeric value is negative regardless of type.
-template <typename T,
- typename std::enable_if<std::is_arithmetic<T>::value &&
- std::is_signed<T>::value>::type* = nullptr>
-constexpr bool IsNegative(T x) {
- return x < 0;
-}
-
-template <typename T,
- typename std::enable_if<std::is_arithmetic<T>::value &&
- !std::is_signed<T>::value>::type* = nullptr>
-constexpr bool IsNegative(T x) {
- return false;
-}
-
-enum ArithmeticPromotionCategory {
- LEFT_PROMOTION, // Use the type of the left-hand argument.
- RIGHT_PROMOTION, // Use the type of the right-hand argument.
- MAX_EXPONENT_PROMOTION, // Use the type supporting the largest exponent.
- BIG_ENOUGH_PROMOTION // Attempt to find a big enough type.
-};
-
-template <ArithmeticPromotionCategory Promotion,
- typename Lhs,
- typename Rhs = Lhs>
-struct ArithmeticPromotion;
-
-template <typename Lhs,
- typename Rhs,
- ArithmeticPromotionCategory Promotion =
- (MaxExponent<Lhs>::value > MaxExponent<Rhs>::value)
- ? LEFT_PROMOTION
- : RIGHT_PROMOTION>
-struct MaxExponentPromotion;
-
-template <typename Lhs, typename Rhs>
-struct MaxExponentPromotion<Lhs, Rhs, LEFT_PROMOTION> {
- using type = Lhs;
-};
-
-template <typename Lhs, typename Rhs>
-struct MaxExponentPromotion<Lhs, Rhs, RIGHT_PROMOTION> {
- using type = Rhs;
-};
-
-template <typename Lhs,
- typename Rhs = Lhs,
- bool is_intmax_type =
- std::is_integral<
- typename MaxExponentPromotion<Lhs, Rhs>::type>::value &&
- sizeof(typename MaxExponentPromotion<Lhs, Rhs>::type) ==
- sizeof(intmax_t),
- bool is_max_exponent =
- StaticDstRangeRelationToSrcRange<
- typename MaxExponentPromotion<Lhs, Rhs>::type,
- Lhs>::value ==
- NUMERIC_RANGE_CONTAINED&& StaticDstRangeRelationToSrcRange<
- typename MaxExponentPromotion<Lhs, Rhs>::type,
- Rhs>::value == NUMERIC_RANGE_CONTAINED>
-struct BigEnoughPromotion;
-
-// The side with the max exponent is big enough.
-template <typename Lhs, typename Rhs, bool is_intmax_type>
-struct BigEnoughPromotion<Lhs, Rhs, is_intmax_type, true> {
- using type = typename MaxExponentPromotion<Lhs, Rhs>::type;
- static const bool is_contained = true;
-};
-
-// We can use a twice wider type to fit.
-template <typename Lhs, typename Rhs>
-struct BigEnoughPromotion<Lhs, Rhs, false, false> {
- using type = typename IntegerForSizeAndSign<
- sizeof(typename MaxExponentPromotion<Lhs, Rhs>::type) * 2,
- std::is_signed<Lhs>::value || std::is_signed<Rhs>::value>::type;
- static const bool is_contained = true;
+ using type = Numeric;
};
-// No type is large enough.
-template <typename Lhs, typename Rhs>
-struct BigEnoughPromotion<Lhs, Rhs, true, false> {
- using type = typename MaxExponentPromotion<Lhs, Rhs>::type;
- static const bool is_contained = false;
-};
-
-// These are the four supported promotion types.
-
-// Use the type of the left-hand argument.
-template <typename Lhs, typename Rhs>
-struct ArithmeticPromotion<LEFT_PROMOTION, Lhs, Rhs> {
- using type = Lhs;
- static const bool is_contained = true;
-};
-
-// Use the type of the right-hand argument.
-template <typename Lhs, typename Rhs>
-struct ArithmeticPromotion<RIGHT_PROMOTION, Lhs, Rhs> {
- using type = Rhs;
- static const bool is_contained = true;
-};
-
-// Use the type supporting the largest exponent.
-template <typename Lhs, typename Rhs>
-struct ArithmeticPromotion<MAX_EXPONENT_PROMOTION, Lhs, Rhs> {
- using type = typename MaxExponentPromotion<Lhs, Rhs>::type;
- static const bool is_contained = true;
-};
-
-// Attempt to find a big enough type.
-template <typename Lhs, typename Rhs>
-struct ArithmeticPromotion<BIG_ENOUGH_PROMOTION, Lhs, Rhs> {
- using type = typename BigEnoughPromotion<Lhs, Rhs>::type;
- static const bool is_contained = BigEnoughPromotion<Lhs, Rhs>::is_contained;
-};
-
-// We can statically check if operations on the provided types can wrap, so we
-// can skip the checked operations if they're not needed. So, for an integer we
-// care if the destination type preserves the sign and is twice the width of
-// the source.
-template <typename T, typename Lhs, typename Rhs>
-struct IsIntegerArithmeticSafe {
- static const bool value = !std::numeric_limits<T>::is_iec559 &&
- StaticDstRangeRelationToSrcRange<T, Lhs>::value ==
- NUMERIC_RANGE_CONTAINED &&
- sizeof(T) >= (2 * sizeof(Lhs)) &&
- StaticDstRangeRelationToSrcRange<T, Rhs>::value !=
- NUMERIC_RANGE_CONTAINED &&
- sizeof(T) >= (2 * sizeof(Rhs));
-};
-
-// Here are the actual portable checked integer math implementations.
-// TODO(jschuh): Break this code out from the enable_if pattern and find a clean
-// way to coalesce things into the CheckedNumericState specializations below.
+// Probe for builtin math overflow support on Clang and version check on GCC.
+#if defined(__has_builtin)
+#define USE_OVERFLOW_BUILTINS (__has_builtin(__builtin_add_overflow))
+#elif defined(__GNUC__)
+#define USE_OVERFLOW_BUILTINS (__GNUC__ >= 5)
+#else
+#define USE_OVERFLOW_BUILTINS (0)
+#endif
template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_integer, bool>::type
-CheckedAddImpl(T x, T y, T* result) {
+bool CheckedAddImpl(T x, T y, T* result) {
+ static_assert(std::is_integral<T>::value, "Type must be integral");
// Since the value of x+y is undefined if we have a signed type, we compute
// it using the unsigned type of the same size.
- typedef typename UnsignedIntegerForSize<T>::type UnsignedDst;
+ using UnsignedDst = typename std::make_unsigned<T>::type;
+ using SignedDst = typename std::make_signed<T>::type;
UnsignedDst ux = static_cast<UnsignedDst>(x);
UnsignedDst uy = static_cast<UnsignedDst>(y);
UnsignedDst uresult = static_cast<UnsignedDst>(ux + uy);
*result = static_cast<T>(uresult);
// Addition is valid if the sign of (x + y) is equal to either that of x or
// that of y.
- return (std::numeric_limits<T>::is_signed)
- ? HasSignBit(BinaryComplement(
- static_cast<UnsignedDst>((uresult ^ ux) & (uresult ^ uy))))
- : (BinaryComplement(x) >=
- y); // Unsigned is either valid or underflow.
+ return (std::is_signed<T>::value)
+ ? static_cast<SignedDst>((uresult ^ ux) & (uresult ^ uy)) >= 0
+ : uresult >= uy; // Unsigned is either valid or underflow.
}
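// Worked example for the sign test above (int8_t, illustrative): x = 100 and
// y = 100 give uresult = 200, which wraps to -56. Both (uresult ^ ux) and
// (uresult ^ uy) then have the sign bit set, so their AND is negative when
// viewed as signed and the addition is correctly reported as invalid.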
-template <typename T, typename U, typename V>
-typename std::enable_if<std::numeric_limits<T>::is_integer &&
- std::numeric_limits<U>::is_integer &&
- std::numeric_limits<V>::is_integer,
- bool>::type
-CheckedAdd(T x, U y, V* result) {
- using Promotion =
- typename ArithmeticPromotion<BIG_ENOUGH_PROMOTION, T, U>::type;
- Promotion presult;
- // Fail if either operand is out of range for the promoted type.
- // TODO(jschuh): This could be made to work for a broader range of values.
- bool is_valid = IsValueInRangeForNumericType<Promotion>(x) &&
- IsValueInRangeForNumericType<Promotion>(y);
-
- if (IsIntegerArithmeticSafe<Promotion, U, V>::value) {
- presult = static_cast<Promotion>(x) + static_cast<Promotion>(y);
- } else {
- is_valid &= CheckedAddImpl(static_cast<Promotion>(x),
- static_cast<Promotion>(y), &presult);
+template <typename T, typename U, class Enable = void>
+struct CheckedAddOp {};
+
+template <typename T, typename U>
+struct CheckedAddOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V>
+ static bool Do(T x, U y, V* result) {
+#if USE_OVERFLOW_BUILTINS
+ return !__builtin_add_overflow(x, y, result);
+#else
+ using Promotion = typename BigEnoughPromotion<T, U>::type;
+ Promotion presult;
+ // Fail if either operand is out of range for the promoted type.
+ // TODO(jschuh): This could be made to work for a broader range of values.
+ bool is_valid = IsValueInRangeForNumericType<Promotion>(x) &&
+ IsValueInRangeForNumericType<Promotion>(y);
+
+ if (IsIntegerArithmeticSafe<Promotion, T, U>::value) {
+ presult = static_cast<Promotion>(x) + static_cast<Promotion>(y);
+ } else {
+ is_valid &= CheckedAddImpl(static_cast<Promotion>(x),
+ static_cast<Promotion>(y), &presult);
+ }
+ *result = static_cast<V>(presult);
+ return is_valid && IsValueInRangeForNumericType<V>(presult);
+#endif
}
- *result = static_cast<V>(presult);
- return is_valid && IsValueInRangeForNumericType<V>(presult);
-}
+};
template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_integer, bool>::type
-CheckedSubImpl(T x, T y, T* result) {
+bool CheckedSubImpl(T x, T y, T* result) {
+ static_assert(std::is_integral<T>::value, "Type must be integral");
// Since the value of x+y is undefined if we have a signed type, we compute
// it using the unsigned type of the same size.
- typedef typename UnsignedIntegerForSize<T>::type UnsignedDst;
+ using UnsignedDst = typename std::make_unsigned<T>::type;
+ using SignedDst = typename std::make_signed<T>::type;
UnsignedDst ux = static_cast<UnsignedDst>(x);
UnsignedDst uy = static_cast<UnsignedDst>(y);
UnsignedDst uresult = static_cast<UnsignedDst>(ux - uy);
*result = static_cast<T>(uresult);
// Subtraction is valid if either x and y have same sign, or (x-y) and x have
// the same sign.
- return (std::numeric_limits<T>::is_signed)
- ? HasSignBit(BinaryComplement(
- static_cast<UnsignedDst>((uresult ^ ux) & (ux ^ uy))))
- : (x >= y);
+ return (std::is_signed<T>::value)
+ ? static_cast<SignedDst>((uresult ^ ux) & (ux ^ uy)) >= 0
+ : x >= y;
}
-template <typename T, typename U, typename V>
-typename std::enable_if<std::numeric_limits<T>::is_integer &&
- std::numeric_limits<U>::is_integer &&
- std::numeric_limits<V>::is_integer,
- bool>::type
-CheckedSub(T x, U y, V* result) {
- using Promotion =
- typename ArithmeticPromotion<BIG_ENOUGH_PROMOTION, T, U>::type;
- Promotion presult;
- // Fail if either operand is out of range for the promoted type.
- // TODO(jschuh): This could be made to work for a broader range of values.
- bool is_valid = IsValueInRangeForNumericType<Promotion>(x) &&
- IsValueInRangeForNumericType<Promotion>(y);
-
- if (IsIntegerArithmeticSafe<Promotion, U, V>::value) {
- presult = static_cast<Promotion>(x) - static_cast<Promotion>(y);
- } else {
- is_valid &= CheckedSubImpl(static_cast<Promotion>(x),
- static_cast<Promotion>(y), &presult);
+template <typename T, typename U, class Enable = void>
+struct CheckedSubOp {};
+
+template <typename T, typename U>
+struct CheckedSubOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V>
+ static bool Do(T x, U y, V* result) {
+#if USE_OVERFLOW_BUILTINS
+ return !__builtin_sub_overflow(x, y, result);
+#else
+ using Promotion = typename BigEnoughPromotion<T, U>::type;
+ Promotion presult;
+ // Fail if either operand is out of range for the promoted type.
+ // TODO(jschuh): This could be made to work for a broader range of values.
+ bool is_valid = IsValueInRangeForNumericType<Promotion>(x) &&
+ IsValueInRangeForNumericType<Promotion>(y);
+
+ if (IsIntegerArithmeticSafe<Promotion, T, U>::value) {
+ presult = static_cast<Promotion>(x) - static_cast<Promotion>(y);
+ } else {
+ is_valid &= CheckedSubImpl(static_cast<Promotion>(x),
+ static_cast<Promotion>(y), &presult);
+ }
+ *result = static_cast<V>(presult);
+ return is_valid && IsValueInRangeForNumericType<V>(presult);
+#endif
}
- *result = static_cast<V>(presult);
- return is_valid && IsValueInRangeForNumericType<V>(presult);
-}
+};
-// Integer multiplication is a bit complicated. In the fast case we just
-// we just promote to a twice wider type, and range check the result. In the
-// slow case we need to manually check that the result won't be truncated by
-// checking with division against the appropriate bound.
template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_integer &&
- sizeof(T) * 2 <= sizeof(uintmax_t),
- bool>::type
-CheckedMulImpl(T x, T y, T* result) {
- typedef typename TwiceWiderInteger<T>::type IntermediateType;
- IntermediateType tmp =
- static_cast<IntermediateType>(x) * static_cast<IntermediateType>(y);
- *result = static_cast<T>(tmp);
- return DstRangeRelationToSrcRange<T>(tmp) == RANGE_VALID;
+bool CheckedMulImpl(T x, T y, T* result) {
+ static_assert(std::is_integral<T>::value, "Type must be integral");
+ // Since the value of x*y is potentially undefined if we have a signed type,
+ // we compute it using the unsigned type of the same size.
+ using UnsignedDst = typename std::make_unsigned<T>::type;
+ using SignedDst = typename std::make_signed<T>::type;
+ const UnsignedDst ux = SafeUnsignedAbs(x);
+ const UnsignedDst uy = SafeUnsignedAbs(y);
+ UnsignedDst uresult = static_cast<UnsignedDst>(ux * uy);
+ const bool is_negative =
+ std::is_signed<T>::value && static_cast<SignedDst>(x ^ y) < 0;
+ *result = is_negative ? 0 - uresult : uresult;
+ // We have a fast out for unsigned identity or zero on the second operand.
+ // After that it's an unsigned overflow check on the absolute value, with
+ // a +1 bound for a negative result.
+ return uy <= UnsignedDst(!std::is_signed<T>::value || is_negative) ||
+ ux <= (std::numeric_limits<T>::max() + UnsignedDst(is_negative)) / uy;
}
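
A worked sketch of the bound check above, specialized to int8_t: the product of the magnitudes must stay at or below 127, or 128 when the signs differ, because -128 is representable while +128 is not.

#include <cstdint>

bool SketchCheckedMul8(int8_t x, int8_t y, int8_t* result) {
  uint8_t ux = static_cast<uint8_t>(x < 0 ? -static_cast<int>(x) : x);
  uint8_t uy = static_cast<uint8_t>(y < 0 ? -static_cast<int>(y) : y);
  uint8_t uresult = static_cast<uint8_t>(ux * uy);
  bool is_negative = (x ^ y) < 0;
  *result = static_cast<int8_t>(is_negative ? 0 - uresult : uresult);
  // Fast out when |y| is 0 (or 1 for a negative product); otherwise compare
  // |x| against the largest representable magnitude divided by |y|. The
  // short-circuit guarantees uy is nonzero before the division.
  return uy <= static_cast<uint8_t>(is_negative ? 1 : 0) ||
         ux <= (127 + (is_negative ? 1 : 0)) / uy;
}

For example, SketchCheckedMul8(-16, 8, &r) succeeds with r == -128, while SketchCheckedMul8(16, 8, &r) fails because +128 does not fit in int8_t.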
-template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_integer &&
- std::numeric_limits<T>::is_signed &&
- (sizeof(T) * 2 > sizeof(uintmax_t)),
- bool>::type
-CheckedMulImpl(T x, T y, T* result) {
- if (x && y) {
- if (x > 0) {
- if (y > 0) {
- if (x > std::numeric_limits<T>::max() / y)
- return false;
- } else {
- if (y < std::numeric_limits<T>::min() / x)
- return false;
- }
+template <typename T, typename U, class Enable = void>
+struct CheckedMulOp {};
+
+template <typename T, typename U>
+struct CheckedMulOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V>
+ static bool Do(T x, U y, V* result) {
+#if USE_OVERFLOW_BUILTINS
+#if defined(__clang__)
+ // TODO(jschuh): Get the Clang runtime library issues sorted out so we can
+ // support full-width, mixed-sign multiply builtins.
+ // https://crbug.com/613003
+ static const bool kUseMaxInt =
+ // Narrower type than uintptr_t is always safe.
+ std::numeric_limits<__typeof__(x * y)>::digits <
+ std::numeric_limits<intptr_t>::digits ||
+ // Safe for intptr_t and uintptr_t if the sign matches.
+ (IntegerBitsPlusSign<__typeof__(x * y)>::value ==
+ IntegerBitsPlusSign<intptr_t>::value &&
+ std::is_signed<T>::value == std::is_signed<U>::value);
+#else
+ static const bool kUseMaxInt = true;
+#endif
+ if (kUseMaxInt)
+ return !__builtin_mul_overflow(x, y, result);
+#endif
+ using Promotion = typename FastIntegerArithmeticPromotion<T, U>::type;
+ Promotion presult;
+ // Fail if either operand is out of range for the promoted type.
+ // TODO(jschuh): This could be made to work for a broader range of values.
+ bool is_valid = IsValueInRangeForNumericType<Promotion>(x) &&
+ IsValueInRangeForNumericType<Promotion>(y);
+
+ if (IsIntegerArithmeticSafe<Promotion, T, U>::value) {
+ presult = static_cast<Promotion>(x) * static_cast<Promotion>(y);
} else {
- if (y > 0) {
- if (x < std::numeric_limits<T>::min() / y)
- return false;
- } else {
- if (y < std::numeric_limits<T>::max() / x)
- return false;
- }
+ is_valid &= CheckedMulImpl(static_cast<Promotion>(x),
+ static_cast<Promotion>(y), &presult);
}
+ *result = static_cast<V>(presult);
+ return is_valid && IsValueInRangeForNumericType<V>(presult);
}
- *result = x * y;
- return true;
-}
-
-template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_integer &&
- !std::numeric_limits<T>::is_signed &&
- (sizeof(T) * 2 > sizeof(uintmax_t)),
- bool>::type
-CheckedMulImpl(T x, T y, T* result) {
- *result = x * y;
- return (y == 0 || x <= std::numeric_limits<T>::max() / y);
-}
+};
-template <typename T, typename U, typename V>
-typename std::enable_if<std::numeric_limits<T>::is_integer &&
- std::numeric_limits<U>::is_integer &&
- std::numeric_limits<V>::is_integer,
- bool>::type
-CheckedMul(T x, U y, V* result) {
- using Promotion =
- typename ArithmeticPromotion<BIG_ENOUGH_PROMOTION, T, U>::type;
- Promotion presult;
- // Fail if either operand is out of range for the promoted type.
- // TODO(jschuh): This could be made to work for a broader range of values.
- bool is_valid = IsValueInRangeForNumericType<Promotion>(x) &&
- IsValueInRangeForNumericType<Promotion>(y);
-
- if (IsIntegerArithmeticSafe<Promotion, U, V>::value) {
- presult = static_cast<Promotion>(x) * static_cast<Promotion>(y);
- } else {
- is_valid &= CheckedMulImpl(static_cast<Promotion>(x),
- static_cast<Promotion>(y), &presult);
- }
- *result = static_cast<V>(presult);
- return is_valid && IsValueInRangeForNumericType<V>(presult);
-}
+// Avoid polluting the namespace once we're done with the macro.
+#undef USE_OVERFLOW_BUILTINS
// Division just requires a check for a zero denominator or an invalid negation
// on signed min/-1.
template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_integer, bool>::type
-CheckedDivImpl(T x, T y, T* result) {
- if (y && (!std::numeric_limits<T>::is_signed ||
- x != std::numeric_limits<T>::min() || y != static_cast<T>(-1))) {
+bool CheckedDivImpl(T x, T y, T* result) {
+ static_assert(std::is_integral<T>::value, "Type must be integral");
+ if (y && (!std::is_signed<T>::value ||
+ x != std::numeric_limits<T>::lowest() || y != static_cast<T>(-1))) {
*result = x / y;
return true;
}
return false;
}
-template <typename T, typename U, typename V>
-typename std::enable_if<std::numeric_limits<T>::is_integer &&
- std::numeric_limits<U>::is_integer &&
- std::numeric_limits<V>::is_integer,
- bool>::type
-CheckedDiv(T x, U y, V* result) {
- using Promotion =
- typename ArithmeticPromotion<BIG_ENOUGH_PROMOTION, T, U>::type;
- Promotion presult;
- // Fail if either operand is out of range for the promoted type.
- // TODO(jschuh): This could be made to work for a broader range of values.
- bool is_valid = IsValueInRangeForNumericType<Promotion>(x) &&
- IsValueInRangeForNumericType<Promotion>(y);
- is_valid &= CheckedDivImpl(static_cast<Promotion>(x),
- static_cast<Promotion>(y), &presult);
- *result = static_cast<V>(presult);
- return is_valid && IsValueInRangeForNumericType<V>(presult);
-}
+template <typename T, typename U, class Enable = void>
+struct CheckedDivOp {};
+
+template <typename T, typename U>
+struct CheckedDivOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V>
+ static bool Do(T x, U y, V* result) {
+ using Promotion = typename BigEnoughPromotion<T, U>::type;
+ Promotion presult;
+ // Fail if either operand is out of range for the promoted type.
+ // TODO(jschuh): This could be made to work for a broader range of values.
+ bool is_valid = IsValueInRangeForNumericType<Promotion>(x) &&
+ IsValueInRangeForNumericType<Promotion>(y);
+ is_valid &= CheckedDivImpl(static_cast<Promotion>(x),
+ static_cast<Promotion>(y), &presult);
+ *result = static_cast<V>(presult);
+ return is_valid && IsValueInRangeForNumericType<V>(presult);
+ }
+};
template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_integer &&
- std::numeric_limits<T>::is_signed,
- bool>::type
-CheckedModImpl(T x, T y, T* result) {
+bool CheckedModImpl(T x, T y, T* result) {
+ static_assert(std::is_integral<T>::value, "Type must be integral");
if (y > 0) {
*result = static_cast<T>(x % y);
return true;
@@ -468,189 +271,238 @@ CheckedModImpl(T x, T y, T* result) {
return false;
}
-template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_integer &&
- !std::numeric_limits<T>::is_signed,
- bool>::type
-CheckedModImpl(T x, T y, T* result) {
- if (y != 0) {
- *result = static_cast<T>(x % y);
- return true;
+template <typename T, typename U, class Enable = void>
+struct CheckedModOp {};
+
+template <typename T, typename U>
+struct CheckedModOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V>
+ static bool Do(T x, U y, V* result) {
+ using Promotion = typename BigEnoughPromotion<T, U>::type;
+ Promotion presult;
+ bool is_valid = CheckedModImpl(static_cast<Promotion>(x),
+ static_cast<Promotion>(y), &presult);
+ *result = static_cast<V>(presult);
+ return is_valid && IsValueInRangeForNumericType<V>(presult);
}
- return false;
-}
+};
-template <typename T, typename U, typename V>
-typename std::enable_if<std::numeric_limits<T>::is_integer &&
- std::numeric_limits<U>::is_integer &&
- std::numeric_limits<V>::is_integer,
- bool>::type
-CheckedMod(T x, U y, V* result) {
- using Promotion =
- typename ArithmeticPromotion<BIG_ENOUGH_PROMOTION, T, U>::type;
- Promotion presult;
- bool is_valid = CheckedModImpl(static_cast<Promotion>(x),
- static_cast<Promotion>(y), &presult);
- *result = static_cast<V>(presult);
- return is_valid && IsValueInRangeForNumericType<V>(presult);
-}
+template <typename T, typename U, class Enable = void>
+struct CheckedLshOp {};
// Left shift. Shifts less than 0 or greater than or equal to the number
// of bits in the promoted type are undefined. Shifts of negative values
// are undefined. Otherwise it is defined when the result fits.
-template <typename T, typename U, typename V>
-typename std::enable_if<std::numeric_limits<T>::is_integer &&
- std::numeric_limits<U>::is_integer &&
- std::numeric_limits<V>::is_integer,
- bool>::type
-CheckedLeftShift(T x, U shift, V* result) {
- using ShiftType = typename UnsignedIntegerForSize<T>::type;
- static const ShiftType kBitWidth = CHAR_BIT * sizeof(T);
- const ShiftType real_shift = static_cast<ShiftType>(shift);
- // Signed shift is not legal on negative values.
- if (!IsNegative(x) && real_shift < kBitWidth) {
- // Just use a multiplication because it's easy.
- // TODO(jschuh): This could probably be made more efficient.
- if (!std::is_signed<T>::value || real_shift != kBitWidth - 1)
- return CheckedMul(x, static_cast<T>(1) << shift, result);
- return !x; // Special case zero for a full width signed shift.
+template <typename T, typename U>
+struct CheckedLshOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = T;
+ template <typename V>
+ static bool Do(T x, U shift, V* result) {
+ using ShiftType = typename std::make_unsigned<T>::type;
+ static const ShiftType kBitWidth = IntegerBitsPlusSign<T>::value;
+ const ShiftType real_shift = static_cast<ShiftType>(shift);
+ // Signed shift is not legal on negative values.
+ if (!IsValueNegative(x) && real_shift < kBitWidth) {
+ // Just use a multiplication because it's easy.
+ // TODO(jschuh): This could probably be made more efficient.
+ if (!std::is_signed<T>::value || real_shift != kBitWidth - 1)
+ return CheckedMulOp<T, T>::Do(x, static_cast<T>(1) << shift, result);
+ return !x; // Special case zero for a full width signed shift.
+ }
+ return false;
}
- return false;
-}
+};
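
A sketch of the shift-as-multiplication idea above, pinned to int32_t: after rejecting negative values and out-of-range shifts, the shift reduces to a range-checked multiply by 1 << shift, with zero as the only value that survives a full-width signed shift.

#include <cstdint>
#include <limits>

bool SketchCheckedLsh32(int32_t x, uint32_t shift, int32_t* result) {
  // Shifting negative values, or by 32 or more bits, is undefined: reject.
  if (x < 0 || shift >= 32)
    return false;
  // 1 << 31 is itself out of range for int32_t, so only zero fits there.
  if (shift == 31) {
    *result = 0;
    return x == 0;
  }
  // Otherwise this is just a range-checked multiplication by a power of two.
  int64_t product = static_cast<int64_t>(x) * (int64_t{1} << shift);
  if (product > std::numeric_limits<int32_t>::max())
    return false;
  *result = static_cast<int32_t>(product);
  return true;
}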
+
+template <typename T, typename U, class Enable = void>
+struct CheckedRshOp {};
// Right shift. Shifts less than 0 or greater than or equal to the number
// of bits in the promoted type are undefined. Otherwise, it is always defined,
// but a right shift of a negative value is implementation-dependent.
-template <typename T, typename U, typename V>
-typename std::enable_if<std::numeric_limits<T>::is_integer &&
- std::numeric_limits<U>::is_integer &&
- std::numeric_limits<V>::is_integer,
- bool>::type
-CheckedRightShift(T x, U shift, V* result) {
- // Use the type conversion push negative values out of range.
- using ShiftType = typename UnsignedIntegerForSize<T>::type;
- if (static_cast<ShiftType>(shift) < (CHAR_BIT * sizeof(T))) {
- T tmp = x >> shift;
+template <typename T, typename U>
+struct CheckedRshOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = T;
+ template <typename V = result_type>
+ static bool Do(T x, U shift, V* result) {
+    // Use the type conversion to push negative values out of range.
+ using ShiftType = typename std::make_unsigned<T>::type;
+ if (static_cast<ShiftType>(shift) < IntegerBitsPlusSign<T>::value) {
+ T tmp = x >> shift;
+ *result = static_cast<V>(tmp);
+ return IsValueInRangeForNumericType<V>(tmp);
+ }
+ return false;
+ }
+};
+
+template <typename T, typename U, class Enable = void>
+struct CheckedAndOp {};
+
+// For simplicity we support only unsigned integer results.
+template <typename T, typename U>
+struct CheckedAndOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = typename std::make_unsigned<
+ typename MaxExponentPromotion<T, U>::type>::type;
+ template <typename V = result_type>
+ static bool Do(T x, U y, V* result) {
+ result_type tmp = static_cast<result_type>(x) & static_cast<result_type>(y);
*result = static_cast<V>(tmp);
- return IsValueInRangeForNumericType<unsigned>(tmp);
+ return IsValueInRangeForNumericType<V>(tmp);
}
- return false;
-}
+};
-template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_integer &&
- std::numeric_limits<T>::is_signed,
- bool>::type
-CheckedNeg(T value, T* result) {
- // The negation of signed min is min, so catch that one.
- if (value != std::numeric_limits<T>::min()) {
- *result = static_cast<T>(-value);
- return true;
+template <typename T, typename U, class Enable = void>
+struct CheckedOrOp {};
+
+// For simplicity we support only unsigned integers.
+template <typename T, typename U>
+struct CheckedOrOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = typename std::make_unsigned<
+ typename MaxExponentPromotion<T, U>::type>::type;
+ template <typename V = result_type>
+ static bool Do(T x, U y, V* result) {
+ result_type tmp = static_cast<result_type>(x) | static_cast<result_type>(y);
+ *result = static_cast<V>(tmp);
+ return IsValueInRangeForNumericType<V>(tmp);
}
- return false;
-}
+};
-template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_integer &&
- !std::numeric_limits<T>::is_signed,
- bool>::type
-CheckedNeg(T value, T* result) {
- if (!value) { // The only legal unsigned negation is zero.
- *result = static_cast<T>(0);
- return true;
+template <typename T, typename U, class Enable = void>
+struct CheckedXorOp {};
+
+// For simplicity we support only unsigned integers.
+template <typename T, typename U>
+struct CheckedXorOp<T,
+ U,
+ typename std::enable_if<std::is_integral<T>::value &&
+ std::is_integral<U>::value>::type> {
+ using result_type = typename std::make_unsigned<
+ typename MaxExponentPromotion<T, U>::type>::type;
+ template <typename V = result_type>
+ static bool Do(T x, U y, V* result) {
+ result_type tmp = static_cast<result_type>(x) ^ static_cast<result_type>(y);
+ *result = static_cast<V>(tmp);
+ return IsValueInRangeForNumericType<V>(tmp);
}
- return false;
-}
+};
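
The three bitwise operations share one convention: both operands are cast to the unsigned form of the promoted type before the operation, so a signed -1 becomes an all-ones mask and the result type is always unsigned. A small illustration of that convention (types chosen arbitrarily):

#include <cstdint>
#include <cstdio>

int main() {
  int8_t mask = -1;              // All-ones once converted to uint32_t.
  uint32_t value = 0x0000ABCDu;
  // Mirrors the cast-then-operate pattern of CheckedAndOp<uint32_t, int8_t>.
  uint32_t anded = value & static_cast<uint32_t>(mask);
  std::printf("0x%08X\n", anded);  // 0x0000ABCD: the mask preserved every bit.
  return 0;
}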
-template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_integer &&
- std::numeric_limits<T>::is_signed,
- bool>::type
-CheckedAbs(T value, T* result) {
- if (value != std::numeric_limits<T>::min()) {
- *result = std::abs(value);
+// Max doesn't really need to be implemented this way because it can't fail,
+// but it makes the code much cleaner to use the MathOp wrappers.
+template <typename T, typename U, class Enable = void>
+struct CheckedMaxOp {};
+
+template <typename T, typename U>
+struct CheckedMaxOp<
+ T,
+ U,
+ typename std::enable_if<std::is_arithmetic<T>::value &&
+ std::is_arithmetic<U>::value>::type> {
+ using result_type = typename MaxExponentPromotion<T, U>::type;
+ template <typename V = result_type>
+ static bool Do(T x, U y, V* result) {
+ *result = IsGreater<T, U>::Test(x, y) ? static_cast<result_type>(x)
+ : static_cast<result_type>(y);
return true;
}
- return false;
-}
+};
-template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_integer &&
- !std::numeric_limits<T>::is_signed,
- bool>::type
-CheckedAbs(T value, T* result) {
- // T is unsigned, so |value| must already be positive.
- *result = value;
- return true;
-}
+// Min doesn't really need to be implemented this way because it can't fail,
+// but it makes the code much cleaner to use the MathOp wrappers.
+template <typename T, typename U, class Enable = void>
+struct CheckedMinOp {};
+
+template <typename T, typename U>
+struct CheckedMinOp<
+ T,
+ U,
+ typename std::enable_if<std::is_arithmetic<T>::value &&
+ std::is_arithmetic<U>::value>::type> {
+ using result_type = typename LowestValuePromotion<T, U>::type;
+ template <typename V = result_type>
+ static bool Do(T x, U y, V* result) {
+ *result = IsLess<T, U>::Test(x, y) ? static_cast<result_type>(x)
+ : static_cast<result_type>(y);
+ return true;
+ }
+};
-template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_integer &&
- std::numeric_limits<T>::is_signed,
- typename UnsignedIntegerForSize<T>::type>::type
-SafeUnsignedAbs(T value) {
- typedef typename UnsignedIntegerForSize<T>::type UnsignedT;
- return value == std::numeric_limits<T>::min()
- ? static_cast<UnsignedT>(std::numeric_limits<T>::max()) + 1
- : static_cast<UnsignedT>(std::abs(value));
-}
+// This is just boilerplate that wraps the standard floating point arithmetic.
+// A macro isn't the nicest solution, but it beats rewriting these repeatedly.
+#define BASE_FLOAT_ARITHMETIC_OPS(NAME, OP) \
+ template <typename T, typename U> \
+ struct Checked##NAME##Op< \
+ T, U, typename std::enable_if<std::is_floating_point<T>::value || \
+ std::is_floating_point<U>::value>::type> { \
+ using result_type = typename MaxExponentPromotion<T, U>::type; \
+ template <typename V> \
+ static bool Do(T x, U y, V* result) { \
+ using Promotion = typename MaxExponentPromotion<T, U>::type; \
+ Promotion presult = x OP y; \
+ *result = static_cast<V>(presult); \
+ return IsValueInRangeForNumericType<V>(presult); \
+ } \
+ };
+
+BASE_FLOAT_ARITHMETIC_OPS(Add, +)
+BASE_FLOAT_ARITHMETIC_OPS(Sub, -)
+BASE_FLOAT_ARITHMETIC_OPS(Mul, *)
+BASE_FLOAT_ARITHMETIC_OPS(Div, /)
+
+#undef BASE_FLOAT_ARITHMETIC_OPS
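
The macro expands to the same shape for every operator: perform the arithmetic in the promoted floating type, then range check before narrowing to the destination. A standalone sketch of that pattern for double operands narrowed to float; the NaN fallback mirrors the WellDefinedConversionOrNaN helper defined later in this header.

#include <cstdio>
#include <limits>

bool SketchCheckedFloatAdd(double x, double y, float* result) {
  double presult = x + y;  // The promoted type here is double.
  bool in_range = presult >= std::numeric_limits<float>::lowest() &&
                  presult <= std::numeric_limits<float>::max();
  *result = in_range ? static_cast<float>(presult)
                     : std::numeric_limits<float>::quiet_NaN();
  return in_range;
}

int main() {
  float out;
  bool ok = SketchCheckedFloatAdd(std::numeric_limits<float>::max(),
                                  std::numeric_limits<float>::max(), &out);
  std::printf("ok=%d\n", ok ? 1 : 0);  // ok=0: the sum exceeds float's range.
  return 0;
}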
+
+// Wrap the unary operations to allow SFINAE when instantiating integrals versus
+// floating points. These don't perform any overflow checking. Rather, they
+// exhibit well-defined overflow semantics and rely on the caller to detect
+// if an overflow occurred.
-template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_integer &&
- !std::numeric_limits<T>::is_signed,
- T>::type
-SafeUnsignedAbs(T value) {
- // T is unsigned, so |value| must already be positive.
- return static_cast<T>(value);
+template <typename T,
+ typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+constexpr T NegateWrapper(T value) {
+ using UnsignedT = typename std::make_unsigned<T>::type;
+ // This will compile to a NEG on Intel, and is normal negation on ARM.
+ return static_cast<T>(UnsignedT(0) - static_cast<UnsignedT>(value));
}
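
The unsigned round trip matters because negating the minimum signed value directly is undefined behavior; subtracting from zero in the unsigned type wraps instead, and converting back yields the expected two's-complement result. A small sketch for int32_t:

#include <cstdint>
#include <cstdio>

int32_t SketchNegateWrapper(int32_t value) {
  // 0u - 0x80000000u wraps to 0x80000000u; converting back gives INT32_MIN
  // on two's-complement targets instead of invoking undefined behavior.
  return static_cast<int32_t>(0u - static_cast<uint32_t>(value));
}

int main() {
  std::printf("%d\n", static_cast<int>(SketchNegateWrapper(INT32_MIN)));
  return 0;
}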
-template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_iec559, T>::type CheckedNeg(
- T value,
- bool*) {
- NOTREACHED();
- return static_cast<T>(-value);
+template <
+ typename T,
+ typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
+constexpr T NegateWrapper(T value) {
+ return -value;
}
-template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_iec559, T>::type CheckedAbs(
- T value,
- bool*) {
- NOTREACHED();
- return static_cast<T>(std::abs(value));
+template <typename T,
+ typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+constexpr typename std::make_unsigned<T>::type InvertWrapper(T value) {
+ return ~value;
}
-// These are the floating point stubs that the compiler needs to see.
-#define BASE_FLOAT_ARITHMETIC_STUBS(NAME) \
- template <typename T, typename U, typename V> \
- typename std::enable_if<std::numeric_limits<T>::is_iec559 || \
- std::numeric_limits<U>::is_iec559 || \
- std::numeric_limits<V>::is_iec559, \
- bool>::type Checked##NAME(T, U, V*) { \
- NOTREACHED(); \
- return static_cast<T>(false); \
- }
-
-BASE_FLOAT_ARITHMETIC_STUBS(Add)
-BASE_FLOAT_ARITHMETIC_STUBS(Sub)
-BASE_FLOAT_ARITHMETIC_STUBS(Mul)
-BASE_FLOAT_ARITHMETIC_STUBS(Div)
-BASE_FLOAT_ARITHMETIC_STUBS(Mod)
-
-#undef BASE_FLOAT_ARITHMETIC_STUBS
-
-template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_iec559, bool>::type
-CheckedNeg(T value, T* result) {
- *result = static_cast<T>(-value);
- return true;
+template <typename T,
+ typename std::enable_if<std::is_integral<T>::value>::type* = nullptr>
+constexpr T AbsWrapper(T value) {
+ return static_cast<T>(SafeUnsignedAbs(value));
}
-template <typename T>
-typename std::enable_if<std::numeric_limits<T>::is_iec559, bool>::type
-CheckedAbs(T value, T* result) {
- *result = static_cast<T>(std::abs(value));
- return true;
+template <
+ typename T,
+ typename std::enable_if<std::is_floating_point<T>::value>::type* = nullptr>
+constexpr T AbsWrapper(T value) {
+ return value < 0 ? -value : value;
}
// Floats carry around their validity state with them, but integers do not. So,
@@ -665,10 +517,10 @@ enum NumericRepresentation {
template <typename NumericType>
struct GetNumericRepresentation {
static const NumericRepresentation value =
- std::numeric_limits<NumericType>::is_integer
+ std::is_integral<NumericType>::value
? NUMERIC_INTEGER
- : (std::numeric_limits<NumericType>::is_iec559 ? NUMERIC_FLOATING
- : NUMERIC_UNKNOWN);
+ : (std::is_floating_point<NumericType>::value ? NUMERIC_FLOATING
+ : NUMERIC_UNKNOWN);
};
template <typename T, NumericRepresentation type =
@@ -679,39 +531,48 @@ class CheckedNumericState {};
template <typename T>
class CheckedNumericState<T, NUMERIC_INTEGER> {
private:
- T value_;
+  // is_valid_ precedes value_ because member initializers in the constructors
+ // are evaluated in field order, and is_valid_ must be read when initializing
+ // value_.
bool is_valid_;
+ T value_;
+
+ // Ensures that a type conversion does not trigger undefined behavior.
+ template <typename Src>
+ static constexpr T WellDefinedConversionOrZero(const Src value,
+ const bool is_valid) {
+ using SrcType = typename internal::UnderlyingType<Src>::type;
+ return (std::is_integral<SrcType>::value || is_valid)
+ ? static_cast<T>(value)
+ : static_cast<T>(0);
+ }
public:
template <typename Src, NumericRepresentation type>
friend class CheckedNumericState;
- CheckedNumericState() : value_(0), is_valid_(true) {}
+ constexpr CheckedNumericState() : is_valid_(true), value_(0) {}
template <typename Src>
- CheckedNumericState(Src value, bool is_valid)
- : value_(static_cast<T>(value)),
- is_valid_(is_valid &&
- (DstRangeRelationToSrcRange<T>(value) == RANGE_VALID)) {
- static_assert(std::numeric_limits<Src>::is_specialized,
- "Argument must be numeric.");
+ constexpr CheckedNumericState(Src value, bool is_valid)
+ : is_valid_(is_valid && IsValueInRangeForNumericType<T>(value)),
+ value_(WellDefinedConversionOrZero(value, is_valid_)) {
+ static_assert(std::is_arithmetic<Src>::value, "Argument must be numeric.");
}
// Copy constructor.
template <typename Src>
- CheckedNumericState(const CheckedNumericState<Src>& rhs)
- : value_(static_cast<T>(rhs.value())), is_valid_(rhs.IsValid()) {}
+ constexpr CheckedNumericState(const CheckedNumericState<Src>& rhs)
+ : is_valid_(rhs.IsValid()),
+ value_(WellDefinedConversionOrZero(rhs.value(), is_valid_)) {}
template <typename Src>
- explicit CheckedNumericState(
- Src value,
- typename std::enable_if<std::numeric_limits<Src>::is_specialized,
- int>::type = 0)
- : value_(static_cast<T>(value)),
- is_valid_(DstRangeRelationToSrcRange<T>(value) == RANGE_VALID) {}
-
- bool is_valid() const { return is_valid_; }
- T value() const { return value_; }
+ constexpr explicit CheckedNumericState(Src value)
+ : is_valid_(IsValueInRangeForNumericType<T>(value)),
+ value_(WellDefinedConversionOrZero(value, is_valid_)) {}
+
+ constexpr bool is_valid() const { return is_valid_; }
+ constexpr T value() const { return value_; }
};
// Floating points maintain their own validity, but need translation wrappers.
@@ -720,37 +581,58 @@ class CheckedNumericState<T, NUMERIC_FLOATING> {
private:
T value_;
+ // Ensures that a type conversion does not trigger undefined behavior.
+ template <typename Src>
+ static constexpr T WellDefinedConversionOrNaN(const Src value,
+ const bool is_valid) {
+ using SrcType = typename internal::UnderlyingType<Src>::type;
+ return (StaticDstRangeRelationToSrcRange<T, SrcType>::value ==
+ NUMERIC_RANGE_CONTAINED ||
+ is_valid)
+ ? static_cast<T>(value)
+ : std::numeric_limits<T>::quiet_NaN();
+ }
+
public:
template <typename Src, NumericRepresentation type>
friend class CheckedNumericState;
- CheckedNumericState() : value_(0.0) {}
+ constexpr CheckedNumericState() : value_(0.0) {}
template <typename Src>
- CheckedNumericState(
- Src value,
- bool is_valid,
- typename std::enable_if<std::numeric_limits<Src>::is_integer, int>::type =
- 0) {
- value_ = (is_valid && (DstRangeRelationToSrcRange<T>(value) == RANGE_VALID))
- ? static_cast<T>(value)
- : std::numeric_limits<T>::quiet_NaN();
- }
+ constexpr CheckedNumericState(Src value, bool is_valid)
+ : value_(WellDefinedConversionOrNaN(value, is_valid)) {}
template <typename Src>
- explicit CheckedNumericState(
- Src value,
- typename std::enable_if<std::numeric_limits<Src>::is_specialized,
- int>::type = 0)
- : value_(static_cast<T>(value)) {}
+ constexpr explicit CheckedNumericState(Src value)
+ : value_(WellDefinedConversionOrNaN(
+ value,
+ IsValueInRangeForNumericType<T>(value))) {}
// Copy constructor.
template <typename Src>
- CheckedNumericState(const CheckedNumericState<Src>& rhs)
- : value_(static_cast<T>(rhs.value())) {}
+ constexpr CheckedNumericState(const CheckedNumericState<Src>& rhs)
+ : value_(WellDefinedConversionOrNaN(
+ rhs.value(),
+ rhs.is_valid() && IsValueInRangeForNumericType<T>(rhs.value()))) {}
+
+ constexpr bool is_valid() const {
+ // Written this way because std::isfinite is not reliably constexpr.
+ // TODO(jschuh): Fix this if the libraries ever get fixed.
+ return value_ <= std::numeric_limits<T>::max() &&
+ value_ >= std::numeric_limits<T>::lowest();
+ }
+ constexpr T value() const { return value_; }
+};
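
The bounds comparison above is a constexpr-friendly stand-in for std::isfinite: NaN fails both comparisons, and the infinities fall outside [lowest(), max()]. A sketch of the same test, verifiable at compile time:

#include <limits>

template <typename T>
constexpr bool SketchIsFinite(T value) {
  return value <= std::numeric_limits<T>::max() &&
         value >= std::numeric_limits<T>::lowest();
}

static_assert(SketchIsFinite(1.5), "finite values pass");
static_assert(!SketchIsFinite(std::numeric_limits<double>::infinity()),
              "infinity is rejected");
static_assert(!SketchIsFinite(std::numeric_limits<double>::quiet_NaN()),
              "NaN compares false against both bounds");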
- bool is_valid() const { return std::isfinite(value_); }
- T value() const { return value_; }
+template <template <typename, typename, typename> class M,
+ typename L,
+ typename R>
+struct MathWrapper {
+ using math = M<typename UnderlyingType<L>::type,
+ typename UnderlyingType<R>::type,
+ void>;
+ using type = typename math::result_type;
};
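
MathWrapper is the glue that strips CheckedNumeric/StrictNumeric wrappers via UnderlyingType and selects the matching Op specialization. Purely as a hypothetical illustration for plain arithmetic operands: the helper name below is invented, and it assumes it sits in base::internal alongside the types it references.

template <typename L, typename R>
bool SketchCheckedAdd(L lhs,
                      R rhs,
                      typename MathWrapper<CheckedAddOp, L, R>::type* result) {
  // MathWrapper reduces L and R to their underlying arithmetic types and
  // exposes the corresponding CheckedAddOp specialization as ::math.
  using Wrapper = MathWrapper<CheckedAddOp, L, R>;
  return Wrapper::math::Do(lhs, rhs, result);
}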
} // namespace internal
diff --git a/chromium/base/numerics/safe_numerics_unittest.cc b/chromium/base/numerics/safe_numerics_unittest.cc
index ac7a5ba5f80..ec6d0037c9f 100644
--- a/chromium/base/numerics/safe_numerics_unittest.cc
+++ b/chromium/base/numerics/safe_numerics_unittest.cc
@@ -9,6 +9,7 @@
#include <type_traits>
#include "base/compiler_specific.h"
+#include "base/logging.h"
#include "base/numerics/safe_conversions.h"
#include "base/numerics/safe_math.h"
#include "base/test/gtest_util.h"
@@ -21,19 +22,30 @@
using std::numeric_limits;
using base::CheckedNumeric;
+using base::IsValidForType;
+using base::ValueOrDieForType;
+using base::ValueOrDefaultForType;
+using base::MakeCheckedNum;
+using base::CheckMax;
+using base::CheckMin;
+using base::CheckAdd;
+using base::CheckSub;
+using base::CheckMul;
+using base::CheckDiv;
+using base::CheckMod;
+using base::CheckLsh;
+using base::CheckRsh;
using base::checked_cast;
using base::IsValueInRangeForNumericType;
using base::IsValueNegative;
using base::SizeT;
using base::StrictNumeric;
+using base::MakeStrictNum;
using base::saturated_cast;
using base::strict_cast;
using base::internal::MaxExponent;
-using base::internal::RANGE_VALID;
-using base::internal::RANGE_INVALID;
-using base::internal::RANGE_OVERFLOW;
-using base::internal::RANGE_UNDERFLOW;
-using base::internal::SignedIntegerForSize;
+using base::internal::IntegerBitsPlusSign;
+using base::internal::RangeCheck;
// These tests deliberately cause arithmetic boundary errors. If the compiler is
// aggressive enough, it can const detect these errors, so we disable warnings.
@@ -46,8 +58,8 @@ using base::internal::SignedIntegerForSize;
// wholly represented as the destination floating-point type.
template <typename Dst, typename Src>
Dst GetMaxConvertibleToFloat() {
- typedef numeric_limits<Dst> DstLimits;
- typedef numeric_limits<Src> SrcLimits;
+ using DstLimits = numeric_limits<Dst>;
+ using SrcLimits = numeric_limits<Src>;
static_assert(SrcLimits::is_specialized, "Source must be numeric.");
static_assert(DstLimits::is_specialized, "Destination must be numeric.");
CHECK(DstLimits::is_iec559);
@@ -62,20 +74,113 @@ Dst GetMaxConvertibleToFloat() {
return static_cast<Dst>(max);
}
+namespace base {
+namespace internal {
+
+// Test corner case promotions used
+static_assert(IsIntegerArithmeticSafe<int32_t, int8_t, int8_t>::value, "");
+static_assert(IsIntegerArithmeticSafe<int32_t, int16_t, int8_t>::value, "");
+static_assert(IsIntegerArithmeticSafe<int32_t, int8_t, int16_t>::value, "");
+static_assert(!IsIntegerArithmeticSafe<int32_t, int32_t, int8_t>::value, "");
+static_assert(BigEnoughPromotion<int16_t, int8_t>::is_contained, "");
+static_assert(BigEnoughPromotion<int32_t, uint32_t>::is_contained, "");
+static_assert(BigEnoughPromotion<intmax_t, int8_t>::is_contained, "");
+static_assert(!BigEnoughPromotion<uintmax_t, int8_t>::is_contained, "");
+static_assert(
+ std::is_same<BigEnoughPromotion<int16_t, int8_t>::type, int16_t>::value,
+ "");
+static_assert(
+ std::is_same<BigEnoughPromotion<int32_t, uint32_t>::type, int64_t>::value,
+ "");
+static_assert(
+ std::is_same<BigEnoughPromotion<intmax_t, int8_t>::type, intmax_t>::value,
+ "");
+static_assert(
+ std::is_same<BigEnoughPromotion<uintmax_t, int8_t>::type, uintmax_t>::value,
+ "");
+static_assert(BigEnoughPromotion<int16_t, int8_t>::is_contained, "");
+static_assert(BigEnoughPromotion<int32_t, uint32_t>::is_contained, "");
+static_assert(BigEnoughPromotion<intmax_t, int8_t>::is_contained, "");
+static_assert(!BigEnoughPromotion<uintmax_t, int8_t>::is_contained, "");
+static_assert(
+ std::is_same<FastIntegerArithmeticPromotion<int16_t, int8_t>::type,
+ int32_t>::value,
+ "");
+static_assert(
+ std::is_same<FastIntegerArithmeticPromotion<int32_t, uint32_t>::type,
+ int64_t>::value,
+ "");
+static_assert(
+ std::is_same<FastIntegerArithmeticPromotion<intmax_t, int8_t>::type,
+ intmax_t>::value,
+ "");
+static_assert(
+ std::is_same<FastIntegerArithmeticPromotion<uintmax_t, int8_t>::type,
+ uintmax_t>::value,
+ "");
+static_assert(FastIntegerArithmeticPromotion<int16_t, int8_t>::is_contained,
+ "");
+static_assert(FastIntegerArithmeticPromotion<int32_t, uint32_t>::is_contained,
+ "");
+static_assert(!FastIntegerArithmeticPromotion<intmax_t, int8_t>::is_contained,
+ "");
+static_assert(!FastIntegerArithmeticPromotion<uintmax_t, int8_t>::is_contained,
+ "");
+
+template <typename U>
+U GetNumericValueForTest(const CheckedNumeric<U>& src) {
+ return src.state_.value();
+}
+} // namespace internal.
+} // namespace base.
+
+using base::internal::GetNumericValueForTest;
+
+// Logs the ValueOrDie() failure instead of crashing.
+struct LogOnFailure {
+ template <typename T>
+ static T HandleFailure() {
+ LOG(WARNING) << "ValueOrDie() failed unexpectedly.";
+ return T();
+ }
+};
+
// Helper macros to wrap displaying the conversion types and line numbers.
#define TEST_EXPECTED_VALIDITY(expected, actual) \
- EXPECT_EQ(expected, CheckedNumeric<Dst>(actual).IsValid()) \
- << "Result test: Value " << +(actual).ValueUnsafe() << " as " << dst \
- << " on line " << line
+ EXPECT_EQ(expected, (actual).template Cast<Dst>().IsValid()) \
+ << "Result test: Value " << GetNumericValueForTest(actual) << " as " \
+ << dst << " on line " << line
#define TEST_EXPECTED_SUCCESS(actual) TEST_EXPECTED_VALIDITY(true, actual)
#define TEST_EXPECTED_FAILURE(actual) TEST_EXPECTED_VALIDITY(false, actual)
-#define TEST_EXPECTED_VALUE(expected, actual) \
- EXPECT_EQ(static_cast<Dst>(expected), \
- CheckedNumeric<Dst>(actual).ValueOrDie()) \
- << "Result test: Value " << +((actual).ValueUnsafe()) << " as " << dst \
- << " on line " << line
+// We have to handle promotions, so infer the underlying type below from actual.
+#define TEST_EXPECTED_VALUE(expected, actual) \
+ EXPECT_EQ(static_cast<typename std::decay<decltype(actual)>::type::type>( \
+ expected), \
+ ((actual) \
+ .template ValueOrDie< \
+ typename std::decay<decltype(actual)>::type::type, \
+ LogOnFailure>())) \
+ << "Result test: Value " << GetNumericValueForTest(actual) << " as " \
+ << dst << " on line " << line
+
+// Test the simple pointer arithmetic overrides.
+template <typename Dst>
+void TestStrictPointerMath() {
+ Dst dummy_value = 0;
+ Dst* dummy_ptr = &dummy_value;
+ static const Dst kDummyOffset = 2; // Don't want to go too far.
+ EXPECT_EQ(dummy_ptr + kDummyOffset,
+ dummy_ptr + StrictNumeric<Dst>(kDummyOffset));
+ EXPECT_EQ(dummy_ptr - kDummyOffset,
+ dummy_ptr - StrictNumeric<Dst>(kDummyOffset));
+ EXPECT_NE(dummy_ptr, dummy_ptr + StrictNumeric<Dst>(kDummyOffset));
+ EXPECT_NE(dummy_ptr, dummy_ptr - StrictNumeric<Dst>(kDummyOffset));
+ EXPECT_DEATH_IF_SUPPORTED(
+ dummy_ptr + StrictNumeric<size_t>(std::numeric_limits<size_t>::max()),
+ "");
+}
// Signed integer arithmetic.
template <typename Dst>
@@ -85,35 +190,52 @@ static void TestSpecializedArithmetic(
typename std::enable_if<numeric_limits<Dst>::is_integer &&
numeric_limits<Dst>::is_signed,
int>::type = 0) {
- typedef numeric_limits<Dst> DstLimits;
- TEST_EXPECTED_FAILURE(-CheckedNumeric<Dst>(DstLimits::min()));
- TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()).Abs());
+ using DstLimits = numeric_limits<Dst>;
+ TEST_EXPECTED_FAILURE(-CheckedNumeric<Dst>(DstLimits::lowest()));
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()).Abs());
TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(-1).Abs());
+ TEST_EXPECTED_VALUE(DstLimits::max(),
+ MakeCheckedNum(-DstLimits::max()).Abs());
TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::max()) + -1);
- TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) + -1);
- TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(-DstLimits::max()) +
- -DstLimits::max());
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) + -1);
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) +
+ DstLimits::lowest());
- TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) - 1);
- TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()) - -1);
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) - 1);
+ TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::lowest()) - -1);
TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) -
- -DstLimits::max());
- TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(-DstLimits::max()) -
+ DstLimits::lowest());
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) -
DstLimits::max());
- TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) * 2);
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) * 2);
- TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) / -1);
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) / -1);
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(-1) / 2);
- TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) * -1);
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) * -1);
+ TEST_EXPECTED_VALUE(DstLimits::max(),
+ CheckedNumeric<Dst>(DstLimits::lowest() + 1) * Dst(-1));
+ TEST_EXPECTED_VALUE(DstLimits::max(),
+ CheckedNumeric<Dst>(-1) * Dst(DstLimits::lowest() + 1));
+ TEST_EXPECTED_VALUE(DstLimits::lowest(),
+ CheckedNumeric<Dst>(DstLimits::lowest()) * Dst(1));
+ TEST_EXPECTED_VALUE(DstLimits::lowest(),
+ CheckedNumeric<Dst>(1) * Dst(DstLimits::lowest()));
+ TEST_EXPECTED_VALUE(DstLimits::lowest(),
+ MakeCheckedNum(DstLimits::lowest()).UnsignedAbs());
+ TEST_EXPECTED_VALUE(DstLimits::max(),
+ MakeCheckedNum(DstLimits::max()).UnsignedAbs());
+ TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(0).UnsignedAbs());
+ TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1).UnsignedAbs());
+ TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(-1).UnsignedAbs());
// Modulus is legal only for integers.
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>() % 1);
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) % 1);
TEST_EXPECTED_VALUE(-1, CheckedNumeric<Dst>(-1) % 2);
TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(-1) % -2);
- TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(DstLimits::min()) % 2);
+ TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(DstLimits::lowest()) % 2);
TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(DstLimits::max()) % 2);
// Test all the different modulus combinations.
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) % CheckedNumeric<Dst>(1));
@@ -126,19 +248,25 @@ static void TestSpecializedArithmetic(
// Test bit shifts.
volatile Dst negative_one = -1;
TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) << negative_one);
- TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) << (sizeof(Dst) * CHAR_BIT - 1));
- TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(0) << (sizeof(Dst) * CHAR_BIT));
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1)
+ << (IntegerBitsPlusSign<Dst>::value - 1));
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(0)
+ << IntegerBitsPlusSign<Dst>::value);
TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) << 1);
- TEST_EXPECTED_VALUE(static_cast<Dst>(1) << (sizeof(Dst) * CHAR_BIT - 2),
- CheckedNumeric<Dst>(1) << (sizeof(Dst) * CHAR_BIT - 2));
+ TEST_EXPECTED_VALUE(
+ static_cast<Dst>(1) << (IntegerBitsPlusSign<Dst>::value - 2),
+ CheckedNumeric<Dst>(1) << (IntegerBitsPlusSign<Dst>::value - 2));
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(0)
- << (sizeof(Dst) * CHAR_BIT - 1));
+ << (IntegerBitsPlusSign<Dst>::value - 1));
TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) << 0);
TEST_EXPECTED_VALUE(2, CheckedNumeric<Dst>(1) << 1);
- TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) >> (sizeof(Dst) * CHAR_BIT));
- TEST_EXPECTED_VALUE(0,
- CheckedNumeric<Dst>(1) >> (sizeof(Dst) * CHAR_BIT - 1));
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) >>
+ IntegerBitsPlusSign<Dst>::value);
+ TEST_EXPECTED_VALUE(
+ 0, CheckedNumeric<Dst>(1) >> (IntegerBitsPlusSign<Dst>::value - 1));
TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) >> negative_one);
+
+ TestStrictPointerMath<Dst>();
}
// Unsigned integer arithmetic.
@@ -149,24 +277,30 @@ static void TestSpecializedArithmetic(
typename std::enable_if<numeric_limits<Dst>::is_integer &&
!numeric_limits<Dst>::is_signed,
int>::type = 0) {
- typedef numeric_limits<Dst> DstLimits;
- TEST_EXPECTED_SUCCESS(-CheckedNumeric<Dst>(DstLimits::min()));
- TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()).Abs());
- TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) + -1);
- TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::min()) - 1);
- TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(DstLimits::min()) * 2);
+ using DstLimits = numeric_limits<Dst>;
+ TEST_EXPECTED_SUCCESS(-CheckedNumeric<Dst>(DstLimits::lowest()));
+ TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::lowest()).Abs());
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) + -1);
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) - 1);
+ TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(DstLimits::lowest()) * 2);
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) / 2);
- TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()).UnsignedAbs());
+ TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::lowest()).UnsignedAbs());
TEST_EXPECTED_SUCCESS(
- CheckedNumeric<typename SignedIntegerForSize<Dst>::type>(
- std::numeric_limits<typename SignedIntegerForSize<Dst>::type>::min())
+ CheckedNumeric<typename std::make_signed<Dst>::type>(
+ std::numeric_limits<typename std::make_signed<Dst>::type>::lowest())
.UnsignedAbs());
+ TEST_EXPECTED_VALUE(DstLimits::lowest(),
+ MakeCheckedNum(DstLimits::lowest()).UnsignedAbs());
+ TEST_EXPECTED_VALUE(DstLimits::max(),
+ MakeCheckedNum(DstLimits::max()).UnsignedAbs());
+ TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(0).UnsignedAbs());
+ TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1).UnsignedAbs());
// Modulus is legal only for integers.
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>() % 1);
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) % 1);
TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) % 2);
- TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(DstLimits::min()) % 2);
+ TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(DstLimits::lowest()) % 2);
TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(DstLimits::max()) % 2);
// Test all the different modulus combinations.
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) % CheckedNumeric<Dst>(1));
@@ -176,21 +310,47 @@ static void TestSpecializedArithmetic(
TEST_EXPECTED_VALUE(0, checked_dst %= 1);
// Test that div by 0 is avoided but returns invalid result.
TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) % 0);
- TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) << (sizeof(Dst) * CHAR_BIT));
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1)
+ << IntegerBitsPlusSign<Dst>::value);
// Test bit shifts.
volatile int negative_one = -1;
TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) << negative_one);
- TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) << (sizeof(Dst) * CHAR_BIT));
- TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(0) << (sizeof(Dst) * CHAR_BIT));
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1)
+ << IntegerBitsPlusSign<Dst>::value);
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(0)
+ << IntegerBitsPlusSign<Dst>::value);
TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) << 1);
- TEST_EXPECTED_VALUE(static_cast<Dst>(1) << (sizeof(Dst) * CHAR_BIT - 1),
- CheckedNumeric<Dst>(1) << (sizeof(Dst) * CHAR_BIT - 1));
+ TEST_EXPECTED_VALUE(
+ static_cast<Dst>(1) << (IntegerBitsPlusSign<Dst>::value - 1),
+ CheckedNumeric<Dst>(1) << (IntegerBitsPlusSign<Dst>::value - 1));
TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) << 0);
TEST_EXPECTED_VALUE(2, CheckedNumeric<Dst>(1) << 1);
- TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) >> (sizeof(Dst) * CHAR_BIT));
- TEST_EXPECTED_VALUE(0,
- CheckedNumeric<Dst>(1) >> (sizeof(Dst) * CHAR_BIT - 1));
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) >>
+ IntegerBitsPlusSign<Dst>::value);
+ TEST_EXPECTED_VALUE(
+ 0, CheckedNumeric<Dst>(1) >> (IntegerBitsPlusSign<Dst>::value - 1));
TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) >> negative_one);
+ TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) & 1);
+ TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) & 0);
+ TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(0) & 1);
+ TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) & 0);
+ TEST_EXPECTED_VALUE(std::numeric_limits<Dst>::max(),
+ MakeCheckedNum(DstLimits::max()) & -1);
+ TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) | 1);
+ TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) | 0);
+ TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(0) | 1);
+ TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(0) | 0);
+ TEST_EXPECTED_VALUE(std::numeric_limits<Dst>::max(),
+ CheckedNumeric<Dst>(0) | static_cast<Dst>(-1));
+ TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) ^ 1);
+ TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) ^ 0);
+ TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(0) ^ 1);
+ TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(0) ^ 0);
+ TEST_EXPECTED_VALUE(std::numeric_limits<Dst>::max(),
+ CheckedNumeric<Dst>(0) ^ static_cast<Dst>(-1));
+ TEST_EXPECTED_VALUE(DstLimits::max(), ~CheckedNumeric<Dst>(0));
+
+ TestStrictPointerMath<Dst>();
}
// Floating point arithmetic.
@@ -199,32 +359,31 @@ void TestSpecializedArithmetic(
const char* dst,
int line,
typename std::enable_if<numeric_limits<Dst>::is_iec559, int>::type = 0) {
- typedef numeric_limits<Dst> DstLimits;
- TEST_EXPECTED_SUCCESS(-CheckedNumeric<Dst>(DstLimits::min()));
+ using DstLimits = numeric_limits<Dst>;
+ TEST_EXPECTED_SUCCESS(-CheckedNumeric<Dst>(DstLimits::lowest()));
- TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()).Abs());
+ TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::lowest()).Abs());
TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(-1).Abs());
- TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()) + -1);
+ TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::lowest()) + -1);
TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::max()) + 1);
- TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(-DstLimits::max()) +
- -DstLimits::max());
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) +
+ DstLimits::lowest());
TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) -
- -DstLimits::max());
- TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(-DstLimits::max()) -
+ DstLimits::lowest());
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) -
DstLimits::max());
- TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()) * 2);
+ TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::lowest()) * 2);
TEST_EXPECTED_VALUE(-0.5, CheckedNumeric<Dst>(-1.0) / 2);
- EXPECT_EQ(static_cast<Dst>(1.0), CheckedNumeric<Dst>(1.0).ValueFloating());
}
// Generic arithmetic tests.
template <typename Dst>
static void TestArithmetic(const char* dst, int line) {
- typedef numeric_limits<Dst> DstLimits;
+ using DstLimits = numeric_limits<Dst>;
EXPECT_EQ(true, CheckedNumeric<Dst>().IsValid());
EXPECT_EQ(false,
@@ -278,7 +437,7 @@ static void TestArithmetic(const char* dst, int line) {
TEST_EXPECTED_VALUE(2, (CheckedNumeric<Dst>(1) + 1));
if (numeric_limits<Dst>::is_signed)
TEST_EXPECTED_VALUE(0, (CheckedNumeric<Dst>(-1) + 1));
- TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::min()) + 1);
+ TEST_EXPECTED_SUCCESS(CheckedNumeric<Dst>(DstLimits::lowest()) + 1);
TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(DstLimits::max()) +
DstLimits::max());
@@ -311,8 +470,8 @@ static void TestArithmetic(const char* dst, int line) {
// Generic division.
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>() / 1);
TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) / 1);
- TEST_EXPECTED_VALUE(DstLimits::min() / 2,
- CheckedNumeric<Dst>(DstLimits::min()) / 2);
+ TEST_EXPECTED_VALUE(DstLimits::lowest() / 2,
+ CheckedNumeric<Dst>(DstLimits::lowest()) / 2);
TEST_EXPECTED_VALUE(DstLimits::max() / 2,
CheckedNumeric<Dst>(DstLimits::max()) / 2);
@@ -354,28 +513,114 @@ enum NumericConversionType {
template <typename Dst, typename Src, NumericConversionType conversion>
struct TestNumericConversion {};
+enum RangeConstraint {
+ RANGE_VALID = 0x0, // Value can be represented by the destination type.
+ RANGE_UNDERFLOW = 0x1, // Value would underflow.
+ RANGE_OVERFLOW = 0x2, // Value would overflow.
+ RANGE_INVALID = RANGE_UNDERFLOW | RANGE_OVERFLOW // Invalid (i.e. NaN).
+};
+
+// These are some wrappers to make the tests a bit cleaner.
+constexpr RangeConstraint RangeCheckToEnum(const RangeCheck constraint) {
+ return static_cast<RangeConstraint>(
+ static_cast<int>(constraint.IsOverflowFlagSet()) << 1 |
+ static_cast<int>(constraint.IsUnderflowFlagSet()));
+}
+
// EXPECT_EQ wrappers providing specific detail on test failures.
-#define TEST_EXPECTED_RANGE(expected, actual) \
- EXPECT_EQ(expected, base::internal::DstRangeRelationToSrcRange<Dst>(actual)) \
- << "Conversion test: " << src << " value " << actual << " to " << dst \
+#define TEST_EXPECTED_RANGE(expected, actual) \
+ EXPECT_EQ(expected, \
+ RangeCheckToEnum( \
+ base::internal::DstRangeRelationToSrcRange<Dst>(actual))) \
+ << "Conversion test: " << src << " value " << actual << " to " << dst \
<< " on line " << line
template <typename Dst, typename Src>
+void TestStrictComparison() {
+ using DstLimits = numeric_limits<Dst>;
+ using SrcLimits = numeric_limits<Src>;
+ static_assert(StrictNumeric<Src>(SrcLimits::lowest()) < DstLimits::max(), "");
+ static_assert(StrictNumeric<Src>(SrcLimits::lowest()) < SrcLimits::max(), "");
+ static_assert(!(StrictNumeric<Src>(SrcLimits::lowest()) >= DstLimits::max()),
+ "");
+ static_assert(!(StrictNumeric<Src>(SrcLimits::lowest()) >= SrcLimits::max()),
+ "");
+ static_assert(StrictNumeric<Src>(SrcLimits::lowest()) <= DstLimits::max(),
+ "");
+ static_assert(StrictNumeric<Src>(SrcLimits::lowest()) <= SrcLimits::max(),
+ "");
+ static_assert(!(StrictNumeric<Src>(SrcLimits::lowest()) > DstLimits::max()),
+ "");
+ static_assert(!(StrictNumeric<Src>(SrcLimits::lowest()) > SrcLimits::max()),
+ "");
+ static_assert(StrictNumeric<Src>(SrcLimits::max()) > DstLimits::lowest(), "");
+ static_assert(StrictNumeric<Src>(SrcLimits::max()) > SrcLimits::lowest(), "");
+ static_assert(!(StrictNumeric<Src>(SrcLimits::max()) <= DstLimits::lowest()),
+ "");
+ static_assert(!(StrictNumeric<Src>(SrcLimits::max()) <= SrcLimits::lowest()),
+ "");
+ static_assert(StrictNumeric<Src>(SrcLimits::max()) >= DstLimits::lowest(),
+ "");
+ static_assert(StrictNumeric<Src>(SrcLimits::max()) >= SrcLimits::lowest(),
+ "");
+ static_assert(!(StrictNumeric<Src>(SrcLimits::max()) < DstLimits::lowest()),
+ "");
+ static_assert(!(StrictNumeric<Src>(SrcLimits::max()) < SrcLimits::lowest()),
+ "");
+ static_assert(StrictNumeric<Src>(static_cast<Src>(1)) == static_cast<Dst>(1),
+ "");
+ static_assert(StrictNumeric<Src>(static_cast<Src>(1)) != static_cast<Dst>(0),
+ "");
+ static_assert(StrictNumeric<Src>(SrcLimits::max()) != static_cast<Dst>(0),
+ "");
+ static_assert(StrictNumeric<Src>(SrcLimits::max()) != DstLimits::lowest(),
+ "");
+ static_assert(
+ !(StrictNumeric<Src>(static_cast<Src>(1)) != static_cast<Dst>(1)), "");
+ static_assert(
+ !(StrictNumeric<Src>(static_cast<Src>(1)) == static_cast<Dst>(0)), "");
+
+ // Due to differences in float handling between compilers, these aren't
+ // compile-time constants everywhere. So, we use run-time tests.
+ EXPECT_EQ(
+ SrcLimits::max(),
+ MakeCheckedNum(SrcLimits::max()).Max(DstLimits::lowest()).ValueOrDie());
+ EXPECT_EQ(
+ DstLimits::max(),
+ MakeCheckedNum(SrcLimits::lowest()).Max(DstLimits::max()).ValueOrDie());
+ EXPECT_EQ(
+ DstLimits::lowest(),
+ MakeCheckedNum(SrcLimits::max()).Min(DstLimits::lowest()).ValueOrDie());
+ EXPECT_EQ(
+ SrcLimits::lowest(),
+ MakeCheckedNum(SrcLimits::lowest()).Min(DstLimits::max()).ValueOrDie());
+ EXPECT_EQ(SrcLimits::lowest(), CheckMin(MakeStrictNum(1), MakeCheckedNum(0),
+ DstLimits::max(), SrcLimits::lowest())
+ .ValueOrDie());
+ EXPECT_EQ(DstLimits::max(), CheckMax(MakeStrictNum(1), MakeCheckedNum(0),
+ DstLimits::max(), SrcLimits::lowest())
+ .ValueOrDie());
+}
+
+template <typename Dst, typename Src>
struct TestNumericConversion<Dst, Src, SIGN_PRESERVING_VALUE_PRESERVING> {
static void Test(const char *dst, const char *src, int line) {
- typedef numeric_limits<Src> SrcLimits;
- typedef numeric_limits<Dst> DstLimits;
- // Integral to floating.
+ using SrcLimits = numeric_limits<Src>;
+ using DstLimits = numeric_limits<Dst>;
+ // Integral to floating.
static_assert((DstLimits::is_iec559 && SrcLimits::is_integer) ||
- // Not floating to integral and...
- (!(DstLimits::is_integer && SrcLimits::is_iec559) &&
- // Same sign, same numeric, source is narrower or same.
- ((SrcLimits::is_signed == DstLimits::is_signed &&
- sizeof(Dst) >= sizeof(Src)) ||
- // Or signed destination and source is smaller
- (DstLimits::is_signed && sizeof(Dst) > sizeof(Src)))),
+ // Not floating to integral and...
+ (!(DstLimits::is_integer && SrcLimits::is_iec559) &&
+ // Same sign, same numeric, source is narrower or same.
+ ((SrcLimits::is_signed == DstLimits::is_signed &&
+ MaxExponent<Dst>::value >= MaxExponent<Src>::value) ||
+ // Or signed destination and source is smaller
+ (DstLimits::is_signed &&
+ MaxExponent<Dst>::value >= MaxExponent<Src>::value))),
"Comparison must be sign preserving and value preserving");
+ TestStrictComparison<Dst, Src>();
+
const CheckedNumeric<Dst> checked_dst = SrcLimits::max();
TEST_EXPECTED_SUCCESS(checked_dst);
if (MaxExponent<Dst>::value > MaxExponent<Src>::value) {
@@ -400,7 +645,7 @@ struct TestNumericConversion<Dst, Src, SIGN_PRESERVING_VALUE_PRESERVING> {
TEST_EXPECTED_RANGE(RANGE_INVALID, SrcLimits::quiet_NaN());
} else if (numeric_limits<Src>::is_signed) {
TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(-1));
- TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::min());
+ TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::lowest());
}
}
};
@@ -408,14 +653,15 @@ struct TestNumericConversion<Dst, Src, SIGN_PRESERVING_VALUE_PRESERVING> {
template <typename Dst, typename Src>
struct TestNumericConversion<Dst, Src, SIGN_PRESERVING_NARROW> {
static void Test(const char *dst, const char *src, int line) {
- typedef numeric_limits<Src> SrcLimits;
- typedef numeric_limits<Dst> DstLimits;
+ using SrcLimits = numeric_limits<Src>;
+ using DstLimits = numeric_limits<Dst>;
static_assert(SrcLimits::is_signed == DstLimits::is_signed,
"Destination and source sign must be the same");
- static_assert(sizeof(Dst) < sizeof(Src) ||
- (DstLimits::is_integer && SrcLimits::is_iec559),
+ static_assert(MaxExponent<Dst>::value <= MaxExponent<Src>::value,
"Destination must be narrower than source");
+ TestStrictComparison<Dst, Src>();
+
const CheckedNumeric<Dst> checked_dst;
TEST_EXPECTED_FAILURE(checked_dst + SrcLimits::max());
TEST_EXPECTED_VALUE(1, checked_dst + static_cast<Src>(1));
@@ -439,15 +685,15 @@ struct TestNumericConversion<Dst, Src, SIGN_PRESERVING_NARROW> {
TEST_EXPECTED_RANGE(
RANGE_VALID,
static_cast<Src>(GetMaxConvertibleToFloat<Src, Dst>()));
- TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(DstLimits::min()));
+ TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(DstLimits::lowest()));
}
} else if (SrcLimits::is_signed) {
TEST_EXPECTED_VALUE(-1, checked_dst - static_cast<Src>(1));
- TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::min());
+ TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::lowest());
TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(-1));
} else {
TEST_EXPECTED_FAILURE(checked_dst - static_cast<Src>(1));
- TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::min());
+ TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::lowest());
}
}
};
@@ -455,19 +701,21 @@ struct TestNumericConversion<Dst, Src, SIGN_PRESERVING_NARROW> {
template <typename Dst, typename Src>
struct TestNumericConversion<Dst, Src, SIGN_TO_UNSIGN_WIDEN_OR_EQUAL> {
static void Test(const char *dst, const char *src, int line) {
- typedef numeric_limits<Src> SrcLimits;
- typedef numeric_limits<Dst> DstLimits;
- static_assert(sizeof(Dst) >= sizeof(Src),
+ using SrcLimits = numeric_limits<Src>;
+ using DstLimits = numeric_limits<Dst>;
+ static_assert(MaxExponent<Dst>::value >= MaxExponent<Src>::value,
"Destination must be equal or wider than source.");
static_assert(SrcLimits::is_signed, "Source must be signed");
static_assert(!DstLimits::is_signed, "Destination must be unsigned");
+ TestStrictComparison<Dst, Src>();
+
const CheckedNumeric<Dst> checked_dst;
TEST_EXPECTED_VALUE(SrcLimits::max(), checked_dst + SrcLimits::max());
TEST_EXPECTED_FAILURE(checked_dst + static_cast<Src>(-1));
- TEST_EXPECTED_FAILURE(checked_dst + -SrcLimits::max());
+ TEST_EXPECTED_FAILURE(checked_dst + SrcLimits::lowest());
- TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::min());
+ TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::lowest());
TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::max());
TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(1));
TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, static_cast<Src>(-1));
@@ -477,24 +725,32 @@ struct TestNumericConversion<Dst, Src, SIGN_TO_UNSIGN_WIDEN_OR_EQUAL> {
template <typename Dst, typename Src>
struct TestNumericConversion<Dst, Src, SIGN_TO_UNSIGN_NARROW> {
static void Test(const char *dst, const char *src, int line) {
- typedef numeric_limits<Src> SrcLimits;
- typedef numeric_limits<Dst> DstLimits;
- static_assert((DstLimits::is_integer && SrcLimits::is_iec559) ||
- (sizeof(Dst) < sizeof(Src)),
+ using SrcLimits = numeric_limits<Src>;
+ using DstLimits = numeric_limits<Dst>;
+ static_assert(MaxExponent<Dst>::value < MaxExponent<Src>::value,
"Destination must be narrower than source.");
static_assert(SrcLimits::is_signed, "Source must be signed.");
static_assert(!DstLimits::is_signed, "Destination must be unsigned.");
+ TestStrictComparison<Dst, Src>();
+
const CheckedNumeric<Dst> checked_dst;
TEST_EXPECTED_VALUE(1, checked_dst + static_cast<Src>(1));
TEST_EXPECTED_FAILURE(checked_dst + SrcLimits::max());
TEST_EXPECTED_FAILURE(checked_dst + static_cast<Src>(-1));
- TEST_EXPECTED_FAILURE(checked_dst + -SrcLimits::max());
+ TEST_EXPECTED_FAILURE(checked_dst + SrcLimits::lowest());
TEST_EXPECTED_RANGE(RANGE_OVERFLOW, SrcLimits::max());
TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(1));
TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, static_cast<Src>(-1));
+
+ // Additional saturation tests.
+ EXPECT_EQ(DstLimits::max(), saturated_cast<Dst>(SrcLimits::max())) << src;
+ EXPECT_EQ(DstLimits::lowest(), saturated_cast<Dst>(SrcLimits::lowest()));
+
if (SrcLimits::is_iec559) {
+ EXPECT_EQ(Dst(0), saturated_cast<Dst>(SrcLimits::quiet_NaN()));
+
TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::max() * -1);
TEST_EXPECTED_RANGE(RANGE_OVERFLOW, SrcLimits::infinity());
TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::infinity() * -1);
@@ -509,10 +765,10 @@ struct TestNumericConversion<Dst, Src, SIGN_TO_UNSIGN_NARROW> {
TEST_EXPECTED_RANGE(
RANGE_VALID,
static_cast<Src>(GetMaxConvertibleToFloat<Src, Dst>()));
- TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(DstLimits::min()));
+ TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(DstLimits::lowest()));
}
} else {
- TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::min());
+ TEST_EXPECTED_RANGE(RANGE_UNDERFLOW, SrcLimits::lowest());
}
}
};
@@ -520,21 +776,27 @@ struct TestNumericConversion<Dst, Src, SIGN_TO_UNSIGN_NARROW> {
template <typename Dst, typename Src>
struct TestNumericConversion<Dst, Src, UNSIGN_TO_SIGN_NARROW_OR_EQUAL> {
static void Test(const char *dst, const char *src, int line) {
- typedef numeric_limits<Src> SrcLimits;
- typedef numeric_limits<Dst> DstLimits;
- static_assert(sizeof(Dst) <= sizeof(Src),
+ using SrcLimits = numeric_limits<Src>;
+ using DstLimits = numeric_limits<Dst>;
+ static_assert(MaxExponent<Dst>::value <= MaxExponent<Src>::value,
"Destination must be narrower or equal to source.");
static_assert(!SrcLimits::is_signed, "Source must be unsigned.");
static_assert(DstLimits::is_signed, "Destination must be signed.");
+ TestStrictComparison<Dst, Src>();
+
const CheckedNumeric<Dst> checked_dst;
TEST_EXPECTED_VALUE(1, checked_dst + static_cast<Src>(1));
TEST_EXPECTED_FAILURE(checked_dst + SrcLimits::max());
- TEST_EXPECTED_VALUE(SrcLimits::min(), checked_dst + SrcLimits::min());
+ TEST_EXPECTED_VALUE(SrcLimits::lowest(), checked_dst + SrcLimits::lowest());
- TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::min());
+ TEST_EXPECTED_RANGE(RANGE_VALID, SrcLimits::lowest());
TEST_EXPECTED_RANGE(RANGE_OVERFLOW, SrcLimits::max());
TEST_EXPECTED_RANGE(RANGE_VALID, static_cast<Src>(1));
+
+ // Additional saturation tests.
+ EXPECT_EQ(DstLimits::max(), saturated_cast<Dst>(SrcLimits::max()));
+ EXPECT_EQ(Dst(0), saturated_cast<Dst>(SrcLimits::lowest()));
}
};
@@ -633,6 +895,43 @@ TEST(SafeNumerics, SizeTOperations) {
TEST_NUMERIC_CONVERSION(int, size_t, UNSIGN_TO_SIGN_NARROW_OR_EQUAL);
}
+// A one-off test to ensure StrictNumeric won't resolve to an incorrect type.
+// If this fails we'll just get a compiler error on an ambiguous overload.
+int TestOverload(int) { // Overload fails.
+ return 0;
+}
+uint8_t TestOverload(uint8_t) { // Overload fails.
+ return 0;
+}
+size_t TestOverload(size_t) { // Overload succeeds.
+ return 0;
+}
+
+static_assert(
+ std::is_same<decltype(TestOverload(StrictNumeric<int>())), int>::value,
+ "");
+static_assert(std::is_same<decltype(TestOverload(StrictNumeric<size_t>())),
+ size_t>::value,
+ "");
+
+template <typename T>
+struct CastTest1 {
+ static constexpr T NaN() { return -1; }
+ static constexpr T max() { return numeric_limits<T>::max() - 1; }
+ static constexpr T Overflow() { return max(); }
+ static constexpr T lowest() { return numeric_limits<T>::lowest() + 1; }
+ static constexpr T Underflow() { return lowest(); }
+};
+
+template <typename T>
+struct CastTest2 {
+ static constexpr T NaN() { return 11; }
+ static constexpr T max() { return 10; }
+ static constexpr T Overflow() { return max(); }
+ static constexpr T lowest() { return 1; }
+ static constexpr T Underflow() { return lowest(); }
+};
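+// Custom saturation handlers for saturated_cast<Dst, Handler>(): max() and
+// lowest() bound the representable range, and NaN(), Overflow() and
+// Underflow() are the values returned for not-a-number, too-large and
+// too-small inputs (see the expectations in the test below).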
+
TEST(SafeNumerics, CastTests) {
// MSVC catches and warns that we're forcing saturation in these tests.
// Since that's intentional, we need to shut this warning off.
@@ -646,7 +945,7 @@ TEST(SafeNumerics, CastTests) {
double double_large = numeric_limits<double>::max();
double double_infinity = numeric_limits<float>::infinity();
double double_large_int = numeric_limits<int>::max();
- double double_small_int = numeric_limits<int>::min();
+ double double_small_int = numeric_limits<int>::lowest();
// Just test that the casts compile, since the other tests cover logic.
EXPECT_EQ(0, checked_cast<int>(static_cast<size_t>(0)));
@@ -662,9 +961,9 @@ TEST(SafeNumerics, CastTests) {
EXPECT_FALSE(CheckedNumeric<unsigned>(StrictNumeric<int>(-1)).IsValid());
EXPECT_TRUE(IsValueNegative(-1));
- EXPECT_TRUE(IsValueNegative(numeric_limits<int>::min()));
- EXPECT_FALSE(IsValueNegative(numeric_limits<unsigned>::min()));
- EXPECT_TRUE(IsValueNegative(-numeric_limits<double>::max()));
+ EXPECT_TRUE(IsValueNegative(numeric_limits<int>::lowest()));
+ EXPECT_FALSE(IsValueNegative(numeric_limits<unsigned>::lowest()));
+ EXPECT_TRUE(IsValueNegative(numeric_limits<double>::lowest()));
EXPECT_FALSE(IsValueNegative(0));
EXPECT_FALSE(IsValueNegative(1));
EXPECT_FALSE(IsValueNegative(0u));
@@ -691,22 +990,81 @@ TEST(SafeNumerics, CastTests) {
EXPECT_EQ(saturated_cast<int>(double_large), numeric_limits<int>::max());
EXPECT_EQ(saturated_cast<float>(double_large), double_infinity);
EXPECT_EQ(saturated_cast<float>(-double_large), -double_infinity);
- EXPECT_EQ(numeric_limits<int>::min(), saturated_cast<int>(double_small_int));
+ EXPECT_EQ(numeric_limits<int>::lowest(),
+ saturated_cast<int>(double_small_int));
EXPECT_EQ(numeric_limits<int>::max(), saturated_cast<int>(double_large_int));
+ // Test the saturated cast overrides.
+ using FloatLimits = numeric_limits<float>;
+ using IntLimits = numeric_limits<int>;
+ EXPECT_EQ(-1, (saturated_cast<int, CastTest1>(FloatLimits::quiet_NaN())));
+ EXPECT_EQ(CastTest1<int>::max(),
+ (saturated_cast<int, CastTest1>(FloatLimits::infinity())));
+ EXPECT_EQ(CastTest1<int>::max(),
+ (saturated_cast<int, CastTest1>(FloatLimits::max())));
+ EXPECT_EQ(CastTest1<int>::max(),
+ (saturated_cast<int, CastTest1>(float(IntLimits::max()))));
+ EXPECT_EQ(CastTest1<int>::lowest(),
+ (saturated_cast<int, CastTest1>(-FloatLimits::infinity())));
+ EXPECT_EQ(CastTest1<int>::lowest(),
+ (saturated_cast<int, CastTest1>(FloatLimits::lowest())));
+ EXPECT_EQ(0, (saturated_cast<int, CastTest1>(0.0)));
+ EXPECT_EQ(1, (saturated_cast<int, CastTest1>(1.0)));
+ EXPECT_EQ(-1, (saturated_cast<int, CastTest1>(-1.0)));
+ EXPECT_EQ(0, (saturated_cast<int, CastTest1>(0)));
+ EXPECT_EQ(1, (saturated_cast<int, CastTest1>(1)));
+ EXPECT_EQ(-1, (saturated_cast<int, CastTest1>(-1)));
+ EXPECT_EQ(CastTest1<int>::lowest(),
+ (saturated_cast<int, CastTest1>(float(IntLimits::lowest()))));
+ EXPECT_EQ(11, (saturated_cast<int, CastTest2>(FloatLimits::quiet_NaN())));
+ EXPECT_EQ(10, (saturated_cast<int, CastTest2>(FloatLimits::infinity())));
+ EXPECT_EQ(10, (saturated_cast<int, CastTest2>(FloatLimits::max())));
+ EXPECT_EQ(1, (saturated_cast<int, CastTest2>(-FloatLimits::infinity())));
+ EXPECT_EQ(1, (saturated_cast<int, CastTest2>(FloatLimits::lowest())));
+ EXPECT_EQ(1, (saturated_cast<int, CastTest2>(0U)));
+
float not_a_number = std::numeric_limits<float>::infinity() -
std::numeric_limits<float>::infinity();
EXPECT_TRUE(std::isnan(not_a_number));
EXPECT_EQ(0, saturated_cast<int>(not_a_number));
-}
-TEST(SafeNumerics, SaturatedCastChecks) {
- float not_a_number = std::numeric_limits<float>::infinity() -
- std::numeric_limits<float>::infinity();
- EXPECT_TRUE(std::isnan(not_a_number));
- EXPECT_DEATH_IF_SUPPORTED(
- (saturated_cast<int, base::CheckOnFailure>(not_a_number)),
- "");
+ // Test the CheckedNumeric value extraction functions.
+ auto int8_min = MakeCheckedNum(numeric_limits<int8_t>::lowest());
+ auto int8_max = MakeCheckedNum(numeric_limits<int8_t>::max());
+ auto double_max = MakeCheckedNum(numeric_limits<double>::max());
+ static_assert(
+ std::is_same<int16_t,
+ decltype(int8_min.ValueOrDie<int16_t>())::type>::value,
+ "ValueOrDie returning incorrect type.");
+ static_assert(
+ std::is_same<int16_t,
+ decltype(int8_min.ValueOrDefault<int16_t>(0))::type>::value,
+ "ValueOrDefault returning incorrect type.");
+ EXPECT_FALSE(IsValidForType<uint8_t>(int8_min));
+ EXPECT_TRUE(IsValidForType<uint8_t>(int8_max));
+ EXPECT_EQ(static_cast<int>(numeric_limits<int8_t>::lowest()),
+ ValueOrDieForType<int>(int8_min));
+ EXPECT_TRUE(IsValidForType<uint32_t>(int8_max));
+ EXPECT_EQ(static_cast<int>(numeric_limits<int8_t>::max()),
+ ValueOrDieForType<int>(int8_max));
+ EXPECT_EQ(0, ValueOrDefaultForType<int>(double_max, 0));
+ uint8_t uint8_dest = 0;
+ int16_t int16_dest = 0;
+ double double_dest = 0;
+ EXPECT_TRUE(int8_max.AssignIfValid(&uint8_dest));
+ EXPECT_EQ(static_cast<uint8_t>(numeric_limits<int8_t>::max()), uint8_dest);
+ EXPECT_FALSE(int8_min.AssignIfValid(&uint8_dest));
+ EXPECT_TRUE(int8_max.AssignIfValid(&int16_dest));
+ EXPECT_EQ(static_cast<int16_t>(numeric_limits<int8_t>::max()), int16_dest);
+ EXPECT_TRUE(int8_min.AssignIfValid(&int16_dest));
+ EXPECT_EQ(static_cast<int16_t>(numeric_limits<int8_t>::lowest()), int16_dest);
+ EXPECT_FALSE(double_max.AssignIfValid(&uint8_dest));
+ EXPECT_FALSE(double_max.AssignIfValid(&int16_dest));
+ EXPECT_TRUE(double_max.AssignIfValid(&double_dest));
+ EXPECT_EQ(numeric_limits<double>::max(), double_dest);
+ EXPECT_EQ(1, checked_cast<int>(StrictNumeric<int>(1)));
+ EXPECT_EQ(1, saturated_cast<int>(StrictNumeric<int>(1)));
+ EXPECT_EQ(1, strict_cast<int>(StrictNumeric<int>(1)));
}
TEST(SafeNumerics, IsValueInRangeForNumericType) {
@@ -719,9 +1077,9 @@ TEST(SafeNumerics, IsValueInRangeForNumericType) {
EXPECT_FALSE(IsValueInRangeForNumericType<uint32_t>(UINT64_C(0x100000000)));
EXPECT_FALSE(IsValueInRangeForNumericType<uint32_t>(UINT64_C(0x100000001)));
EXPECT_FALSE(IsValueInRangeForNumericType<uint32_t>(
- std::numeric_limits<int32_t>::min()));
+ std::numeric_limits<int32_t>::lowest()));
EXPECT_FALSE(IsValueInRangeForNumericType<uint32_t>(
- std::numeric_limits<int64_t>::min()));
+ std::numeric_limits<int64_t>::lowest()));
EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(0));
EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(1));
@@ -735,13 +1093,13 @@ TEST(SafeNumerics, IsValueInRangeForNumericType) {
EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(INT64_C(0xffffffff)));
EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(INT64_C(0x100000000)));
EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(
- std::numeric_limits<int32_t>::min()));
+ std::numeric_limits<int32_t>::lowest()));
EXPECT_TRUE(IsValueInRangeForNumericType<int32_t>(
- static_cast<int64_t>(std::numeric_limits<int32_t>::min())));
+ static_cast<int64_t>(std::numeric_limits<int32_t>::lowest())));
EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(
- static_cast<int64_t>(std::numeric_limits<int32_t>::min()) - 1));
+ static_cast<int64_t>(std::numeric_limits<int32_t>::lowest()) - 1));
EXPECT_FALSE(IsValueInRangeForNumericType<int32_t>(
- std::numeric_limits<int64_t>::min()));
+ std::numeric_limits<int64_t>::lowest()));
EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(0));
EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(1));
@@ -752,10 +1110,10 @@ TEST(SafeNumerics, IsValueInRangeForNumericType) {
EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(UINT64_C(0x100000000)));
EXPECT_TRUE(IsValueInRangeForNumericType<uint64_t>(UINT64_C(0x100000001)));
EXPECT_FALSE(IsValueInRangeForNumericType<uint64_t>(
- std::numeric_limits<int32_t>::min()));
+ std::numeric_limits<int32_t>::lowest()));
EXPECT_FALSE(IsValueInRangeForNumericType<uint64_t>(INT64_C(-1)));
EXPECT_FALSE(IsValueInRangeForNumericType<uint64_t>(
- std::numeric_limits<int64_t>::min()));
+ std::numeric_limits<int64_t>::lowest()));
EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(0));
EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(1));
@@ -777,11 +1135,11 @@ TEST(SafeNumerics, IsValueInRangeForNumericType) {
EXPECT_FALSE(
IsValueInRangeForNumericType<int64_t>(UINT64_C(0xffffffffffffffff)));
EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(
- std::numeric_limits<int32_t>::min()));
+ std::numeric_limits<int32_t>::lowest()));
EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(
- static_cast<int64_t>(std::numeric_limits<int32_t>::min())));
+ static_cast<int64_t>(std::numeric_limits<int32_t>::lowest())));
EXPECT_TRUE(IsValueInRangeForNumericType<int64_t>(
- std::numeric_limits<int64_t>::min()));
+ std::numeric_limits<int64_t>::lowest()));
}
TEST(SafeNumerics, CompoundNumericOperations) {
@@ -807,3 +1165,22 @@ TEST(SafeNumerics, CompoundNumericOperations) {
too_large /= d;
EXPECT_FALSE(too_large.IsValid());
}
+
+TEST(SafeNumerics, VariadicNumericOperations) {
+ auto a = CheckAdd(1, 2UL, MakeCheckedNum(3LL), 4).ValueOrDie();
+ EXPECT_EQ(static_cast<decltype(a)::type>(10), a);
+ auto b = CheckSub(MakeCheckedNum(20.0), 2UL, 4).ValueOrDie();
+ EXPECT_EQ(static_cast<decltype(b)::type>(14.0), b);
+ auto c = CheckMul(20.0, MakeCheckedNum(1), 5, 3UL).ValueOrDie();
+ EXPECT_EQ(static_cast<decltype(c)::type>(300.0), c);
+ auto d = CheckDiv(20.0, 2.0, MakeCheckedNum(5LL), -4).ValueOrDie();
+ EXPECT_EQ(static_cast<decltype(d)::type>(-.5), d);
+ auto e = CheckMod(MakeCheckedNum(20), 3).ValueOrDie();
+ EXPECT_EQ(static_cast<decltype(e)::type>(2), e);
+ auto f = CheckLsh(1, MakeCheckedNum(2)).ValueOrDie();
+ EXPECT_EQ(static_cast<decltype(f)::type>(4), f);
+ auto g = CheckRsh(4, MakeCheckedNum(2)).ValueOrDie();
+ EXPECT_EQ(static_cast<decltype(g)::type>(1), g);
+ auto h = CheckRsh(CheckAdd(1, 1, 1, 1), CheckSub(4, 2)).ValueOrDie();
+ EXPECT_EQ(static_cast<decltype(h)::type>(1), h);
+}
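As a quick orientation for the variadic helpers exercised above, here is a minimal caller sketch. Only CheckMul() and CheckedNumeric::AssignIfValid() come from this patch; the wrapper function, its arguments, and the include path are illustrative assumptions.

// Hypothetical caller of the checked variadic math tested above.
#include <stddef.h>

#include "base/numerics/safe_math.h"

bool CheckedImageSizeBytes(int rows, int cols, int bytes_per_pixel,
                           size_t* out) {
  // CheckMul() folds all three factors into one CheckedNumeric;
  // AssignIfValid() stores the product only if it fits in size_t.
  return base::CheckMul(rows, cols, bytes_per_pixel).AssignIfValid(out);
}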
diff --git a/chromium/base/numerics/saturated_arithmetic.h b/chromium/base/numerics/saturated_arithmetic.h
new file mode 100644
index 00000000000..7e24fe3617b
--- /dev/null
+++ b/chromium/base/numerics/saturated_arithmetic.h
@@ -0,0 +1,101 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_SATURATED_ARITHMETIC_H_
+#define BASE_NUMERICS_SATURATED_ARITHMETIC_H_
+
+#include <stdint.h>
+
+#include <limits>
+
+#include "base/compiler_specific.h"
+
+#if defined(ARCH_CPU_ARM_FAMILY) && defined(ARCH_CPU_32_BITS) && \
+ defined(COMPILER_GCC) && !defined(OS_NACL) && __OPTIMIZE__
+
+// If we're building ARM 32-bit on GCC we replace the C++ versions with some
+// native ARM assembly for speed.
+#include "base/numerics/saturated_arithmetic_arm.h"
+
+#else
+
+namespace base {
+
+ALWAYS_INLINE int32_t SaturatedAddition(int32_t a, int32_t b) {
+ uint32_t ua = a;
+ uint32_t ub = b;
+ uint32_t result = ua + ub;
+
+ // Addition can only overflow when the sign bits of the two values match; if
+ // the sign bit of the result differs from theirs, it overflowed.
+ // The branch compiles to a CMOVNS instruction on x86.
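+ // On overflow, (ua >> 31) selects the saturation value: the return below
+ // yields INT_MAX for positive overflow and wraps around to INT_MIN when both
+ // inputs were negative.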
+ if (~(ua ^ ub) & (result ^ ua) & (1 << 31))
+ return std::numeric_limits<int>::max() + (ua >> 31);
+
+ return result;
+}
+
+ALWAYS_INLINE int32_t SaturatedSubtraction(int32_t a, int32_t b) {
+ uint32_t ua = a;
+ uint32_t ub = b;
+ uint32_t result = ua - ub;
+
+ // Subtraction can only overflow when the sign bits of the two inputs differ;
+ // if the sign bit of the result differs from the first value's, it overflowed.
+ // The branch compiles to a CMOVNS instruction on x86.
+ if ((ua ^ ub) & (result ^ ua) & (1 << 31))
+ return std::numeric_limits<int>::max() + (ua >> 31);
+
+ return result;
+}
+
+ALWAYS_INLINE int32_t SaturatedNegative(int32_t a) {
+ if (UNLIKELY(a == std::numeric_limits<int>::min()))
+ return std::numeric_limits<int>::max();
+ return -a;
+}
+
+ALWAYS_INLINE int GetMaxSaturatedSetResultForTesting(int fractional_shift) {
+ // In the C version the set function saturates to max int. This differs from
+ // the ARM asm version; see saturated_arithmetic_arm.h for the equivalent asm
+ // implementation.
+ return std::numeric_limits<int>::max();
+}
+
+ALWAYS_INLINE int GetMinSaturatedSetResultForTesting(int fractional_shift) {
+ return std::numeric_limits<int>::min();
+}
+
+template <int fractional_shift>
+ALWAYS_INLINE int SaturatedSet(int value) {
+ const int kIntMaxForLayoutUnit =
+ std::numeric_limits<int>::max() >> fractional_shift;
+
+ const int kIntMinForLayoutUnit =
+ std::numeric_limits<int>::min() >> fractional_shift;
+
+ if (value > kIntMaxForLayoutUnit)
+ return std::numeric_limits<int>::max();
+
+ if (value < kIntMinForLayoutUnit)
+ return std::numeric_limits<int>::min();
+
+ return value << fractional_shift;
+}
+
+template <int fractional_shift>
+ALWAYS_INLINE int SaturatedSet(unsigned value) {
+ const unsigned kIntMaxForLayoutUnit =
+ std::numeric_limits<int>::max() >> fractional_shift;
+
+ if (value >= kIntMaxForLayoutUnit)
+ return std::numeric_limits<int>::max();
+
+ return value << fractional_shift;
+}
+
+} // namespace base
+
+#endif // ARCH_CPU_ARM_FAMILY && ARCH_CPU_32_BITS && COMPILER_GCC && !OS_NACL
+#endif // BASE_NUMERICS_SATURATED_ARITHMETIC_H_
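For orientation, a short usage sketch of the portable C++ functions defined above. The expected values mirror the unit tests added later in this patch; the wrapper function itself is hypothetical.

#include <limits>

#include "base/numerics/saturated_arithmetic.h"

// Hypothetical demo; returns true when the saturating behavior holds.
bool SaturationDemo() {
  const int int_max = std::numeric_limits<int>::max();
  const int sum = base::SaturatedAddition(int_max, 1);       // clamps to int_max
  const int diff = base::SaturatedSubtraction(int_max, -1);  // clamps to int_max
  // Fixed-point "set" with 6 fraction bits, as in the LayoutUnit-style tests.
  const int fixed = base::SaturatedSet<6>(3);                // 3 << 6 == 192
  return sum == int_max && diff == int_max && fixed == 192;
}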
diff --git a/chromium/base/numerics/saturated_arithmetic_arm.h b/chromium/base/numerics/saturated_arithmetic_arm.h
new file mode 100644
index 00000000000..e5017e4ea37
--- /dev/null
+++ b/chromium/base/numerics/saturated_arithmetic_arm.h
@@ -0,0 +1,102 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NUMERICS_SATURATED_ARITHMETIC_ARM_H_
+#define BASE_NUMERICS_SATURATED_ARITHMETIC_ARM_H_
+
+#include <limits>
+
+namespace base {
+
+inline int32_t SaturatedAddition(int32_t a, int32_t b) {
+ int32_t result;
+
+ asm("qadd %[output],%[first],%[second]"
+ : [output] "=r"(result)
+ : [first] "r"(a), [second] "r"(b));
+
+ return result;
+}
+
+inline int32_t SaturatedSubtraction(int32_t a, int32_t b) {
+ int32_t result;
+
+ asm("qsub %[output],%[first],%[second]"
+ : [output] "=r"(result)
+ : [first] "r"(a), [second] "r"(b));
+
+ return result;
+}
+
+inline int32_t SaturatedNegative(int32_t a) {
+ return SaturatedSubtraction(0, a);
+}
+
+inline int GetMaxSaturatedSetResultForTesting(int fractional_shift) {
+ // In the ARM asm version the set function saturates to the biggest possible
+ // integer part with the fractional part zeroed out, e.g. 0x7fffffc0.
+ return std::numeric_limits<int>::max() & ~((1 << fractional_shift) - 1);
+}
+
+inline int GetMinSaturatedSetResultForTesting(int fractional_shift) {
+ return std::numeric_limits<int>::min();
+}
+
+template <int fractional_shift>
+inline int SaturatedSet(int value) {
+ // Figure out how many bits are left for storing the integer part of
+ // the fixed-point number, and saturate our input to that.
+ enum { Saturate = 32 - fractional_shift };
+
+ int result;
+
+ // The following ARM code saturates the passed value to the number of bits
+ // used for the whole part of the fixed-point representation, then shifts it
+ // up into place. This leaves the low <fractional_shift> bits all 0. When the
+ // value saturates, this gives a different result from the C++ case; in the
+ // C++ code a saturated value has all the low bits set to 1 (for a positive
+ // number at least). This cannot be done rapidly in ARM, so we live with the
+ // difference for the sake of speed.
+
+ asm("ssat %[output],%[saturate],%[value]\n\t"
+ "lsl %[output],%[shift]"
+ : [output] "=r"(result)
+ : [value] "r"(value), [saturate] "n"(Saturate),
+ [shift] "n"(fractional_shift));
+
+ return result;
+}
+
+template <int fractional_shift>
+inline int SaturatedSet(unsigned value) {
+ // Here we are being passed an unsigned value to saturate,
+ // even though the result is returned as a signed integer. The ARM
+ // instruction for unsigned saturation therefore needs to be given one
+ // less bit (i.e. the sign bit) for the saturation to work correctly; hence
+ // the '31' below.
+ enum { Saturate = 31 - fractional_shift };
+
+ // The following ARM code saturates the passed value to the number of bits
+ // used for the whole part of the fixed-point representation, then shifts it
+ // up into place. This leaves the low <fractional_shift> bits all 0. When the
+ // value saturates, this gives a different result from the C++ case; in the
+ // C++ code a saturated value has all the low bits set to 1. This cannot be
+ // done rapidly in ARM, so we live with the difference for the sake of speed.
+
+ int result;
+
+ asm("usat %[output],%[saturate],%[value]\n\t"
+ "lsl %[output],%[shift]"
+ : [output] "=r"(result)
+ : [value] "r"(value), [saturate] "n"(Saturate),
+ [shift] "n"(fractional_shift));
+
+ return result;
+}
+
+} // namespace base
+
+#endif // BASE_NUMERICS_SATURATED_ARITHMETIC_ARM_H_
diff --git a/chromium/base/numerics/saturated_arithmetic_unittest.cc b/chromium/base/numerics/saturated_arithmetic_unittest.cc
new file mode 100644
index 00000000000..498f5b77105
--- /dev/null
+++ b/chromium/base/numerics/saturated_arithmetic_unittest.cc
@@ -0,0 +1,141 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/numerics/saturated_arithmetic.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(SaturatedArithmeticTest, Addition) {
+ int int_max = std::numeric_limits<int>::max();
+ int int_min = std::numeric_limits<int>::min();
+
+ EXPECT_EQ(0, SaturatedAddition(0, 0));
+ EXPECT_EQ(1, SaturatedAddition(0, 1));
+ EXPECT_EQ(100, SaturatedAddition(0, 100));
+ EXPECT_EQ(150, SaturatedAddition(100, 50));
+
+ EXPECT_EQ(-1, SaturatedAddition(0, -1));
+ EXPECT_EQ(0, SaturatedAddition(1, -1));
+ EXPECT_EQ(50, SaturatedAddition(100, -50));
+ EXPECT_EQ(-50, SaturatedAddition(50, -100));
+
+ EXPECT_EQ(int_max - 1, SaturatedAddition(int_max - 1, 0));
+ EXPECT_EQ(int_max, SaturatedAddition(int_max - 1, 1));
+ EXPECT_EQ(int_max, SaturatedAddition(int_max - 1, 2));
+ EXPECT_EQ(int_max - 1, SaturatedAddition(0, int_max - 1));
+ EXPECT_EQ(int_max, SaturatedAddition(1, int_max - 1));
+ EXPECT_EQ(int_max, SaturatedAddition(2, int_max - 1));
+ EXPECT_EQ(int_max, SaturatedAddition(int_max - 1, int_max - 1));
+ EXPECT_EQ(int_max, SaturatedAddition(int_max, int_max));
+
+ EXPECT_EQ(int_min, SaturatedAddition(int_min, 0));
+ EXPECT_EQ(int_min + 1, SaturatedAddition(int_min + 1, 0));
+ EXPECT_EQ(int_min + 2, SaturatedAddition(int_min + 1, 1));
+ EXPECT_EQ(int_min + 3, SaturatedAddition(int_min + 1, 2));
+ EXPECT_EQ(int_min, SaturatedAddition(int_min + 1, -1));
+ EXPECT_EQ(int_min, SaturatedAddition(int_min + 1, -2));
+ EXPECT_EQ(int_min + 1, SaturatedAddition(0, int_min + 1));
+ EXPECT_EQ(int_min, SaturatedAddition(-1, int_min + 1));
+ EXPECT_EQ(int_min, SaturatedAddition(-2, int_min + 1));
+
+ EXPECT_EQ(int_max / 2 + 10000, SaturatedAddition(int_max / 2, 10000));
+ EXPECT_EQ(int_max, SaturatedAddition(int_max / 2 + 1, int_max / 2 + 1));
+ EXPECT_EQ(-1, SaturatedAddition(int_min, int_max));
+}
+
+TEST(SaturatedArithmeticTest, Subtraction) {
+ int int_max = std::numeric_limits<int>::max();
+ int int_min = std::numeric_limits<int>::min();
+
+ EXPECT_EQ(0, SaturatedSubtraction(0, 0));
+ EXPECT_EQ(-1, SaturatedSubtraction(0, 1));
+ EXPECT_EQ(-100, SaturatedSubtraction(0, 100));
+ EXPECT_EQ(50, SaturatedSubtraction(100, 50));
+
+ EXPECT_EQ(1, SaturatedSubtraction(0, -1));
+ EXPECT_EQ(2, SaturatedSubtraction(1, -1));
+ EXPECT_EQ(150, SaturatedSubtraction(100, -50));
+ EXPECT_EQ(150, SaturatedSubtraction(50, -100));
+
+ EXPECT_EQ(int_max, SaturatedSubtraction(int_max, 0));
+ EXPECT_EQ(int_max - 1, SaturatedSubtraction(int_max, 1));
+ EXPECT_EQ(int_max - 1, SaturatedSubtraction(int_max - 1, 0));
+ EXPECT_EQ(int_max, SaturatedSubtraction(int_max - 1, -1));
+ EXPECT_EQ(int_max, SaturatedSubtraction(int_max - 1, -2));
+ EXPECT_EQ(-int_max + 1, SaturatedSubtraction(0, int_max - 1));
+ EXPECT_EQ(-int_max, SaturatedSubtraction(-1, int_max - 1));
+ EXPECT_EQ(-int_max - 1, SaturatedSubtraction(-2, int_max - 1));
+ EXPECT_EQ(-int_max - 1, SaturatedSubtraction(-3, int_max - 1));
+
+ EXPECT_EQ(int_min, SaturatedSubtraction(int_min, 0));
+ EXPECT_EQ(int_min + 1, SaturatedSubtraction(int_min + 1, 0));
+ EXPECT_EQ(int_min, SaturatedSubtraction(int_min + 1, 1));
+ EXPECT_EQ(int_min, SaturatedSubtraction(int_min + 1, 2));
+
+ EXPECT_EQ(0, SaturatedSubtraction(int_min, int_min));
+ EXPECT_EQ(0, SaturatedSubtraction(int_max, int_max));
+ EXPECT_EQ(int_max, SaturatedSubtraction(int_max, int_min));
+}
+
+TEST(SaturatedArithmeticTest, SetSigned) {
+ int int_max = std::numeric_limits<int>::max();
+ int int_min = std::numeric_limits<int>::min();
+
+ const int kFractionBits = 6;
+ const int kIntMaxForLayoutUnit = int_max >> kFractionBits;
+ const int kIntMinForLayoutUnit = int_min >> kFractionBits;
+
+ EXPECT_EQ(0, SaturatedSet<kFractionBits>(0));
+
+ // Internally, the max number we can represent (without saturating) has all
+ // the (non-sign) bits set except for the bottom kFractionBits fraction bits.
+ const int max_internal_representation = int_max ^ ((1 << kFractionBits) - 1);
+ EXPECT_EQ(max_internal_representation,
+ SaturatedSet<kFractionBits>(kIntMaxForLayoutUnit));
+
+ EXPECT_EQ(GetMaxSaturatedSetResultForTesting(kFractionBits),
+ SaturatedSet<kFractionBits>(kIntMaxForLayoutUnit + 100));
+
+ EXPECT_EQ((kIntMaxForLayoutUnit - 100) << kFractionBits,
+ SaturatedSet<kFractionBits>(kIntMaxForLayoutUnit - 100));
+
+ EXPECT_EQ(GetMinSaturatedSetResultForTesting(kFractionBits),
+ SaturatedSet<kFractionBits>(kIntMinForLayoutUnit));
+
+ EXPECT_EQ(GetMinSaturatedSetResultForTesting(kFractionBits),
+ SaturatedSet<kFractionBits>(kIntMinForLayoutUnit - 100));
+
+ // Shifting negative numbers left has undefined behavior, so use
+ // multiplication instead of direct shifting here.
+ EXPECT_EQ((kIntMinForLayoutUnit + 100) * (1 << kFractionBits),
+ SaturatedSet<kFractionBits>(kIntMinForLayoutUnit + 100));
+}
+
+TEST(SaturatedArithmeticTest, SetUnsigned) {
+ int int_max = std::numeric_limits<int>::max();
+
+ const int kFractionBits = 6;
+ const int kIntMaxForLayoutUnit = int_max >> kFractionBits;
+
+ EXPECT_EQ(0, SaturatedSet<kFractionBits>((unsigned)0));
+
+ EXPECT_EQ(GetMaxSaturatedSetResultForTesting(kFractionBits),
+ SaturatedSet<kFractionBits>((unsigned)kIntMaxForLayoutUnit));
+
+ const unsigned kOverflowed = kIntMaxForLayoutUnit + 100;
+ EXPECT_EQ(GetMaxSaturatedSetResultForTesting(kFractionBits),
+ SaturatedSet<kFractionBits>(kOverflowed));
+
+ const unsigned kNotOverflowed = kIntMaxForLayoutUnit - 100;
+ EXPECT_EQ((kIntMaxForLayoutUnit - 100) << kFractionBits,
+ SaturatedSet<kFractionBits>(kNotOverflowed));
+}
+
+} // namespace base
diff --git a/chromium/base/observer_list_threadsafe.h b/chromium/base/observer_list_threadsafe.h
index dc683d40812..afb1010b67c 100644
--- a/chromium/base/observer_list_threadsafe.h
+++ b/chromium/base/observer_list_threadsafe.h
@@ -55,11 +55,6 @@
///////////////////////////////////////////////////////////////////////////////
namespace base {
-
-// Forward declaration for ObserverListThreadSafeTraits.
-template <class ObserverType>
-class ObserverListThreadSafe;
-
namespace internal {
template <typename ObserverType, typename Method>
@@ -75,27 +70,9 @@ struct Dispatcher<ObserverType, void(ReceiverType::*)(Params...)> {
} // namespace internal
-// This class is used to work around VS2005 not accepting:
-//
-// friend class
-// base::RefCountedThreadSafe<ObserverListThreadSafe<ObserverType>>;
-//
-// Instead of friending the class, we could friend the actual function
-// which calls delete. However, this ends up being
-// RefCountedThreadSafe::DeleteInternal(), which is private. So we
-// define our own templated traits class so we can friend it.
-template <class T>
-struct ObserverListThreadSafeTraits {
- static void Destruct(const ObserverListThreadSafe<T>* x) {
- delete x;
- }
-};
-
template <class ObserverType>
class ObserverListThreadSafe
- : public RefCountedThreadSafe<
- ObserverListThreadSafe<ObserverType>,
- ObserverListThreadSafeTraits<ObserverType>> {
+ : public RefCountedThreadSafe<ObserverListThreadSafe<ObserverType>> {
public:
using NotificationType =
typename ObserverList<ObserverType>::NotificationType;
@@ -180,8 +157,7 @@ class ObserverListThreadSafe
}
private:
- // See comment above ObserverListThreadSafeTraits' definition.
- friend struct ObserverListThreadSafeTraits<ObserverType>;
+ friend class RefCountedThreadSafe<ObserverListThreadSafe<ObserverType>>;
struct ObserverListContext {
explicit ObserverListContext(NotificationType type)
diff --git a/chromium/base/pickle.cc b/chromium/base/pickle.cc
index cfb316c13e5..02f39b57b7b 100644
--- a/chromium/base/pickle.cc
+++ b/chromium/base/pickle.cc
@@ -233,7 +233,6 @@ void PickleSizer::AddBytes(int length) {
void PickleSizer::AddAttachment() {
// From IPC::Message::WriteAttachment
- AddBool();
AddInt();
}
diff --git a/chromium/base/posix/global_descriptors.h b/chromium/base/posix/global_descriptors.h
index edb299de5c9..30db2b7ab72 100644
--- a/chromium/base/posix/global_descriptors.h
+++ b/chromium/base/posix/global_descriptors.h
@@ -52,18 +52,7 @@ class BASE_EXPORT GlobalDescriptors {
// Often we want a canonical descriptor for a given Key. In this case, we add
// the following constant to the key value:
-#if !defined(OS_ANDROID)
static const int kBaseDescriptor = 3; // 0, 1, 2 are already taken.
-#else
- // 3 used by __android_log_write().
- // 4 used by... something important on Android M.
- // 5 used by... something important on Android L... on low-end devices.
- // TODO(amistry): An Android, this mechanism is only used for tests since the
- // content child launcher spawns a process by creating a new Activity using
- // the Android APIs. For tests, come up with a way that doesn't require using
- // a pre-defined fd.
- static const int kBaseDescriptor = 6;
-#endif
// Return the singleton instance of GlobalDescriptors.
static GlobalDescriptors* GetInstance();
diff --git a/chromium/base/post_task_and_reply_with_result_internal.h b/chromium/base/post_task_and_reply_with_result_internal.h
new file mode 100644
index 00000000000..1456129324a
--- /dev/null
+++ b/chromium/base/post_task_and_reply_with_result_internal.h
@@ -0,0 +1,35 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_POST_TASK_AND_REPLY_WITH_RESULT_INTERNAL_H_
+#define BASE_POST_TASK_AND_REPLY_WITH_RESULT_INTERNAL_H_
+
+#include <utility>
+
+#include "base/callback.h"
+
+namespace base {
+
+namespace internal {
+
+// Adapts a function that produces a result via a return value to
+// one that returns via an output parameter.
+template <typename ReturnType>
+void ReturnAsParamAdapter(const Callback<ReturnType(void)>& func,
+ ReturnType* result) {
+ *result = func.Run();
+}
+
+// Adapts a T* result to a callback that expects a T.
+template <typename TaskReturnType, typename ReplyArgType>
+void ReplyAdapter(const Callback<void(ReplyArgType)>& callback,
+ TaskReturnType* result) {
+ callback.Run(std::move(*result));
+}
+
+} // namespace internal
+
+} // namespace base
+
+#endif // BASE_POST_TASK_AND_REPLY_WITH_RESULT_INTERNAL_H_
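The two adapters above are glue for a PostTaskAndReplyWithResult-style helper. Roughly, the composition looks like the sketch below; the wrapper name and its exact signature are assumptions made for illustration, while the adapters, base::Bind(), base::Owned() and TaskRunner::PostTaskAndReply() come from base.

// Sketch of how ReturnAsParamAdapter/ReplyAdapter are meant to be composed.
#include "base/bind.h"
#include "base/location.h"
#include "base/post_task_and_reply_with_result_internal.h"
#include "base/task_runner.h"

template <typename TaskReturnType, typename ReplyArgType>
bool PostTaskAndReplyWithResultSketch(
    base::TaskRunner* task_runner,
    const tracked_objects::Location& from_here,
    const base::Callback<TaskReturnType()>& task,
    const base::Callback<void(ReplyArgType)>& reply) {
  // The heap-allocated slot is owned by the reply closure and carries the
  // task's return value from the task thread back to the reply thread.
  TaskReturnType* result = new TaskReturnType();
  return task_runner->PostTaskAndReply(
      from_here,
      base::Bind(&base::internal::ReturnAsParamAdapter<TaskReturnType>, task,
                 result),
      base::Bind(&base::internal::ReplyAdapter<TaskReturnType, ReplyArgType>,
                 reply, base::Owned(result)));
}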
diff --git a/chromium/base/power_monitor/power_monitor_device_source.cc b/chromium/base/power_monitor/power_monitor_device_source.cc
index d7060c2a228..5df58003375 100644
--- a/chromium/base/power_monitor/power_monitor_device_source.cc
+++ b/chromium/base/power_monitor/power_monitor_device_source.cc
@@ -4,13 +4,9 @@
#include "base/power_monitor/power_monitor_device_source.h"
-#include "base/threading/thread_task_runner_handle.h"
-
namespace base {
PowerMonitorDeviceSource::PowerMonitorDeviceSource() {
- DCHECK(ThreadTaskRunnerHandle::IsSet());
-
#if defined(OS_MACOSX)
PlatformInit();
#endif
diff --git a/chromium/base/power_monitor/power_monitor_device_source.h b/chromium/base/power_monitor/power_monitor_device_source.h
index 69237cd6434..1e2c885fa45 100644
--- a/chromium/base/power_monitor/power_monitor_device_source.h
+++ b/chromium/base/power_monitor/power_monitor_device_source.h
@@ -7,8 +7,6 @@
#include "base/base_export.h"
#include "base/macros.h"
-#include "base/memory/ref_counted.h"
-#include "base/observer_list_threadsafe.h"
#include "base/power_monitor/power_monitor_source.h"
#include "base/power_monitor/power_observer.h"
#include "build/build_config.h"
diff --git a/chromium/base/process/kill_win.cc b/chromium/base/process/kill_win.cc
index ecb08421cfa..2911b36c2f4 100644
--- a/chromium/base/process/kill_win.cc
+++ b/chromium/base/process/kill_win.cc
@@ -17,8 +17,7 @@
#include "base/macros.h"
#include "base/process/memory.h"
#include "base/process/process_iterator.h"
-#include "base/threading/thread_task_runner_handle.h"
-#include "base/win/object_watcher.h"
+#include "base/task_scheduler/post_task.h"
namespace base {
@@ -38,62 +37,6 @@ const DWORD kDebuggerTerminatedExitCode = 0x40010004;
// process goes away.
const DWORD kProcessKilledExitCode = 1;
-// Maximum amount of time (in milliseconds) to wait for the process to exit.
-static const int kWaitInterval = 2000;
-
-class TimerExpiredTask : public win::ObjectWatcher::Delegate {
- public:
- explicit TimerExpiredTask(Process process);
- ~TimerExpiredTask() override;
-
- void TimedOut();
-
- // win::ObjectWatcher::Delegate implementation.
- void OnObjectSignaled(HANDLE object) override;
-
- private:
- void KillProcess();
-
- // The process that we are watching.
- Process process_;
-
- win::ObjectWatcher watcher_;
-
- DISALLOW_COPY_AND_ASSIGN(TimerExpiredTask);
-};
-
-TimerExpiredTask::TimerExpiredTask(Process process)
- : process_(std::move(process)) {
- watcher_.StartWatchingOnce(process_.Handle(), this);
-}
-
-TimerExpiredTask::~TimerExpiredTask() {
- TimedOut();
-}
-
-void TimerExpiredTask::TimedOut() {
- if (process_.IsValid())
- KillProcess();
-}
-
-void TimerExpiredTask::OnObjectSignaled(HANDLE object) {
- process_.Close();
-}
-
-void TimerExpiredTask::KillProcess() {
- // Stop watching the process handle since we're killing it.
- watcher_.StopWatching();
-
- // OK, time to get frisky. We don't actually care when the process
- // terminates. We just care that it eventually terminates, and that's what
- // TerminateProcess should do for us. Don't check for the result code since
- // it fails quite often. This should be investigated eventually.
- process_.Terminate(kProcessKilledExitCode, false);
-
- // Now, just cleanup as if the process exited normally.
- OnObjectSignaled(process_.Handle());
-}
-
} // namespace
TerminationStatus GetTerminationStatus(ProcessHandle handle, int* exit_code) {
@@ -196,14 +139,22 @@ void EnsureProcessTerminated(Process process) {
DCHECK(!process.is_current());
// If already signaled, then we are done!
- if (WaitForSingleObject(process.Handle(), 0) == WAIT_OBJECT_0) {
+ if (WaitForSingleObject(process.Handle(), 0) == WAIT_OBJECT_0)
return;
- }
- ThreadTaskRunnerHandle::Get()->PostDelayedTask(
- FROM_HERE, Bind(&TimerExpiredTask::TimedOut,
- Owned(new TimerExpiredTask(std::move(process)))),
- TimeDelta::FromMilliseconds(kWaitInterval));
+ PostDelayedTaskWithTraits(
+ FROM_HERE,
+ TaskTraits()
+ .WithPriority(TaskPriority::BACKGROUND)
+ .WithShutdownBehavior(TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN),
+ Bind(
+ [](Process process) {
+ if (WaitForSingleObject(process.Handle(), 0) == WAIT_OBJECT_0)
+ return;
+ process.Terminate(kProcessKilledExitCode, false);
+ },
+ Passed(&process)),
+ TimeDelta::FromSeconds(2));
}
} // namespace base
diff --git a/chromium/base/process/process.h b/chromium/base/process/process.h
index 5538475863b..978ca78c046 100644
--- a/chromium/base/process/process.h
+++ b/chromium/base/process/process.h
@@ -108,12 +108,12 @@ class BASE_EXPORT Process {
// any process.
// NOTE: |exit_code| is optional, nullptr can be passed if the exit code is
// not required.
- bool WaitForExit(int* exit_code);
+ bool WaitForExit(int* exit_code) const;
// Same as WaitForExit() but only waits for up to |timeout|.
// NOTE: |exit_code| is optional, nullptr can be passed if the exit code
// is not required.
- bool WaitForExitWithTimeout(TimeDelta timeout, int* exit_code);
+ bool WaitForExitWithTimeout(TimeDelta timeout, int* exit_code) const;
#if defined(OS_MACOSX)
// The Mac needs a Mach port in order to manipulate a process's priority,
diff --git a/chromium/base/process/process_metrics.h b/chromium/base/process/process_metrics.h
index d9c9952201b..2448a715ace 100644
--- a/chromium/base/process/process_metrics.h
+++ b/chromium/base/process/process_metrics.h
@@ -145,6 +145,9 @@ class BASE_EXPORT ProcessMetrics {
// usage in bytes, as per definition of WorkingSetBytes. Note that this
// function is somewhat expensive on Windows (a few ms per process).
bool GetWorkingSetKBytes(WorkingSetKBytes* ws_usage) const;
+ // Computes pss (proportional set size) of a process. Note that this
+ // function is somewhat expensive on Windows (a few ms per process).
+ bool GetProportionalSetSizeBytes(uint64_t* pss_bytes) const;
#if defined(OS_MACOSX)
// Fills both CommitedKBytes and WorkingSetKBytes in a single operation. This
diff --git a/chromium/base/process/process_metrics_win.cc b/chromium/base/process/process_metrics_win.cc
index 5b2777bb364..d2f0c935531 100644
--- a/chromium/base/process/process_metrics_win.cc
+++ b/chromium/base/process/process_metrics_win.cc
@@ -155,9 +155,58 @@ class WorkingSetInformationBuffer {
return UncheckedMalloc(size, reinterpret_cast<void**>(&buffer_));
}
- PSAPI_WORKING_SET_INFORMATION* get() { return buffer_; }
const PSAPI_WORKING_SET_INFORMATION* operator ->() const { return buffer_; }
+ size_t GetPageEntryCount() const { return number_of_entries; }
+
+ // Queries the working set page entries for the given process.
+ bool QueryPageEntries(const ProcessHandle& process_) {
+ int retries = 5;
+ number_of_entries = 4096; // Just a guess.
+
+ for (;;) {
+ size_t buffer_size =
+ sizeof(PSAPI_WORKING_SET_INFORMATION) +
+ (number_of_entries * sizeof(PSAPI_WORKING_SET_BLOCK));
+
+ if (!Reserve(buffer_size))
+ return false;
+
+ // On success, |buffer_| is populated with info about the working set of
+ // |process_|. On ERROR_BAD_LENGTH failure, increase the size of the
+ // buffer and try again.
+ if (QueryWorkingSet(process_, buffer_, buffer_size))
+ break; // Success
+
+ if (GetLastError() != ERROR_BAD_LENGTH)
+ return false;
+
+ number_of_entries = buffer_->NumberOfEntries;
+
+ // Maybe some entries are being added right now. Increase the buffer to
+ // take that into account. Increasing by 10% should generally be enough,
+ // especially considering the potentially low memory condition during the
+ // call (when called from OomMemoryDetails) and the potentially high
+ // number of entries (300K was observed in crash dumps).
+ number_of_entries *= 1.1;
+
+ if (--retries == 0) {
+ // If we're looping, eventually fail.
+ return false;
+ }
+ }
+
+ // TODO(chengx): Remove the comment and the logic below. It is no longer
+ // needed since we don't have Win2000 support.
+ // On windows 2000 the function returns 1 even when the buffer is too small.
+ // The number of entries that we are going to parse is the minimum between
+ // the size we allocated and the real number of entries.
+ number_of_entries = std::min(number_of_entries,
+ static_cast<size_t>(buffer_->NumberOfEntries));
+
+ return true;
+ }
+
private:
void Clear() {
free(buffer_);
@@ -166,6 +215,9 @@ class WorkingSetInformationBuffer {
PSAPI_WORKING_SET_INFORMATION* buffer_ = nullptr;
+ // Number of page entries.
+ size_t number_of_entries = 0;
+
DISALLOW_COPY_AND_ASSIGN(WorkingSetInformationBuffer);
};
@@ -179,44 +231,12 @@ bool ProcessMetrics::GetWorkingSetKBytes(WorkingSetKBytes* ws_usage) const {
DCHECK(ws_usage);
memset(ws_usage, 0, sizeof(*ws_usage));
- DWORD number_of_entries = 4096; // Just a guess.
WorkingSetInformationBuffer buffer;
- int retries = 5;
- for (;;) {
- DWORD buffer_size = sizeof(PSAPI_WORKING_SET_INFORMATION) +
- (number_of_entries * sizeof(PSAPI_WORKING_SET_BLOCK));
-
- if (!buffer.Reserve(buffer_size))
- return false;
-
- // Call the function once to get number of items
- if (QueryWorkingSet(process_, buffer.get(), buffer_size))
- break; // Success
-
- if (GetLastError() != ERROR_BAD_LENGTH)
- return false;
-
- number_of_entries = static_cast<DWORD>(buffer->NumberOfEntries);
-
- // Maybe some entries are being added right now. Increase the buffer to
- // take that into account. Increasing by 10% should generally be enough,
- // especially considering the potentially low memory condition during the
- // call (when called from OomMemoryDetails) and the potentially high
- // number of entries (300K was observed in crash dumps).
- number_of_entries = static_cast<DWORD>(number_of_entries * 1.1);
-
- if (--retries == 0) {
- // If we're looping, eventually fail.
- return false;
- }
- }
+ if (!buffer.QueryPageEntries(process_))
+ return false;
- // On windows 2000 the function returns 1 even when the buffer is too small.
- // The number of entries that we are going to parse is the minimum between the
- // size we allocated and the real number of entries.
- number_of_entries =
- std::min(number_of_entries, static_cast<DWORD>(buffer->NumberOfEntries));
- for (unsigned int i = 0; i < number_of_entries; i++) {
+ size_t num_page_entries = buffer.GetPageEntryCount();
+ for (size_t i = 0; i < num_page_entries; i++) {
if (buffer->WorkingSetInfo[i].Shared) {
ws_shareable++;
if (buffer->WorkingSetInfo[i].ShareCount > 1)
@@ -229,6 +249,28 @@ bool ProcessMetrics::GetWorkingSetKBytes(WorkingSetKBytes* ws_usage) const {
ws_usage->priv = ws_private * PAGESIZE_KB;
ws_usage->shareable = ws_shareable * PAGESIZE_KB;
ws_usage->shared = ws_shared * PAGESIZE_KB;
+
+ return true;
+}
+
+// This function calculates the proportional set size for a process.
+bool ProcessMetrics::GetProportionalSetSizeBytes(uint64_t* pss_bytes) const {
+ double ws_pss = 0.0;
+
+ WorkingSetInformationBuffer buffer;
+ if (!buffer.QueryPageEntries(process_))
+ return false;
+
+ size_t num_page_entries = buffer.GetPageEntryCount();
+ for (size_t i = 0; i < num_page_entries; i++) {
+ if (buffer->WorkingSetInfo[i].Shared &&
+ buffer->WorkingSetInfo[i].ShareCount > 0)
+ ws_pss += 1.0 / buffer->WorkingSetInfo[i].ShareCount;
+ else
+ ws_pss += 1.0;
+ }
+
+ *pss_bytes = static_cast<uint64_t>(ws_pss * GetPageSize());
return true;
}
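To make the loop above concrete, a worked example with hypothetical numbers: a working set containing 3 private pages plus 4 pages each shared by two processes accumulates

  ws_pss = 3 * 1.0 + 4 * (1.0 / 2) = 5.0

so *pss_bytes ends up as 5 * GetPageSize(): private pages count in full, and shared pages are split evenly among their sharers.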
diff --git a/chromium/base/process/process_posix.cc b/chromium/base/process/process_posix.cc
index 20fab921416..55b7ac90e29 100644
--- a/chromium/base/process/process_posix.cc
+++ b/chromium/base/process/process_posix.cc
@@ -350,11 +350,11 @@ bool Process::Terminate(int exit_code, bool wait) const {
}
#endif // !defined(OS_NACL_NONSFI)
-bool Process::WaitForExit(int* exit_code) {
+bool Process::WaitForExit(int* exit_code) const {
return WaitForExitWithTimeout(TimeDelta::Max(), exit_code);
}
-bool Process::WaitForExitWithTimeout(TimeDelta timeout, int* exit_code) {
+bool Process::WaitForExitWithTimeout(TimeDelta timeout, int* exit_code) const {
// Record the event that this thread is blocking upon (for hang diagnosis).
base::debug::ScopedProcessWaitActivity process_activity(this);
diff --git a/chromium/base/process/process_util_unittest.cc b/chromium/base/process/process_util_unittest.cc
index 87fdd012309..7031706e0b7 100644
--- a/chromium/base/process/process_util_unittest.cc
+++ b/chromium/base/process/process_util_unittest.cc
@@ -257,7 +257,7 @@ TEST_F(ProcessUtilTest, GetProcId) {
}
#endif // defined(OS_WIN)
-#if !defined(OS_MACOSX)
+#if !defined(OS_MACOSX) && !defined(OS_ANDROID)
// This test is disabled on Mac, since it's flaky due to ReportCrash
// taking a variable amount of time to parse and load the debug and
// symbol data for this unit test's executable before firing the
@@ -266,23 +266,14 @@ TEST_F(ProcessUtilTest, GetProcId) {
// TODO(gspencer): turn this test process into a very small program
// with no symbols (instead of using the multiprocess testing
// framework) to reduce the ReportCrash overhead.
+//
+// It is disabled on Android as MultiprocessTests are started as services that
+// the framework restarts on crashes.
const char kSignalFileCrash[] = "CrashingChildProcess.die";
MULTIPROCESS_TEST_MAIN(CrashingChildProcess) {
WaitToDie(ProcessUtilTest::GetSignalFilePath(kSignalFileCrash).c_str());
-#if defined(OS_ANDROID)
- // Android L+ expose signal and sigaction symbols that override the system
- // ones. There is a bug in these functions where a request to set the handler
- // to SIG_DFL is ignored. In that case, an infinite loop is entered as the
- // signal is repeatedly sent to the crash dump signal handler.
- // To work around this, directly call the system's sigaction.
- struct kernel_sigaction sa;
- memset(&sa, 0, sizeof(sa));
- sys_sigemptyset(&sa.sa_mask);
- sa.sa_handler_ = SIG_DFL;
- sa.sa_flags = SA_RESTART;
- sys_rt_sigaction(SIGSEGV, &sa, NULL, sizeof(kernel_sigset_t));
-#elif defined(OS_POSIX)
+#if defined(OS_POSIX)
// Have to disable to signal handler for segv so we can get a crash
// instead of an abnormal termination through the crash dump handler.
::signal(SIGSEGV, SIG_DFL);
@@ -331,7 +322,7 @@ TEST_F(ProcessUtilTest, MAYBE_GetTerminationStatusCrash) {
base::debug::EnableInProcessStackDumping();
remove(signal_file.c_str());
}
-#endif // !defined(OS_MACOSX)
+#endif // !defined(OS_MACOSX) && !defined(OS_ANDROID)
MULTIPROCESS_TEST_MAIN(KilledChildProcess) {
WaitToDie(ProcessUtilTest::GetSignalFilePath(kSignalFileKill).c_str());
diff --git a/chromium/base/process/process_win.cc b/chromium/base/process/process_win.cc
index 6629f4565a7..afc6a786ba4 100644
--- a/chromium/base/process/process_win.cc
+++ b/chromium/base/process/process_win.cc
@@ -137,12 +137,12 @@ bool Process::Terminate(int exit_code, bool wait) const {
return result;
}
-bool Process::WaitForExit(int* exit_code) {
+bool Process::WaitForExit(int* exit_code) const {
return WaitForExitWithTimeout(TimeDelta::FromMilliseconds(INFINITE),
exit_code);
}
-bool Process::WaitForExitWithTimeout(TimeDelta timeout, int* exit_code) {
+bool Process::WaitForExitWithTimeout(TimeDelta timeout, int* exit_code) const {
// Record the event that this thread is blocking upon (for hang diagnosis).
base::debug::ScopedProcessWaitActivity process_activity(this);
diff --git a/chromium/base/profiler/native_stack_sampler.h b/chromium/base/profiler/native_stack_sampler.h
index c7ef84ae31c..8d7e441b693 100644
--- a/chromium/base/profiler/native_stack_sampler.h
+++ b/chromium/base/profiler/native_stack_sampler.h
@@ -21,12 +21,22 @@ class NativeStackSamplerTestDelegate;
// given thread.
class NativeStackSampler {
public:
+ // The callback type used to add annotations to a sample during collection.
+ // This is passed to the native sampler to be applied at the most appropriate
+ // time. It is a simple function-pointer because the generated code must be
+ // completely predictable and do nothing that could acquire a mutex; a
+ // Callback object is code outside the control of this object and could,
+ // for example, acquire a mutex as part of allocating memory for a LOG
+ // message.
+ using AnnotateCallback = void (*)(StackSamplingProfiler::Sample*);
+
virtual ~NativeStackSampler();
// Creates a stack sampler that records samples for |thread_handle|. Returns
// null if this platform does not support stack sampling.
static std::unique_ptr<NativeStackSampler> Create(
PlatformThreadId thread_id,
+ AnnotateCallback annotator,
NativeStackSamplerTestDelegate* test_delegate);
// The following functions are all called on the SamplingThread (not the
diff --git a/chromium/base/profiler/native_stack_sampler_posix.cc b/chromium/base/profiler/native_stack_sampler_posix.cc
index a3867661cd8..54abb2e3d3a 100644
--- a/chromium/base/profiler/native_stack_sampler_posix.cc
+++ b/chromium/base/profiler/native_stack_sampler_posix.cc
@@ -8,6 +8,7 @@ namespace base {
std::unique_ptr<NativeStackSampler> NativeStackSampler::Create(
PlatformThreadId thread_id,
+ AnnotateCallback annotator,
NativeStackSamplerTestDelegate* test_delegate) {
return std::unique_ptr<NativeStackSampler>();
}
diff --git a/chromium/base/profiler/native_stack_sampler_win.cc b/chromium/base/profiler/native_stack_sampler_win.cc
index 063374f19d5..e1605109fe9 100644
--- a/chromium/base/profiler/native_stack_sampler_win.cc
+++ b/chromium/base/profiler/native_stack_sampler_win.cc
@@ -92,6 +92,19 @@ void RewritePointerIfInOriginalStack(uintptr_t top, uintptr_t bottom,
}
#endif
+void CopyMemoryFromStack(void* to, const void* from, size_t length)
+ NO_SANITIZE("address") {
+#if defined(ADDRESS_SANITIZER)
+ // The following loop is an inlined version of memcpy. The code must be
+ // inlined to avoid instrumentation when using ASAN (AddressSanitizer); the
+ // stack profiler otherwise generates false positives when walking the stack.
+ for (size_t pos = 0; pos < length; ++pos)
+ reinterpret_cast<char*>(to)[pos] = reinterpret_cast<const char*>(from)[pos];
+#else
+ std::memcpy(to, from, length);
+#endif
+}
+
// Rewrites possible pointers to locations within the stack to point to the
// corresponding locations in the copy, and rewrites the non-volatile registers
// in |context| likewise. This is necessary to handle stack frames with dynamic
@@ -319,6 +332,8 @@ void SuspendThreadAndRecordStack(
void* stack_copy_buffer,
size_t stack_copy_buffer_size,
std::vector<RecordedFrame>* stack,
+ NativeStackSampler::AnnotateCallback annotator,
+ StackSamplingProfiler::Sample* sample,
NativeStackSamplerTestDelegate* test_delegate) {
DCHECK(stack->empty());
@@ -353,8 +368,10 @@ void SuspendThreadAndRecordStack(
if (PointsToGuardPage(bottom))
return;
- std::memcpy(stack_copy_buffer, reinterpret_cast<const void*>(bottom),
- top - bottom);
+ (*annotator)(sample);
+
+ CopyMemoryFromStack(stack_copy_buffer,
+ reinterpret_cast<const void*>(bottom), top - bottom);
}
if (test_delegate)
@@ -370,6 +387,7 @@ void SuspendThreadAndRecordStack(
class NativeStackSamplerWin : public NativeStackSampler {
public:
NativeStackSamplerWin(win::ScopedHandle thread_handle,
+ AnnotateCallback annotator,
NativeStackSamplerTestDelegate* test_delegate);
~NativeStackSamplerWin() override;
@@ -385,7 +403,7 @@ class NativeStackSamplerWin : public NativeStackSampler {
// reserved stack size is 1 MB and Chrome Windows threads currently always
// use the default, but this allows for expansion if it occurs. The size
// beyond the actual stack size consists of unallocated virtual memory pages
- // so carries little cost (just a bit of wated address space).
+ // so carries little cost (just a bit of wasted address space).
kStackCopyBufferSize = 2 * 1024 * 1024
};
@@ -408,6 +426,8 @@ class NativeStackSamplerWin : public NativeStackSampler {
win::ScopedHandle thread_handle_;
+ const AnnotateCallback annotator_;
+
NativeStackSamplerTestDelegate* const test_delegate_;
// The stack base address corresponding to |thread_handle_|.
@@ -430,11 +450,15 @@ class NativeStackSamplerWin : public NativeStackSampler {
NativeStackSamplerWin::NativeStackSamplerWin(
win::ScopedHandle thread_handle,
+ AnnotateCallback annotator,
NativeStackSamplerTestDelegate* test_delegate)
- : thread_handle_(thread_handle.Take()), test_delegate_(test_delegate),
+ : thread_handle_(thread_handle.Take()),
+ annotator_(annotator),
+ test_delegate_(test_delegate),
thread_stack_base_address_(
GetThreadEnvironmentBlock(thread_handle_.Get())->Tib.StackBase),
stack_copy_buffer_(new unsigned char[kStackCopyBufferSize]) {
+ DCHECK(annotator_);
}
NativeStackSamplerWin::~NativeStackSamplerWin() {
@@ -456,7 +480,7 @@ void NativeStackSamplerWin::RecordStackSample(
std::vector<RecordedFrame> stack;
SuspendThreadAndRecordStack(thread_handle_.Get(), thread_stack_base_address_,
stack_copy_buffer_.get(), kStackCopyBufferSize,
- &stack, test_delegate_);
+ &stack, annotator_, sample, test_delegate_);
CopyToSample(stack, sample, current_modules_);
}
@@ -508,11 +532,11 @@ void NativeStackSamplerWin::CopyToSample(
const std::vector<RecordedFrame>& stack,
StackSamplingProfiler::Sample* sample,
std::vector<StackSamplingProfiler::Module>* modules) {
- sample->clear();
- sample->reserve(stack.size());
+ sample->frames.clear();
+ sample->frames.reserve(stack.size());
for (const RecordedFrame& frame : stack) {
- sample->push_back(StackSamplingProfiler::Frame(
+ sample->frames.push_back(StackSamplingProfiler::Frame(
reinterpret_cast<uintptr_t>(frame.instruction_pointer),
GetModuleIndex(frame.module.Get(), modules)));
}
@@ -522,6 +546,7 @@ void NativeStackSamplerWin::CopyToSample(
std::unique_ptr<NativeStackSampler> NativeStackSampler::Create(
PlatformThreadId thread_id,
+ AnnotateCallback annotator,
NativeStackSamplerTestDelegate* test_delegate) {
#if _WIN64
// Get the thread's handle.
@@ -532,7 +557,7 @@ std::unique_ptr<NativeStackSampler> NativeStackSampler::Create(
if (thread_handle) {
return std::unique_ptr<NativeStackSampler>(new NativeStackSamplerWin(
- win::ScopedHandle(thread_handle), test_delegate));
+ win::ScopedHandle(thread_handle), annotator, test_delegate));
}
#endif
return std::unique_ptr<NativeStackSampler>();
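For context on the CopyMemoryFromStack helper above: the per-byte loop appears to exist so that no instrumented memory routine (such as a hooked memcpy) runs while the target thread is suspended. A minimal standalone sketch of the same pattern, with illustrative names (CopyStackBytes is not part of this patch):

  #include <cstddef>

  // Copies |length| bytes without calling into a library memcpy; intended to
  // be safe to run while the thread that owns |from| is suspended.
  void CopyStackBytes(void* to, const void* from, size_t length) {
    char* dst = static_cast<char*>(to);
    const char* src = static_cast<const char*>(from);
    for (size_t pos = 0; pos < length; ++pos)
      dst[pos] = src[pos];
  }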
diff --git a/chromium/base/profiler/stack_sampling_profiler.cc b/chromium/base/profiler/stack_sampling_profiler.cc
index e25440f80c5..f294251cd32 100644
--- a/chromium/base/profiler/stack_sampling_profiler.cc
+++ b/chromium/base/profiler/stack_sampling_profiler.cc
@@ -82,6 +82,22 @@ void AsyncRunner::RunCallbackAndDeleteInstance(
task_runner->DeleteSoon(FROM_HERE, object_to_be_deleted.release());
}
+void ChangeAtomicFlags(subtle::Atomic32* flags,
+ subtle::Atomic32 set,
+ subtle::Atomic32 clear) {
+ DCHECK(set != 0 || clear != 0);
+ DCHECK_EQ(0, set & clear);
+
+ subtle::Atomic32 bits = subtle::NoBarrier_Load(flags);
+ while (true) {
+ subtle::Atomic32 existing =
+ subtle::NoBarrier_CompareAndSwap(flags, bits, (bits | set) & ~clear);
+ if (existing == bits)
+ break;
+ bits = existing;
+ }
+}
+
} // namespace
// StackSamplingProfiler::Module ----------------------------------------------
@@ -106,6 +122,21 @@ StackSamplingProfiler::Frame::Frame()
: instruction_pointer(0), module_index(kUnknownModuleIndex) {
}
+// StackSamplingProfiler::Sample ----------------------------------------------
+
+StackSamplingProfiler::Sample::Sample() {}
+
+StackSamplingProfiler::Sample::Sample(const Sample& sample) = default;
+
+StackSamplingProfiler::Sample::~Sample() {}
+
+StackSamplingProfiler::Sample::Sample(const Frame& frame) {
+ frames.push_back(std::move(frame));
+}
+
+StackSamplingProfiler::Sample::Sample(const std::vector<Frame>& frames)
+ : frames(frames) {}
+
// StackSamplingProfiler::CallStackProfile ------------------------------------
StackSamplingProfiler::CallStackProfile::CallStackProfile() {}
@@ -229,6 +260,8 @@ void StackSamplingProfiler::SamplingThread::Stop() {
// StackSamplingProfiler ------------------------------------------------------
+subtle::Atomic32 StackSamplingProfiler::process_milestones_ = 0;
+
StackSamplingProfiler::SamplingParams::SamplingParams()
: initial_delay(TimeDelta::FromMilliseconds(0)),
bursts(1),
@@ -272,7 +305,8 @@ void StackSamplingProfiler::Start() {
return;
std::unique_ptr<NativeStackSampler> native_sampler =
- NativeStackSampler::Create(thread_id_, test_delegate_);
+ NativeStackSampler::Create(thread_id_, &RecordAnnotations,
+ test_delegate_);
if (!native_sampler)
return;
@@ -288,6 +322,27 @@ void StackSamplingProfiler::Stop() {
sampling_thread_->Stop();
}
+// static
+void StackSamplingProfiler::SetProcessMilestone(int milestone) {
+ DCHECK_LE(0, milestone);
+ DCHECK_GT(static_cast<int>(sizeof(process_milestones_) * 8), milestone);
+ DCHECK_EQ(0, subtle::NoBarrier_Load(&process_milestones_) & (1 << milestone));
+ ChangeAtomicFlags(&process_milestones_, 1 << milestone, 0);
+}
+
+// static
+void StackSamplingProfiler::ResetAnnotationsForTesting() {
+ subtle::NoBarrier_Store(&process_milestones_, 0u);
+}
+
+// static
+void StackSamplingProfiler::RecordAnnotations(Sample* sample) {
+ // The code inside this method must not do anything that could acquire a
+ // mutex, including allocating memory (which includes LOG messages) because
+ // that mutex could be held by a stopped thread, thus resulting in deadlock.
+ sample->process_milestones = subtle::NoBarrier_Load(&process_milestones_);
+}
+
// StackSamplingProfiler::Frame global functions ------------------------------
bool operator==(const StackSamplingProfiler::Module& a,
@@ -296,6 +351,26 @@ bool operator==(const StackSamplingProfiler::Module& a,
a.filename == b.filename;
}
+bool operator==(const StackSamplingProfiler::Sample& a,
+ const StackSamplingProfiler::Sample& b) {
+ return a.process_milestones == b.process_milestones && a.frames == b.frames;
+}
+
+bool operator!=(const StackSamplingProfiler::Sample& a,
+ const StackSamplingProfiler::Sample& b) {
+ return !(a == b);
+}
+
+bool operator<(const StackSamplingProfiler::Sample& a,
+ const StackSamplingProfiler::Sample& b) {
+ if (a.process_milestones < b.process_milestones)
+ return true;
+ if (a.process_milestones > b.process_milestones)
+ return false;
+
+ return a.frames < b.frames;
+}
+
bool operator==(const StackSamplingProfiler::Frame &a,
const StackSamplingProfiler::Frame &b) {
return a.instruction_pointer == b.instruction_pointer &&
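The ChangeAtomicFlags helper added above is a standard compare-and-swap retry loop: it keeps recomputing (bits | set) & ~clear until no other thread has raced in between. A minimal sketch of the same idea using std::atomic (illustrative only; the patch itself uses base::subtle atomics):

  #include <atomic>
  #include <cstdint>

  // Atomically sets the bits in |set| and clears the bits in |clear|.
  void ChangeFlags(std::atomic<uint32_t>* flags, uint32_t set, uint32_t clear) {
    uint32_t bits = flags->load(std::memory_order_relaxed);
    // On failure, compare_exchange_weak reloads |bits| with the current value,
    // so the loop simply retries with fresh data.
    while (!flags->compare_exchange_weak(bits, (bits | set) & ~clear,
                                         std::memory_order_relaxed)) {
    }
  }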
diff --git a/chromium/base/profiler/stack_sampling_profiler.h b/chromium/base/profiler/stack_sampling_profiler.h
index cf1daf7e16d..622f6e5764a 100644
--- a/chromium/base/profiler/stack_sampling_profiler.h
+++ b/chromium/base/profiler/stack_sampling_profiler.h
@@ -11,6 +11,7 @@
#include <string>
#include <vector>
+#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/callback.h"
#include "base/files/file_path.h"
@@ -107,8 +108,27 @@ class BASE_EXPORT StackSamplingProfiler {
size_t module_index;
};
- // Sample represents a set of stack frames.
- using Sample = std::vector<Frame>;
+ // Sample represents a set of stack frames with some extra information.
+ struct BASE_EXPORT Sample {
+ Sample();
+ Sample(const Sample& sample);
+ ~Sample();
+
+ // These constructors are used only during testing.
+ Sample(const Frame& frame);
+ Sample(const std::vector<Frame>& frames);
+
+ // The entire stack frame when the sample is taken.
+ std::vector<Frame> frames;
+
+ // A bit-field indicating which process milestones have passed. This can be
+ // used to tell where in the process lifetime the samples are taken. Just
+ // as a "lifetime" can only move forward, these bits mark the milestones of
+ // the processes life as they occur. Bits can be set but never reset. The
+ // actual definition of the individual bits is left to the user of this
+ // module.
+ uint32_t process_milestones = 0;
+ };
// CallStackProfile represents a set of samples.
struct BASE_EXPORT CallStackProfile {
@@ -201,6 +221,15 @@ class BASE_EXPORT StackSamplingProfiler {
// whichever occurs first.
void Stop();
+ // Set the current system state that is recorded with each captured stack
+ // frame. This is thread-safe, so it can be called from anywhere. The
+ // parameter value should be from an enumeration of the appropriate type with
+ // values ranging from 0 to 31, inclusive. This sets bits within the Sample
+ // field |process_milestones|. The actual meanings of these bits are defined
+ // (globally) by the caller(s).
+ static void SetProcessMilestone(int milestone);
+ static void ResetAnnotationsForTesting();
+
private:
// SamplingThread is a separate thread used to suspend and sample stacks from
// the target thread.
@@ -243,6 +272,16 @@ class BASE_EXPORT StackSamplingProfiler {
DISALLOW_COPY_AND_ASSIGN(SamplingThread);
};
+ // Adds annotations to a Sample.
+ static void RecordAnnotations(Sample* sample);
+
+ // This global variable holds the current system state and is recorded with
+ // every captured sample; this is done on a separate thread, which is why
+ // updates to it must be atomic. A PostTask to move the updates to that thread
+ // would skew the timing and a lock could result in deadlock if the thread
+ // making a change was also being profiled and got stopped.
+ static subtle::Atomic32 process_milestones_;
+
// The thread whose stack will be sampled.
PlatformThreadId thread_id_;
@@ -263,6 +302,12 @@ class BASE_EXPORT StackSamplingProfiler {
// done in tests and by the metrics provider code.
BASE_EXPORT bool operator==(const StackSamplingProfiler::Module& a,
const StackSamplingProfiler::Module& b);
+BASE_EXPORT bool operator==(const StackSamplingProfiler::Sample& a,
+ const StackSamplingProfiler::Sample& b);
+BASE_EXPORT bool operator!=(const StackSamplingProfiler::Sample& a,
+ const StackSamplingProfiler::Sample& b);
+BASE_EXPORT bool operator<(const StackSamplingProfiler::Sample& a,
+ const StackSamplingProfiler::Sample& b);
BASE_EXPORT bool operator==(const StackSamplingProfiler::Frame& a,
const StackSamplingProfiler::Frame& b);
BASE_EXPORT bool operator<(const StackSamplingProfiler::Frame& a,
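To illustrate how the milestone API declared above is meant to be used, a hedged sketch with a caller-defined enumeration (the enum values are hypothetical; the header deliberately leaves their meaning to the embedder):

  #include "base/profiler/stack_sampling_profiler.h"

  // Hypothetical milestone definition owned by the embedding code.
  enum ProcessMilestone {
    MILESTONE_MAIN_MESSAGE_LOOP_START = 0,  // bit 0
    MILESTONE_FIRST_PAINT = 1,              // bit 1
    // ... values up to 31 are allowed.
  };

  void OnFirstPaint() {
    // Sets bit 1; every Sample captured from now on carries it in
    // Sample::process_milestones.
    base::StackSamplingProfiler::SetProcessMilestone(MILESTONE_FIRST_PAINT);
  }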
diff --git a/chromium/base/profiler/stack_sampling_profiler_unittest.cc b/chromium/base/profiler/stack_sampling_profiler_unittest.cc
index 4fc70b8e050..075d6838c1a 100644
--- a/chromium/base/profiler/stack_sampling_profiler_unittest.cc
+++ b/chromium/base/profiler/stack_sampling_profiler_unittest.cc
@@ -49,6 +49,7 @@ namespace base {
using SamplingParams = StackSamplingProfiler::SamplingParams;
using Frame = StackSamplingProfiler::Frame;
+using Frames = std::vector<StackSamplingProfiler::Frame>;
using Module = StackSamplingProfiler::Module;
using Sample = StackSamplingProfiler::Sample;
using CallStackProfile = StackSamplingProfiler::CallStackProfile;
@@ -396,7 +397,7 @@ const void* MaybeFixupFunctionAddressForILT(const void* function_address) {
// Searches through the frames in |sample|, returning an iterator to the first
// frame that has an instruction pointer within |target_function|. Returns
// sample.end() if no such frames are found.
-Sample::const_iterator FindFirstFrameWithinFunction(
+Frames::const_iterator FindFirstFrameWithinFunction(
const Sample& sample,
TargetFunction target_function) {
uintptr_t function_start = reinterpret_cast<uintptr_t>(
@@ -404,12 +405,12 @@ Sample::const_iterator FindFirstFrameWithinFunction(
target_function)));
uintptr_t function_end =
reinterpret_cast<uintptr_t>(target_function(nullptr, nullptr, nullptr));
- for (auto it = sample.begin(); it != sample.end(); ++it) {
+ for (auto it = sample.frames.begin(); it != sample.frames.end(); ++it) {
if ((it->instruction_pointer >= function_start) &&
(it->instruction_pointer <= function_end))
return it;
}
- return sample.end();
+ return sample.frames.end();
}
// Formats a sample into a string that can be output for test diagnostics.
@@ -417,7 +418,7 @@ std::string FormatSampleForDiagnosticOutput(
const Sample& sample,
const std::vector<Module>& modules) {
std::string output;
- for (const Frame& frame : sample) {
+ for (const Frame& frame : sample.frames) {
output += StringPrintf(
"0x%p %s\n", reinterpret_cast<const void*>(frame.instruction_pointer),
modules[frame.module_index].filename.AsUTF8Unsafe().c_str());
@@ -518,13 +519,12 @@ void TestLibraryUnload(bool wait_until_unloaded) {
// Check that the stack contains a frame for
// TargetThread::SignalAndWaitUntilSignaled().
- Sample::const_iterator end_frame = FindFirstFrameWithinFunction(
- sample,
- &TargetThread::SignalAndWaitUntilSignaled);
- ASSERT_TRUE(end_frame != sample.end())
+ Frames::const_iterator end_frame = FindFirstFrameWithinFunction(
+ sample, &TargetThread::SignalAndWaitUntilSignaled);
+ ASSERT_TRUE(end_frame != sample.frames.end())
<< "Function at "
<< MaybeFixupFunctionAddressForILT(reinterpret_cast<const void*>(
- &TargetThread::SignalAndWaitUntilSignaled))
+ &TargetThread::SignalAndWaitUntilSignaled))
<< " was not found in stack:\n"
<< FormatSampleForDiagnosticOutput(sample, profile.modules);
@@ -536,7 +536,7 @@ void TestLibraryUnload(bool wait_until_unloaded) {
// ... WaitableEvent and system frames ...
// TargetThread::SignalAndWaitUntilSignaled
// TargetThread::OtherLibraryCallback
- EXPECT_EQ(2, sample.end() - end_frame)
+ EXPECT_EQ(2, sample.frames.end() - end_frame)
<< "Stack:\n"
<< FormatSampleForDiagnosticOutput(sample, profile.modules);
} else {
@@ -545,20 +545,19 @@ void TestLibraryUnload(bool wait_until_unloaded) {
// the same stack as |wait_until_unloaded|, if not we should have the full
// stack. The important thing is that we should not crash.
- if ((sample.end() - 1) - end_frame == 2) {
+ if ((sample.frames.end() - 1) - end_frame == 2) {
// This is the same case as |wait_until_unloaded|.
return;
}
// Check that the stack contains a frame for
// TargetThread::CallThroughOtherLibrary().
- Sample::const_iterator other_library_frame = FindFirstFrameWithinFunction(
- sample,
- &TargetThread::CallThroughOtherLibrary);
- ASSERT_TRUE(other_library_frame != sample.end())
+ Frames::const_iterator other_library_frame = FindFirstFrameWithinFunction(
+ sample, &TargetThread::CallThroughOtherLibrary);
+ ASSERT_TRUE(other_library_frame != sample.frames.end())
<< "Function at "
<< MaybeFixupFunctionAddressForILT(reinterpret_cast<const void*>(
- &TargetThread::CallThroughOtherLibrary))
+ &TargetThread::CallThroughOtherLibrary))
<< " was not found in stack:\n"
<< FormatSampleForDiagnosticOutput(sample, profile.modules);
@@ -586,6 +585,8 @@ void TestLibraryUnload(bool wait_until_unloaded) {
#define MAYBE_Basic DISABLED_Basic
#endif
TEST(StackSamplingProfilerTest, MAYBE_Basic) {
+ StackSamplingProfiler::ResetAnnotationsForTesting();
+
SamplingParams params;
params.sampling_interval = TimeDelta::FromMilliseconds(0);
params.samples_per_burst = 1;
@@ -600,7 +601,8 @@ TEST(StackSamplingProfilerTest, MAYBE_Basic) {
ASSERT_EQ(1u, profile.samples.size());
EXPECT_EQ(params.sampling_interval, profile.sampling_period);
const Sample& sample = profile.samples[0];
- for (const auto& frame : sample) {
+ EXPECT_EQ(0u, sample.process_milestones);
+ for (const auto& frame : sample.frames) {
ASSERT_GE(frame.module_index, 0u);
ASSERT_LT(frame.module_index, profile.modules.size());
}
@@ -608,13 +610,12 @@ TEST(StackSamplingProfilerTest, MAYBE_Basic) {
// Check that the stack contains a frame for
// TargetThread::SignalAndWaitUntilSignaled() and that the frame has this
// executable's module.
- Sample::const_iterator loc = FindFirstFrameWithinFunction(
- sample,
- &TargetThread::SignalAndWaitUntilSignaled);
- ASSERT_TRUE(loc != sample.end())
+ Frames::const_iterator loc = FindFirstFrameWithinFunction(
+ sample, &TargetThread::SignalAndWaitUntilSignaled);
+ ASSERT_TRUE(loc != sample.frames.end())
<< "Function at "
<< MaybeFixupFunctionAddressForILT(reinterpret_cast<const void*>(
- &TargetThread::SignalAndWaitUntilSignaled))
+ &TargetThread::SignalAndWaitUntilSignaled))
<< " was not found in stack:\n"
<< FormatSampleForDiagnosticOutput(sample, profile.modules);
FilePath executable_path;
@@ -622,6 +623,41 @@ TEST(StackSamplingProfilerTest, MAYBE_Basic) {
EXPECT_EQ(executable_path, profile.modules[loc->module_index].filename);
}
+// Checks that annotations are recorded in samples.
+#if defined(STACK_SAMPLING_PROFILER_SUPPORTED)
+#define MAYBE_Annotations Annotations
+#else
+#define MAYBE_Annotations DISABLED_Annotations
+#endif
+TEST(StackSamplingProfilerTest, MAYBE_Annotations) {
+ StackSamplingProfiler::ResetAnnotationsForTesting();
+
+ SamplingParams params;
+ params.sampling_interval = TimeDelta::FromMilliseconds(0);
+ params.samples_per_burst = 1;
+
+ // Check that a run picks up annotations.
+ StackSamplingProfiler::SetProcessMilestone(1);
+ std::vector<CallStackProfile> profiles1;
+ CaptureProfiles(params, AVeryLongTimeDelta(), &profiles1);
+ ASSERT_EQ(1u, profiles1.size());
+ const CallStackProfile& profile1 = profiles1[0];
+ ASSERT_EQ(1u, profile1.samples.size());
+ const Sample& sample1 = profile1.samples[0];
+ EXPECT_EQ(1u << 1, sample1.process_milestones);
+
+ // Run it a second time but with changed annotations. These annotations
+ // should appear in the first acquired sample.
+ StackSamplingProfiler::SetProcessMilestone(2);
+ std::vector<CallStackProfile> profiles2;
+ CaptureProfiles(params, AVeryLongTimeDelta(), &profiles2);
+ ASSERT_EQ(1u, profiles2.size());
+ const CallStackProfile& profile2 = profiles2[0];
+ ASSERT_EQ(1u, profile2.samples.size());
+ const Sample& sample2 = profile2.samples[0];
+ EXPECT_EQ(sample1.process_milestones | (1u << 2), sample2.process_milestones);
+}
+
// Checks that the profiler handles stacks containing dynamically-allocated
// stack memory.
#if defined(STACK_SAMPLING_PROFILER_SUPPORTED)
@@ -657,24 +693,22 @@ TEST(StackSamplingProfilerTest, MAYBE_Alloca) {
// Check that the stack contains a frame for
// TargetThread::SignalAndWaitUntilSignaled().
- Sample::const_iterator end_frame = FindFirstFrameWithinFunction(
- sample,
- &TargetThread::SignalAndWaitUntilSignaled);
- ASSERT_TRUE(end_frame != sample.end())
+ Frames::const_iterator end_frame = FindFirstFrameWithinFunction(
+ sample, &TargetThread::SignalAndWaitUntilSignaled);
+ ASSERT_TRUE(end_frame != sample.frames.end())
<< "Function at "
<< MaybeFixupFunctionAddressForILT(reinterpret_cast<const void*>(
- &TargetThread::SignalAndWaitUntilSignaled))
+ &TargetThread::SignalAndWaitUntilSignaled))
<< " was not found in stack:\n"
<< FormatSampleForDiagnosticOutput(sample, profile.modules);
// Check that the stack contains a frame for TargetThread::CallWithAlloca().
- Sample::const_iterator alloca_frame = FindFirstFrameWithinFunction(
- sample,
- &TargetThread::CallWithAlloca);
- ASSERT_TRUE(alloca_frame != sample.end())
+ Frames::const_iterator alloca_frame =
+ FindFirstFrameWithinFunction(sample, &TargetThread::CallWithAlloca);
+ ASSERT_TRUE(alloca_frame != sample.frames.end())
<< "Function at "
- << MaybeFixupFunctionAddressForILT(reinterpret_cast<const void*>(
- &TargetThread::CallWithAlloca))
+ << MaybeFixupFunctionAddressForILT(
+ reinterpret_cast<const void*>(&TargetThread::CallWithAlloca))
<< " was not found in stack:\n"
<< FormatSampleForDiagnosticOutput(sample, profile.modules);
@@ -927,25 +961,23 @@ TEST(StackSamplingProfilerTest, MAYBE_OtherLibrary) {
// Check that the stack contains a frame for
// TargetThread::CallThroughOtherLibrary().
- Sample::const_iterator other_library_frame = FindFirstFrameWithinFunction(
- sample,
- &TargetThread::CallThroughOtherLibrary);
- ASSERT_TRUE(other_library_frame != sample.end())
+ Frames::const_iterator other_library_frame = FindFirstFrameWithinFunction(
+ sample, &TargetThread::CallThroughOtherLibrary);
+ ASSERT_TRUE(other_library_frame != sample.frames.end())
<< "Function at "
<< MaybeFixupFunctionAddressForILT(reinterpret_cast<const void*>(
- &TargetThread::CallThroughOtherLibrary))
+ &TargetThread::CallThroughOtherLibrary))
<< " was not found in stack:\n"
<< FormatSampleForDiagnosticOutput(sample, profile.modules);
// Check that the stack contains a frame for
// TargetThread::SignalAndWaitUntilSignaled().
- Sample::const_iterator end_frame = FindFirstFrameWithinFunction(
- sample,
- &TargetThread::SignalAndWaitUntilSignaled);
- ASSERT_TRUE(end_frame != sample.end())
+ Frames::const_iterator end_frame = FindFirstFrameWithinFunction(
+ sample, &TargetThread::SignalAndWaitUntilSignaled);
+ ASSERT_TRUE(end_frame != sample.frames.end())
<< "Function at "
<< MaybeFixupFunctionAddressForILT(reinterpret_cast<const void*>(
- &TargetThread::SignalAndWaitUntilSignaled))
+ &TargetThread::SignalAndWaitUntilSignaled))
<< " was not found in stack:\n"
<< FormatSampleForDiagnosticOutput(sample, profile.modules);
diff --git a/chromium/base/run_loop.cc b/chromium/base/run_loop.cc
index a2322f84958..4c19d3589fd 100644
--- a/chromium/base/run_loop.cc
+++ b/chromium/base/run_loop.cc
@@ -19,12 +19,14 @@ RunLoop::RunLoop()
running_(false),
quit_when_idle_received_(false),
weak_factory_(this) {
+ DCHECK(loop_);
}
RunLoop::~RunLoop() {
}
void RunLoop::Run() {
+ DCHECK(thread_checker_.CalledOnValidThread());
if (!BeforeRun())
return;
@@ -44,6 +46,7 @@ void RunLoop::RunUntilIdle() {
}
void RunLoop::Quit() {
+ DCHECK(thread_checker_.CalledOnValidThread());
quit_called_ = true;
if (running_ && loop_->run_loop_ == this) {
// This is the inner-most RunLoop, so quit now.
@@ -52,6 +55,7 @@ void RunLoop::Quit() {
}
void RunLoop::QuitWhenIdle() {
+ DCHECK(thread_checker_.CalledOnValidThread());
quit_when_idle_received_ = true;
}
diff --git a/chromium/base/run_loop.h b/chromium/base/run_loop.h
index 635018f434e..077d097ba9a 100644
--- a/chromium/base/run_loop.h
+++ b/chromium/base/run_loop.h
@@ -10,6 +10,7 @@
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
#include "base/message_loop/message_loop.h"
+#include "base/threading/thread_checker.h"
#include "build/build_config.h"
namespace base {
@@ -105,6 +106,8 @@ class BASE_EXPORT RunLoop {
// that we should quit Run once it becomes idle.
bool quit_when_idle_received_;
+ base::ThreadChecker thread_checker_;
+
// WeakPtrFactory for QuitClosure safety.
base::WeakPtrFactory<RunLoop> weak_factory_;
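The RunLoop changes above apply the usual base::ThreadChecker pattern: bind an object to the thread that created it and DCHECK on every entry point. A small sketch of that pattern in isolation (the class name is illustrative):

  #include "base/logging.h"
  #include "base/threading/thread_checker.h"

  class BoundToOneThread {
   public:
    void DoSomething() {
      // Fails in debug builds if called from any thread other than the one
      // that constructed this object.
      DCHECK(thread_checker_.CalledOnValidThread());
      // ... actual work ...
    }

   private:
    base::ThreadChecker thread_checker_;
  };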
diff --git a/chromium/base/scoped_generic.h b/chromium/base/scoped_generic.h
index 84de6b7d50d..c2d51cfdb46 100644
--- a/chromium/base/scoped_generic.h
+++ b/chromium/base/scoped_generic.h
@@ -14,7 +14,7 @@
namespace base {
-// This class acts like ScopedPtr with a custom deleter (although is slightly
+// This class acts like unique_ptr with a custom deleter (although is slightly
// less fancy in some of the more esoteric respects) except that it keeps a
// copy of the object rather than a pointer, and we require that the contained
// object has some kind of "invalid" value.
@@ -22,12 +22,12 @@ namespace base {
// Defining a scoper based on this class allows you to get a scoper for
// non-pointer types without having to write custom code for set, reset, and
// move, etc. and get almost identical semantics that people are used to from
-// scoped_ptr.
+// unique_ptr.
//
// It is intended that you will typedef this class with an appropriate deleter
// to implement clean up tasks for objects that act like pointers from a
// resource management standpoint but aren't, such as file descriptors and
-// various types of operating system handles. Using scoped_ptr for these
+// various types of operating system handles. Using unique_ptr for these
// things requires that you keep a pointer to the handle valid for the lifetime
// of the scoper (which is easy to mess up).
//
@@ -97,7 +97,7 @@ class ScopedGeneric {
}
// Frees the currently owned object, if any. Then takes ownership of a new
- // object, if given. Self-resets are not allowd as on scoped_ptr. See
+ // object, if given. Self-resets are not allowed as on unique_ptr. See
// http://crbug.com/162971
void reset(const element_type& value = traits_type::InvalidValue()) {
if (data_.generic != traits_type::InvalidValue() && data_.generic == value)
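As the updated comment says, a scoper for a non-pointer resource is defined by pairing ScopedGeneric with a small traits struct. A hedged sketch for a POSIX file descriptor (names are illustrative; Chromium already provides an equivalent ScopedFD):

  #include <unistd.h>

  #include "base/scoped_generic.h"

  struct FdTraits {
    // The value meaning "no descriptor held".
    static int InvalidValue() { return -1; }
    // Called by the scoper to release a held descriptor.
    static void Free(int fd) { close(fd); }
  };

  using ScopedFdExample = base::ScopedGeneric<int, FdTraits>;
  // Usage: ScopedFdExample fd(open(path, O_RDONLY)); the descriptor is closed
  // automatically when |fd| goes out of scope.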
diff --git a/chromium/base/stl_util.h b/chromium/base/stl_util.h
index 3f7555dde54..016bb156450 100644
--- a/chromium/base/stl_util.h
+++ b/chromium/base/stl_util.h
@@ -54,46 +54,6 @@ inline char* string_as_array(std::string* str) {
return str->empty() ? NULL : &*str->begin();
}
-// The following functions are useful for cleaning up STL containers whose
-// elements point to allocated memory.
-
-// STLDeleteElements() deletes all the elements in an STL container and clears
-// the container. This function is suitable for use with a vector, set,
-// hash_set, or any other STL container which defines sensible begin(), end(),
-// and clear() methods.
-//
-// If container is NULL, this function is a no-op.
-template <class T>
-void STLDeleteElements(T* container) {
- if (!container)
- return;
-
- for (auto it = container->begin(); it != container->end();) {
- auto temp = it;
- ++it;
- delete *temp;
- }
-
- container->clear();
-}
-
-// Given an STL container consisting of (key, value) pairs, STLDeleteValues
-// deletes all the "value" components and clears the container. Does nothing
-// in the case it's given a NULL pointer.
-template <class T>
-void STLDeleteValues(T* container) {
- if (!container)
- return;
-
- for (auto it = container->begin(); it != container->end();) {
- auto temp = it;
- ++it;
- delete temp->second;
- }
-
- container->clear();
-}
-
// Test to see if a set, map, hash_set or hash_map contains a particular key.
// Returns true if the key is in the collection.
template <typename Collection, typename Key>
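The helpers deleted above are superseded by owning containers; a brief sketch of the replacement pattern, assuming std::unique_ptr elements (Widget is a placeholder type):

  #include <map>
  #include <memory>
  #include <vector>

  struct Widget {};

  // Instead of a raw-pointer container plus STLDeleteElements(&widgets):
  std::vector<std::unique_ptr<Widget>> widgets;

  // Instead of a raw-pointer map plus STLDeleteValues(&widget_map):
  std::map<int, std::unique_ptr<Widget>> widget_map;

  // Clearing or destroying either container now destroys the owned objects.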
diff --git a/chromium/base/strings/string_number_conversions.cc b/chromium/base/strings/string_number_conversions.cc
index 755811ded7e..9148def07c1 100644
--- a/chromium/base/strings/string_number_conversions.cc
+++ b/chromium/base/strings/string_number_conversions.cc
@@ -10,6 +10,7 @@
#include <wctype.h>
#include <limits>
+#include <type_traits>
#include "base/logging.h"
#include "base/numerics/safe_math.h"
@@ -35,7 +36,8 @@ struct IntToStringT {
// The ValueOrDie call below can never fail, because UnsignedAbs is valid
// for all valid inputs.
- auto res = CheckedNumeric<INT>(value).UnsignedAbs().ValueOrDie();
+ typename std::make_unsigned<INT>::type res =
+ CheckedNumeric<INT>(value).UnsignedAbs().ValueOrDie();
CHR* end = outbuf + kOutputBufSize;
CHR* i = end;
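The change above replaces auto with the spelled-out unsigned type; as a quick reminder of what std::make_unsigned yields (standalone illustration, not the Chromium helper):

  #include <type_traits>

  static_assert(std::is_same<std::make_unsigned<int>::type, unsigned int>::value,
                "make_unsigned maps int to unsigned int");
  static_assert(std::is_same<std::make_unsigned<long long>::type,
                             unsigned long long>::value,
                "make_unsigned maps long long to unsigned long long");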
diff --git a/chromium/base/strings/utf_string_conversions.cc b/chromium/base/strings/utf_string_conversions.cc
index 6b17eacd6ce..85450c6566d 100644
--- a/chromium/base/strings/utf_string_conversions.cc
+++ b/chromium/base/strings/utf_string_conversions.cc
@@ -180,10 +180,6 @@ bool UTF16ToUTF8(const char16* src, size_t src_len, std::string* output) {
}
std::string UTF16ToUTF8(StringPiece16 utf16) {
- if (IsStringASCII(utf16)) {
- return std::string(utf16.begin(), utf16.end());
- }
-
std::string ret;
// Ignore the success flag of this call, it will do the best it can for
// invalid input, which is what we want here.
diff --git a/chromium/base/supports_user_data.cc b/chromium/base/supports_user_data.cc
index 6ba3ff6c8db..d5110555333 100644
--- a/chromium/base/supports_user_data.cc
+++ b/chromium/base/supports_user_data.cc
@@ -9,12 +9,13 @@
namespace base {
SupportsUserData::SupportsUserData() {
- // Harmless to construct on a different thread to subsequent usage.
- thread_checker_.DetachFromThread();
+ // Harmless to construct on a different execution sequence from subsequent
+ // usage.
+ sequence_checker_.DetachFromSequence();
}
SupportsUserData::Data* SupportsUserData::GetUserData(const void* key) const {
- DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(sequence_checker_.CalledOnValidSequence());
DataMap::const_iterator found = user_data_.find(key);
if (found != user_data_.end())
return found->second.get();
@@ -22,21 +23,21 @@ SupportsUserData::Data* SupportsUserData::GetUserData(const void* key) const {
}
void SupportsUserData::SetUserData(const void* key, Data* data) {
- DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(sequence_checker_.CalledOnValidSequence());
user_data_[key] = WrapUnique(data);
}
void SupportsUserData::RemoveUserData(const void* key) {
- DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(sequence_checker_.CalledOnValidSequence());
user_data_.erase(key);
}
-void SupportsUserData::DetachUserDataThread() {
- thread_checker_.DetachFromThread();
+void SupportsUserData::DetachFromSequence() {
+ sequence_checker_.DetachFromSequence();
}
SupportsUserData::~SupportsUserData() {
- DCHECK(thread_checker_.CalledOnValidThread() || user_data_.empty());
+ DCHECK(sequence_checker_.CalledOnValidSequence() || user_data_.empty());
DataMap local_user_data;
user_data_.swap(local_user_data);
// Now this->user_data_ is empty, and any destructors called transitively from
diff --git a/chromium/base/supports_user_data.h b/chromium/base/supports_user_data.h
index a9f1c932048..a4c7c9fc890 100644
--- a/chromium/base/supports_user_data.h
+++ b/chromium/base/supports_user_data.h
@@ -11,6 +11,10 @@
#include "base/base_export.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
+#include "base/sequence_checker.h"
+
+// TODO(gab): Removing this include causes IWYU failures in other headers,
+// remove it in a follow-up CL.
#include "base/threading/thread_checker.h"
namespace base {
@@ -26,7 +30,7 @@ class BASE_EXPORT SupportsUserData {
// class to any class with a virtual destructor.
class BASE_EXPORT Data {
public:
- virtual ~Data() {}
+ virtual ~Data() = default;
};
// The user data allows the clients to associate data with this object.
@@ -38,10 +42,10 @@ class BASE_EXPORT SupportsUserData {
void RemoveUserData(const void* key);
// SupportsUserData is not thread-safe, and on debug build will assert it is
- // only used on one thread. Calling this method allows the caller to hand
- // the SupportsUserData instance across threads. Use only if you are taking
- // full control of the synchronization of that hand over.
- void DetachUserDataThread();
+ // only used on one execution sequence. Calling this method allows the caller
+ // to hand the SupportsUserData instance across execution sequences. Use only
+ // if you are taking full control of the synchronization of that hand over.
+ void DetachFromSequence();
protected:
virtual ~SupportsUserData();
@@ -52,7 +56,7 @@ class BASE_EXPORT SupportsUserData {
// Externally-defined data accessible by key.
DataMap user_data_;
// Guards usage of |user_data_|
- ThreadChecker thread_checker_;
+ SequenceChecker sequence_checker_;
DISALLOW_COPY_AND_ASSIGN(SupportsUserData);
};
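For orientation, a minimal sketch of the SupportsUserData API whose checker changed above (the key and the Data subclass are illustrative):

  #include "base/supports_user_data.h"

  namespace {
  const char kMyDataKey[] = "my-data-key";  // Illustrative key.
  }  // namespace

  class MyData : public base::SupportsUserData::Data {
   public:
    explicit MyData(int value) : value_(value) {}
    int value() const { return value_; }

   private:
    int value_;
  };

  void AttachData(base::SupportsUserData* host) {
    // |host| takes ownership of the Data instance. All calls must stay on one
    // sequence unless DetachFromSequence() is used for an explicit hand-over.
    host->SetUserData(kMyDataKey, new MyData(42));
  }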
diff --git a/chromium/base/synchronization/spin_lock.cc b/chromium/base/synchronization/spin_lock.cc
new file mode 100644
index 00000000000..531d7985cf4
--- /dev/null
+++ b/chromium/base/synchronization/spin_lock.cc
@@ -0,0 +1,82 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/synchronization/spin_lock.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#elif defined(OS_POSIX)
+#include <sched.h>
+#endif
+
+// The YIELD_PROCESSOR macro wraps an architecture-specific instruction that
+// informs the processor we're in a busy wait, so it can handle the branch more
+// intelligently and e.g. reduce power to our core or give more resources to the
+// other hyper-thread on this core. See the following for context:
+// https://software.intel.com/en-us/articles/benefitting-power-and-performance-sleep-loops
+//
+// The YIELD_THREAD macro tells the OS to relinquish our quantum. This is
+// basically a worst-case fallback, and if you're hitting it with any frequency
+// you really should be using a proper lock (such as |base::Lock|) rather than
+// these spinlocks.
+#if defined(OS_WIN)
+#define YIELD_PROCESSOR YieldProcessor()
+#define YIELD_THREAD SwitchToThread()
+#elif defined(COMPILER_GCC) || defined(__clang__)
+#if defined(ARCH_CPU_X86_64) || defined(ARCH_CPU_X86)
+#define YIELD_PROCESSOR __asm__ __volatile__("pause")
+#elif defined(ARCH_CPU_ARMEL) || defined(ARCH_CPU_ARM64)
+#define YIELD_PROCESSOR __asm__ __volatile__("yield")
+#elif defined(ARCH_CPU_MIPSEL)
+// The MIPS32 docs state that the PAUSE instruction is a no-op on older
+// architectures (first added in MIPS32r2). To avoid assembler errors when
+// targeting pre-r2, we must encode the instruction manually.
+#define YIELD_PROCESSOR __asm__ __volatile__(".word 0x00000140")
+#elif defined(ARCH_CPU_MIPS64EL) && __mips_isa_rev >= 2
+// Don't bother using .word here since r2 is the lowest supported mips64
+// that Chromium supports.
+#define YIELD_PROCESSOR __asm__ __volatile__("pause")
+#endif
+#endif
+
+#ifndef YIELD_PROCESSOR
+#warning "Processor yield not supported on this architecture."
+#define YIELD_PROCESSOR ((void)0)
+#endif
+
+#ifndef YIELD_THREAD
+#if defined(OS_POSIX)
+#define YIELD_THREAD sched_yield()
+#else
+#warning "Thread yield not supported on this OS."
+#define YIELD_THREAD ((void)0)
+#endif
+#endif
+
+namespace base {
+namespace subtle {
+
+void SpinLock::LockSlow() {
+ // The value of |kYieldProcessorTries| is cargo culted from TCMalloc, Windows
+ // critical section defaults, and various other recommendations.
+ // TODO(jschuh): Further tuning may be warranted.
+ static const int kYieldProcessorTries = 1000;
+ do {
+ do {
+ for (int count = 0; count < kYieldProcessorTries; ++count) {
+ // Let the processor know we're spinning.
+ YIELD_PROCESSOR;
+ if (!lock_.load(std::memory_order_relaxed) &&
+ LIKELY(!lock_.exchange(true, std::memory_order_acquire)))
+ return;
+ }
+
+ // Give the OS a chance to schedule something on this core.
+ YIELD_THREAD;
+ } while (lock_.load(std::memory_order_relaxed));
+ } while (UNLIKELY(lock_.exchange(true, std::memory_order_acquire)));
+}
+
+} // namespace subtle
+} // namespace base
diff --git a/chromium/base/synchronization/spin_lock.h b/chromium/base/synchronization/spin_lock.h
new file mode 100644
index 00000000000..cf44414de49
--- /dev/null
+++ b/chromium/base/synchronization/spin_lock.h
@@ -0,0 +1,52 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_SYNCHRONIZATION_SPIN_LOCK_H
+#define BASE_SYNCHRONIZATION_SPIN_LOCK_H
+
+#include <atomic>
+#include <memory>
+#include <mutex>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+
+// Spinlock is a simple spinlock class based on the standard CPU primitive of
+// atomic increment and decrement of an int at a given memory address. These are
+// intended only for very short duration locks and assume a system with multiple
+// cores. For any potentially longer wait you should use a real lock, such as
+// |base::Lock|.
+//
+// |SpinLock|s MUST be globals. Using them as (e.g.) struct/class members will
+// result in an uninitialized lock, which is dangerously incorrect.
+
+namespace base {
+namespace subtle {
+
+class SpinLock {
+ public:
+ using Guard = std::lock_guard<SpinLock>;
+
+ ALWAYS_INLINE void lock() {
+ static_assert(sizeof(lock_) == sizeof(int),
+ "int and lock_ are different sizes");
+ if (LIKELY(!lock_.exchange(true, std::memory_order_acquire)))
+ return;
+ LockSlow();
+ }
+
+ ALWAYS_INLINE void unlock() { lock_.store(false, std::memory_order_release); }
+
+ private:
+ // This is called if the initial attempt to acquire the lock fails. It's
+ // slower, but has a much better scheduling and power consumption behavior.
+ BASE_EXPORT void LockSlow();
+
+ std::atomic_int lock_;
+};
+
+} // namespace subtle
+} // namespace base
+
+#endif // BASE_SYNCHRONIZATION_SPIN_LOCK_H
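A short usage sketch for the new SpinLock, following the header's own rule that instances must be globals (the guarded counter is illustrative):

  #include "base/synchronization/spin_lock.h"

  namespace {
  base::subtle::SpinLock g_lock;  // Must be a global, per the header comment.
  int g_guarded_counter = 0;
  }  // namespace

  void Increment() {
    // Guard is std::lock_guard<SpinLock>, so the lock is released
    // automatically at the end of the scope.
    base::subtle::SpinLock::Guard guard(g_lock);
    ++g_guarded_counter;
  }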
diff --git a/chromium/base/synchronization/waitable_event.h b/chromium/base/synchronization/waitable_event.h
index 3863e98455e..761965f03a0 100644
--- a/chromium/base/synchronization/waitable_event.h
+++ b/chromium/base/synchronization/waitable_event.h
@@ -25,6 +25,7 @@
namespace base {
class TimeDelta;
+class TimeTicks;
// A WaitableEvent can be a useful thread synchronization tool when you want to
// allow one thread to wait for another thread to finish some work. For
@@ -86,12 +87,17 @@ class BASE_EXPORT WaitableEvent {
// delete e;
void Wait();
- // Wait up until max_time has passed for the event to be signaled. Returns
- // true if the event was signaled. If this method returns false, then it
- // does not necessarily mean that max_time was exceeded.
+ // Wait up until wait_delta has passed for the event to be signaled. Returns
+ // true if the event was signaled.
//
// TimedWait can synchronise its own destruction like |Wait|.
- bool TimedWait(const TimeDelta& max_time);
+ bool TimedWait(const TimeDelta& wait_delta);
+
+ // Wait up until end_time deadline has passed for the event to be signaled.
+ // Return true if the event was signaled.
+ //
+ // TimedWaitUntil can synchronise its own destruction like |Wait|.
+ bool TimedWaitUntil(const TimeTicks& end_time);
#if defined(OS_WIN)
HANDLE handle() const { return handle_.Get(); }
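To show the relationship between the two waits declared above, a small hedged sketch (the 50 ms values are arbitrary):

  #include "base/synchronization/waitable_event.h"
  #include "base/time/time.h"

  void WaitExamples(base::WaitableEvent* event) {
    // Relative timeout: wait at most 50 ms from now.
    bool signaled = event->TimedWait(base::TimeDelta::FromMilliseconds(50));

    // Absolute deadline: useful when one deadline is shared across several
    // waits, since the remaining time does not need to be recomputed.
    base::TimeTicks deadline =
        base::TimeTicks::Now() + base::TimeDelta::FromMilliseconds(50);
    signaled = event->TimedWaitUntil(deadline);
    (void)signaled;
  }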
diff --git a/chromium/base/synchronization/waitable_event_posix.cc b/chromium/base/synchronization/waitable_event_posix.cc
index a8b686b88ce..5dfff468ad6 100644
--- a/chromium/base/synchronization/waitable_event_posix.cc
+++ b/chromium/base/synchronization/waitable_event_posix.cc
@@ -153,17 +153,22 @@ class SyncWaiter : public WaitableEvent::Waiter {
};
void WaitableEvent::Wait() {
- bool result = TimedWait(TimeDelta::FromSeconds(-1));
+ bool result = TimedWaitUntil(TimeTicks::Max());
DCHECK(result) << "TimedWait() should never fail with infinite timeout";
}
-bool WaitableEvent::TimedWait(const TimeDelta& max_time) {
+bool WaitableEvent::TimedWait(const TimeDelta& wait_delta) {
+ // TimeTicks takes care of overflow, including the case where wait_delta
+ // is a maximum value.
+ return TimedWaitUntil(TimeTicks::Now() + wait_delta);
+}
+
+bool WaitableEvent::TimedWaitUntil(const TimeTicks& end_time) {
+ base::ThreadRestrictions::AssertWaitAllowed();
// Record the event that this thread is blocking upon (for hang diagnosis).
base::debug::ScopedEventWaitActivity event_activity(this);
- base::ThreadRestrictions::AssertWaitAllowed();
- const TimeTicks end_time(TimeTicks::Now() + max_time);
- const bool finite_time = max_time.ToInternalValue() >= 0;
+ const bool finite_time = !end_time.is_max();
kernel_->lock_.Acquire();
if (kernel_->signaled_) {
diff --git a/chromium/base/synchronization/waitable_event_unittest.cc b/chromium/base/synchronization/waitable_event_unittest.cc
index ac5c9f12558..c0e280aa974 100644
--- a/chromium/base/synchronization/waitable_event_unittest.cc
+++ b/chromium/base/synchronization/waitable_event_unittest.cc
@@ -136,13 +136,7 @@ TEST(WaitableEventTest, WaitMany) {
// Tests that using TimeDelta::Max() on TimedWait() is not the same as passing
// a timeout of 0. (crbug.com/465948)
-#if defined(OS_POSIX)
-// crbug.com/465948 not fixed yet.
-#define MAYBE_TimedWait DISABLED_TimedWait
-#else
-#define MAYBE_TimedWait TimedWait
-#endif
-TEST(WaitableEventTest, MAYBE_TimedWait) {
+TEST(WaitableEventTest, TimedWait) {
WaitableEvent* ev =
new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
WaitableEvent::InitialState::NOT_SIGNALED);
@@ -153,11 +147,58 @@ TEST(WaitableEventTest, MAYBE_TimedWait) {
TimeTicks start = TimeTicks::Now();
PlatformThread::Create(0, &signaler, &thread);
- ev->TimedWait(TimeDelta::Max());
+ EXPECT_TRUE(ev->TimedWait(TimeDelta::Max()));
EXPECT_GE(TimeTicks::Now() - start, thread_delay);
delete ev;
PlatformThread::Join(thread);
}
+// Tests that a sub-ms TimedWait doesn't time out promptly.
+TEST(WaitableEventTest, SubMsTimedWait) {
+ WaitableEvent ev(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+
+ TimeDelta delay = TimeDelta::FromMicroseconds(900);
+ TimeTicks start_time = TimeTicks::Now();
+ ev.TimedWait(delay);
+ EXPECT_GE(TimeTicks::Now() - start_time, delay);
+}
+
+// Tests that TimedWaitUntil can be safely used with various end_time deadline
+// values.
+TEST(WaitableEventTest, TimedWaitUntil) {
+ WaitableEvent ev(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+
+ TimeTicks start_time(TimeTicks::Now());
+ TimeDelta delay = TimeDelta::FromMilliseconds(10);
+
+ // Should be OK to wait for the current time or time in the past.
+ // That should end promptly and be equivalent to IsSignaled().
+ EXPECT_FALSE(ev.TimedWaitUntil(start_time));
+ EXPECT_FALSE(ev.TimedWaitUntil(start_time - delay));
+
+ // Should be OK to wait for zero TimeTicks().
+ EXPECT_FALSE(ev.TimedWaitUntil(TimeTicks()));
+
+ // Waiting for a time in the future shouldn't end before the deadline
+ // if the event isn't signalled.
+ EXPECT_FALSE(ev.TimedWaitUntil(start_time + delay));
+ EXPECT_GE(TimeTicks::Now() - start_time, delay);
+
+ // Test that passing TimeTicks::Max to TimedWaitUntil is valid and isn't
+ // the same as passing TimeTicks(). Also verifies that signaling the event
+ // ends the wait promptly.
+ WaitableEventSignaler signaler(delay, &ev);
+ PlatformThreadHandle thread;
+ start_time = TimeTicks::Now();
+ PlatformThread::Create(0, &signaler, &thread);
+
+ EXPECT_TRUE(ev.TimedWaitUntil(TimeTicks::Max()));
+ EXPECT_GE(TimeTicks::Now() - start_time, delay);
+
+ PlatformThread::Join(thread);
+}
+
} // namespace base
diff --git a/chromium/base/synchronization/waitable_event_win.cc b/chromium/base/synchronization/waitable_event_win.cc
index d80cabb3ff3..993dbb12224 100644
--- a/chromium/base/synchronization/waitable_event_win.cc
+++ b/chromium/base/synchronization/waitable_event_win.cc
@@ -7,6 +7,7 @@
#include <windows.h>
#include <stddef.h>
+#include <algorithm>
#include <utility>
#include "base/debug/activity_tracker.h"
@@ -44,54 +45,96 @@ void WaitableEvent::Signal() {
}
bool WaitableEvent::IsSignaled() {
- return TimedWait(TimeDelta());
+ DWORD result = WaitForSingleObject(handle_.Get(), 0);
+ DCHECK(result == WAIT_OBJECT_0 || result == WAIT_TIMEOUT)
+ << "Unexpected WaitForSingleObject result " << result;
+ return result == WAIT_OBJECT_0;
}
void WaitableEvent::Wait() {
+ base::ThreadRestrictions::AssertWaitAllowed();
// Record the event that this thread is blocking upon (for hang diagnosis).
base::debug::ScopedEventWaitActivity event_activity(this);
- base::ThreadRestrictions::AssertWaitAllowed();
DWORD result = WaitForSingleObject(handle_.Get(), INFINITE);
// It is most unexpected that this should ever fail. Help consumers learn
// about it if it should ever fail.
DCHECK_EQ(WAIT_OBJECT_0, result) << "WaitForSingleObject failed";
}
-bool WaitableEvent::TimedWait(const TimeDelta& max_time) {
+namespace {
+
+// Helper function called from TimedWait and TimedWaitUntil.
+bool WaitUntil(HANDLE handle, const TimeTicks& now, const TimeTicks& end_time) {
+ TimeDelta delta = end_time - now;
+ DCHECK_GT(delta, TimeDelta());
+
+ do {
+ // On Windows, waiting for less than 1 ms results in WaitForSingleObject
+ // returning promptly which may result in the caller code spinning.
+ // We need to ensure that we specify at least the minimum possible 1 ms
+ // delay unless the initial timeout was exactly zero.
+ delta = std::max(delta, TimeDelta::FromMilliseconds(1));
+ // Truncate the timeout to milliseconds.
+ DWORD timeout_ms = saturated_cast<DWORD>(delta.InMilliseconds());
+ DWORD result = WaitForSingleObject(handle, timeout_ms);
+ DCHECK(result == WAIT_OBJECT_0 || result == WAIT_TIMEOUT)
+ << "Unexpected WaitForSingleObject result " << result;
+ switch (result) {
+ case WAIT_OBJECT_0:
+ return true;
+ case WAIT_TIMEOUT:
+ // WaitForSingleObject can time out earlier than the requested delay on
+ // Windows. To make this consistent with the POSIX implementation we
+ // should guarantee that the wait doesn't return earlier than the
+ // specified |end_time| and wait again for the remaining time.
+ delta = end_time - TimeTicks::Now();
+ break;
+ }
+ } while (delta > TimeDelta());
+ return false;
+}
+
+} // namespace
+
+bool WaitableEvent::TimedWait(const TimeDelta& wait_delta) {
+ DCHECK_GE(wait_delta, TimeDelta());
+ if (wait_delta.is_zero())
+ return IsSignaled();
+
+ base::ThreadRestrictions::AssertWaitAllowed();
// Record the event that this thread is blocking upon (for hang diagnosis).
base::debug::ScopedEventWaitActivity event_activity(this);
- DCHECK_GE(max_time, TimeDelta());
- if (!max_time.is_zero())
- base::ThreadRestrictions::AssertWaitAllowed();
-
- // Truncate the timeout to milliseconds. The API specifies that this method
- // can return in less than |max_time| (when returning false), as the argument
- // is the maximum time that a caller is willing to wait.
- DWORD timeout = saturated_cast<DWORD>(max_time.InMilliseconds());
-
- DWORD result = WaitForSingleObject(handle_.Get(), timeout);
- switch (result) {
- case WAIT_OBJECT_0:
- return true;
- case WAIT_TIMEOUT:
- return false;
- }
- // It is most unexpected that this should ever fail. Help consumers learn
- // about it if it should ever fail.
- NOTREACHED() << "WaitForSingleObject failed";
- return false;
+ TimeTicks now(TimeTicks::Now());
+ // TimeTicks takes care of overflow, including the case where wait_delta
+ // is a maximum value.
+ return WaitUntil(handle_.Get(), now, now + wait_delta);
+}
+
+bool WaitableEvent::TimedWaitUntil(const TimeTicks& end_time) {
+ if (end_time.is_null())
+ return IsSignaled();
+
+ base::ThreadRestrictions::AssertWaitAllowed();
+ // Record the event that this thread is blocking upon (for hang diagnosis).
+ base::debug::ScopedEventWaitActivity event_activity(this);
+
+ TimeTicks now(TimeTicks::Now());
+ if (end_time <= now)
+ return IsSignaled();
+
+ return WaitUntil(handle_.Get(), now, end_time);
}
// static
size_t WaitableEvent::WaitMany(WaitableEvent** events, size_t count) {
DCHECK(count) << "Cannot wait on no events";
+ base::ThreadRestrictions::AssertWaitAllowed();
// Record an event (the first) that this thread is blocking upon.
base::debug::ScopedEventWaitActivity event_activity(events[0]);
- base::ThreadRestrictions::AssertWaitAllowed();
HANDLE handles[MAXIMUM_WAIT_OBJECTS];
CHECK_LE(count, static_cast<size_t>(MAXIMUM_WAIT_OBJECTS))
<< "Can only wait on " << MAXIMUM_WAIT_OBJECTS << " with WaitMany";
diff --git a/chromium/base/sys_byteorder.h b/chromium/base/sys_byteorder.h
index 8d9066c7022..9ee1827e1e6 100644
--- a/chromium/base/sys_byteorder.h
+++ b/chromium/base/sys_byteorder.h
@@ -13,6 +13,7 @@
#include <stdint.h>
+#include "base/logging.h"
#include "build/build_config.h"
#if defined(COMPILER_MSVC)
@@ -46,6 +47,21 @@ inline uint64_t ByteSwap(uint64_t x) {
#endif
}
+inline uintptr_t ByteSwapUintPtrT(uintptr_t x) {
+ // We do it this way because some build configurations are ILP32 even when
+ // defined(ARCH_CPU_64_BITS). Unfortunately, we can't use sizeof in #ifs. But,
+ // because these conditionals are constexprs, the irrelevant branches will
+ // likely be optimized away, so this construction should not result in code
+ // bloat.
+ if (sizeof(uintptr_t) == 4) {
+ return ByteSwap(static_cast<uint32_t>(x));
+ } else if (sizeof(uintptr_t) == 8) {
+ return ByteSwap(static_cast<uint64_t>(x));
+ } else {
+ NOTREACHED();
+ }
+}
+
// Converts the bytes in |x| from host order (endianness) to little endian, and
// returns the result.
inline uint16_t ByteSwapToLE16(uint16_t x) {
diff --git a/chromium/base/sys_byteorder_unittest.cc b/chromium/base/sys_byteorder_unittest.cc
index 0352c2a97c6..8167be3b9ab 100644
--- a/chromium/base/sys_byteorder_unittest.cc
+++ b/chromium/base/sys_byteorder_unittest.cc
@@ -6,6 +6,7 @@
#include <stdint.h>
+#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace {
@@ -40,6 +41,25 @@ TEST(ByteOrderTest, ByteSwap64) {
EXPECT_EQ(k64BitTestData, reswapped);
}
+TEST(ByteOrderTest, ByteSwapUintPtrT) {
+#if defined(ARCH_CPU_64_BITS)
+ const uintptr_t test_data = static_cast<uintptr_t>(k64BitTestData);
+ const uintptr_t swapped_test_data =
+ static_cast<uintptr_t>(k64BitSwappedTestData);
+#elif defined(ARCH_CPU_32_BITS)
+ const uintptr_t test_data = static_cast<uintptr_t>(k32BitTestData);
+ const uintptr_t swapped_test_data =
+ static_cast<uintptr_t>(k32BitSwappedTestData);
+#else
+#error architecture not supported
+#endif
+
+ uintptr_t swapped = base::ByteSwapUintPtrT(test_data);
+ EXPECT_EQ(swapped_test_data, swapped);
+ uintptr_t reswapped = base::ByteSwapUintPtrT(swapped);
+ EXPECT_EQ(test_data, reswapped);
+}
+
TEST(ByteOrderTest, ByteSwapToLE16) {
uint16_t le = base::ByteSwapToLE16(k16BitTestData);
#if defined(ARCH_CPU_LITTLE_ENDIAN)
diff --git a/chromium/base/sys_info.h b/chromium/base/sys_info.h
index b10747703d2..e35feff735e 100644
--- a/chromium/base/sys_info.h
+++ b/chromium/base/sys_info.h
@@ -107,9 +107,19 @@ class BASE_EXPORT SysInfo {
static bool GetLsbReleaseValue(const std::string& key, std::string* value);
// Convenience function for GetLsbReleaseValue("CHROMEOS_RELEASE_BOARD",...).
- // Returns "unknown" if CHROMEOS_RELEASE_BOARD is not set.
+ // Returns "unknown" if CHROMEOS_RELEASE_BOARD is not set. Otherwise returns
+ // the full name of the board. WARNING: the returned value often differs on
+ // developer-built systems compared to devices that use the official version.
+ // E.g. for a developer build, the function could return 'glimmer' while
+ // officially signed versions would return e.g. 'glimmer-signed-mp-v4keys'.
+ // Use GetStrippedReleaseBoard() if you need only the short name of the board
+ // ('glimmer' in the case described above).
static std::string GetLsbReleaseBoard();
+ // Convenience function for GetLsbReleaseBoard() removing trailing "-signed-*"
+ // if present. Returns "unknown" if CHROMEOS_RELEASE_BOARD is not set.
+ static std::string GetStrippedReleaseBoard();
+
// Returns the creation time of /etc/lsb-release. (Used to get the date and
// time of the Chrome OS build).
static Time GetLsbReleaseTime();
diff --git a/chromium/base/sys_info_chromeos.cc b/chromium/base/sys_info_chromeos.cc
index e4d671f75d9..29f83845dc5 100644
--- a/chromium/base/sys_info_chromeos.cc
+++ b/chromium/base/sys_info_chromeos.cc
@@ -200,6 +200,16 @@ std::string SysInfo::GetLsbReleaseBoard() {
}
// static
+std::string SysInfo::GetStrippedReleaseBoard() {
+ std::string board = GetLsbReleaseBoard();
+ const size_t index = board.find("-signed-");
+ if (index != std::string::npos)
+ board.resize(index);
+
+ return base::ToLowerASCII(board);
+}
+
+// static
Time SysInfo::GetLsbReleaseTime() {
return GetChromeOSVersionInfo().lsb_release_time();
}
diff --git a/chromium/base/sys_info_unittest.cc b/chromium/base/sys_info_unittest.cc
index 0231df63798..c3b85077071 100644
--- a/chromium/base/sys_info_unittest.cc
+++ b/chromium/base/sys_info_unittest.cc
@@ -156,4 +156,14 @@ TEST_F(SysInfoTest, IsRunningOnChromeOS) {
EXPECT_TRUE(base::SysInfo::IsRunningOnChromeOS());
}
+TEST_F(SysInfoTest, GetStrippedReleaseBoard) {
+ const char* kLsbRelease1 = "CHROMEOS_RELEASE_BOARD=Glimmer\n";
+ base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease1, base::Time());
+ EXPECT_EQ("glimmer", base::SysInfo::GetStrippedReleaseBoard());
+
+ const char* kLsbRelease2 = "CHROMEOS_RELEASE_BOARD=glimmer-signed-mp-v4keys";
+ base::SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease2, base::Time());
+ EXPECT_EQ("glimmer", base::SysInfo::GetStrippedReleaseBoard());
+}
+
#endif // OS_CHROMEOS
diff --git a/chromium/base/syslog_logging.cc b/chromium/base/syslog_logging.cc
index 1cd545910a9..087b4fd4543 100644
--- a/chromium/base/syslog_logging.cc
+++ b/chromium/base/syslog_logging.cc
@@ -6,6 +6,8 @@
#include "base/syslog_logging.h"
#if defined(OS_WIN)
+#include "base/bind.h"
+#include "base/callback_helpers.h"
#include "base/win/eventlog_messages.h"
#include <windows.h>
@@ -19,6 +21,18 @@
namespace logging {
+#if defined(OS_WIN)
+
+namespace {
+std::string* g_event_source_name = nullptr;
+}
+
+void SetEventSourceName(const std::string& name) {
+ DCHECK_EQ(nullptr, g_event_source_name);
+ g_event_source_name = new std::string(name);
+}
+#endif // defined(OS_WIN)
+
EventLogMessage::EventLogMessage(const char* file,
int line,
LogSeverity severity)
@@ -27,13 +41,21 @@ EventLogMessage::EventLogMessage(const char* file,
EventLogMessage::~EventLogMessage() {
#if defined(OS_WIN)
- const char kEventSource[] = "chrome";
- HANDLE event_log_handle = RegisterEventSourceA(NULL, kEventSource);
+ // If g_event_source_name is nullptr (which it is by default) SYSLOG will
+ // degrade gracefully to a regular LOG. If you see this happening, you are
+ // most probably using SYSLOG before having called SetEventSourceName.
+ if (g_event_source_name == nullptr)
+ return;
+
+ HANDLE event_log_handle =
+ RegisterEventSourceA(NULL, g_event_source_name->c_str());
if (event_log_handle == NULL) {
stream() << " !!NOT ADDED TO EVENTLOG!!";
return;
}
+ base::ScopedClosureRunner auto_deregister(
+ base::Bind(base::IgnoreResult(&DeregisterEventSource), event_log_handle));
std::string message(log_message_.str());
WORD log_type = EVENTLOG_ERROR_TYPE;
switch (log_message_.severity()) {
@@ -57,7 +79,6 @@ EventLogMessage::~EventLogMessage() {
MSG_LOG_MESSAGE, NULL, 1, 0, strings, NULL)) {
stream() << " !!NOT ADDED TO EVENTLOG!!";
}
- DeregisterEventSource(event_log_handle);
#elif defined(OS_LINUX)
const char kEventSource[] = "chrome";
openlog(kEventSource, LOG_NOWAIT | LOG_PID, LOG_USER);
diff --git a/chromium/base/syslog_logging.h b/chromium/base/syslog_logging.h
index 0196ba7bbd8..74f35beaf1a 100644
--- a/chromium/base/syslog_logging.h
+++ b/chromium/base/syslog_logging.h
@@ -17,6 +17,11 @@ namespace logging {
#define SYSLOG(severity) \
SYSLOG_STREAM(severity)
+// Sets the name of the event source for logging to the Windows Event Log.
+// Call this function once before using the SYSLOG macro; otherwise it will
+// behave as a regular LOG macro.
+void BASE_EXPORT SetEventSourceName(const std::string& name);
+
// Creates a formatted message on the system event log. That would be the
// Application Event log on Windows and the messages log file on POSIX systems.
class BASE_EXPORT EventLogMessage {
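Given the new requirement above, a hedged sketch of the intended call order (the event source name is illustrative):

  #include "base/syslog_logging.h"

  void InitEventLogging() {
    // Must run once before the first SYSLOG; otherwise SYSLOG degrades to a
    // plain LOG on Windows.
    logging::SetEventSourceName("my_product");  // Illustrative name.
  }

  void ReportFailure() {
    SYSLOG(ERROR) << "Something went wrong";
  }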
diff --git a/chromium/base/task/cancelable_task_tracker.cc b/chromium/base/task/cancelable_task_tracker.cc
index 6a6e1249d2a..92d82cc9eae 100644
--- a/chromium/base/task/cancelable_task_tracker.cc
+++ b/chromium/base/task/cancelable_task_tracker.cc
@@ -8,21 +8,14 @@
#include <utility>
-#include "base/bind.h"
#include "base/callback_helpers.h"
-#include "base/compiler_specific.h"
#include "base/location.h"
#include "base/memory/ref_counted.h"
-#include "base/single_thread_task_runner.h"
#include "base/synchronization/cancellation_flag.h"
#include "base/task_runner.h"
-#include "base/threading/thread_task_runner_handle.h"
+#include "base/threading/sequenced_task_runner_handle.h"
-using base::Bind;
-using base::CancellationFlag;
-using base::Closure;
-using base::hash_map;
-using base::TaskRunner;
+namespace base {
namespace {
@@ -57,8 +50,6 @@ void RunOrPostToTaskRunner(TaskRunner* task_runner, const Closure& closure) {
} // namespace
-namespace base {
-
// static
const CancelableTaskTracker::TaskId CancelableTaskTracker::kBadTaskId = 0;
@@ -66,7 +57,7 @@ CancelableTaskTracker::CancelableTaskTracker()
: next_id_(1),weak_factory_(this) {}
CancelableTaskTracker::~CancelableTaskTracker() {
- DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(sequence_checker_.CalledOnValidSequence());
TryCancelAll();
}
@@ -75,7 +66,7 @@ CancelableTaskTracker::TaskId CancelableTaskTracker::PostTask(
TaskRunner* task_runner,
const tracked_objects::Location& from_here,
const Closure& task) {
- DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(sequence_checker_.CalledOnValidSequence());
return PostTaskAndReply(task_runner, from_here, task, Bind(&base::DoNothing));
}
@@ -85,10 +76,10 @@ CancelableTaskTracker::TaskId CancelableTaskTracker::PostTaskAndReply(
const tracked_objects::Location& from_here,
const Closure& task,
const Closure& reply) {
- DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(sequence_checker_.CalledOnValidSequence());
- // We need a MessageLoop to run reply.
- DCHECK(base::ThreadTaskRunnerHandle::IsSet());
+ // We need a SequencedTaskRunnerHandle to run |reply|.
+ DCHECK(base::SequencedTaskRunnerHandle::IsSet());
// Owned by reply callback below.
CancellationFlag* flag = new CancellationFlag();
@@ -115,8 +106,8 @@ CancelableTaskTracker::TaskId CancelableTaskTracker::PostTaskAndReply(
CancelableTaskTracker::TaskId CancelableTaskTracker::NewTrackedTaskId(
IsCanceledCallback* is_canceled_cb) {
- DCHECK(thread_checker_.CalledOnValidThread());
- DCHECK(base::ThreadTaskRunnerHandle::IsSet());
+ DCHECK(sequence_checker_.CalledOnValidSequence());
+ DCHECK(base::SequencedTaskRunnerHandle::IsSet());
TaskId id = next_id_;
next_id_++; // int64_t is big enough that we ignore the potential overflow.
@@ -129,11 +120,11 @@ CancelableTaskTracker::TaskId CancelableTaskTracker::NewTrackedTaskId(
Bind(&CancelableTaskTracker::Untrack, weak_factory_.GetWeakPtr(), id),
flag);
- // Will always run |untrack_and_delete_flag| on current MessageLoop.
+ // Will always run |untrack_and_delete_flag| on current sequence.
base::ScopedClosureRunner* untrack_and_delete_flag_runner =
new base::ScopedClosureRunner(
Bind(&RunOrPostToTaskRunner,
- RetainedRef(base::ThreadTaskRunnerHandle::Get()),
+ RetainedRef(base::SequencedTaskRunnerHandle::Get()),
untrack_and_delete_flag));
*is_canceled_cb =
@@ -144,7 +135,7 @@ CancelableTaskTracker::TaskId CancelableTaskTracker::NewTrackedTaskId(
}
void CancelableTaskTracker::TryCancel(TaskId id) {
- DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(sequence_checker_.CalledOnValidSequence());
hash_map<TaskId, CancellationFlag*>::const_iterator it = task_flags_.find(id);
if (it == task_flags_.end()) {
@@ -160,7 +151,7 @@ void CancelableTaskTracker::TryCancel(TaskId id) {
}
void CancelableTaskTracker::TryCancelAll() {
- DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(sequence_checker_.CalledOnValidSequence());
for (hash_map<TaskId, CancellationFlag*>::const_iterator it =
task_flags_.begin();
@@ -171,19 +162,19 @@ void CancelableTaskTracker::TryCancelAll() {
}
bool CancelableTaskTracker::HasTrackedTasks() const {
- DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(sequence_checker_.CalledOnValidSequence());
return !task_flags_.empty();
}
void CancelableTaskTracker::Track(TaskId id, CancellationFlag* flag) {
- DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(sequence_checker_.CalledOnValidSequence());
bool success = task_flags_.insert(std::make_pair(id, flag)).second;
DCHECK(success);
}
void CancelableTaskTracker::Untrack(TaskId id) {
- DCHECK(thread_checker_.CalledOnValidThread());
+ DCHECK(sequence_checker_.CalledOnValidSequence());
size_t num = task_flags_.erase(id);
DCHECK_EQ(1u, num);
}
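
To illustrate the NewTrackedTaskId() path touched above: the tracker hands out an IsCanceledCallback that, per the header, may be run on any sequence, so a task running elsewhere can poll it and bail out early. A minimal sketch follows; everything except the CancelableTaskTracker interface itself (the worker runner and the chunked-work helpers) is an invented placeholder.

#include "base/bind.h"
#include "base/location.h"
#include "base/task/cancelable_task_tracker.h"
#include "base/task_runner.h"

namespace {

// Runs on an arbitrary sequence; polls |is_canceled| between units of work.
void DoChunkedWork(
    const base::CancelableTaskTracker::IsCanceledCallback& is_canceled) {
  for (int i = 0; i < 100 && !is_canceled.Run(); ++i) {
    // ... one unit of work ...
  }
}

// Must be called on the sequence that owns |tracker| (enforced by the
// sequence_checker_ DCHECKs above) and with SequencedTaskRunnerHandle set.
void PostChunkedWork(base::CancelableTaskTracker* tracker,
                     base::TaskRunner* worker_runner) {
  base::CancelableTaskTracker::IsCanceledCallback is_canceled;
  tracker->NewTrackedTaskId(&is_canceled);
  worker_runner->PostTask(FROM_HERE, base::Bind(&DoChunkedWork, is_canceled));
}

}  // namespace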
diff --git a/chromium/base/task/cancelable_task_tracker.h b/chromium/base/task/cancelable_task_tracker.h
index 86b5a458450..959c86f9fd6 100644
--- a/chromium/base/task/cancelable_task_tracker.h
+++ b/chromium/base/task/cancelable_task_tracker.h
@@ -15,36 +15,36 @@
//
// CancelableCallback (base/cancelable_callback.h) and WeakPtr binding are
// preferred solutions for canceling a task. However, they don't support
-// cancelation from another thread. This is sometimes a performance critical
+// cancelation from another sequence. This is sometimes a performance critical
// requirement. E.g. We need to cancel database lookup task on DB thread when
// user changes input text. If it is performance critical to do a best effort
-// cancelation of a task, then CancelableTaskTracker is appropriate,
-// otherwise use one of the other mechanisms.
+// cancelation of a task, then CancelableTaskTracker is appropriate, otherwise
+// use one of the other mechanisms.
//
// THREAD-SAFETY:
//
-// 1. CancelableTaskTracker objects are not thread safe. They must
-// be created, used, and destroyed on the originating thread that posts the
-// task. It's safe to destroy a CancelableTaskTracker while there
-// are outstanding tasks. This is commonly used to cancel all outstanding
-// tasks.
+// 1. A CancelableTaskTracker object must be created, used, and destroyed on a
+// single sequence.
//
-// 2. Both task and reply are deleted on the originating thread.
+// 2. It's safe to destroy a CancelableTaskTracker while there are outstanding
+// tasks. This is commonly used to cancel all outstanding tasks.
//
-// 3. IsCanceledCallback is thread safe and can be run or deleted on any
-// thread.
+// 3. Both task and reply are deleted on the originating sequence.
+//
+// 4. IsCanceledCallback can be run or deleted on any sequence.
#ifndef BASE_TASK_CANCELABLE_TASK_TRACKER_H_
#define BASE_TASK_CANCELABLE_TASK_TRACKER_H_
#include <stdint.h>
#include "base/base_export.h"
+#include "base/bind.h"
#include "base/callback.h"
#include "base/containers/hash_tables.h"
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
-#include "base/task_runner_util.h"
-#include "base/threading/thread_checker.h"
+#include "base/post_task_and_reply_with_result_internal.h"
+#include "base/sequence_checker.h"
namespace tracked_objects {
class Location;
@@ -130,7 +130,7 @@ class BASE_EXPORT CancelableTaskTracker {
base::hash_map<TaskId, base::CancellationFlag*> task_flags_;
TaskId next_id_;
- base::ThreadChecker thread_checker_;
+ SequenceChecker sequence_checker_;
base::WeakPtrFactory<CancelableTaskTracker> weak_factory_;
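
A minimal usage sketch matching the revised comments in this header (single-sequence ownership, best-effort cancelation of outstanding work). LookupService, DoLookup(), and OnLookupDone() are invented names; only the CancelableTaskTracker calls come from the interface above.

#include <string>
#include <utility>

#include "base/bind.h"
#include "base/location.h"
#include "base/memory/ref_counted.h"
#include "base/task/cancelable_task_tracker.h"
#include "base/task_runner.h"

class LookupService {
 public:
  explicit LookupService(scoped_refptr<base::TaskRunner> db_runner)
      : db_runner_(std::move(db_runner)) {}

  // Called on the owning sequence each time the query changes.
  void StartLookup(const std::string& query) {
    tracker_.TryCancelAll();  // Best-effort cancelation of stale lookups.
    tracker_.PostTaskAndReply(
        db_runner_.get(), FROM_HERE,
        base::Bind(&LookupService::DoLookup, query),
        base::Bind(&LookupService::OnLookupDone, base::Unretained(this)));
  }

 private:
  static void DoLookup(const std::string& query) { /* runs on |db_runner_| */ }
  void OnLookupDone() { /* runs back on the owning sequence */ }

  scoped_refptr<base::TaskRunner> db_runner_;
  base::CancelableTaskTracker tracker_;  // Destroyed with |this|; cancels replies.
};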
diff --git a/chromium/base/task/cancelable_task_tracker_unittest.cc b/chromium/base/task/cancelable_task_tracker_unittest.cc
index 01a1f23c8aa..fd480f36875 100644
--- a/chromium/base/task/cancelable_task_tracker_unittest.cc
+++ b/chromium/base/task/cancelable_task_tracker_unittest.cc
@@ -15,6 +15,7 @@
#include "base/memory/weak_ptr.h"
#include "base/run_loop.h"
#include "base/single_thread_task_runner.h"
+#include "base/test/gtest_util.h"
#include "base/test/test_simple_task_runner.h"
#include "base/threading/thread.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -348,11 +349,7 @@ class CancelableTaskTrackerDeathTest : public CancelableTaskTrackerTest {
void MaybeRunDeadlyTaskTrackerMemberFunction(
CancelableTaskTracker* task_tracker,
const Callback<void(CancelableTaskTracker*)>& fn) {
-// CancelableTask uses DCHECKs with its ThreadChecker (itself only
-// enabled in debug mode).
-#if DCHECK_IS_ON()
- EXPECT_DEATH_IF_SUPPORTED(fn.Run(task_tracker), "");
-#endif
+ EXPECT_DCHECK_DEATH(fn.Run(task_tracker));
}
void PostDoNothingTask(CancelableTaskTracker* task_tracker) {
diff --git a/chromium/base/task_runner_util.h b/chromium/base/task_runner_util.h
index ba8e120c6f4..e57d07769fa 100644
--- a/chromium/base/task_runner_util.h
+++ b/chromium/base/task_runner_util.h
@@ -8,34 +8,11 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/logging.h"
+#include "base/post_task_and_reply_with_result_internal.h"
#include "base/task_runner.h"
namespace base {
-namespace internal {
-
-// Adapts a function that produces a result via a return value to
-// one that returns via an output parameter.
-template <typename ReturnType>
-void ReturnAsParamAdapter(const Callback<ReturnType(void)>& func,
- ReturnType* result) {
- *result = func.Run();
-}
-
-// Adapts a T* result to a callback that expects a T.
-template <typename TaskReturnType, typename ReplyArgType>
-void ReplyAdapter(const Callback<void(ReplyArgType)>& callback,
- TaskReturnType* result) {
- // TODO(ajwong): Remove this conditional and add a DCHECK to enforce that
- // |reply| must be non-null in PostTaskAndReplyWithResult() below after
- // current code that relies on this API softness has been removed.
- // http://crbug.com/162712
- if (!callback.is_null())
- callback.Run(std::move(*result));
-}
-
-} // namespace internal
-
// When you have these methods
//
// R DoWorkAndReturn();
@@ -56,6 +33,8 @@ bool PostTaskAndReplyWithResult(
const tracked_objects::Location& from_here,
const Callback<TaskReturnType(void)>& task,
const Callback<void(ReplyArgType)>& reply) {
+ DCHECK(task);
+ DCHECK(reply);
TaskReturnType* result = new TaskReturnType();
return task_runner->PostTaskAndReply(
from_here,
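
For reference, this is the call-site shape the comment block above describes, with the now-DCHECKed non-null |task| and |reply|. DoWorkAndReturn() and UseResult() are placeholder names taken from that comment, not real APIs.

#include "base/bind.h"
#include "base/location.h"
#include "base/task_runner.h"
#include "base/task_runner_util.h"

int DoWorkAndReturn() { return 42; }      // Runs on |task_runner|.
void UseResult(int result) { /* ... */ }  // Runs on the posting sequence.

void Example(base::TaskRunner* task_runner) {
  base::PostTaskAndReplyWithResult(task_runner, FROM_HERE,
                                   base::Bind(&DoWorkAndReturn),
                                   base::Bind(&UseResult));
}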
diff --git a/chromium/base/task_scheduler/post_task.cc b/chromium/base/task_scheduler/post_task.cc
index 737a219c260..8c3e941a303 100644
--- a/chromium/base/task_scheduler/post_task.cc
+++ b/chromium/base/task_scheduler/post_task.cc
@@ -30,7 +30,13 @@ class PostTaskAndReplyTaskRunner : public internal::PostTaskAndReplyImpl {
} // namespace
void PostTask(const tracked_objects::Location& from_here, const Closure& task) {
- PostTaskWithTraits(from_here, TaskTraits(), task);
+ PostDelayedTask(from_here, task, TimeDelta());
+}
+
+void PostDelayedTask(const tracked_objects::Location& from_here,
+ const Closure& task,
+ TimeDelta delay) {
+ PostDelayedTaskWithTraits(from_here, TaskTraits(), task, delay);
}
void PostTaskAndReply(const tracked_objects::Location& from_here,
@@ -42,7 +48,15 @@ void PostTaskAndReply(const tracked_objects::Location& from_here,
void PostTaskWithTraits(const tracked_objects::Location& from_here,
const TaskTraits& traits,
const Closure& task) {
- TaskScheduler::GetInstance()->PostTaskWithTraits(from_here, traits, task);
+ PostDelayedTaskWithTraits(from_here, traits, task, TimeDelta());
+}
+
+void PostDelayedTaskWithTraits(const tracked_objects::Location& from_here,
+ const TaskTraits& traits,
+ const Closure& task,
+ TimeDelta delay) {
+ TaskScheduler::GetInstance()->PostDelayedTaskWithTraits(from_here, traits,
+ task, delay);
}
void PostTaskWithTraitsAndReply(const tracked_objects::Location& from_here,
diff --git a/chromium/base/task_scheduler/post_task.h b/chromium/base/task_scheduler/post_task.h
index 346a2321476..1c5c7096b5c 100644
--- a/chromium/base/task_scheduler/post_task.h
+++ b/chromium/base/task_scheduler/post_task.h
@@ -6,21 +6,21 @@
#define BASE_TASK_SCHEDULER_POST_TASK_H_
#include "base/base_export.h"
+#include "base/bind.h"
#include "base/callback_forward.h"
#include "base/location.h"
#include "base/memory/ref_counted.h"
+#include "base/post_task_and_reply_with_result_internal.h"
#include "base/sequenced_task_runner.h"
#include "base/single_thread_task_runner.h"
#include "base/task_runner.h"
#include "base/task_scheduler/task_traits.h"
+#include "base/time/time.h"
namespace base {
// This is the preferred interface to post tasks to the TaskScheduler.
//
-// Note: The TaskScheduler is still in an experimental phase in Chrome. Please
-// refrain from using this API unless you know what you are doing.
-//
// TaskScheduler must have been registered for the current process via
// TaskScheduler::SetInstance() before the functions below are valid.
//
@@ -39,29 +39,42 @@ namespace base {
// task_runner.PostTask(FROM_HERE, Bind(...));
// task_runner.PostTask(FROM_HERE, Bind(...));
//
-// To post file I/O tasks that must run in sequence and can be skipped on
+// To post tasks that may block, must run in sequence and can be skipped on
// shutdown:
// scoped_refptr<SequencedTaskRunner> task_runner =
// CreateSequencedTaskRunnerWithTraits(
-// TaskTraits().WithFileIO().WithShutdownBehavior(
+// TaskTraits().MayBlock().WithShutdownBehavior(
// TaskShutdownBehavior::SKIP_ON_SHUTDOWN));
// task_runner.PostTask(FROM_HERE, Bind(...));
// task_runner.PostTask(FROM_HERE, Bind(...));
//
// The default TaskTraits apply to tasks that:
-// (1) don't need to do I/O,
-// (2) don't affect user interaction and/or visible elements, and
+// (1) don't block (ref. MayBlock() and WithBaseSyncPrimitives()),
+// (2) prefer inheriting the current priority to specifying their own, and
// (3) can either block shutdown or be skipped on shutdown
// (barring current TaskScheduler default).
// If those loose requirements are sufficient for your task, use
// PostTask[AndReply], otherwise override these with explicit traits via
// PostTaskWithTraits[AndReply].
+//
+// Tasks posted to TaskScheduler with a delay may be coalesced (i.e. delays may
+// be adjusted to reduce the number of wakeups and hence power consumption).
// Posts |task| to the TaskScheduler. Calling this is equivalent to calling
// PostTaskWithTraits with plain TaskTraits.
BASE_EXPORT void PostTask(const tracked_objects::Location& from_here,
const Closure& task);
+// Posts |task| to the TaskScheduler. |task| will not run before |delay|
+// expires. Calling this is equivalent to calling PostDelayedTaskWithTraits with
+// plain TaskTraits.
+//
+// Use PostDelayedTaskWithTraits to specify a BACKGROUND priority if the task
+// doesn't have to run as soon as |delay| expires.
+BASE_EXPORT void PostDelayedTask(const tracked_objects::Location& from_here,
+ const Closure& task,
+ TimeDelta delay);
+
// Posts |task| to the TaskScheduler and posts |reply| on the caller's execution
// context (i.e. same sequence or thread and same TaskTraits if applicable) when
// |task| completes. Calling this is equivalent to calling
@@ -71,11 +84,34 @@ BASE_EXPORT void PostTaskAndReply(const tracked_objects::Location& from_here,
const Closure& task,
const Closure& reply);
+// Posts |task| to the TaskScheduler and posts |reply| with the return value of
+// |task| as argument on the caller's execution context (i.e. same sequence or
+// thread and same TaskTraits if applicable) when |task| completes. Calling this
+// is equivalent to calling PostTaskWithTraitsAndReplyWithResult with plain
+// TaskTraits. Can only be called when SequencedTaskRunnerHandle::IsSet().
+template <typename TaskReturnType, typename ReplyArgType>
+void PostTaskAndReplyWithResult(const tracked_objects::Location& from_here,
+ const Callback<TaskReturnType(void)>& task,
+ const Callback<void(ReplyArgType)>& reply) {
+ PostTaskWithTraitsAndReplyWithResult(from_here, TaskTraits(), task, reply);
+}
+
// Posts |task| with specific |traits| to the TaskScheduler.
BASE_EXPORT void PostTaskWithTraits(const tracked_objects::Location& from_here,
const TaskTraits& traits,
const Closure& task);
+// Posts |task| with specific |traits| to the TaskScheduler. |task| will not run
+// before |delay| expires.
+//
+// Specify a BACKGROUND priority via |traits| if the task doesn't have to run as
+// soon as |delay| expires.
+BASE_EXPORT void PostDelayedTaskWithTraits(
+ const tracked_objects::Location& from_here,
+ const TaskTraits& traits,
+ const Closure& task,
+ TimeDelta delay);
+
// Posts |task| with specific |traits| to the TaskScheduler and posts |reply| on
// the caller's execution context (i.e. same sequence or thread and same
// TaskTraits if applicable) when |task| completes. Can only be called when
@@ -86,6 +122,24 @@ BASE_EXPORT void PostTaskWithTraitsAndReply(
const Closure& task,
const Closure& reply);
+// Posts |task| with specific |traits| to the TaskScheduler and posts |reply|
+// with the return value of |task| as argument on the caller's execution context
+// (i.e. same sequence or thread and same TaskTraits if applicable) when |task|
+// completes. Can only be called when SequencedTaskRunnerHandle::IsSet().
+template <typename TaskReturnType, typename ReplyArgType>
+void PostTaskWithTraitsAndReplyWithResult(
+ const tracked_objects::Location& from_here,
+ const TaskTraits& traits,
+ const Callback<TaskReturnType(void)>& task,
+ const Callback<void(ReplyArgType)>& reply) {
+ TaskReturnType* result = new TaskReturnType();
+ return PostTaskWithTraitsAndReply(
+ from_here, traits,
+ Bind(&internal::ReturnAsParamAdapter<TaskReturnType>, task, result),
+ Bind(&internal::ReplyAdapter<TaskReturnType, ReplyArgType>, reply,
+ Owned(result)));
+}
+
// Returns a TaskRunner whose PostTask invocations result in scheduling tasks
// using |traits|. Tasks may run in any order and in parallel.
BASE_EXPORT scoped_refptr<TaskRunner> CreateTaskRunnerWithTraits(
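
A sketch of the new entry points documented above, under the stated assumptions: CleanupCache(), ComputeValue(), and UseValue() are placeholders, and TaskTraits().WithPriority() is assumed to be how the BACKGROUND hint in the comments is expressed; the PostDelayed* and *ReplyWithResult calls mirror the declarations in this header.

#include "base/bind.h"
#include "base/location.h"
#include "base/task_scheduler/post_task.h"
#include "base/time/time.h"

void CleanupCache();   // Placeholder task body.
int ComputeValue();    // Placeholder task returning a result.
void UseValue(int v);  // Placeholder reply.

void Examples() {
  // Delayed post with default traits; the delay may be coalesced as noted above.
  base::PostDelayedTask(FROM_HERE, base::Bind(&CleanupCache),
                        base::TimeDelta::FromMinutes(5));

  // Same task, explicitly BACKGROUND because it need not run promptly once the
  // delay expires (WithPriority() is an assumption here).
  base::PostDelayedTaskWithTraits(
      FROM_HERE,
      base::TaskTraits().WithPriority(base::TaskPriority::BACKGROUND),
      base::Bind(&CleanupCache), base::TimeDelta::FromMinutes(5));

  // Return-value variant added above; requires SequencedTaskRunnerHandle::IsSet().
  base::PostTaskAndReplyWithResult(FROM_HERE, base::Bind(&ComputeValue),
                                   base::Bind(&UseValue));
}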
diff --git a/chromium/base/task_scheduler/scheduler_worker.cc b/chromium/base/task_scheduler/scheduler_worker.cc
index 5853bf6131e..94f85bc614f 100644
--- a/chromium/base/task_scheduler/scheduler_worker.cc
+++ b/chromium/base/task_scheduler/scheduler_worker.cc
@@ -9,8 +9,8 @@
#include <utility>
#include "base/logging.h"
+#include "base/memory/ptr_util.h"
#include "base/task_scheduler/task_tracker.h"
-#include "build/build_config.h"
#if defined(OS_MACOSX)
#include "base/mac/scoped_nsautorelease_pool.h"
@@ -44,12 +44,11 @@ class SchedulerWorker::Thread : public PlatformThread::Delegate {
WaitForWork();
#if defined(OS_WIN)
- // This is required as SequencedWorkerPool previously blindly CoInitialized
- // all of its threads.
- // TODO: Get rid of this broad COM scope and force tasks that care about a
- // CoInitialized environment to request one (via an upcoming execution
- // mode).
- win::ScopedCOMInitializer com_initializer;
+ std::unique_ptr<win::ScopedCOMInitializer> com_initializer;
+ if (outer_->backward_compatibility_ ==
+ SchedulerBackwardCompatibility::INIT_COM_STA) {
+ com_initializer = MakeUnique<win::ScopedCOMInitializer>();
+ }
#endif
while (!outer_->task_tracker_->IsShutdownComplete() &&
@@ -78,11 +77,10 @@ class SchedulerWorker::Thread : public PlatformThread::Delegate {
continue;
}
- std::unique_ptr<Task> task = sequence->TakeTask();
- const TaskPriority task_priority = task->traits.priority();
- const TimeDelta task_latency = TimeTicks::Now() - task->sequenced_time;
- if (outer_->task_tracker_->RunTask(std::move(task), sequence->token()))
- outer_->delegate_->DidRunTaskWithPriority(task_priority, task_latency);
+ if (outer_->task_tracker_->RunTask(sequence->TakeTask(),
+ sequence->token())) {
+ outer_->delegate_->DidRunTask();
+ }
const bool sequence_became_empty = sequence->Pop();
@@ -193,9 +191,11 @@ std::unique_ptr<SchedulerWorker> SchedulerWorker::Create(
ThreadPriority priority_hint,
std::unique_ptr<Delegate> delegate,
TaskTracker* task_tracker,
- InitialState initial_state) {
- std::unique_ptr<SchedulerWorker> worker(
- new SchedulerWorker(priority_hint, std::move(delegate), task_tracker));
+ InitialState initial_state,
+ SchedulerBackwardCompatibility backward_compatibility) {
+ auto worker =
+ WrapUnique(new SchedulerWorker(priority_hint, std::move(delegate),
+ task_tracker, backward_compatibility));
// Creation happens before any other thread can reference this one, so no
// synchronization is necessary.
if (initial_state == SchedulerWorker::InitialState::ALIVE) {
@@ -246,12 +246,19 @@ bool SchedulerWorker::ThreadAliveForTesting() const {
return !!thread_;
}
-SchedulerWorker::SchedulerWorker(ThreadPriority priority_hint,
- std::unique_ptr<Delegate> delegate,
- TaskTracker* task_tracker)
+SchedulerWorker::SchedulerWorker(
+ ThreadPriority priority_hint,
+ std::unique_ptr<Delegate> delegate,
+ TaskTracker* task_tracker,
+ SchedulerBackwardCompatibility backward_compatibility)
: priority_hint_(priority_hint),
delegate_(std::move(delegate)),
- task_tracker_(task_tracker) {
+ task_tracker_(task_tracker)
+#if defined(OS_WIN)
+ ,
+ backward_compatibility_(backward_compatibility)
+#endif
+{
DCHECK(delegate_);
DCHECK(task_tracker_);
}
diff --git a/chromium/base/task_scheduler/scheduler_worker.h b/chromium/base/task_scheduler/scheduler_worker.h
index a9b891ad772..0aa8075f937 100644
--- a/chromium/base/task_scheduler/scheduler_worker.h
+++ b/chromium/base/task_scheduler/scheduler_worker.h
@@ -13,9 +13,11 @@
#include "base/synchronization/atomic_flag.h"
#include "base/synchronization/waitable_event.h"
#include "base/task_scheduler/scheduler_lock.h"
+#include "base/task_scheduler/scheduler_worker_params.h"
#include "base/task_scheduler/sequence.h"
#include "base/threading/platform_thread.h"
#include "base/time/time.h"
+#include "build/build_config.h"
namespace base {
namespace internal {
@@ -53,11 +55,8 @@ class BASE_EXPORT SchedulerWorker {
// run a Task.
virtual scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) = 0;
- // Called by the SchedulerWorker after it ran a task with |task_priority|.
- // |task_latency| is the time elapsed between when the task was posted and
- // when it started to run.
- virtual void DidRunTaskWithPriority(TaskPriority task_priority,
- const TimeDelta& task_latency) = 0;
+ // Called by the SchedulerWorker after it ran a task.
+ virtual void DidRunTask() = 0;
// Called when |sequence| isn't empty after the SchedulerWorker pops a Task
// from it. |sequence| is the last Sequence returned by GetWork().
@@ -97,12 +96,15 @@ class BASE_EXPORT SchedulerWorker {
// |task_tracker| is used to handle shutdown behavior of Tasks. If
// |worker_state| is DETACHED, the thread will be created upon a WakeUp().
// Returns nullptr if creating the underlying platform thread fails during
- // Create().
+ // Create(). |backward_compatibility| indicates whether backward compatibility
+ // is enabled.
static std::unique_ptr<SchedulerWorker> Create(
ThreadPriority priority_hint,
std::unique_ptr<Delegate> delegate,
TaskTracker* task_tracker,
- InitialState initial_state);
+ InitialState initial_state,
+ SchedulerBackwardCompatibility backward_compatibility =
+ SchedulerBackwardCompatibility::DISABLED);
// Destroying a SchedulerWorker in production is not allowed; it is always
// leaked. In tests, it can only be destroyed after JoinForTesting() has
@@ -130,7 +132,8 @@ class BASE_EXPORT SchedulerWorker {
SchedulerWorker(ThreadPriority thread_priority,
std::unique_ptr<Delegate> delegate,
- TaskTracker* task_tracker);
+ TaskTracker* task_tracker,
+ SchedulerBackwardCompatibility backward_compatibility);
// Returns the thread instance if the detach was successful so that it can be
// freed upon termination of the thread.
@@ -148,9 +151,14 @@ class BASE_EXPORT SchedulerWorker {
std::unique_ptr<Thread> thread_;
const ThreadPriority priority_hint_;
+
const std::unique_ptr<Delegate> delegate_;
TaskTracker* const task_tracker_;
+#if defined(OS_WIN)
+ const SchedulerBackwardCompatibility backward_compatibility_;
+#endif
+
// Set once JoinForTesting() has been called.
AtomicFlag should_exit_for_testing_;
diff --git a/chromium/base/task_scheduler/scheduler_worker_params.h b/chromium/base/task_scheduler/scheduler_worker_params.h
new file mode 100644
index 00000000000..ea753fff595
--- /dev/null
+++ b/chromium/base/task_scheduler/scheduler_worker_params.h
@@ -0,0 +1,24 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SCHEDULER_WORKER_PARAMS_H_
+#define BASE_TASK_SCHEDULER_SCHEDULER_WORKER_PARAMS_H_
+
+namespace base {
+
+enum class SchedulerBackwardCompatibility {
+ // No backward compatibility.
+ DISABLED,
+
+ // On Windows, initialize COM STA to mimic SequencedWorkerPool and
+ // BrowserThreadImpl. Behaves like DISABLED on other platforms.
+ // TODO(fdoray): Get rid of this and force tasks that care about a
+ // CoInitialized environment to request one explicitly (via an upcoming
+ // execution mode).
+ INIT_COM_STA,
+};
+
+} // namespace base
+
+#endif // BASE_TASK_SCHEDULER_SCHEDULER_WORKER_PARAMS_H_
diff --git a/chromium/base/task_scheduler/scheduler_worker_pool_impl.cc b/chromium/base/task_scheduler/scheduler_worker_pool_impl.cc
index f2d69344f4c..93800eb33b7 100644
--- a/chromium/base/task_scheduler/scheduler_worker_pool_impl.cc
+++ b/chromium/base/task_scheduler/scheduler_worker_pool_impl.cc
@@ -21,11 +21,12 @@
#include "base/strings/stringprintf.h"
#include "base/task_runner.h"
#include "base/task_scheduler/delayed_task_manager.h"
+#include "base/task_scheduler/scheduler_worker_pool_params.h"
#include "base/task_scheduler/task_tracker.h"
+#include "base/task_scheduler/task_traits.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_local.h"
#include "base/threading/thread_restrictions.h"
-#include "base/time/time.h"
namespace base {
namespace internal {
@@ -39,7 +40,6 @@ constexpr char kNumTasksBeforeDetachHistogramPrefix[] =
"TaskScheduler.NumTasksBeforeDetach.";
constexpr char kNumTasksBetweenWaitsHistogramPrefix[] =
"TaskScheduler.NumTasksBetweenWaits.";
-constexpr char kTaskLatencyHistogramPrefix[] = "TaskScheduler.TaskLatency.";
// SchedulerWorkerPool that owns the current thread, if any.
LazyInstance<ThreadLocalPointer<const SchedulerWorkerPool>>::Leaky
@@ -129,29 +129,6 @@ class SchedulerSequencedTaskRunner : public SequencedTaskRunner {
DISALLOW_COPY_AND_ASSIGN(SchedulerSequencedTaskRunner);
};
-HistogramBase* GetTaskLatencyHistogram(const std::string& pool_name,
- TaskPriority task_priority) {
- const char* task_priority_suffix = nullptr;
- switch (task_priority) {
- case TaskPriority::BACKGROUND:
- task_priority_suffix = ".BackgroundTaskPriority";
- break;
- case TaskPriority::USER_VISIBLE:
- task_priority_suffix = ".UserVisibleTaskPriority";
- break;
- case TaskPriority::USER_BLOCKING:
- task_priority_suffix = ".UserBlockingTaskPriority";
- break;
- }
-
- // Mimics the UMA_HISTOGRAM_TIMES macro.
- return Histogram::FactoryTimeGet(kTaskLatencyHistogramPrefix + pool_name +
- kPoolNameSuffix + task_priority_suffix,
- TimeDelta::FromMilliseconds(1),
- TimeDelta::FromSeconds(10), 50,
- HistogramBase::kUmaTargetedHistogramFlag);
-}
-
// Only used in DCHECKs.
bool ContainsWorker(
const std::vector<std::unique_ptr<SchedulerWorker>>& workers,
@@ -239,8 +216,7 @@ class SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl
// SchedulerWorker::Delegate:
void OnMainEntry(SchedulerWorker* worker) override;
scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) override;
- void DidRunTaskWithPriority(TaskPriority task_priority,
- const TimeDelta& task_latency) override;
+ void DidRunTask() override;
void ReEnqueueSequence(scoped_refptr<Sequence> sequence) override;
TimeDelta GetSleepTimeout() override;
bool CanDetach(SchedulerWorker* worker) override;
@@ -308,16 +284,10 @@ std::unique_ptr<SchedulerWorkerPoolImpl> SchedulerWorkerPoolImpl::Create(
const ReEnqueueSequenceCallback& re_enqueue_sequence_callback,
TaskTracker* task_tracker,
DelayedTaskManager* delayed_task_manager) {
- std::unique_ptr<SchedulerWorkerPoolImpl> worker_pool(
- new SchedulerWorkerPoolImpl(params.name(),
- params.io_restriction(),
- params.suggested_reclaim_time(),
- task_tracker, delayed_task_manager));
- if (worker_pool->Initialize(
- params.priority_hint(), params.standby_thread_policy(),
- params.max_threads(), re_enqueue_sequence_callback)) {
+ auto worker_pool = WrapUnique(
+ new SchedulerWorkerPoolImpl(params, task_tracker, delayed_task_manager));
+ if (worker_pool->Initialize(params, re_enqueue_sequence_callback))
return worker_pool;
- }
return nullptr;
}
@@ -519,10 +489,6 @@ void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::OnMainEntry(
// New threads haven't run GetWork() yet, so reset the |idle_start_time_|.
idle_start_time_ = TimeTicks();
-
- ThreadRestrictions::SetIOAllowed(
- outer_->io_restriction_ ==
- SchedulerWorkerPoolParams::IORestriction::ALLOWED);
}
scoped_refptr<Sequence>
@@ -606,29 +572,9 @@ SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::GetWork(
return sequence;
}
-void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
- DidRunTaskWithPriority(TaskPriority task_priority,
- const TimeDelta& task_latency) {
+void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::DidRunTask() {
++num_tasks_since_last_wait_;
++num_tasks_since_last_detach_;
-
- const int priority_index = static_cast<int>(task_priority);
-
- // As explained in the header file, histograms are allocated on demand. It
- // doesn't matter if an element of |task_latency_histograms_| is set multiple
- // times since GetTaskLatencyHistogram() is idempotent. As explained in the
- // comment at the top of histogram_macros.h, barriers are required.
- HistogramBase* task_latency_histogram = reinterpret_cast<HistogramBase*>(
- subtle::Acquire_Load(&outer_->task_latency_histograms_[priority_index]));
- if (!task_latency_histogram) {
- task_latency_histogram =
- GetTaskLatencyHistogram(outer_->name_, task_priority);
- subtle::Release_Store(
- &outer_->task_latency_histograms_[priority_index],
- reinterpret_cast<subtle::AtomicWord>(task_latency_histogram));
- }
-
- task_latency_histogram->AddTime(task_latency);
}
void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::
@@ -675,14 +621,11 @@ void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::OnDetach() {
}
SchedulerWorkerPoolImpl::SchedulerWorkerPoolImpl(
- StringPiece name,
- SchedulerWorkerPoolParams::IORestriction io_restriction,
- const TimeDelta& suggested_reclaim_time,
+ const SchedulerWorkerPoolParams& params,
TaskTracker* task_tracker,
DelayedTaskManager* delayed_task_manager)
- : name_(name.as_string()),
- io_restriction_(io_restriction),
- suggested_reclaim_time_(suggested_reclaim_time),
+ : name_(params.name()),
+ suggested_reclaim_time_(params.suggested_reclaim_time()),
idle_workers_stack_lock_(shared_priority_queue_.container_lock()),
idle_workers_stack_cv_for_testing_(
idle_workers_stack_lock_.CreateConditionVariable()),
@@ -725,31 +668,29 @@ SchedulerWorkerPoolImpl::SchedulerWorkerPoolImpl(
}
bool SchedulerWorkerPoolImpl::Initialize(
- ThreadPriority priority_hint,
- SchedulerWorkerPoolParams::StandbyThreadPolicy standby_thread_policy,
- size_t max_threads,
+ const SchedulerWorkerPoolParams& params,
const ReEnqueueSequenceCallback& re_enqueue_sequence_callback) {
AutoSchedulerLock auto_lock(idle_workers_stack_lock_);
DCHECK(workers_.empty());
- workers_.resize(max_threads);
+ workers_.resize(params.max_threads());
// Create workers and push them to the idle stack in reverse order of index.
// This ensures that they are woken up in order of index and that the ALIVE
// worker is on top of the stack.
- for (int index = max_threads - 1; index >= 0; --index) {
+ for (int index = params.max_threads() - 1; index >= 0; --index) {
const bool is_standby_lazy =
- standby_thread_policy ==
+ params.standby_thread_policy() ==
SchedulerWorkerPoolParams::StandbyThreadPolicy::LAZY;
const SchedulerWorker::InitialState initial_state =
(index == 0 && !is_standby_lazy)
? SchedulerWorker::InitialState::ALIVE
: SchedulerWorker::InitialState::DETACHED;
std::unique_ptr<SchedulerWorker> worker = SchedulerWorker::Create(
- priority_hint,
+ params.priority_hint(),
MakeUnique<SchedulerWorkerDelegateImpl>(
this, re_enqueue_sequence_callback, &shared_priority_queue_, index),
- task_tracker_, initial_state);
+ task_tracker_, initial_state, params.backward_compatibility());
if (!worker)
break;
idle_workers_stack_.Push(worker.get());
@@ -767,7 +708,7 @@ void SchedulerWorkerPoolImpl::WakeUpWorker(SchedulerWorker* worker) {
DCHECK(worker);
RemoveFromIdleWorkersStack(worker);
worker->WakeUp();
- // TOOD(robliao): Honor StandbyThreadPolicy::ONE here and consider adding
+ // TODO(robliao): Honor StandbyThreadPolicy::ONE here and consider adding
// hysteresis to the CanDetach check. See https://crbug.com/666041.
}
diff --git a/chromium/base/task_scheduler/scheduler_worker_pool_impl.h b/chromium/base/task_scheduler/scheduler_worker_pool_impl.h
index 8014030b541..f1b88806b46 100644
--- a/chromium/base/task_scheduler/scheduler_worker_pool_impl.h
+++ b/chromium/base/task_scheduler/scheduler_worker_pool_impl.h
@@ -11,30 +11,27 @@
#include <string>
#include <vector>
-#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/callback.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
-#include "base/strings/string_piece.h"
#include "base/synchronization/atomic_flag.h"
#include "base/synchronization/condition_variable.h"
#include "base/task_scheduler/priority_queue.h"
#include "base/task_scheduler/scheduler_lock.h"
#include "base/task_scheduler/scheduler_worker.h"
#include "base/task_scheduler/scheduler_worker_pool.h"
-#include "base/task_scheduler/scheduler_worker_pool_params.h"
#include "base/task_scheduler/scheduler_worker_stack.h"
#include "base/task_scheduler/sequence.h"
#include "base/task_scheduler/task.h"
-#include "base/task_scheduler/task_traits.h"
-#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
namespace base {
class HistogramBase;
-class TimeDelta;
+class SchedulerWorkerPoolParams;
+class TaskTraits;
namespace internal {
@@ -114,17 +111,12 @@ class BASE_EXPORT SchedulerWorkerPoolImpl : public SchedulerWorkerPool {
class SchedulerSingleThreadTaskRunner;
class SchedulerWorkerDelegateImpl;
- SchedulerWorkerPoolImpl(StringPiece name,
- SchedulerWorkerPoolParams::IORestriction
- io_restriction,
- const TimeDelta& suggested_reclaim_time,
+ SchedulerWorkerPoolImpl(const SchedulerWorkerPoolParams& params,
TaskTracker* task_tracker,
DelayedTaskManager* delayed_task_manager);
bool Initialize(
- ThreadPriority priority_hint,
- SchedulerWorkerPoolParams::StandbyThreadPolicy standby_thread_policy,
- size_t max_threads,
+ const SchedulerWorkerPoolParams& params,
const ReEnqueueSequenceCallback& re_enqueue_sequence_callback);
// Wakes up |worker|.
@@ -162,9 +154,6 @@ class BASE_EXPORT SchedulerWorkerPoolImpl : public SchedulerWorkerPool {
// PriorityQueue from which all threads of this worker pool get work.
PriorityQueue shared_priority_queue_;
- // Indicates whether Tasks on this worker pool are allowed to make I/O calls.
- const SchedulerWorkerPoolParams::IORestriction io_restriction_;
-
// Suggested reclaim time for workers.
const TimeDelta suggested_reclaim_time_;
@@ -209,14 +198,6 @@ class BASE_EXPORT SchedulerWorkerPoolImpl : public SchedulerWorkerPool {
// Intentionally leaked.
HistogramBase* const num_tasks_between_waits_histogram_;
- // TaskScheduler.TaskLatency.[worker pool name].[task priority] histograms.
- // Indexed by task priority. Histograms are allocated on demand to reduce
- // memory usage (some task priorities might never run in this
- // SchedulerThreadPoolImpl). Intentionally leaked.
- subtle::AtomicWord
- task_latency_histograms_[static_cast<int>(TaskPriority::HIGHEST) + 1] =
- {};
-
TaskTracker* const task_tracker_;
DelayedTaskManager* const delayed_task_manager_;
diff --git a/chromium/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc b/chromium/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc
index 9983cad5c16..dfaa9c7ff3d 100644
--- a/chromium/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc
+++ b/chromium/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc
@@ -58,7 +58,6 @@ constexpr TimeDelta kReclaimTimeForDetachTests =
constexpr TimeDelta kExtraTimeToWaitForDetach =
TimeDelta::FromSeconds(1);
-using IORestriction = SchedulerWorkerPoolParams::IORestriction;
using StandbyThreadPolicy = SchedulerWorkerPoolParams::StandbyThreadPolicy;
class TaskSchedulerWorkerPoolImplTest
@@ -77,7 +76,7 @@ class TaskSchedulerWorkerPoolImplTest
worker_pool_->JoinForTesting();
}
- void InitializeWorkerPool(const TimeDelta& suggested_reclaim_time,
+ void InitializeWorkerPool(TimeDelta suggested_reclaim_time,
size_t num_workers) {
ASSERT_FALSE(worker_pool_);
ASSERT_FALSE(delayed_task_manager_);
@@ -85,9 +84,9 @@ class TaskSchedulerWorkerPoolImplTest
delayed_task_manager_ =
base::MakeUnique<DelayedTaskManager>(service_thread_.task_runner());
worker_pool_ = SchedulerWorkerPoolImpl::Create(
- SchedulerWorkerPoolParams(
- "TestWorkerPool", ThreadPriority::NORMAL, IORestriction::ALLOWED,
- StandbyThreadPolicy::LAZY, num_workers, suggested_reclaim_time),
+ SchedulerWorkerPoolParams("TestWorkerPool", ThreadPriority::NORMAL,
+ StandbyThreadPolicy::LAZY, num_workers,
+ suggested_reclaim_time),
Bind(&TaskSchedulerWorkerPoolImplTest::ReEnqueueSequenceCallback,
Unretained(this)),
&task_tracker_, delayed_task_manager_.get());
@@ -115,13 +114,15 @@ class TaskSchedulerWorkerPoolImplTest
scoped_refptr<TaskRunner> CreateTaskRunnerWithExecutionMode(
SchedulerWorkerPoolImpl* worker_pool,
test::ExecutionMode execution_mode) {
+ // Allow tasks posted to the returned TaskRunner to wait on a WaitableEvent.
+ const TaskTraits traits = TaskTraits().WithBaseSyncPrimitives();
switch (execution_mode) {
case test::ExecutionMode::PARALLEL:
- return worker_pool->CreateTaskRunnerWithTraits(TaskTraits());
+ return worker_pool->CreateTaskRunnerWithTraits(traits);
case test::ExecutionMode::SEQUENCED:
- return worker_pool->CreateSequencedTaskRunnerWithTraits(TaskTraits());
+ return worker_pool->CreateSequencedTaskRunnerWithTraits(traits);
case test::ExecutionMode::SINGLE_THREADED:
- return worker_pool->CreateSingleThreadTaskRunnerWithTraits(TaskTraits());
+ return worker_pool->CreateSingleThreadTaskRunnerWithTraits(traits);
}
ADD_FAILURE() << "Unknown ExecutionMode";
return nullptr;
@@ -438,67 +439,6 @@ INSTANTIATE_TEST_CASE_P(
namespace {
-void NotReachedReEnqueueSequenceCallback(scoped_refptr<Sequence> sequence) {
- ADD_FAILURE()
- << "Unexpected invocation of NotReachedReEnqueueSequenceCallback.";
-}
-
-// Verifies that the current thread allows I/O if |io_restriction| is ALLOWED
-// and disallows it otherwise. Signals |event| before returning.
-void ExpectIORestriction(IORestriction io_restriction, WaitableEvent* event) {
- DCHECK(event);
-
- if (io_restriction == IORestriction::ALLOWED) {
- ThreadRestrictions::AssertIOAllowed();
- } else {
- EXPECT_DCHECK_DEATH({ ThreadRestrictions::AssertIOAllowed(); });
- }
-
- event->Signal();
-}
-
-class TaskSchedulerWorkerPoolImplIORestrictionTest
- : public testing::TestWithParam<IORestriction> {
- public:
- TaskSchedulerWorkerPoolImplIORestrictionTest() = default;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerPoolImplIORestrictionTest);
-};
-
-} // namespace
-
-TEST_P(TaskSchedulerWorkerPoolImplIORestrictionTest, IORestriction) {
- TaskTracker task_tracker;
- DelayedTaskManager delayed_task_manager(
- make_scoped_refptr(new TestSimpleTaskRunner));
-
- auto worker_pool = SchedulerWorkerPoolImpl::Create(
- SchedulerWorkerPoolParams(
- "TestWorkerPoolWithParam", ThreadPriority::NORMAL, GetParam(),
- StandbyThreadPolicy::LAZY, 1U, TimeDelta::Max()),
- Bind(&NotReachedReEnqueueSequenceCallback), &task_tracker,
- &delayed_task_manager);
- ASSERT_TRUE(worker_pool);
-
- WaitableEvent task_ran(WaitableEvent::ResetPolicy::MANUAL,
- WaitableEvent::InitialState::NOT_SIGNALED);
- worker_pool->CreateTaskRunnerWithTraits(TaskTraits())
- ->PostTask(FROM_HERE, Bind(&ExpectIORestriction, GetParam(), &task_ran));
- task_ran.Wait();
-
- worker_pool->JoinForTesting();
-}
-
-INSTANTIATE_TEST_CASE_P(IOAllowed,
- TaskSchedulerWorkerPoolImplIORestrictionTest,
- ::testing::Values(IORestriction::ALLOWED));
-INSTANTIATE_TEST_CASE_P(IODisallowed,
- TaskSchedulerWorkerPoolImplIORestrictionTest,
- ::testing::Values(IORestriction::DISALLOWED));
-
-namespace {
-
class TaskSchedulerWorkerPoolSingleThreadedTest
: public TaskSchedulerWorkerPoolImplTest {
public:
@@ -603,7 +543,8 @@ TEST_F(TaskSchedulerWorkerPoolCheckTlsReuse, CheckDetachedThreads) {
std::vector<std::unique_ptr<test::TestTaskFactory>> factories;
for (size_t i = 0; i < kNumWorkersInWorkerPool; ++i) {
factories.push_back(MakeUnique<test::TestTaskFactory>(
- worker_pool_->CreateTaskRunnerWithTraits(TaskTraits()),
+ worker_pool_->CreateTaskRunnerWithTraits(
+ TaskTraits().WithBaseSyncPrimitives()),
test::ExecutionMode::PARALLEL));
ASSERT_TRUE(factories.back()->PostTask(
PostNestedTask::NO,
@@ -674,8 +615,8 @@ TEST_F(TaskSchedulerWorkerPoolHistogramTest, NumTasksBetweenWaits) {
WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
InitializeWorkerPool(TimeDelta::Max(), kNumWorkersInWorkerPool);
- auto task_runner =
- worker_pool_->CreateSequencedTaskRunnerWithTraits(TaskTraits());
+ auto task_runner = worker_pool_->CreateSequencedTaskRunnerWithTraits(
+ TaskTraits().WithBaseSyncPrimitives());
// Post a task.
task_runner->PostTask(FROM_HERE,
@@ -718,7 +659,8 @@ TEST_F(TaskSchedulerWorkerPoolHistogramTest, NumTasksBetweenWaitsWithDetach) {
WaitableEvent tasks_can_exit_event(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
InitializeWorkerPool(kReclaimTimeForDetachTests, kNumWorkersInWorkerPool);
- auto task_runner = worker_pool_->CreateTaskRunnerWithTraits(TaskTraits());
+ auto task_runner = worker_pool_->CreateTaskRunnerWithTraits(
+ TaskTraits().WithBaseSyncPrimitives());
// Post tasks to saturate the pool.
std::vector<std::unique_ptr<WaitableEvent>> task_started_events;
@@ -779,6 +721,11 @@ TEST_F(TaskSchedulerWorkerPoolHistogramTest, NumTasksBetweenWaitsWithDetach) {
namespace {
+void NotReachedReEnqueueSequenceCallback(scoped_refptr<Sequence> sequence) {
+ ADD_FAILURE()
+ << "Unexpected invocation of NotReachedReEnqueueSequenceCallback.";
+}
+
void CaptureThreadId(PlatformThreadId* thread_id) {
ASSERT_TRUE(thread_id);
*thread_id = PlatformThread::CurrentId();
@@ -832,7 +779,6 @@ TEST(TaskSchedulerWorkerPoolStandbyPolicyTest, InitLazy) {
make_scoped_refptr(new TestSimpleTaskRunner));
auto worker_pool = SchedulerWorkerPoolImpl::Create(
SchedulerWorkerPoolParams("LazyPolicyWorkerPool", ThreadPriority::NORMAL,
- IORestriction::DISALLOWED,
StandbyThreadPolicy::LAZY, 8U,
TimeDelta::Max()),
Bind(&NotReachedReEnqueueSequenceCallback), &task_tracker,
@@ -848,7 +794,6 @@ TEST(TaskSchedulerWorkerPoolStandbyPolicyTest, InitOne) {
make_scoped_refptr(new TestSimpleTaskRunner));
auto worker_pool = SchedulerWorkerPoolImpl::Create(
SchedulerWorkerPoolParams("LazyPolicyWorkerPool", ThreadPriority::NORMAL,
- IORestriction::DISALLOWED,
StandbyThreadPolicy::ONE, 8U, TimeDelta::Max()),
Bind(&NotReachedReEnqueueSequenceCallback), &task_tracker,
&delayed_task_manager);
diff --git a/chromium/base/task_scheduler/scheduler_worker_pool_params.cc b/chromium/base/task_scheduler/scheduler_worker_pool_params.cc
index dbc02f58261..0747c2ed2cd 100644
--- a/chromium/base/task_scheduler/scheduler_worker_pool_params.cc
+++ b/chromium/base/task_scheduler/scheduler_worker_pool_params.cc
@@ -4,23 +4,21 @@
#include "base/task_scheduler/scheduler_worker_pool_params.h"
-#include "base/time/time.h"
-
namespace base {
SchedulerWorkerPoolParams::SchedulerWorkerPoolParams(
const std::string& name,
ThreadPriority priority_hint,
- IORestriction io_restriction,
StandbyThreadPolicy standby_thread_policy,
int max_threads,
- const TimeDelta& suggested_reclaim_time)
+ TimeDelta suggested_reclaim_time,
+ SchedulerBackwardCompatibility backward_compatibility)
: name_(name),
priority_hint_(priority_hint),
- io_restriction_(io_restriction),
standby_thread_policy_(standby_thread_policy),
max_threads_(max_threads),
- suggested_reclaim_time_(suggested_reclaim_time) {}
+ suggested_reclaim_time_(suggested_reclaim_time),
+ backward_compatibility_(backward_compatibility) {}
SchedulerWorkerPoolParams::SchedulerWorkerPoolParams(
SchedulerWorkerPoolParams&& other) = default;
diff --git a/chromium/base/task_scheduler/scheduler_worker_pool_params.h b/chromium/base/task_scheduler/scheduler_worker_pool_params.h
index c33392afd06..5f90fd482d0 100644
--- a/chromium/base/task_scheduler/scheduler_worker_pool_params.h
+++ b/chromium/base/task_scheduler/scheduler_worker_pool_params.h
@@ -8,19 +8,14 @@
#include <string>
#include "base/macros.h"
+#include "base/task_scheduler/scheduler_worker_params.h"
#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
namespace base {
-class TimeDelta;
-
class BASE_EXPORT SchedulerWorkerPoolParams final {
public:
- enum class IORestriction {
- ALLOWED,
- DISALLOWED,
- };
-
enum class StandbyThreadPolicy {
// Create threads as needed on demand, reclaimed as necessary.
LAZY,
@@ -34,37 +29,40 @@ class BASE_EXPORT SchedulerWorkerPoolParams final {
// ("TaskScheduler." + histogram name + "." + |name| + extra suffixes). The
// pool will contain up to |max_threads|. |priority_hint| is the preferred
// thread priority; the actual thread priority depends on shutdown state and
- // platform capabilities. |io_restriction| indicates whether Tasks on the pool
- // are allowed to make I/O calls. |suggested_reclaim_time| sets a suggestion
- // on when to reclaim idle threads. The pool is free to ignore this value for
- // performance or correctness reasons.
- SchedulerWorkerPoolParams(const std::string& name,
- ThreadPriority priority_hint,
- IORestriction io_restriction,
- StandbyThreadPolicy standby_thread_policy,
- int max_threads,
- const TimeDelta& suggested_reclaim_time);
+ // platform capabilities. |standby_thread_policy| indicates whether an idle
+ // thread should be kept alive on standby. |suggested_reclaim_time| sets a
+ // suggestion on when to reclaim idle threads. The pool is free to ignore this
+ // value for performance or correctness reasons. |backward_compatibility|
+ // indicates whether backward compatibility is enabled.
+ SchedulerWorkerPoolParams(
+ const std::string& name,
+ ThreadPriority priority_hint,
+ StandbyThreadPolicy standby_thread_policy,
+ int max_threads,
+ TimeDelta suggested_reclaim_time,
+ SchedulerBackwardCompatibility backward_compatibility =
+ SchedulerBackwardCompatibility::DISABLED);
SchedulerWorkerPoolParams(SchedulerWorkerPoolParams&& other);
SchedulerWorkerPoolParams& operator=(SchedulerWorkerPoolParams&& other);
const std::string& name() const { return name_; }
ThreadPriority priority_hint() const { return priority_hint_; }
- IORestriction io_restriction() const { return io_restriction_; }
StandbyThreadPolicy standby_thread_policy() const {
return standby_thread_policy_;
}
size_t max_threads() const { return max_threads_; }
- const TimeDelta& suggested_reclaim_time() const {
- return suggested_reclaim_time_;
+ TimeDelta suggested_reclaim_time() const { return suggested_reclaim_time_; }
+ SchedulerBackwardCompatibility backward_compatibility() const {
+ return backward_compatibility_;
}
private:
std::string name_;
ThreadPriority priority_hint_;
- IORestriction io_restriction_;
StandbyThreadPolicy standby_thread_policy_;
size_t max_threads_;
TimeDelta suggested_reclaim_time_;
+ SchedulerBackwardCompatibility backward_compatibility_;
DISALLOW_COPY_AND_ASSIGN(SchedulerWorkerPoolParams);
};
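
A sketch of the revised constructor in use (IORestriction dropped, TimeDelta taken by value, optional backward-compatibility flag); the pool name and sizes are arbitrary, and the argument order follows the declaration above and the emplace_back in task_scheduler.cc.

#include "base/task_scheduler/scheduler_worker_pool_params.h"
#include "base/threading/platform_thread.h"
#include "base/time/time.h"

base::SchedulerWorkerPoolParams MakeExamplePoolParams() {
  return base::SchedulerWorkerPoolParams(
      "ExamplePool", base::ThreadPriority::NORMAL,
      base::SchedulerWorkerPoolParams::StandbyThreadPolicy::ONE,
      /*max_threads=*/4, base::TimeDelta::FromSeconds(30),
      base::SchedulerBackwardCompatibility::INIT_COM_STA);
}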
diff --git a/chromium/base/task_scheduler/scheduler_worker_stack_unittest.cc b/chromium/base/task_scheduler/scheduler_worker_stack_unittest.cc
index 5359dcd0ebc..520e52cd61f 100644
--- a/chromium/base/task_scheduler/scheduler_worker_stack_unittest.cc
+++ b/chromium/base/task_scheduler/scheduler_worker_stack_unittest.cc
@@ -26,9 +26,8 @@ class MockSchedulerWorkerDelegate : public SchedulerWorker::Delegate {
scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) override {
return nullptr;
}
- void DidRunTaskWithPriority(TaskPriority task_priority,
- const TimeDelta& task_latency) override {
- ADD_FAILURE() << "Unexpected call to DidRunTaskWithPriority()";
+ void DidRunTask() override {
+ ADD_FAILURE() << "Unexpected call to DidRunTask()";
}
void ReEnqueueSequence(scoped_refptr<Sequence> sequence) override {
ADD_FAILURE() << "Unexpected call to ReEnqueueSequence()";
diff --git a/chromium/base/task_scheduler/scheduler_worker_unittest.cc b/chromium/base/task_scheduler/scheduler_worker_unittest.cc
index 22847580e32..b65d50c07c3 100644
--- a/chromium/base/task_scheduler/scheduler_worker_unittest.cc
+++ b/chromium/base/task_scheduler/scheduler_worker_unittest.cc
@@ -25,6 +25,10 @@
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
+#if defined(OS_WIN)
+#include <objbase.h>
+#endif
+
using testing::_;
using testing::Mock;
using testing::Ne;
@@ -45,9 +49,8 @@ class SchedulerWorkerDefaultDelegate : public SchedulerWorker::Delegate {
scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) override {
return nullptr;
}
- void DidRunTaskWithPriority(TaskPriority task_priority,
- const TimeDelta& task_latency) override {
- ADD_FAILURE() << "Unexpected call to DidRunTaskWithPriority()";
+ void DidRunTask() override {
+ ADD_FAILURE() << "Unexpected call to DidRunTask()";
}
void ReEnqueueSequence(scoped_refptr<Sequence> sequence) override {
ADD_FAILURE() << "Unexpected call to ReEnqueueSequence()";
@@ -127,14 +130,14 @@ class TaskSchedulerWorkerTest : public testing::TestWithParam<size_t> {
: outer_(outer) {}
~TestSchedulerWorkerDelegate() override {
- EXPECT_FALSE(IsCallToDidRunTaskWithPriorityExpected());
+ EXPECT_FALSE(IsCallToDidRunTaskExpected());
}
// SchedulerWorker::Delegate:
void OnMainEntry(SchedulerWorker* worker) override {
outer_->worker_set_.Wait();
EXPECT_EQ(outer_->worker_.get(), worker);
- EXPECT_FALSE(IsCallToDidRunTaskWithPriorityExpected());
+ EXPECT_FALSE(IsCallToDidRunTaskExpected());
// Without synchronization, OnMainEntry() could be called twice without
// generating an error.
@@ -144,7 +147,7 @@ class TaskSchedulerWorkerTest : public testing::TestWithParam<size_t> {
}
scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) override {
- EXPECT_FALSE(IsCallToDidRunTaskWithPriorityExpected());
+ EXPECT_FALSE(IsCallToDidRunTaskExpected());
EXPECT_EQ(outer_->worker_.get(), worker);
{
@@ -174,7 +177,7 @@ class TaskSchedulerWorkerTest : public testing::TestWithParam<size_t> {
sequence->PushTask(std::move(task));
}
- ExpectCallToDidRunTaskWithPriority(sequence->PeekTaskTraits().priority());
+ ExpectCallToDidRunTask();
{
// Add the Sequence to the vector of created Sequences.
@@ -185,13 +188,10 @@ class TaskSchedulerWorkerTest : public testing::TestWithParam<size_t> {
return sequence;
}
- void DidRunTaskWithPriority(TaskPriority task_priority,
- const TimeDelta& task_latency) override {
- AutoSchedulerLock auto_lock(expect_did_run_task_with_priority_lock_);
- EXPECT_TRUE(expect_did_run_task_with_priority_);
- EXPECT_EQ(expected_task_priority_, task_priority);
- EXPECT_FALSE(task_latency.is_max());
- expect_did_run_task_with_priority_ = false;
+ void DidRunTask() override {
+ AutoSchedulerLock auto_lock(expect_did_run_task_lock_);
+ EXPECT_TRUE(expect_did_run_task_);
+ expect_did_run_task_ = false;
}
// This override verifies that |sequence| contains the expected number of
@@ -199,7 +199,7 @@ class TaskSchedulerWorkerTest : public testing::TestWithParam<size_t> {
// EnqueueSequence implementation, it doesn't reinsert |sequence| into a
// queue for further execution.
void ReEnqueueSequence(scoped_refptr<Sequence> sequence) override {
- EXPECT_FALSE(IsCallToDidRunTaskWithPriorityExpected());
+ EXPECT_FALSE(IsCallToDidRunTaskExpected());
EXPECT_GT(outer_->TasksPerSequence(), 1U);
// Verify that |sequence| contains TasksPerSequence() - 1 Tasks.
@@ -216,31 +216,27 @@ class TaskSchedulerWorkerTest : public testing::TestWithParam<size_t> {
}
private:
- // Expect a call to DidRunTaskWithPriority() with |task_priority| as
- // argument before the next call to any other method of this delegate.
- void ExpectCallToDidRunTaskWithPriority(TaskPriority task_priority) {
- AutoSchedulerLock auto_lock(expect_did_run_task_with_priority_lock_);
- expect_did_run_task_with_priority_ = true;
- expected_task_priority_ = task_priority;
+ // Expect a call to DidRunTask() before the next call to any other method of
+ // this delegate.
+ void ExpectCallToDidRunTask() {
+ AutoSchedulerLock auto_lock(expect_did_run_task_lock_);
+ expect_did_run_task_ = true;
}
- bool IsCallToDidRunTaskWithPriorityExpected() const {
- AutoSchedulerLock auto_lock(expect_did_run_task_with_priority_lock_);
- return expect_did_run_task_with_priority_;
+ bool IsCallToDidRunTaskExpected() const {
+ AutoSchedulerLock auto_lock(expect_did_run_task_lock_);
+ return expect_did_run_task_;
}
TaskSchedulerWorkerTest* outer_;
- // Synchronizes access to |expect_did_run_task_with_priority_| and
- // |expected_task_priority_|.
- mutable SchedulerLock expect_did_run_task_with_priority_lock_;
+ // Synchronizes access to |expect_did_run_task_|.
+ mutable SchedulerLock expect_did_run_task_lock_;
- // Whether the next method called on this delegate should be
- // DidRunTaskWithPriority().
- bool expect_did_run_task_with_priority_ = false;
+ // Whether the next method called on this delegate should be DidRunTask().
+ bool expect_did_run_task_ = false;
- // Expected priority for the next call to DidRunTaskWithPriority().
- TaskPriority expected_task_priority_ = TaskPriority::BACKGROUND;
+ DISALLOW_COPY_AND_ASSIGN(TestSchedulerWorkerDelegate);
};
void RunTaskCallback() {
@@ -285,6 +281,8 @@ class TaskSchedulerWorkerTest : public testing::TestWithParam<size_t> {
DISALLOW_COPY_AND_ASSIGN(TaskSchedulerWorkerTest);
};
+} // namespace
+
// Verify that when GetWork() continuously returns Sequences, all Tasks in these
// Sequences run successfully. The test wakes up the SchedulerWorker once.
TEST_P(TaskSchedulerWorkerTest, ContinuousWork) {
@@ -390,8 +388,7 @@ class ControllableDetachDelegate : public SchedulerWorkerDefaultDelegate {
return sequence;
}
- void DidRunTaskWithPriority(TaskPriority task,
- const TimeDelta& task_latency) override {}
+ void DidRunTask() override {}
bool CanDetach(SchedulerWorker* worker) override {
detach_requested_.Signal();
@@ -605,6 +602,84 @@ TEST(TaskSchedulerWorkerTest, BumpPriorityOfDetachedThreadDuringShutdown) {
worker->JoinForTesting();
}
+#if defined(OS_WIN)
+
+namespace {
+
+class CoInitializeDelegate : public SchedulerWorkerDefaultDelegate {
+ public:
+ CoInitializeDelegate()
+ : get_work_returned_(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED) {}
+
+ scoped_refptr<Sequence> GetWork(SchedulerWorker* worker) override {
+ EXPECT_FALSE(get_work_returned_.IsSignaled());
+ EXPECT_EQ(E_UNEXPECTED, coinitialize_hresult_);
+
+ coinitialize_hresult_ = CoInitializeEx(nullptr, COINIT_APARTMENTTHREADED);
+ if (SUCCEEDED(coinitialize_hresult_))
+ CoUninitialize();
+
+ get_work_returned_.Signal();
+ return nullptr;
+ }
+
+ void WaitUntilGetWorkReturned() { get_work_returned_.Wait(); }
+
+ HRESULT coinitialize_hresult() const { return coinitialize_hresult_; }
+
+ private:
+ WaitableEvent get_work_returned_;
+ HRESULT coinitialize_hresult_ = E_UNEXPECTED;
+
+ DISALLOW_COPY_AND_ASSIGN(CoInitializeDelegate);
+};
+
} // namespace
+
+TEST(TaskSchedulerWorkerTest, BackwardCompatibilityEnabled) {
+ TaskTracker task_tracker;
+ auto delegate = MakeUnique<CoInitializeDelegate>();
+ CoInitializeDelegate* const delegate_raw = delegate.get();
+
+ // Create a worker with backward compatibility ENABLED. Wake it up and wait
+ // until GetWork() returns.
+ auto worker = SchedulerWorker::Create(
+ ThreadPriority::NORMAL, std::move(delegate), &task_tracker,
+ SchedulerWorker::InitialState::ALIVE,
+ SchedulerBackwardCompatibility::INIT_COM_STA);
+ worker->WakeUp();
+ delegate_raw->WaitUntilGetWorkReturned();
+
+ // The call to CoInitializeEx() should have returned S_FALSE to indicate that
+ // the COM library was already initialized on the thread.
+ EXPECT_EQ(S_FALSE, delegate_raw->coinitialize_hresult());
+
+ worker->JoinForTesting();
+}
+
+TEST(TaskSchedulerWorkerTest, BackwardCompatibilityDisabled) {
+ TaskTracker task_tracker;
+ auto delegate = MakeUnique<CoInitializeDelegate>();
+ CoInitializeDelegate* const delegate_raw = delegate.get();
+
+ // Create a worker with backward compatibility DISABLED. Wake it up and wait
+ // until GetWork() returns.
+ auto worker = SchedulerWorker::Create(
+ ThreadPriority::NORMAL, std::move(delegate), &task_tracker,
+ SchedulerWorker::InitialState::ALIVE,
+ SchedulerBackwardCompatibility::DISABLED);
+ worker->WakeUp();
+ delegate_raw->WaitUntilGetWorkReturned();
+
+ // The call to CoInitializeEx() should have returned S_OK to indicate that the
+ // COM library wasn't already initialized on the thread.
+ EXPECT_EQ(S_OK, delegate_raw->coinitialize_hresult());
+
+ worker->JoinForTesting();
+}
+
+#endif // defined(OS_WIN)
+
} // namespace internal
} // namespace base
diff --git a/chromium/base/task_scheduler/scoped_set_task_priority_for_current_thread.cc b/chromium/base/task_scheduler/scoped_set_task_priority_for_current_thread.cc
new file mode 100644
index 00000000000..a163863d0ff
--- /dev/null
+++ b/chromium/base/task_scheduler/scoped_set_task_priority_for_current_thread.cc
@@ -0,0 +1,41 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/scoped_set_task_priority_for_current_thread.h"
+
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/threading/thread_local.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+
+LazyInstance<ThreadLocalPointer<const TaskPriority>>::Leaky
+ tls_task_priority_for_current_thread = LAZY_INSTANCE_INITIALIZER;
+
+} // namespace
+
+ScopedSetTaskPriorityForCurrentThread::ScopedSetTaskPriorityForCurrentThread(
+ TaskPriority priority)
+ : priority_(priority) {
+ DCHECK(!tls_task_priority_for_current_thread.Get().Get());
+ tls_task_priority_for_current_thread.Get().Set(&priority_);
+}
+
+ScopedSetTaskPriorityForCurrentThread::
+ ~ScopedSetTaskPriorityForCurrentThread() {
+ DCHECK_EQ(&priority_, tls_task_priority_for_current_thread.Get().Get());
+ tls_task_priority_for_current_thread.Get().Set(nullptr);
+}
+
+TaskPriority GetTaskPriorityForCurrentThread() {
+ const TaskPriority* priority =
+ tls_task_priority_for_current_thread.Get().Get();
+ return priority ? *priority : TaskPriority::USER_VISIBLE;
+}
+
+} // namespace internal
+} // namespace base
diff --git a/chromium/base/task_scheduler/scoped_set_task_priority_for_current_thread.h b/chromium/base/task_scheduler/scoped_set_task_priority_for_current_thread.h
new file mode 100644
index 00000000000..4508911d9ca
--- /dev/null
+++ b/chromium/base/task_scheduler/scoped_set_task_priority_for_current_thread.h
@@ -0,0 +1,36 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SCOPED_SET_TASK_PRIORITY_FOR_CURRENT_THREAD_H_
+#define BASE_TASK_SCHEDULER_SCOPED_SET_TASK_PRIORITY_FOR_CURRENT_THREAD_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/task_scheduler/task_traits.h"
+
+namespace base {
+namespace internal {
+
+class BASE_EXPORT ScopedSetTaskPriorityForCurrentThread {
+ public:
+ // Within the scope of this object, GetTaskPriorityForCurrentThread() will
+ // return |priority|.
+ ScopedSetTaskPriorityForCurrentThread(TaskPriority priority);
+ ~ScopedSetTaskPriorityForCurrentThread();
+
+ private:
+ const TaskPriority priority_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedSetTaskPriorityForCurrentThread);
+};
+
+// Returns the priority of the TaskScheduler task running on the current thread,
+// or TaskPriority::USER_VISIBLE if no TaskScheduler task is running on the
+// current thread.
+BASE_EXPORT TaskPriority GetTaskPriorityForCurrentThread();
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_TASK_SCHEDULER_SCOPED_SET_TASK_PRIORITY_FOR_CURRENT_THREAD_H_
diff --git a/chromium/base/task_scheduler/scoped_set_task_priority_for_current_thread_unittest.cc b/chromium/base/task_scheduler/scoped_set_task_priority_for_current_thread_unittest.cc
new file mode 100644
index 00000000000..c497af67702
--- /dev/null
+++ b/chromium/base/task_scheduler/scoped_set_task_priority_for_current_thread_unittest.cc
@@ -0,0 +1,26 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/scoped_set_task_priority_for_current_thread.h"
+
+#include "base/task_scheduler/task_traits.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace internal {
+
+TEST(TaskSchedulerScopedSetTaskPriorityForCurrentThreadTest,
+ ScopedSetTaskPriorityForCurrentThread) {
+ EXPECT_EQ(TaskPriority::USER_VISIBLE, GetTaskPriorityForCurrentThread());
+ {
+ ScopedSetTaskPriorityForCurrentThread
+ scoped_set_task_priority_for_current_thread(
+ TaskPriority::USER_BLOCKING);
+ EXPECT_EQ(TaskPriority::USER_BLOCKING, GetTaskPriorityForCurrentThread());
+ }
+ EXPECT_EQ(TaskPriority::USER_VISIBLE, GetTaskPriorityForCurrentThread());
+}
+
+} // namespace internal
+} // namespace base
diff --git a/chromium/base/task_scheduler/sequence_unittest.cc b/chromium/base/task_scheduler/sequence_unittest.cc
index ba020cb31c0..c45d8a87d01 100644
--- a/chromium/base/task_scheduler/sequence_unittest.cc
+++ b/chromium/base/task_scheduler/sequence_unittest.cc
@@ -62,7 +62,7 @@ class TaskSchedulerSequenceTest : public testing::Test {
std::unique_ptr<Task> task_e_owned_;
// Raw pointers to those same tasks for verification. This is needed because
- // the scoped_ptrs above no longer point to the tasks once they have been
+ // the unique_ptrs above no longer point to the tasks once they have been
// moved into a Sequence.
const Task* task_a_;
const Task* task_b_;
diff --git a/chromium/base/task_scheduler/task.cc b/chromium/base/task_scheduler/task.cc
index 7314099c43a..3780c16dcb7 100644
--- a/chromium/base/task_scheduler/task.cc
+++ b/chromium/base/task_scheduler/task.cc
@@ -10,7 +10,7 @@ namespace internal {
Task::Task(const tracked_objects::Location& posted_from,
const Closure& task,
const TaskTraits& traits,
- const TimeDelta& delay)
+ TimeDelta delay)
: PendingTask(posted_from,
task,
delay.is_zero() ? TimeTicks() : TimeTicks::Now() + delay,
diff --git a/chromium/base/task_scheduler/task.h b/chromium/base/task_scheduler/task.h
index c014671ce07..c5b9bdb53bd 100644
--- a/chromium/base/task_scheduler/task.h
+++ b/chromium/base/task_scheduler/task.h
@@ -30,7 +30,7 @@ struct BASE_EXPORT Task : public PendingTask {
Task(const tracked_objects::Location& posted_from,
const Closure& task,
const TaskTraits& traits,
- const TimeDelta& delay);
+ TimeDelta delay);
~Task();
// The TaskTraits of this task.
diff --git a/chromium/base/task_scheduler/task_scheduler.cc b/chromium/base/task_scheduler/task_scheduler.cc
index 4b7d0b7e85a..00ca4f15ce9 100644
--- a/chromium/base/task_scheduler/task_scheduler.cc
+++ b/chromium/base/task_scheduler/task_scheduler.cc
@@ -4,8 +4,12 @@
#include "base/task_scheduler/task_scheduler.h"
+#include "base/bind.h"
#include "base/logging.h"
+#include "base/task_scheduler/scheduler_worker_pool_params.h"
#include "base/task_scheduler/task_scheduler_impl.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
namespace base {
@@ -17,6 +21,18 @@ TaskScheduler* g_task_scheduler = nullptr;
} // namespace
// static
+void TaskScheduler::CreateAndSetSimpleTaskScheduler(int max_threads) {
+ std::vector<SchedulerWorkerPoolParams> worker_pool_params_vector;
+ worker_pool_params_vector.emplace_back(
+ "Simple", ThreadPriority::NORMAL,
+ SchedulerWorkerPoolParams::StandbyThreadPolicy::LAZY, max_threads,
+ TimeDelta::FromSeconds(30));
+ CreateAndSetDefaultTaskScheduler(
+ worker_pool_params_vector,
+ Bind([](const TaskTraits&) -> size_t { return 0; }));
+}
+
+// static
void TaskScheduler::CreateAndSetDefaultTaskScheduler(
const std::vector<SchedulerWorkerPoolParams>& worker_pool_params_vector,
const WorkerPoolIndexForTraitsCallback&
diff --git a/chromium/base/task_scheduler/task_scheduler.h b/chromium/base/task_scheduler/task_scheduler.h
index 385edd77a8b..5d9344bcbad 100644
--- a/chromium/base/task_scheduler/task_scheduler.h
+++ b/chromium/base/task_scheduler/task_scheduler.h
@@ -15,6 +15,7 @@
#include "base/single_thread_task_runner.h"
#include "base/task_runner.h"
#include "base/task_scheduler/task_traits.h"
+#include "base/time/time.h"
namespace tracked_objects {
class Location;
@@ -39,11 +40,13 @@ class BASE_EXPORT TaskScheduler {
virtual ~TaskScheduler() = default;
- // Posts |task| with specific |traits|.
+ // Posts |task| with a |delay| and specific |traits|. |delay| can be zero.
// For one off tasks that don't require a TaskRunner.
- virtual void PostTaskWithTraits(const tracked_objects::Location& from_here,
- const TaskTraits& traits,
- const Closure& task) = 0;
+ virtual void PostDelayedTaskWithTraits(
+ const tracked_objects::Location& from_here,
+ const TaskTraits& traits,
+ const Closure& task,
+ TimeDelta delay) = 0;
// Returns a TaskRunner whose PostTask invocations result in scheduling tasks
// using |traits|. Tasks may run in any order and in parallel.
@@ -82,16 +85,21 @@ class BASE_EXPORT TaskScheduler {
// other threads during the call. Returns immediately when shutdown completes.
virtual void FlushForTesting() = 0;
- // CreateAndSetDefaultTaskScheduler() and SetInstance() register a
- // TaskScheduler to handle tasks posted through the post_task.h API for this
- // process. The registered TaskScheduler will only be deleted when a new
- // TaskScheduler is registered and is leaked on shutdown. The methods must
- // not be called when TaskRunners created by the previous TaskScheduler are
- // still alive. The methods are not thread-safe; proper synchronization is
- // required to use the post_task.h API after registering a new TaskScheduler.
-
- // Creates and sets a default task scheduler. CHECKs on failure.
- // |worker_pool_params_vector| describes the worker pools to create.
+ // CreateAndSetSimpleTaskScheduler(), CreateAndSetDefaultTaskScheduler(), and
+ // SetInstance() register a TaskScheduler to handle tasks posted through the
+ // post_task.h API for this process. The registered TaskScheduler will only be
+ // deleted when a new TaskScheduler is registered and is leaked on shutdown.
+ // The methods must not be called when TaskRunners created by the previous
+ // TaskScheduler are still alive. The methods are not thread-safe; proper
+ // synchronization is required to use the post_task.h API after registering a
+ // new TaskScheduler.
+
+ // Creates and sets a task scheduler with one worker pool that can have up to
+ // |max_threads| threads. CHECKs on failure.
+ static void CreateAndSetSimpleTaskScheduler(int max_threads);
+
+ // Creates and sets a task scheduler with custom worker pools. CHECKs on
+ // failure. |worker_pool_params_vector| describes the worker pools to create.
// |worker_pool_index_for_traits_callback| returns the index in |worker_pools|
// of the worker pool in which a task with given traits should run.
static void CreateAndSetDefaultTaskScheduler(
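As a usage illustration only (not part of the commit): a minimal sketch of how the two entry points above fit together, relying solely on the signatures shown in this diff plus TaskScheduler::GetInstance(); the thread count, delay, and helper function are made up.

// Sketch, not Chromium source: initialize a simple scheduler, then post a
// one-off delayed task. A zero TimeDelta reproduces the old
// PostTaskWithTraits() behavior.
#include "base/bind.h"
#include "base/location.h"
#include "base/task_scheduler/task_scheduler.h"
#include "base/task_scheduler/task_traits.h"
#include "base/time/time.h"

void DoBackgroundWork() {}  // Hypothetical task body.

void InitSchedulerAndPost() {
  // One worker pool with at most 4 threads; CHECKs on failure.
  base::TaskScheduler::CreateAndSetSimpleTaskScheduler(4);

  base::TaskScheduler::GetInstance()->PostDelayedTaskWithTraits(
      FROM_HERE,
      base::TaskTraits().MayBlock().WithPriority(
          base::TaskPriority::BACKGROUND),
      base::Bind(&DoBackgroundWork), base::TimeDelta::FromSeconds(2));
}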
diff --git a/chromium/base/task_scheduler/task_scheduler_impl.cc b/chromium/base/task_scheduler/task_scheduler_impl.cc
index 708685d68d6..827caae0f95 100644
--- a/chromium/base/task_scheduler/task_scheduler_impl.cc
+++ b/chromium/base/task_scheduler/task_scheduler_impl.cc
@@ -14,7 +14,6 @@
#include "base/task_scheduler/sequence_sort_key.h"
#include "base/task_scheduler/task.h"
#include "base/task_scheduler/task_tracker.h"
-#include "base/time/time.h"
#include "build/build_config.h"
#if defined(OS_POSIX) && !defined(OS_NACL_SFI)
@@ -41,13 +40,14 @@ TaskSchedulerImpl::~TaskSchedulerImpl() {
#endif
}
-void TaskSchedulerImpl::PostTaskWithTraits(
+void TaskSchedulerImpl::PostDelayedTaskWithTraits(
const tracked_objects::Location& from_here,
const TaskTraits& traits,
- const Closure& task) {
+ const Closure& task,
+ TimeDelta delay) {
// Post |task| as part of a one-off single-task Sequence.
GetWorkerPoolForTraits(traits)->PostTaskWithSequence(
- MakeUnique<Task>(from_here, task, traits, TimeDelta()),
+ MakeUnique<Task>(from_here, task, traits, delay),
make_scoped_refptr(new Sequence), nullptr);
}
@@ -116,15 +116,15 @@ void TaskSchedulerImpl::Initialize(
// Start the service thread. On platforms that support it (POSIX except NaCL
// SFI), the service thread runs a MessageLoopForIO which is used to support
// FileDescriptorWatcher in the scope in which tasks run.
- constexpr MessageLoop::Type kServiceThreadMessageLoopType =
+ Thread::Options service_thread_options;
+ service_thread_options.message_loop_type =
#if defined(OS_POSIX) && !defined(OS_NACL_SFI)
MessageLoop::TYPE_IO;
#else
MessageLoop::TYPE_DEFAULT;
#endif
- constexpr size_t kDefaultStackSize = 0;
- CHECK(service_thread_.StartWithOptions(
- Thread::Options(kServiceThreadMessageLoopType, kDefaultStackSize)));
+ service_thread_options.timer_slack = TIMER_SLACK_MAXIMUM;
+ CHECK(service_thread_.StartWithOptions(service_thread_options));
// Instantiate TaskTracker. Needs to happen after starting the service thread
// to get its message_loop().
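For context, a small sketch of the Thread::Options pattern the hunk above switches to (illustrative only; it assumes the base::Thread fields used in the diff, message_loop_type and timer_slack):

// Sketch, not Chromium source.
#include "base/message_loop/message_loop.h"
#include "base/threading/thread.h"

bool StartServiceThreadLikeAbove(base::Thread* thread) {
  base::Thread::Options options;
#if defined(OS_POSIX) && !defined(OS_NACL_SFI)
  // MessageLoopForIO so FileDescriptorWatcher can be supported in tasks.
  options.message_loop_type = base::MessageLoop::TYPE_IO;
#else
  options.message_loop_type = base::MessageLoop::TYPE_DEFAULT;
#endif
  // Maximum timer slack: the service thread mostly services delayed tasks, so
  // coalesced wake-ups are acceptable and cheaper.
  options.timer_slack = base::TIMER_SLACK_MAXIMUM;
  return thread->StartWithOptions(options);
}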
diff --git a/chromium/base/task_scheduler/task_scheduler_impl.h b/chromium/base/task_scheduler/task_scheduler_impl.h
index 1483c5d646c..3e6cfdb12d4 100644
--- a/chromium/base/task_scheduler/task_scheduler_impl.h
+++ b/chromium/base/task_scheduler/task_scheduler_impl.h
@@ -50,9 +50,10 @@ class BASE_EXPORT TaskSchedulerImpl : public TaskScheduler {
~TaskSchedulerImpl() override;
// TaskScheduler:
- void PostTaskWithTraits(const tracked_objects::Location& from_here,
- const TaskTraits& traits,
- const Closure& task) override;
+ void PostDelayedTaskWithTraits(const tracked_objects::Location& from_here,
+ const TaskTraits& traits,
+ const Closure& task,
+ TimeDelta delay) override;
scoped_refptr<TaskRunner> CreateTaskRunnerWithTraits(
const TaskTraits& traits) override;
scoped_refptr<SequencedTaskRunner> CreateSequencedTaskRunnerWithTraits(
@@ -63,8 +64,8 @@ class BASE_EXPORT TaskSchedulerImpl : public TaskScheduler {
void Shutdown() override;
void FlushForTesting() override;
- // Joins all threads of this scheduler. Tasks that are already running are
- // allowed to complete their execution. This can only be called once.
+ // Joins all threads. Tasks that are already running are allowed to complete
+ // their execution. This can only be called once.
void JoinForTesting();
private:
diff --git a/chromium/base/task_scheduler/task_scheduler_impl_unittest.cc b/chromium/base/task_scheduler/task_scheduler_impl_unittest.cc
index 43f8d745bd2..cda06063ed7 100644
--- a/chromium/base/task_scheduler/task_scheduler_impl_unittest.cc
+++ b/chromium/base/task_scheduler/task_scheduler_impl_unittest.cc
@@ -20,6 +20,7 @@
#include "base/task_scheduler/scheduler_worker_pool_params.h"
#include "base/task_scheduler/task_traits.h"
#include "base/task_scheduler/test_task_factory.h"
+#include "base/test/test_timeouts.h"
#include "base/threading/platform_thread.h"
#include "base/threading/simple_thread.h"
#include "base/threading/thread.h"
@@ -53,7 +54,7 @@ bool GetIOAllowed() {
// Verify that the current thread priority and I/O restrictions are appropriate
// to run a Task with |traits|.
// Note: ExecutionMode is verified inside TestTaskFactory.
-void VerifyTaskEnvironement(const TaskTraits& traits) {
+void VerifyTaskEnvironment(const TaskTraits& traits) {
const bool supports_background_priority =
Lock::HandlesMultipleThreadPriorities() &&
PlatformThread::CanIncreaseCurrentThreadPriority();
@@ -67,7 +68,7 @@ void VerifyTaskEnvironement(const TaskTraits& traits) {
#if DCHECK_IS_ON()
// The #if above is required because GetIOAllowed() always returns true when
// !DCHECK_IS_ON(), even when |traits| don't allow file I/O.
- EXPECT_EQ(traits.with_file_io(), GetIOAllowed());
+ EXPECT_EQ(traits.may_block(), GetIOAllowed());
#endif
// Verify that the thread the task is running on is named as expected.
@@ -77,14 +78,23 @@ void VerifyTaskEnvironement(const TaskTraits& traits) {
current_thread_name.find(
traits.priority() == TaskPriority::BACKGROUND ? "Background"
: "Foreground"));
- EXPECT_EQ(traits.with_file_io(),
- current_thread_name.find("FileIO") != std::string::npos);
+ EXPECT_EQ(traits.may_block(),
+ current_thread_name.find("Blocking") != std::string::npos);
}
-void VerifyTaskEnvironementAndSignalEvent(const TaskTraits& traits,
- WaitableEvent* event) {
+void VerifyTaskEnvironmentAndSignalEvent(const TaskTraits& traits,
+ WaitableEvent* event) {
DCHECK(event);
- VerifyTaskEnvironement(traits);
+ VerifyTaskEnvironment(traits);
+ event->Signal();
+}
+
+void VerifyTimeAndTaskEnvironmentAndSignalEvent(const TaskTraits& traits,
+ TimeTicks expected_time,
+ WaitableEvent* event) {
+ DCHECK(event);
+ EXPECT_LE(expected_time, TimeTicks::Now());
+ VerifyTaskEnvironment(traits);
event->Signal();
}
@@ -127,7 +137,7 @@ class ThreadPostingTasks : public SimpleThread {
const size_t kNumTasksPerThread = 150;
for (size_t i = 0; i < kNumTasksPerThread; ++i) {
factory_.PostTask(test::TestTaskFactory::PostNestedTask::NO,
- Bind(&VerifyTaskEnvironement, traits_));
+ Bind(&VerifyTaskEnvironment, traits_));
}
}
@@ -138,7 +148,7 @@ class ThreadPostingTasks : public SimpleThread {
};
// Returns a vector with a TraitsExecutionModePair for each valid
-// combination of {ExecutionMode, TaskPriority, WithFileIO()}.
+// combination of {ExecutionMode, TaskPriority, MayBlock()}.
std::vector<TraitsExecutionModePair> GetTraitsExecutionModePairs() {
std::vector<TraitsExecutionModePair> params;
@@ -154,7 +164,7 @@ std::vector<TraitsExecutionModePair> GetTraitsExecutionModePairs() {
params.push_back(TraitsExecutionModePair(
TaskTraits().WithPriority(priority), execution_mode));
params.push_back(TraitsExecutionModePair(
- TaskTraits().WithPriority(priority).WithFileIO(), execution_mode));
+ TaskTraits().WithPriority(priority).MayBlock(), execution_mode));
}
}
@@ -163,16 +173,16 @@ std::vector<TraitsExecutionModePair> GetTraitsExecutionModePairs() {
enum WorkerPoolType {
BACKGROUND_WORKER_POOL = 0,
- BACKGROUND_FILE_IO_WORKER_POOL,
+ BACKGROUND_BLOCKING_WORKER_POOL,
FOREGROUND_WORKER_POOL,
- FOREGROUND_FILE_IO_WORKER_POOL,
+ FOREGROUND_BLOCKING_WORKER_POOL,
};
size_t GetThreadPoolIndexForTraits(const TaskTraits& traits) {
- if (traits.with_file_io()) {
+ if (traits.may_block()) {
return traits.priority() == TaskPriority::BACKGROUND
- ? BACKGROUND_FILE_IO_WORKER_POOL
- : FOREGROUND_FILE_IO_WORKER_POOL;
+ ? BACKGROUND_BLOCKING_WORKER_POOL
+ : FOREGROUND_BLOCKING_WORKER_POOL;
}
return traits.priority() == TaskPriority::BACKGROUND ? BACKGROUND_WORKER_POOL
: FOREGROUND_WORKER_POOL;
@@ -184,30 +194,26 @@ class TaskSchedulerImplTest
TaskSchedulerImplTest() = default;
void SetUp() override {
- using IORestriction = SchedulerWorkerPoolParams::IORestriction;
using StandbyThreadPolicy = SchedulerWorkerPoolParams::StandbyThreadPolicy;
std::vector<SchedulerWorkerPoolParams> params_vector;
ASSERT_EQ(BACKGROUND_WORKER_POOL, params_vector.size());
params_vector.emplace_back("Background", ThreadPriority::BACKGROUND,
- IORestriction::DISALLOWED,
StandbyThreadPolicy::LAZY, 1U, TimeDelta::Max());
- ASSERT_EQ(BACKGROUND_FILE_IO_WORKER_POOL, params_vector.size());
- params_vector.emplace_back("BackgroundFileIO", ThreadPriority::BACKGROUND,
- IORestriction::ALLOWED,
+ ASSERT_EQ(BACKGROUND_BLOCKING_WORKER_POOL, params_vector.size());
+ params_vector.emplace_back("BackgroundBlocking", ThreadPriority::BACKGROUND,
StandbyThreadPolicy::LAZY, 3U, TimeDelta::Max());
ASSERT_EQ(FOREGROUND_WORKER_POOL, params_vector.size());
params_vector.emplace_back("Foreground", ThreadPriority::NORMAL,
- IORestriction::DISALLOWED,
StandbyThreadPolicy::LAZY, 4U, TimeDelta::Max());
- ASSERT_EQ(FOREGROUND_FILE_IO_WORKER_POOL, params_vector.size());
- params_vector.emplace_back(
- "ForegroundFileIO", ThreadPriority::NORMAL, IORestriction::ALLOWED,
- StandbyThreadPolicy::LAZY, 12U, TimeDelta::Max());
+ ASSERT_EQ(FOREGROUND_BLOCKING_WORKER_POOL, params_vector.size());
+ params_vector.emplace_back("ForegroundBlocking", ThreadPriority::NORMAL,
+ StandbyThreadPolicy::LAZY, 12U,
+ TimeDelta::Max());
scheduler_ = TaskSchedulerImpl::Create(params_vector,
Bind(&GetThreadPoolIndexForTraits));
@@ -224,16 +230,33 @@ class TaskSchedulerImplTest
} // namespace
-// Verifies that a Task posted via PostTaskWithTraits with parameterized
-// TaskTraits runs on a thread with the expected priority and I/O restrictions.
-// The ExecutionMode parameter is ignored by this test.
-TEST_P(TaskSchedulerImplTest, PostTaskWithTraits) {
+// Verifies that a Task posted via PostDelayedTaskWithTraits with parameterized
+// TaskTraits and no delay runs on a thread with the expected priority and I/O
+// restrictions. The ExecutionMode parameter is ignored by this test.
+TEST_P(TaskSchedulerImplTest, PostDelayedTaskWithTraitsNoDelay) {
+ WaitableEvent task_ran(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ scheduler_->PostDelayedTaskWithTraits(
+ FROM_HERE, GetParam().traits,
+ Bind(&VerifyTaskEnvironmentAndSignalEvent, GetParam().traits,
+ Unretained(&task_ran)),
+ TimeDelta());
+ task_ran.Wait();
+}
+
+// Verifies that a Task posted via PostDelayedTaskWithTraits with parameterized
+// TaskTraits and a non-zero delay runs on a thread with the expected priority
+// and I/O restrictions after the delay expires. The ExecutionMode parameter is
+// ignored by this test.
+TEST_P(TaskSchedulerImplTest, PostDelayedTaskWithTraitsWithDelay) {
WaitableEvent task_ran(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
- scheduler_->PostTaskWithTraits(
+ scheduler_->PostDelayedTaskWithTraits(
FROM_HERE, GetParam().traits,
- Bind(&VerifyTaskEnvironementAndSignalEvent, GetParam().traits,
- Unretained(&task_ran)));
+ Bind(&VerifyTimeAndTaskEnvironmentAndSignalEvent, GetParam().traits,
+ TimeTicks::Now() + TestTimeouts::tiny_timeout(),
+ Unretained(&task_ran)),
+ TestTimeouts::tiny_timeout());
task_ran.Wait();
}
@@ -250,7 +273,7 @@ TEST_P(TaskSchedulerImplTest, PostTasksViaTaskRunner) {
const size_t kNumTasksPerTest = 150;
for (size_t i = 0; i < kNumTasksPerTest; ++i) {
factory.PostTask(test::TestTaskFactory::PostNestedTask::NO,
- Bind(&VerifyTaskEnvironement, GetParam().traits));
+ Bind(&VerifyTaskEnvironment, GetParam().traits));
}
factory.WaitForAllTasksToRun();
diff --git a/chromium/base/task_scheduler/task_tracker.cc b/chromium/base/task_scheduler/task_tracker.cc
index d06a84dc1f4..da21d5ea8ef 100644
--- a/chromium/base/task_scheduler/task_tracker.cc
+++ b/chromium/base/task_scheduler/task_tracker.cc
@@ -5,6 +5,7 @@
#include "base/task_scheduler/task_tracker.h"
#include <limits>
+#include <string>
#include "base/callback.h"
#include "base/debug/task_annotator.h"
@@ -14,9 +15,11 @@
#include "base/metrics/histogram_macros.h"
#include "base/sequence_token.h"
#include "base/synchronization/condition_variable.h"
+#include "base/task_scheduler/scoped_set_task_priority_for_current_thread.h"
#include "base/threading/sequenced_task_runner_handle.h"
#include "base/threading/thread_restrictions.h"
#include "base/threading/thread_task_runner_handle.h"
+#include "base/time/time.h"
#include "base/trace_event/trace_event.h"
#include "base/values.h"
@@ -70,6 +73,14 @@ const char kQueueFunctionName[] = "base::PostTask";
// its implementation details.
const char kRunFunctionName[] = "TaskSchedulerRunTask";
+HistogramBase* GetTaskLatencyHistogram(const char* suffix) {
+ // Mimics the UMA_HISTOGRAM_TIMES macro.
+ return Histogram::FactoryTimeGet(
+ std::string("TaskScheduler.TaskLatency.") + suffix,
+ TimeDelta::FromMilliseconds(1), TimeDelta::FromSeconds(10), 50,
+ HistogramBase::kUmaTargetedHistogramFlag);
+}
+
// Upper bound for the
// TaskScheduler.BlockShutdownTasksPostedDuringShutdown histogram.
const HistogramBase::Sample kMaxBlockShutdownTasksPostedDuringShutdown = 1000;
@@ -173,7 +184,20 @@ class TaskTracker::State {
TaskTracker::TaskTracker()
: state_(new State),
flush_cv_(flush_lock_.CreateConditionVariable()),
- shutdown_lock_(&flush_lock_) {}
+ shutdown_lock_(&flush_lock_),
+ task_latency_histograms_{
+ {GetTaskLatencyHistogram("BackgroundTaskPriority"),
+ GetTaskLatencyHistogram("BackgroundTaskPriority.MayBlock")},
+ {GetTaskLatencyHistogram("UserVisibleTaskPriority"),
+ GetTaskLatencyHistogram("UserVisibleTaskPriority.MayBlock")},
+ {GetTaskLatencyHistogram("UserBlockingTaskPriority"),
+ GetTaskLatencyHistogram("UserBlockingTaskPriority.MayBlock")}} {
+ // Confirm that all |task_latency_histograms_| have been initialized above.
+ DCHECK(*(&task_latency_histograms_[static_cast<int>(TaskPriority::HIGHEST) +
+ 1][0] -
+ 1));
+}
+
TaskTracker::~TaskTracker() = default;
void TaskTracker::Shutdown() {
@@ -219,17 +243,22 @@ bool TaskTracker::RunTask(std::unique_ptr<Task> task,
const bool is_delayed = !task->delayed_run_time.is_null();
if (can_run_task) {
- // All tasks run through here and the scheduler itself doesn't use
- // singletons. Therefore, it isn't necessary to reset the singleton allowed
- // bit after running the task.
- ThreadRestrictions::SetSingletonAllowed(
- task->traits.shutdown_behavior() !=
- TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN);
+ RecordTaskLatencyHistogram(task.get());
+
+ const bool previous_singleton_allowed =
+ ThreadRestrictions::SetSingletonAllowed(
+ task->traits.shutdown_behavior() !=
+ TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN);
+ const bool previous_io_allowed =
+ ThreadRestrictions::SetIOAllowed(task->traits.may_block());
+ const bool previous_wait_allowed = ThreadRestrictions::SetWaitAllowed(
+ task->traits.with_base_sync_primitives());
{
- // Set up SequenceToken as expected for the scope of the task.
ScopedSetSequenceTokenForCurrentThread
scoped_set_sequence_token_for_current_thread(sequence_token);
+ ScopedSetTaskPriorityForCurrentThread
+ scoped_set_task_priority_for_current_thread(task->traits.priority());
// Set up TaskRunnerHandle as expected for the scope of the task.
std::unique_ptr<SequencedTaskRunnerHandle> sequenced_task_runner_handle;
@@ -261,6 +290,10 @@ bool TaskTracker::RunTask(std::unique_ptr<Task> task,
PerformRunTask(std::move(task));
}
+ ThreadRestrictions::SetWaitAllowed(previous_wait_allowed);
+ ThreadRestrictions::SetIOAllowed(previous_io_allowed);
+ ThreadRestrictions::SetSingletonAllowed(previous_singleton_allowed);
+
AfterRunTask(shutdown_behavior);
}
@@ -280,6 +313,14 @@ bool TaskTracker::IsShutdownComplete() const {
}
void TaskTracker::SetHasShutdownStartedForTesting() {
+ AutoSchedulerLock auto_lock(shutdown_lock_);
+
+ // Create a dummy |shutdown_event_| to satisfy TaskTracker's expectation of
+ // its existence during shutdown (e.g. in OnBlockingShutdownTasksComplete()).
+ shutdown_event_.reset(
+ new WaitableEvent(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED));
+
state_->StartShutdown();
}
@@ -447,5 +488,15 @@ void TaskTracker::DecrementNumPendingUndelayedTasks() {
}
}
+void TaskTracker::RecordTaskLatencyHistogram(Task* task) {
+ const TimeDelta task_latency = TimeTicks::Now() - task->sequenced_time;
+ task_latency_histograms_[static_cast<int>(task->traits.priority())]
+ [task->traits.may_block() ||
+ task->traits.with_base_sync_primitives()
+ ? 1
+ : 0]
+ ->AddTime(task_latency);
+}
+
} // namespace internal
} // namespace base
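The constructor and RecordTaskLatencyHistogram() above pre-create one histogram per (priority, blocking) pair and index into that table at run time. A self-contained sketch of the same naming scheme, for illustration only (the local TaskPriority enum stands in for base::TaskPriority):

// Sketch, not Chromium source.
#include <string>

enum class TaskPriority { BACKGROUND = 0, USER_VISIBLE = 1, USER_BLOCKING = 2 };

// Mirrors GetTaskLatencyHistogram(): the suffix is the priority name, with
// ".MayBlock" appended when the task has MayBlock() or
// WithBaseSyncPrimitives().
std::string TaskLatencyHistogramName(TaskPriority priority, bool blocking) {
  static const char* const kPrioritySuffixes[] = {
      "BackgroundTaskPriority", "UserVisibleTaskPriority",
      "UserBlockingTaskPriority"};
  std::string name = "TaskScheduler.TaskLatency.";
  name += kPrioritySuffixes[static_cast<int>(priority)];
  if (blocking)
    name += ".MayBlock";
  return name;
}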
diff --git a/chromium/base/task_scheduler/task_tracker.h b/chromium/base/task_scheduler/task_tracker.h
index a5caf213985..14ca1f41932 100644
--- a/chromium/base/task_scheduler/task_tracker.h
+++ b/chromium/base/task_scheduler/task_tracker.h
@@ -21,13 +21,15 @@
namespace base {
class ConditionVariable;
+class HistogramBase;
class SequenceToken;
namespace internal {
// All tasks go through the scheduler's TaskTracker when they are posted and
-// when they are executed. The TaskTracker enforces shutdown semantics and takes
-// care of tracing and profiling. This class is thread-safe.
+// when they are executed. The TaskTracker sets up the environment to run tasks,
+// enforces shutdown semantics, records metrics, and takes care of tracing and
+// profiling. This class is thread-safe.
class BASE_EXPORT TaskTracker {
public:
TaskTracker();
@@ -106,6 +108,10 @@ class BASE_EXPORT TaskTracker {
// it reaches zero.
void DecrementNumPendingUndelayedTasks();
+ // Records the TaskScheduler.TaskLatency.[task priority].[may block] histogram
+ // for |task|.
+ void RecordTaskLatencyHistogram(Task* task);
+
// Number of tasks blocking shutdown and boolean indicating whether shutdown
// has started.
const std::unique_ptr<State> state_;
@@ -133,6 +139,12 @@ class BASE_EXPORT TaskTracker {
// completes.
std::unique_ptr<WaitableEvent> shutdown_event_;
+ // TaskScheduler.TaskLatency.[task priority].[may block] histograms. The first
+ // index is a TaskPriority. The second index is 0 for non-blocking tasks, 1
+ // for blocking tasks. Intentionally leaked.
+ HistogramBase* const
+ task_latency_histograms_[static_cast<int>(TaskPriority::HIGHEST) + 1][2];
+
// Number of BLOCK_SHUTDOWN tasks posted during shutdown.
HistogramBase::Sample num_block_shutdown_tasks_posted_during_shutdown_ = 0;
diff --git a/chromium/base/task_scheduler/task_tracker_unittest.cc b/chromium/base/task_scheduler/task_tracker_unittest.cc
index 3a1ff789442..85378b6234a 100644
--- a/chromium/base/task_scheduler/task_tracker_unittest.cc
+++ b/chromium/base/task_scheduler/task_tracker_unittest.cc
@@ -16,6 +16,9 @@
#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "base/memory/ref_counted.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/histogram_samples.h"
+#include "base/metrics/statistics_recorder.h"
#include "base/sequence_token.h"
#include "base/sequenced_task_runner.h"
#include "base/single_thread_task_runner.h"
@@ -25,6 +28,7 @@
#include "base/task_scheduler/task.h"
#include "base/task_scheduler/task_traits.h"
#include "base/test/gtest_util.h"
+#include "base/test/histogram_tester.h"
#include "base/test/test_simple_task_runner.h"
#include "base/test/test_timeouts.h"
#include "base/threading/platform_thread.h"
@@ -264,7 +268,8 @@ TEST_P(TaskSchedulerTaskTrackerTest, WillPostAndRunLongTaskBeforeShutdown) {
WaitableEvent::InitialState::NOT_SIGNALED);
auto blocked_task = base::MakeUnique<Task>(
FROM_HERE, Bind(&WaitableEvent::Wait, Unretained(&event)),
- TaskTraits().WithShutdownBehavior(GetParam()), TimeDelta());
+ TaskTraits().WithBaseSyncPrimitives().WithShutdownBehavior(GetParam()),
+ TimeDelta());
// Inform |task_tracker_| that |blocked_task| will be posted.
EXPECT_TRUE(tracker_.WillPostTask(blocked_task.get()));
@@ -431,6 +436,34 @@ TEST_P(TaskSchedulerTaskTrackerTest, SingletonAllowed) {
}
}
+// Verify that AssertIOAllowed() succeeds only for a MayBlock() task.
+TEST_P(TaskSchedulerTaskTrackerTest, IOAllowed) {
+ TaskTracker tracker;
+
+ // Unset the IO allowed bit. Expect TaskTracker to set it before running a
+ // task with the MayBlock() trait.
+ ThreadRestrictions::SetIOAllowed(false);
+ auto task_with_may_block = MakeUnique<Task>(
+ FROM_HERE, Bind([]() {
+ // Shouldn't fail.
+ ThreadRestrictions::AssertIOAllowed();
+ }),
+ TaskTraits().MayBlock().WithShutdownBehavior(GetParam()), TimeDelta());
+ EXPECT_TRUE(tracker.WillPostTask(task_with_may_block.get()));
+ tracker.RunTask(std::move(task_with_may_block), SequenceToken::Create());
+
+ // Set the IO allowed bit. Expect TaskTracker to unset it before running a
+ // task without the MayBlock() trait.
+ ThreadRestrictions::SetIOAllowed(true);
+ auto task_without_may_block = MakeUnique<Task>(
+ FROM_HERE, Bind([]() {
+ EXPECT_DCHECK_DEATH({ ThreadRestrictions::AssertIOAllowed(); });
+ }),
+ TaskTraits().WithShutdownBehavior(GetParam()), TimeDelta());
+ EXPECT_TRUE(tracker.WillPostTask(task_without_may_block.get()));
+ tracker.RunTask(std::move(task_without_may_block), SequenceToken::Create());
+}
+
static void RunTaskRunnerHandleVerificationTask(
TaskTracker* tracker,
std::unique_ptr<Task> verify_task) {
@@ -804,5 +837,104 @@ TEST_F(TaskSchedulerTaskTrackerTest, LoadWillPostAndRunDuringShutdown) {
WAIT_FOR_ASYNC_SHUTDOWN_COMPLETED();
}
+namespace {
+
+class WaitAllowedTestThread : public SimpleThread {
+ public:
+ WaitAllowedTestThread() : SimpleThread("WaitAllowedTestThread") {}
+
+ private:
+ void Run() override {
+ TaskTracker tracker;
+
+ // Waiting is allowed by default. Expect TaskTracker to disallow it before
+ // running a task without the WithBaseSyncPrimitives() trait.
+ ThreadRestrictions::AssertWaitAllowed();
+ auto task_without_sync_primitives = MakeUnique<Task>(
+ FROM_HERE, Bind([]() {
+ EXPECT_DCHECK_DEATH({ ThreadRestrictions::AssertWaitAllowed(); });
+ }),
+ TaskTraits(), TimeDelta());
+ EXPECT_TRUE(tracker.WillPostTask(task_without_sync_primitives.get()));
+ tracker.RunTask(std::move(task_without_sync_primitives),
+ SequenceToken::Create());
+
+ // Disallow waiting. Expect TaskTracker to allow it before running a task
+ // with the WithBaseSyncPrimitives() trait.
+ ThreadRestrictions::DisallowWaiting();
+ auto task_with_sync_primitives =
+ MakeUnique<Task>(FROM_HERE, Bind([]() {
+ // Shouldn't fail.
+ ThreadRestrictions::AssertWaitAllowed();
+ }),
+ TaskTraits().WithBaseSyncPrimitives(), TimeDelta());
+ EXPECT_TRUE(tracker.WillPostTask(task_with_sync_primitives.get()));
+ tracker.RunTask(std::move(task_with_sync_primitives),
+ SequenceToken::Create());
+ }
+
+ DISALLOW_COPY_AND_ASSIGN(WaitAllowedTestThread);
+};
+
+} // namespace
+
+// Verify that AssertWaitAllowed() succeeds only for a WithBaseSyncPrimitives()
+// task.
+TEST(TaskSchedulerTaskTrackerWaitAllowedTest, WaitAllowed) {
+ // Run the test on the separate thread since it is not possible to reset the
+ // "wait allowed" bit of a thread without being a friend of
+ // ThreadRestrictions.
+ WaitAllowedTestThread wait_allowed_test_thread;
+ wait_allowed_test_thread.Start();
+ wait_allowed_test_thread.Join();
+}
+
+// Verify that TaskScheduler.TaskLatency.* histograms are correctly recorded
+// when a task runs.
+TEST(TaskSchedulerTaskTrackerHistogramTest, TaskLatency) {
+ auto statistics_recorder = StatisticsRecorder::CreateTemporaryForTesting();
+
+ TaskTracker tracker;
+
+ struct {
+ const TaskTraits traits;
+ const char* const expected_histogram;
+ } tests[] = {
+ {TaskTraits().WithPriority(TaskPriority::BACKGROUND),
+ "TaskScheduler.TaskLatency.BackgroundTaskPriority"},
+ {TaskTraits().WithPriority(TaskPriority::BACKGROUND).MayBlock(),
+ "TaskScheduler.TaskLatency.BackgroundTaskPriority.MayBlock"},
+ {TaskTraits()
+ .WithPriority(TaskPriority::BACKGROUND)
+ .WithBaseSyncPrimitives(),
+ "TaskScheduler.TaskLatency.BackgroundTaskPriority.MayBlock"},
+ {TaskTraits().WithPriority(TaskPriority::USER_VISIBLE),
+ "TaskScheduler.TaskLatency.UserVisibleTaskPriority"},
+ {TaskTraits().WithPriority(TaskPriority::USER_VISIBLE).MayBlock(),
+ "TaskScheduler.TaskLatency.UserVisibleTaskPriority.MayBlock"},
+ {TaskTraits()
+ .WithPriority(TaskPriority::USER_VISIBLE)
+ .WithBaseSyncPrimitives(),
+ "TaskScheduler.TaskLatency.UserVisibleTaskPriority.MayBlock"},
+ {TaskTraits().WithPriority(TaskPriority::USER_BLOCKING),
+ "TaskScheduler.TaskLatency.UserBlockingTaskPriority"},
+ {TaskTraits().WithPriority(TaskPriority::USER_BLOCKING).MayBlock(),
+ "TaskScheduler.TaskLatency.UserBlockingTaskPriority.MayBlock"},
+ {TaskTraits()
+ .WithPriority(TaskPriority::USER_BLOCKING)
+ .WithBaseSyncPrimitives(),
+ "TaskScheduler.TaskLatency.UserBlockingTaskPriority.MayBlock"}};
+
+ for (const auto& test : tests) {
+ auto task =
+ MakeUnique<Task>(FROM_HERE, Bind(&DoNothing), test.traits, TimeDelta());
+ ASSERT_TRUE(tracker.WillPostTask(task.get()));
+
+ HistogramTester tester;
+ EXPECT_TRUE(tracker.RunTask(std::move(task), SequenceToken::Create()));
+ tester.ExpectTotalCount(test.expected_histogram, 1);
+ }
+}
+
} // namespace internal
} // namespace base
diff --git a/chromium/base/task_scheduler/task_traits.cc b/chromium/base/task_scheduler/task_traits.cc
index 10cbe68efcf..6acf3244f59 100644
--- a/chromium/base/task_scheduler/task_traits.cc
+++ b/chromium/base/task_scheduler/task_traits.cc
@@ -9,6 +9,7 @@
#include <ostream>
#include "base/logging.h"
+#include "base/task_scheduler/scoped_set_task_priority_for_current_thread.h"
namespace base {
@@ -16,14 +17,20 @@ namespace base {
// the header; anything else is subject to change. Tasks should explicitly
// request defaults if the behavior is critical to the task.
TaskTraits::TaskTraits()
- : with_file_io_(false),
- priority_(TaskPriority::BACKGROUND),
+ : may_block_(false),
+ with_base_sync_primitives_(false),
+ priority_(internal::GetTaskPriorityForCurrentThread()),
shutdown_behavior_(TaskShutdownBehavior::SKIP_ON_SHUTDOWN) {}
TaskTraits::~TaskTraits() = default;
-TaskTraits& TaskTraits::WithFileIO() {
- with_file_io_ = true;
+TaskTraits& TaskTraits::MayBlock() {
+ may_block_ = true;
+ return *this;
+}
+
+TaskTraits& TaskTraits::WithBaseSyncPrimitives() {
+ with_base_sync_primitives_ = true;
return *this;
}
diff --git a/chromium/base/task_scheduler/task_traits.h b/chromium/base/task_scheduler/task_traits.h
index 93b6d71e9c7..835465073e9 100644
--- a/chromium/base/task_scheduler/task_traits.h
+++ b/chromium/base/task_scheduler/task_traits.h
@@ -78,19 +78,56 @@ enum class TaskShutdownBehavior {
// Describes metadata for a single task or a group of tasks.
class BASE_EXPORT TaskTraits {
public:
- // Constructs a default TaskTraits for tasks with
- // (1) no I/O,
- // (2) low priority, and
+ // Constructs a default TaskTraits for tasks that
+ // (1) do not make blocking calls,
+ // (2) can inherit their priority from the calling context, and
// (3) may block shutdown or be skipped on shutdown.
- // Tasks that require stricter guarantees should highlight those by requesting
+ // Tasks that require stricter guarantees and/or know the specific
+ // TaskPriority appropriate for them should highlight those by requesting
// explicit traits below.
TaskTraits();
TaskTraits(const TaskTraits& other) = default;
TaskTraits& operator=(const TaskTraits& other) = default;
~TaskTraits();
- // Allows tasks with these traits to do file I/O.
- TaskTraits& WithFileIO();
+ // Tasks with this trait may block. This includes but is not limited to tasks
+ // that wait on synchronous file I/O operations: read or write a file from
+ // disk, interact with a pipe or a socket, rename or delete a file, enumerate
+ // files in a directory, etc. This trait isn't required for the mere use of
+ // locks. For tasks that block on base/ synchronization primitives, see
+ // WithBaseSyncPrimitives().
+ TaskTraits& MayBlock();
+
+ // Tasks with this trait will pass base::AssertWaitAllowed(), i.e. will be
+ // allowed to call the following methods:
+ // - base::WaitableEvent::Wait
+ // - base::ConditionVariable::Wait
+ // - base::PlatformThread::Join
+ // - base::PlatformThread::Sleep
+ // - base::Process::WaitForExit
+ // - base::Process::WaitForExitWithTimeout
+ //
+ // Tasks should generally not use these methods.
+ //
+ // Instead of waiting on a WaitableEvent or a ConditionVariable, put the work
+ // that should happen after the wait in a callback and post that callback from
+ // where the WaitableEvent or ConditionVariable would have been signaled. If
+ // something needs to be scheduled after many tasks have executed, use
+ // base::BarrierClosure.
+ //
+ // Avoid creating threads. Instead, use
+ // base::Create(Sequenced|SingleThreaded)TaskRunnerWithTraits(). If a thread is
+ // really needed, make it non-joinable and add cleanup work at the end of the
+ // thread's main function (if using base::Thread, override Cleanup()).
+ //
+ // On Windows, join processes asynchronously using base::win::ObjectWatcher.
+ //
+ // MayBlock() must be specified in conjunction with this trait if and only if
+ // the labeled tasks, even with all usage of the methods listed above removed,
+ // would still be tasks that may block (per MayBlock()'s definition).
+ //
+ // If in doubt, consult with base/task_scheduler/OWNERS.
+ TaskTraits& WithBaseSyncPrimitives();
// Applies |priority| to tasks with these traits.
TaskTraits& WithPriority(TaskPriority priority);
@@ -98,8 +135,11 @@ class BASE_EXPORT TaskTraits {
// Applies |shutdown_behavior| to tasks with these traits.
TaskTraits& WithShutdownBehavior(TaskShutdownBehavior shutdown_behavior);
- // Returns true if file I/O is allowed by these traits.
- bool with_file_io() const { return with_file_io_; }
+ // Returns true if tasks with these traits may block.
+ bool may_block() const { return may_block_; }
+
+ // Returns true if tasks with these traits may use base/ sync primitives.
+ bool with_base_sync_primitives() const { return with_base_sync_primitives_; }
// Returns the priority of tasks with these traits.
TaskPriority priority() const { return priority_; }
@@ -108,7 +148,8 @@ class BASE_EXPORT TaskTraits {
TaskShutdownBehavior shutdown_behavior() const { return shutdown_behavior_; }
private:
- bool with_file_io_;
+ bool may_block_;
+ bool with_base_sync_primitives_;
TaskPriority priority_;
TaskShutdownBehavior shutdown_behavior_;
};
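A hedged usage sketch of the guidance above (illustrative only; OnAllPartsDone() and ProcessPart() are made up, and base::BarrierClosure from base/barrier_closure.h is used as the suggested alternative to waiting):

// Sketch, not Chromium source.
#include "base/barrier_closure.h"
#include "base/bind.h"
#include "base/callback.h"
#include "base/task_scheduler/task_traits.h"

void OnAllPartsDone() {}           // Hypothetical continuation.
void ProcessPart(int /*part*/) {}  // Hypothetical unit of work.

// Low-priority traits for a task that reads files and may be skipped on
// shutdown.
base::TaskTraits BlockingBackgroundTraits() {
  return base::TaskTraits()
      .MayBlock()
      .WithPriority(base::TaskPriority::BACKGROUND)
      .WithShutdownBehavior(base::TaskShutdownBehavior::SKIP_ON_SHUTDOWN);
}

// Instead of blocking on a WaitableEvent (which would require
// WithBaseSyncPrimitives()), run the continuation once every part has
// signaled the barrier.
void FanOutWithoutWaiting(int num_parts) {
  base::Closure barrier =
      base::BarrierClosure(num_parts, base::Bind(&OnAllPartsDone));
  for (int i = 0; i < num_parts; ++i) {
    ProcessPart(i);  // In real code each part would be a posted task.
    barrier.Run();   // Each completed part runs the barrier once.
  }
}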
diff --git a/chromium/base/task_scheduler/task_traits_unittest.cc b/chromium/base/task_scheduler/task_traits_unittest.cc
new file mode 100644
index 00000000000..fed3f9963e8
--- /dev/null
+++ b/chromium/base/task_scheduler/task_traits_unittest.cc
@@ -0,0 +1,32 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/task_traits.h"
+
+#include "base/task_scheduler/scoped_set_task_priority_for_current_thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+// Verify that TaskTraits is initialized with the priority of the task running
+// on the current thread.
+TEST(TaskSchedulerTaskTraitsTest, DefaultPriority) {
+ {
+ internal::ScopedSetTaskPriorityForCurrentThread scope(
+ TaskPriority::BACKGROUND);
+ EXPECT_EQ(TaskPriority::BACKGROUND, TaskTraits().priority());
+ }
+ {
+ internal::ScopedSetTaskPriorityForCurrentThread scope(
+ TaskPriority::USER_VISIBLE);
+ EXPECT_EQ(TaskPriority::USER_VISIBLE, TaskTraits().priority());
+ }
+ {
+ internal::ScopedSetTaskPriorityForCurrentThread scope(
+ TaskPriority::USER_BLOCKING);
+ EXPECT_EQ(TaskPriority::USER_BLOCKING, TaskTraits().priority());
+ }
+}
+
+} // namespace base
diff --git a/chromium/base/template_util.h b/chromium/base/template_util.h
index 1bfc1ac814a..42552107cfb 100644
--- a/chromium/base/template_util.h
+++ b/chromium/base/template_util.h
@@ -23,6 +23,28 @@
#define CR_USE_FALLBACKS_FOR_OLD_GLIBCXX
#endif
+// Some versions of libstdc++ have partial support for type_traits but miss
+// a smaller subset while removing some of the older non-standard stuff. Assume
+// that all versions below 5.0 fall in this category, along with one 5.0
+// experimental release. Test for this by consulting compiler major version,
+// the only reliable option available, so theoretically this could fail should
+// you attempt to mix an earlier version of libstdc++ with >= GCC5. But
+// that's unlikely to work out, especially as GCC5 changed ABI.
+#define CR_GLIBCXX_5_0_0 20150123
+#if (defined(__GNUC__) && __GNUC__ < 5) || \
+ (defined(__GLIBCXX__) && __GLIBCXX__ == CR_GLIBCXX_5_0_0)
+#define CR_USE_FALLBACKS_FOR_OLD_EXPERIMENTAL_GLIBCXX
+#endif
+
+// This hacks around using gcc with libc++, which has some incompatibilities.
+// - is_trivially_* doesn't work: https://llvm.org/bugs/show_bug.cgi?id=27538
+// TODO(danakj): Remove this when android builders are all using a newer version
+// of gcc, or the android ndk is updated to a newer libc++ that works with older
+// gcc versions.
+#if !defined(__clang__) && defined(_LIBCPP_VERSION)
+#define CR_USE_FALLBACKS_FOR_GCC_WITH_LIBCXX
+#endif
+
namespace base {
template <class T> struct is_non_const_reference : std::false_type {};
@@ -126,8 +148,53 @@ template <class T>
using is_trivially_destructible = std::is_trivially_destructible<T>;
#endif
+// is_trivially_copyable is especially hard to get right.
+// - Older versions of libstdc++ will fail to have it like they do for other
+// type traits. In this case we should provide it based on compiler
+// intrinsics. This is covered by the CR_USE_FALLBACKS_FOR_OLD_GLIBCXX define.
+// - An experimental release of gcc includes most of type_traits but misses
+// is_trivially_copyable, so we still have to avoid using libstdc++ in this
+// case, which is covered by CR_USE_FALLBACKS_FOR_OLD_EXPERIMENTAL_GLIBCXX.
+// - When compiling libc++ from before r239653, with a gcc compiler, the
+// std::is_trivially_copyable can fail. So we need to work around that by not
+// using the one in libc++ in this case. This is covered by the
+// CR_USE_FALLBACKS_FOR_GCC_WITH_LIBCXX define, and is discussed in
+// https://llvm.org/bugs/show_bug.cgi?id=27538#c1 where they point out that
+// in libc++'s commit r239653 this is fixed by libc++ checking for gcc 5.1.
+// - In both of the above cases we are using the gcc compiler. When defining
+// this ourselves on compiler intrinsics, the __is_trivially_copyable()
+// intrinsic is not available on gcc before version 5.1 (see the discussion in
+// https://llvm.org/bugs/show_bug.cgi?id=27538#c1 again), so we must check for
+// that version.
+// - When __is_trivially_copyable() is not available because we are on gcc older
+// than 5.1, we need to fall back to something, so we use __has_trivial_copy()
+// instead based on what was done one-off in bit_cast() previously.
+
+// TODO(crbug.com/554293): Remove this when all platforms have this in the std
+// namespace and it works with gcc as needed.
+#if defined(CR_USE_FALLBACKS_FOR_OLD_GLIBCXX) || \
+ defined(CR_USE_FALLBACKS_FOR_OLD_EXPERIMENTAL_GLIBCXX) || \
+ defined(CR_USE_FALLBACKS_FOR_GCC_WITH_LIBCXX)
+template <typename T>
+struct is_trivially_copyable {
+// TODO(danakj): Remove this when android builders are all using a newer version
+// of gcc, or the android ndk is updated to a newer libc++ that does this for
+// us.
+#if _GNUC_VER >= 501
+ static constexpr bool value = __is_trivially_copyable(T);
+#else
+ static constexpr bool value = __has_trivial_copy(T);
+#endif
+};
+#else
+template <class T>
+using is_trivially_copyable = std::is_trivially_copyable<T>;
+#endif
+
} // namespace base
#undef CR_USE_FALLBACKS_FOR_OLD_GLIBCXX
+#undef CR_USE_FALLBACKS_FOR_GCC_WITH_LIBCXX
+#undef CR_USE_FALLBACKS_FOR_OLD_EXPERIMENTAL_GLIBCXX
#endif // BASE_TEMPLATE_UTIL_H_
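To show why callers care which branch of the #if ladder above is taken, here is a standalone sketch of the kind of bit_cast-style use that relies on the trait; it uses only standard C++ and local names, so it is an illustration rather than Chromium code:

// Sketch, not Chromium source.
#include <cstring>
#include <type_traits>

// Stand-in for base::is_trivially_copyable; on a modern toolchain the alias in
// the non-fallback branch above resolves to exactly this std trait.
template <class T>
using is_trivially_copyable_sketch = std::is_trivially_copyable<T>;

template <class Dest, class Source>
Dest BitCastSketch(const Source& source) {
  static_assert(sizeof(Dest) == sizeof(Source), "sizes must match");
  static_assert(is_trivially_copyable_sketch<Source>::value,
                "source must be trivially copyable");
  static_assert(is_trivially_copyable_sketch<Dest>::value,
                "destination must be trivially copyable");
  Dest dest;
  std::memcpy(&dest, &source, sizeof(dest));
  return dest;
}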
diff --git a/chromium/base/test/BUILD.gn b/chromium/base/test/BUILD.gn
index 89b7028c117..256cae257f3 100644
--- a/chromium/base/test/BUILD.gn
+++ b/chromium/base/test/BUILD.gn
@@ -48,6 +48,7 @@ static_library("test_support") {
"launcher/test_result.h",
"launcher/test_results_tracker.h",
"launcher/unit_test_launcher.h",
+ "mock_callback.h",
"mock_chrome_application_mac.h",
"mock_chrome_application_mac.mm",
"mock_devices_changed_observer.cc",
@@ -56,7 +57,9 @@ static_library("test_support") {
"mock_entropy_provider.h",
"mock_log.cc",
"mock_log.h",
+ "multiprocess_test.cc",
"multiprocess_test.h",
+ "multiprocess_test_android.cc",
"null_task_runner.cc",
"null_task_runner.h",
"opaque_ref_counted.cc",
@@ -69,6 +72,8 @@ static_library("test_support") {
"perf_time_logger.h",
"power_monitor_test_base.cc",
"power_monitor_test_base.h",
+ "scoped_async_task_scheduler.cc",
+ "scoped_async_task_scheduler.h",
"scoped_command_line.cc",
"scoped_command_line.h",
"scoped_feature_list.cc",
@@ -79,6 +84,8 @@ static_library("test_support") {
"scoped_mock_time_message_loop_task_runner.h",
"scoped_path_override.cc",
"scoped_path_override.h",
+ "scoped_task_scheduler.cc",
+ "scoped_task_scheduler.h",
"sequenced_task_runner_test_template.cc",
"sequenced_task_runner_test_template.h",
"sequenced_worker_pool_owner.cc",
@@ -144,8 +151,6 @@ static_library("test_support") {
"launcher/test_launcher_tracer.h",
"launcher/test_results_tracker.cc",
"launcher/unit_test_launcher.cc",
- "multiprocess_test.cc",
- "multiprocess_test_android.cc",
]
}
@@ -189,7 +194,11 @@ static_library("test_support") {
}
if (is_android) {
- deps += [ ":base_unittests_jni_headers" ]
+ deps += [
+ ":base_unittests_jni_headers",
+ ":test_support_jni_headers",
+ ]
+ public_deps += [ ":test_support_java" ]
}
if (is_nacl_nonsfi) {
@@ -325,4 +334,42 @@ if (is_android) {
]
jni_package = "base"
}
+
+ generate_jni("test_support_jni_headers") {
+ sources = [
+ "android/java/src/org/chromium/base/MainReturnCodeResult.java",
+ "android/java/src/org/chromium/base/MultiprocessTestClientLauncher.java",
+ ]
+ jni_package = "base"
+ }
+
+ android_library("test_support_java") {
+ testonly = true
+ deps = [
+ "//base:base_java",
+ "//testing/android/native_test:native_main_runner_java",
+ "//third_party/android_tools:android_support_annotations_java",
+ "//third_party/jsr-305:jsr_305_javalib",
+ ]
+ srcjar_deps = [ ":test_support_java_aidl" ]
+ java_files = [
+ "android/java/src/org/chromium/base/FileDescriptorInfo.java",
+ "android/java/src/org/chromium/base/MainReturnCodeResult.java",
+ "android/java/src/org/chromium/base/MultiprocessTestClientLauncher.java",
+ "android/java/src/org/chromium/base/MultiprocessTestClientService.java",
+ "android/java/src/org/chromium/base/MultiprocessTestClientService0.java",
+ "android/java/src/org/chromium/base/MultiprocessTestClientService1.java",
+ "android/java/src/org/chromium/base/MultiprocessTestClientService2.java",
+ "android/java/src/org/chromium/base/MultiprocessTestClientService3.java",
+ "android/java/src/org/chromium/base/MultiprocessTestClientService4.java",
+ ]
+ }
+
+ android_aidl("test_support_java_aidl") {
+ testonly = true
+ import_include = "android/java/src"
+ sources = [
+ "android/java/src/org/chromium/base/ITestClient.aidl",
+ ]
+ }
}
diff --git a/chromium/base/third_party/libevent/BUILD.gn b/chromium/base/third_party/libevent/BUILD.gn
index 5dd0f7070e8..e934454a10f 100644
--- a/chromium/base/third_party/libevent/BUILD.gn
+++ b/chromium/base/third_party/libevent/BUILD.gn
@@ -9,28 +9,53 @@ static_library("libevent") {
"buffer.c",
"evbuffer.c",
"evdns.c",
+ "evdns.h",
+ "event-config.h",
+ "event-internal.h",
"event.c",
+ "event.h",
"event_tagging.c",
+ "evhttp.h",
+ "evrpc-internal.h",
"evrpc.c",
+ "evrpc.h",
+ "evsignal.h",
"evutil.c",
+ "evutil.h",
+ "http-internal.h",
"http.c",
"log.c",
+ "log.h",
+ "min_heap.h",
"poll.c",
"select.c",
"signal.c",
+ "strlcpy-internal.h",
"strlcpy.c",
]
defines = [ "HAVE_CONFIG_H" ]
if (is_mac || is_ios) {
- sources += [ "kqueue.c" ]
+ sources += [
+ "kqueue.c",
+ "mac/config.h",
+ "mac/event-config.h",
+ ]
include_dirs = [ "mac" ]
} else if (is_linux) {
- sources += [ "epoll.c" ]
+ sources += [
+ "epoll.c",
+ "linux/config.h",
+ "linux/event-config.h",
+ ]
include_dirs = [ "linux" ]
} else if (is_android) {
- sources += [ "epoll.c" ]
+ sources += [
+ "android/config.h",
+ "android/event-config.h",
+ "epoll.c",
+ ]
include_dirs = [ "android" ]
} else if (is_nacl_nonsfi) {
sources -= [
diff --git a/chromium/base/third_party/symbolize/symbolize.cc b/chromium/base/third_party/symbolize/symbolize.cc
index d7678956c87..0932e64abdd 100644
--- a/chromium/base/third_party/symbolize/symbolize.cc
+++ b/chromium/base/third_party/symbolize/symbolize.cc
@@ -56,6 +56,7 @@
#if defined(HAVE_SYMBOLIZE)
+#include <algorithm>
#include <limits>
#include "symbolize.h"
@@ -296,10 +297,12 @@ FindSymbol(uint64_t pc, const int fd, char *out, int out_size,
// Read at most NUM_SYMBOLS symbols at once to save read() calls.
ElfW(Sym) buf[NUM_SYMBOLS];
- const ssize_t len = ReadFromOffset(fd, &buf, sizeof(buf), offset);
+ int num_symbols_to_read = std::min(NUM_SYMBOLS, num_symbols - i);
+ const ssize_t len =
+ ReadFromOffset(fd, &buf, sizeof(buf[0]) * num_symbols_to_read, offset);
SAFE_ASSERT(len % sizeof(buf[0]) == 0);
const ssize_t num_symbols_in_buf = len / sizeof(buf[0]);
- SAFE_ASSERT(num_symbols_in_buf <= sizeof(buf)/sizeof(buf[0]));
+ SAFE_ASSERT(num_symbols_in_buf <= num_symbols_to_read);
for (int j = 0; j < num_symbols_in_buf; ++j) {
const ElfW(Sym)& symbol = buf[j];
uint64_t start_address = symbol.st_value;
diff --git a/chromium/base/threading/sequenced_task_runner_handle.cc b/chromium/base/threading/sequenced_task_runner_handle.cc
index 53f3261d9f7..90f68b33ab1 100644
--- a/chromium/base/threading/sequenced_task_runner_handle.cc
+++ b/chromium/base/threading/sequenced_task_runner_handle.cc
@@ -16,45 +16,56 @@ namespace base {
namespace {
-base::LazyInstance<base::ThreadLocalPointer<SequencedTaskRunnerHandle>>::Leaky
+LazyInstance<ThreadLocalPointer<SequencedTaskRunnerHandle>>::Leaky
lazy_tls_ptr = LAZY_INSTANCE_INITIALIZER;
} // namespace
// static
scoped_refptr<SequencedTaskRunner> SequencedTaskRunnerHandle::Get() {
+ // Return the registered SingleThreadTaskRunner, if any. This must be at the
+ // top so that a SingleThreadTaskRunner has priority over a
+ // SequencedTaskRunner (RLZ registers both on the same thread despite that
+ // being prevented by DCHECKs).
+ // TODO(fdoray): Move this to the bottom once RLZ stops registering a
+ // SingleThreadTaskRunner and a SequencedTaskRunner on the same thread.
+ // https://crbug.com/618530#c14
+ if (ThreadTaskRunnerHandle::IsSet()) {
+ // Various modes of setting SequencedTaskRunnerHandle don't combine.
+ DCHECK(!lazy_tls_ptr.Pointer()->Get());
+ DCHECK(!SequencedWorkerPool::GetSequenceTokenForCurrentThread().IsValid());
+
+ return ThreadTaskRunnerHandle::Get();
+ }
+
// Return the registered SequencedTaskRunner, if any.
const SequencedTaskRunnerHandle* handle = lazy_tls_ptr.Pointer()->Get();
if (handle) {
// Various modes of setting SequencedTaskRunnerHandle don't combine.
- DCHECK(!base::ThreadTaskRunnerHandle::IsSet());
DCHECK(!SequencedWorkerPool::GetSequenceTokenForCurrentThread().IsValid());
+
return handle->task_runner_;
}
// If we are on a worker thread for a SequencedWorkerPool that is running a
// sequenced task, return a SequencedTaskRunner for it.
- scoped_refptr<base::SequencedWorkerPool> pool =
+ scoped_refptr<SequencedWorkerPool> pool =
SequencedWorkerPool::GetWorkerPoolForCurrentThread();
- if (pool) {
- SequencedWorkerPool::SequenceToken sequence_token =
- SequencedWorkerPool::GetSequenceTokenForCurrentThread();
- DCHECK(sequence_token.IsValid());
- scoped_refptr<SequencedTaskRunner> sequenced_task_runner(
- pool->GetSequencedTaskRunner(sequence_token));
- DCHECK(sequenced_task_runner->RunsTasksOnCurrentThread());
- return sequenced_task_runner;
- }
-
- // Return the SingleThreadTaskRunner for the current thread otherwise.
- return base::ThreadTaskRunnerHandle::Get();
+ DCHECK(pool);
+ SequencedWorkerPool::SequenceToken sequence_token =
+ SequencedWorkerPool::GetSequenceTokenForCurrentThread();
+ DCHECK(sequence_token.IsValid());
+ scoped_refptr<SequencedTaskRunner> sequenced_task_runner(
+ pool->GetSequencedTaskRunner(sequence_token));
+ DCHECK(sequenced_task_runner->RunsTasksOnCurrentThread());
+ return sequenced_task_runner;
}
// static
bool SequencedTaskRunnerHandle::IsSet() {
return lazy_tls_ptr.Pointer()->Get() ||
SequencedWorkerPool::GetSequenceTokenForCurrentThread().IsValid() ||
- base::ThreadTaskRunnerHandle::IsSet();
+ ThreadTaskRunnerHandle::IsSet();
}
SequencedTaskRunnerHandle::SequencedTaskRunnerHandle(
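From a caller's perspective the reordering above does not change how the handle is used; a typical call site still looks like this sketch (DoFollowUpWork() is made up, and the function must run where IsSet() already holds):

// Sketch, not Chromium source.
#include "base/bind.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/threading/sequenced_task_runner_handle.h"

void DoFollowUpWork() {}  // Hypothetical follow-up.

void PostFollowUp() {
  // Valid in any context where IsSet() is true: a registered handle, a
  // SequencedWorkerPool sequence, or a thread with a ThreadTaskRunnerHandle.
  DCHECK(base::SequencedTaskRunnerHandle::IsSet());
  base::SequencedTaskRunnerHandle::Get()->PostTask(
      FROM_HERE, base::Bind(&DoFollowUpWork));
}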
diff --git a/chromium/base/threading/sequenced_worker_pool.cc b/chromium/base/threading/sequenced_worker_pool.cc
index 360fb4a537f..badd2936ee2 100644
--- a/chromium/base/threading/sequenced_worker_pool.cc
+++ b/chromium/base/threading/sequenced_worker_pool.cc
@@ -15,10 +15,10 @@
#include <vector>
#include "base/atomic_sequence_num.h"
-#include "base/atomicops.h"
#include "base/callback.h"
#include "base/compiler_specific.h"
#include "base/critical_closure.h"
+#include "base/debug/dump_without_crashing.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/macros.h"
@@ -54,32 +54,28 @@ namespace base {
namespace {
-// An enum representing the state of all pools. Any given non-test process
-// should only ever transition from NONE_ACTIVE to one of the active states.
-// Transitions between actives states are unexpected. The
-// REDIRECTED_TO_TASK_SCHEDULER transition occurs when
-// RedirectToTaskSchedulerForProcess() is called. The WORKER_CREATED transition
-// occurs when a Worker needs to be created because the first task was posted
-// and the state is still NONE_ACTIVE. In a test process, a transition to
-// NONE_ACTIVE occurs when ResetRedirectToTaskSchedulerForProcessForTesting() is
-// called.
+// An enum representing the state of all pools. A non-test process should only
+// ever transition from POST_TASK_DISABLED to one of the active states. A test
+// process may transition from one of the active states to POST_TASK_DISABLED
+// when DisableForProcessForTesting() is called.
//
-// |g_all_pools_state| uses relaxed atomic operations to ensure no data race
-// between reads/writes, strict memory ordering isn't required per no other
-// state being inferred from its value. Explicit synchronization (e.g. locks or
-// events) would be overkill (it's fine for other threads to still see
-// NONE_ACTIVE after the first Worker was created -- this is not possible for
-// REDIRECTED_TO_TASK_SCHEDULER per its API requesting to be invoked while no
-// other threads are active).
+// External memory synchronization is required to call a method that reads
+// |g_all_pools_state| after calling a method that modifies it.
//
// TODO(gab): Remove this if http://crbug.com/622400 fails (SequencedWorkerPool
// will be phased out completely otherwise).
-enum AllPoolsState : subtle::Atomic32 {
- NONE_ACTIVE,
- WORKER_CREATED,
+enum class AllPoolsState {
+ POST_TASK_DISABLED,
+ USE_WORKER_POOL,
REDIRECTED_TO_TASK_SCHEDULER,
};
-subtle::Atomic32 g_all_pools_state = AllPoolsState::NONE_ACTIVE;
+
+// TODO(fdoray): Change the initial state to POST_TASK_DISABLED. It is initially
+// USE_WORKER_POOL to avoid a revert of the CL that adds
+// debug::DumpWithoutCrashing() in case of waterfall failures.
+AllPoolsState g_all_pools_state = AllPoolsState::USE_WORKER_POOL;
+
+TaskPriority g_max_task_priority = TaskPriority::HIGHEST;
struct SequencedTask : public TrackingInfo {
SequencedTask()
@@ -387,6 +383,12 @@ class SequencedWorkerPool::Inner {
CLEANUP_DONE,
};
+ // Clears ScheduledTasks in |tasks_to_delete| while ensuring that
+ // |this_worker| has the desired task info context during ~ScheduledTask() to
+ // allow sequence-checking.
+ void DeleteWithoutLock(std::vector<SequencedTask>* tasks_to_delete,
+ Worker* this_worker);
+
// Helper used by PostTask() to complete the work when redirection is on.
// Returns true if the task may run at some point in the future and false if
// it will definitely not run.
@@ -424,7 +426,7 @@ class SequencedWorkerPool::Inner {
// See the implementation for a more detailed description.
GetWorkStatus GetWork(SequencedTask* task,
TimeDelta* wait_time,
- std::vector<Closure>* delete_these_outside_lock);
+ std::vector<SequencedTask>* delete_these_outside_lock);
void HandleCleanup();
@@ -582,8 +584,7 @@ SequencedWorkerPool::Worker::Worker(
worker_pool_(std::move(worker_pool)),
task_shutdown_behavior_(BLOCK_SHUTDOWN),
is_processing_task_(false) {
- DCHECK_EQ(AllPoolsState::WORKER_CREATED,
- subtle::NoBarrier_Load(&g_all_pools_state));
+ DCHECK_EQ(AllPoolsState::USE_WORKER_POOL, g_all_pools_state);
Start();
}
@@ -591,8 +592,7 @@ SequencedWorkerPool::Worker::~Worker() {
}
void SequencedWorkerPool::Worker::Run() {
- DCHECK_EQ(AllPoolsState::WORKER_CREATED,
- subtle::NoBarrier_Load(&g_all_pools_state));
+ DCHECK_EQ(AllPoolsState::USE_WORKER_POOL, g_all_pools_state);
#if defined(OS_WIN)
win::ScopedCOMInitializer com_initializer;
@@ -652,7 +652,10 @@ SequencedWorkerPool::Inner::Inner(SequencedWorkerPool* worker_pool,
cleanup_idlers_(0),
cleanup_cv_(&lock_),
testing_observer_(observer),
- task_priority_(task_priority) {
+ task_priority_(static_cast<int>(task_priority) <=
+ static_cast<int>(g_max_task_priority)
+ ? task_priority
+ : g_max_task_priority) {
DCHECK_GT(max_threads_, 1U);
}
@@ -691,6 +694,13 @@ bool SequencedWorkerPool::Inner::PostTask(
const tracked_objects::Location& from_here,
const Closure& task,
TimeDelta delay) {
+ // TODO(fdoray): Uncomment this DCHECK. It is initially commented to avoid a
+ // revert of the CL that adds debug::DumpWithoutCrashing() if it fails on the
+ // waterfall. https://crbug.com/622400
+ // DCHECK_NE(AllPoolsState::POST_TASK_DISABLED, g_all_pools_state);
+ if (g_all_pools_state == AllPoolsState::POST_TASK_DISABLED)
+ debug::DumpWithoutCrashing();
+
DCHECK(delay.is_zero() || shutdown_behavior == SKIP_ON_SHUTDOWN);
SequencedTask sequenced(from_here);
sequenced.sequence_token_id = sequence_token.id_;
@@ -740,8 +750,7 @@ bool SequencedWorkerPool::Inner::PostTask(
if (optional_token_name)
sequenced.sequence_token_id = LockedGetNamedTokenID(*optional_token_name);
- if (subtle::NoBarrier_Load(&g_all_pools_state) ==
- AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) {
+ if (g_all_pools_state == AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) {
if (!PostTaskToTaskScheduler(sequenced, delay))
return false;
} else {
@@ -754,8 +763,10 @@ bool SequencedWorkerPool::Inner::PostTask(
}
}
- if (subtle::NoBarrier_Load(&g_all_pools_state) !=
- AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) {
+ // Use != REDIRECTED_TO_TASK_SCHEDULER instead of == USE_WORKER_POOL to ensure
+ // correct behavior if a task is posted to a SequencedWorkerPool before
+ // Enable(WithRedirectionToTaskScheduler)ForProcess() in a non-DCHECK build.
+ if (g_all_pools_state != AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) {
// Actually start the additional thread or signal an existing one outside
// the lock.
if (create_thread_id)
@@ -769,8 +780,7 @@ bool SequencedWorkerPool::Inner::PostTask(
AutoLock lock_for_dcheck(lock_);
// Some variables are exposed in both modes for convenience but only really
// intended for one of them at runtime, confirm exclusive usage here.
- if (subtle::NoBarrier_Load(&g_all_pools_state) ==
- AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) {
+ if (g_all_pools_state == AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) {
DCHECK(pending_tasks_.empty());
DCHECK_EQ(0, create_thread_id);
} else {
@@ -785,8 +795,7 @@ bool SequencedWorkerPool::Inner::PostTask(
bool SequencedWorkerPool::Inner::PostTaskToTaskScheduler(
const SequencedTask& sequenced,
const TimeDelta& delay) {
- DCHECK_EQ(AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER,
- subtle::NoBarrier_Load(&g_all_pools_state));
+ DCHECK_EQ(AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER, g_all_pools_state);
lock_.AssertAcquired();
@@ -809,7 +818,8 @@ bool SequencedWorkerPool::Inner::PostTaskToTaskScheduler(
const TaskShutdownBehavior task_shutdown_behavior =
static_cast<TaskShutdownBehavior>(sequenced.shutdown_behavior);
const TaskTraits traits = TaskTraits()
- .WithFileIO()
+ .MayBlock()
+ .WithBaseSyncPrimitives()
.WithPriority(task_priority_)
.WithShutdownBehavior(task_shutdown_behavior);
return GetTaskSchedulerTaskRunner(sequenced.sequence_token_id, traits)
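Note: the traits chained above can also be spelled out as a standalone value. This is only a sketch against the TaskTraits builder API visible in this hunk, with an arbitrary priority and shutdown behavior chosen for illustration:

  const TaskTraits example_traits =
      TaskTraits()
          .MayBlock()                    // replaces the old WithFileIO()
          .WithBaseSyncPrimitives()
          .WithPriority(TaskPriority::USER_VISIBLE)
          .WithShutdownBehavior(TaskShutdownBehavior::SKIP_ON_SHUTDOWN);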
@@ -820,8 +830,7 @@ scoped_refptr<TaskRunner>
SequencedWorkerPool::Inner::GetTaskSchedulerTaskRunner(
int sequence_token_id,
const TaskTraits& traits) {
- DCHECK_EQ(AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER,
- subtle::NoBarrier_Load(&g_all_pools_state));
+ DCHECK_EQ(AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER, g_all_pools_state);
lock_.AssertAcquired();
@@ -858,11 +867,11 @@ SequencedWorkerPool::Inner::GetTaskSchedulerTaskRunner(
bool SequencedWorkerPool::Inner::RunsTasksOnCurrentThread() const {
AutoLock lock(lock_);
- if (subtle::NoBarrier_Load(&g_all_pools_state) ==
- AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) {
+ if (g_all_pools_state == AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) {
if (!runs_tasks_on_verifier_) {
runs_tasks_on_verifier_ = CreateTaskRunnerWithTraits(
- TaskTraits().WithFileIO().WithPriority(task_priority_));
+ TaskTraits().MayBlock().WithBaseSyncPrimitives().WithPriority(
+ task_priority_));
}
return runs_tasks_on_verifier_->RunsTasksOnCurrentThread();
} else {
@@ -876,8 +885,7 @@ bool SequencedWorkerPool::Inner::IsRunningSequenceOnCurrentThread(
AutoLock lock(lock_);
- if (subtle::NoBarrier_Load(&g_all_pools_state) ==
- AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) {
+ if (g_all_pools_state == AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) {
const auto sequenced_task_runner_it =
sequenced_task_runner_map_.find(sequence_token.id_);
return sequenced_task_runner_it != sequenced_task_runner_map_.end() &&
@@ -892,8 +900,7 @@ bool SequencedWorkerPool::Inner::IsRunningSequenceOnCurrentThread(
// See https://code.google.com/p/chromium/issues/detail?id=168415
void SequencedWorkerPool::Inner::CleanupForTesting() {
- DCHECK_NE(subtle::NoBarrier_Load(&g_all_pools_state),
- AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER);
+ DCHECK_NE(g_all_pools_state, AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER);
AutoLock lock(lock_);
CHECK_EQ(CLEANUP_DONE, cleanup_state_);
if (shutdown_called_)
@@ -924,10 +931,8 @@ void SequencedWorkerPool::Inner::Shutdown(
max_blocking_tasks_after_shutdown_ = max_new_blocking_tasks_after_shutdown;
- if (subtle::NoBarrier_Load(&g_all_pools_state) !=
- AllPoolsState::WORKER_CREATED) {
+ if (g_all_pools_state != AllPoolsState::USE_WORKER_POOL)
return;
- }
// Tickle the threads. This will wake up a waiting one so it will know that
// it can exit, which in turn will wake up any other waiting ones.
@@ -966,8 +971,7 @@ bool SequencedWorkerPool::Inner::IsShutdownInProgress() {
}
void SequencedWorkerPool::Inner::ThreadLoop(Worker* this_worker) {
- DCHECK_EQ(AllPoolsState::WORKER_CREATED,
- subtle::NoBarrier_Load(&g_all_pools_state));
+ DCHECK_EQ(AllPoolsState::USE_WORKER_POOL, g_all_pools_state);
{
AutoLock lock(lock_);
DCHECK(thread_being_created_);
@@ -986,7 +990,7 @@ void SequencedWorkerPool::Inner::ThreadLoop(Worker* this_worker) {
// See GetWork for what delete_these_outside_lock is doing.
SequencedTask task;
TimeDelta wait_time;
- std::vector<Closure> delete_these_outside_lock;
+ std::vector<SequencedTask> delete_these_outside_lock;
GetWorkStatus status =
GetWork(&task, &wait_time, &delete_these_outside_lock);
if (status == GET_WORK_FOUND) {
@@ -1003,7 +1007,7 @@ void SequencedWorkerPool::Inner::ThreadLoop(Worker* this_worker) {
// already get a signal for each new task, but it doesn't
// hurt.)
SignalHasWork();
- delete_these_outside_lock.clear();
+ DeleteWithoutLock(&delete_these_outside_lock, this_worker);
// Complete thread creation outside the lock if necessary.
if (new_thread_id)
@@ -1034,7 +1038,7 @@ void SequencedWorkerPool::Inner::ThreadLoop(Worker* this_worker) {
switch (status) {
case GET_WORK_WAIT: {
AutoUnlock unlock(lock_);
- delete_these_outside_lock.clear();
+ DeleteWithoutLock(&delete_these_outside_lock, this_worker);
}
break;
case GET_WORK_NOT_FOUND:
@@ -1056,7 +1060,7 @@ void SequencedWorkerPool::Inner::ThreadLoop(Worker* this_worker) {
// help this case.
if (shutdown_called_ && blocking_shutdown_pending_task_count_ == 0) {
AutoUnlock unlock(lock_);
- delete_these_outside_lock.clear();
+ DeleteWithoutLock(&delete_these_outside_lock, this_worker);
break;
}
@@ -1064,7 +1068,7 @@ void SequencedWorkerPool::Inner::ThreadLoop(Worker* this_worker) {
// deletion must happen outside of the lock.
if (delete_these_outside_lock.size()) {
AutoUnlock unlock(lock_);
- delete_these_outside_lock.clear();
+ DeleteWithoutLock(&delete_these_outside_lock, this_worker);
// Since the lock has been released, |status| may no longer be
// accurate. It might read GET_WORK_WAIT even if there are tasks
@@ -1087,6 +1091,9 @@ void SequencedWorkerPool::Inner::ThreadLoop(Worker* this_worker) {
}
waiting_thread_count_--;
}
+ // |delete_these_outside_lock| should have been cleared via
+ // DeleteWithoutLock() above already.
+ DCHECK(delete_these_outside_lock.empty());
}
} // Release lock_.
@@ -1098,9 +1105,21 @@ void SequencedWorkerPool::Inner::ThreadLoop(Worker* this_worker) {
can_shutdown_cv_.Signal();
}
+void SequencedWorkerPool::Inner::DeleteWithoutLock(
+ std::vector<SequencedTask>* tasks_to_delete,
+ Worker* this_worker) {
+ while (!tasks_to_delete->empty()) {
+ const SequencedTask& deleted_task = tasks_to_delete->back();
+ this_worker->set_running_task_info(
+ SequenceToken(deleted_task.sequence_token_id),
+ deleted_task.shutdown_behavior);
+ tasks_to_delete->pop_back();
+ }
+ this_worker->reset_running_task_info();
+}
+
void SequencedWorkerPool::Inner::HandleCleanup() {
- DCHECK_EQ(AllPoolsState::WORKER_CREATED,
- subtle::NoBarrier_Load(&g_all_pools_state));
+ DCHECK_EQ(AllPoolsState::USE_WORKER_POOL, g_all_pools_state);
lock_.AssertAcquired();
if (cleanup_state_ == CLEANUP_DONE)
@@ -1166,9 +1185,8 @@ int64_t SequencedWorkerPool::Inner::LockedGetNextSequenceTaskNumber() {
SequencedWorkerPool::Inner::GetWorkStatus SequencedWorkerPool::Inner::GetWork(
SequencedTask* task,
TimeDelta* wait_time,
- std::vector<Closure>* delete_these_outside_lock) {
- DCHECK_EQ(AllPoolsState::WORKER_CREATED,
- subtle::NoBarrier_Load(&g_all_pools_state));
+ std::vector<SequencedTask>* delete_these_outside_lock) {
+ DCHECK_EQ(AllPoolsState::USE_WORKER_POOL, g_all_pools_state);
lock_.AssertAcquired();
@@ -1213,18 +1231,17 @@ SequencedWorkerPool::Inner::GetWorkStatus SequencedWorkerPool::Inner::GetWork(
// shutdown. Delete it and get more work.
//
// Note that we do not want to delete unrunnable tasks. Deleting a task
- // can have side effects (like freeing some objects) and deleting a
- // task that's supposed to run after one that's currently running could
- // cause an obscure crash.
+ // can have side effects (like freeing some objects) and deleting a task
+ // that's supposed to run after one that's currently running could cause
+ // an obscure crash.
//
// We really want to delete these tasks outside the lock in case the
- // closures are holding refs to objects that want to post work from
- // their destructorss (which would deadlock). The closures are
- // internally refcounted, so we just need to keep a copy of them alive
- // until the lock is exited. The calling code can just clear() the
- // vector they passed to us once the lock is exited to make this
- // happen.
- delete_these_outside_lock->push_back(i->task);
+ // closures are holding refs to objects that want to post work from their
+ // destructors (which would deadlock). The closures are internally
+ // refcounted, so we just need to keep a copy of them alive until the lock
+ // is exited. The calling code can just clear() the vector they passed to
+ // us once the lock is exited to make this happen.
+ delete_these_outside_lock->push_back(*i);
pending_tasks_.erase(i++);
continue;
}
@@ -1235,7 +1252,7 @@ SequencedWorkerPool::Inner::GetWorkStatus SequencedWorkerPool::Inner::GetWork(
status = GET_WORK_WAIT;
if (cleanup_state_ == CLEANUP_RUNNING) {
// Deferred tasks are deleted when cleaning up, see Inner::ThreadLoop.
- delete_these_outside_lock->push_back(i->task);
+ delete_these_outside_lock->push_back(*i);
pending_tasks_.erase(i);
}
break;
@@ -1256,8 +1273,7 @@ SequencedWorkerPool::Inner::GetWorkStatus SequencedWorkerPool::Inner::GetWork(
}
int SequencedWorkerPool::Inner::WillRunWorkerTask(const SequencedTask& task) {
- DCHECK_EQ(AllPoolsState::WORKER_CREATED,
- subtle::NoBarrier_Load(&g_all_pools_state));
+ DCHECK_EQ(AllPoolsState::USE_WORKER_POOL, g_all_pools_state);
lock_.AssertAcquired();
@@ -1290,8 +1306,7 @@ int SequencedWorkerPool::Inner::WillRunWorkerTask(const SequencedTask& task) {
}
void SequencedWorkerPool::Inner::DidRunWorkerTask(const SequencedTask& task) {
- DCHECK_EQ(AllPoolsState::WORKER_CREATED,
- subtle::NoBarrier_Load(&g_all_pools_state));
+ DCHECK_EQ(AllPoolsState::USE_WORKER_POOL, g_all_pools_state);
lock_.AssertAcquired();
@@ -1306,8 +1321,7 @@ void SequencedWorkerPool::Inner::DidRunWorkerTask(const SequencedTask& task) {
bool SequencedWorkerPool::Inner::IsSequenceTokenRunnable(
int sequence_token_id) const {
- DCHECK_NE(AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER,
- subtle::NoBarrier_Load(&g_all_pools_state));
+ DCHECK_NE(AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER, g_all_pools_state);
lock_.AssertAcquired();
return !sequence_token_id ||
@@ -1316,8 +1330,7 @@ bool SequencedWorkerPool::Inner::IsSequenceTokenRunnable(
}
int SequencedWorkerPool::Inner::PrepareToStartAdditionalThreadIfHelpful() {
- DCHECK_NE(AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER,
- subtle::NoBarrier_Load(&g_all_pools_state));
+ DCHECK_NE(AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER, g_all_pools_state);
lock_.AssertAcquired();
// How thread creation works:
@@ -1369,27 +1382,18 @@ int SequencedWorkerPool::Inner::PrepareToStartAdditionalThreadIfHelpful() {
void SequencedWorkerPool::Inner::FinishStartingAdditionalThread(
int thread_number) {
- DCHECK_NE(AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER,
- subtle::NoBarrier_Load(&g_all_pools_state));
+ DCHECK_EQ(AllPoolsState::USE_WORKER_POOL, g_all_pools_state);
// Called outside of the lock.
DCHECK_GT(thread_number, 0);
- if (subtle::NoBarrier_Load(&g_all_pools_state) !=
- AllPoolsState::WORKER_CREATED) {
- DCHECK_EQ(AllPoolsState::NONE_ACTIVE,
- subtle::NoBarrier_Load(&g_all_pools_state));
- subtle::NoBarrier_Store(&g_all_pools_state, AllPoolsState::WORKER_CREATED);
- }
-
// The worker is assigned to the list when the thread actually starts, which
// will manage the memory of the pointer.
new Worker(worker_pool_, thread_number, thread_name_prefix_);
}
void SequencedWorkerPool::Inner::SignalHasWork() {
- DCHECK_NE(AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER,
- subtle::NoBarrier_Load(&g_all_pools_state));
+ DCHECK_NE(AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER, g_all_pools_state);
has_work_cv_.Signal();
if (testing_observer_) {
@@ -1398,8 +1402,7 @@ void SequencedWorkerPool::Inner::SignalHasWork() {
}
bool SequencedWorkerPool::Inner::CanShutdown() const {
- DCHECK_EQ(AllPoolsState::WORKER_CREATED,
- subtle::NoBarrier_Load(&g_all_pools_state));
+ DCHECK_EQ(AllPoolsState::USE_WORKER_POOL, g_all_pools_state);
lock_.AssertAcquired();
// See PrepareToStartAdditionalThreadIfHelpful for how thread creation works.
return !thread_being_created_ &&
@@ -1437,26 +1440,34 @@ SequencedWorkerPool::GetWorkerPoolForCurrentThread() {
}
// static
-void SequencedWorkerPool::RedirectToTaskSchedulerForProcess() {
+void SequencedWorkerPool::EnableForProcess() {
+ // TODO(fdoray): Uncomment this line. It is initially commented to avoid a
+ // revert of the CL that adds debug::DumpWithoutCrashing() in case of
+ // waterfall failures.
+ // DCHECK_EQ(AllPoolsState::POST_TASK_DISABLED, g_all_pools_state);
+ g_all_pools_state = AllPoolsState::USE_WORKER_POOL;
+}
+
+// static
+void SequencedWorkerPool::EnableWithRedirectionToTaskSchedulerForProcess(
+ TaskPriority max_task_priority) {
+ // TODO(fdoray): Uncomment this line. It is initially commented to avoid a
+ // revert of the CL that adds debug::DumpWithoutCrashing() in case of
+ // waterfall failures.
+ // DCHECK_EQ(AllPoolsState::POST_TASK_DISABLED, g_all_pools_state);
DCHECK(TaskScheduler::GetInstance());
- // Hitting this DCHECK indicates that a task was posted to a
- // SequencedWorkerPool before the TaskScheduler was initialized and
- // redirected, posting task to SequencedWorkerPools needs to at least be
- // delayed until after that point.
- DCHECK_EQ(AllPoolsState::NONE_ACTIVE,
- subtle::NoBarrier_Load(&g_all_pools_state));
- subtle::NoBarrier_Store(&g_all_pools_state,
- AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER);
+ g_all_pools_state = AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER;
+ g_max_task_priority = max_task_priority;
+}
+
+// static
+void SequencedWorkerPool::DisableForProcessForTesting() {
+ g_all_pools_state = AllPoolsState::POST_TASK_DISABLED;
}
// static
-void SequencedWorkerPool::ResetRedirectToTaskSchedulerForProcessForTesting() {
- // This can be called when the current state is REDIRECTED_TO_TASK_SCHEDULER
- // to stop redirecting tasks. It can also be called when the current state is
- // WORKER_CREATED to allow RedirectToTaskSchedulerForProcess() to be called
- // (RedirectToTaskSchedulerForProcess() cannot be called after a worker has
- // been created if this isn't called).
- subtle::NoBarrier_Store(&g_all_pools_state, AllPoolsState::NONE_ACTIVE);
+bool SequencedWorkerPool::IsEnabled() {
+ return g_all_pools_state != AllPoolsState::POST_TASK_DISABLED;
}
SequencedWorkerPool::SequencedWorkerPool(size_t max_threads,
@@ -1595,8 +1606,7 @@ bool SequencedWorkerPool::RunsTasksOnCurrentThread() const {
void SequencedWorkerPool::FlushForTesting() {
DCHECK(!RunsTasksOnCurrentThread());
base::ThreadRestrictions::ScopedAllowWait allow_wait;
- if (subtle::NoBarrier_Load(&g_all_pools_state) ==
- AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) {
+ if (g_all_pools_state == AllPoolsState::REDIRECTED_TO_TASK_SCHEDULER) {
// TODO(gab): Remove this if http://crbug.com/622400 fails.
TaskScheduler::GetInstance()->FlushForTesting();
} else {
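Background for the delete_these_outside_lock / DeleteWithoutLock() changes above: doomed tasks are collected while the lock is held, but their destructors only run after the lock is released, so a Closure destructor that posts new work cannot deadlock on the same lock. A minimal sketch of the idiom; the helper name is made up, though it reuses the member names from this file:

  void DrainUnrunnableTasksSketch() {
    std::vector<SequencedTask> doomed;  // destroyed after the lock is released
    {
      AutoLock lock(lock_);
      // ... move tasks that will never run from |pending_tasks_| into |doomed| ...
    }
    // |doomed| goes out of scope here; a Closure destructor that posts work
    // re-acquires the (now free) |lock_| without deadlocking.
  }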
diff --git a/chromium/base/threading/sequenced_worker_pool.h b/chromium/base/threading/sequenced_worker_pool.h
index 252d511d3f2..0d42de9138e 100644
--- a/chromium/base/threading/sequenced_worker_pool.h
+++ b/chromium/base/threading/sequenced_worker_pool.h
@@ -59,6 +59,10 @@ template <class T> class DeleteHelper;
// These will be executed in an unspecified order. The order of execution
// between tasks with different sequence tokens is also unspecified.
//
+// You must call EnableForProcess() or
+// EnableWithRedirectionToTaskSchedulerForProcess() before starting to post
+// tasks to a process' SequencedWorkerPools.
+//
// This class may be leaked on shutdown to facilitate fast shutdown. The
// expected usage, however, is to call Shutdown(), which correctly accounts
// for CONTINUE_ON_SHUTDOWN behavior and is required for BLOCK_SHUTDOWN
@@ -178,25 +182,29 @@ class BASE_EXPORT SequencedWorkerPool : public TaskRunner {
// PostSequencedWorkerTask(). Valid tokens are always nonzero.
static SequenceToken GetSequenceToken();
- // Starts redirecting tasks posted to this process' SequencedWorkerPools to
- // the registered TaskScheduler. This cannot be called after a task has been
- // posted to a SequencedWorkerPool. This is not thread-safe; proper
- // synchronization is required to use any SequencedWorkerPool method after
- // calling this. There must be a registered TaskScheduler when this is called.
- // Ideally, call this on the main thread of a process, before any other
- // threads are created and before any tasks are posted to that process'
- // SequencedWorkerPools.
+ // Enables posting tasks to this process' SequencedWorkerPools. Cannot be
+ // called if already enabled. This is not thread-safe; proper synchronization
+ // is required to use any SequencedWorkerPool method after calling this.
+ static void EnableForProcess();
+
+ // Same as EnableForProcess(), but tasks are redirected to the registered
+ // TaskScheduler. All redirections' TaskPriority will be capped to
+ // |max_task_priority|. There must be a registered TaskScheduler when this is
+ // called.
// TODO(gab): Remove this if http://crbug.com/622400 fails
// (SequencedWorkerPool will be phased out completely otherwise).
- static void RedirectToTaskSchedulerForProcess();
-
- // Stops redirecting tasks posted to this process' SequencedWorkerPools to the
- // registered TaskScheduler and allows RedirectToTaskSchedulerForProcess() to
- // be called even if tasks have already posted to a SequencedWorkerPool in
- // this process. Calling this while there are active SequencedWorkerPools is
- // not supported. This is not thread-safe; proper synchronization is required
- // to use any SequencedWorkerPool method after calling this.
- static void ResetRedirectToTaskSchedulerForProcessForTesting();
+ static void EnableWithRedirectionToTaskSchedulerForProcess(
+ TaskPriority max_task_priority = TaskPriority::HIGHEST);
+
+ // Disables posting tasks to this process' SequencedWorkerPools. Calling this
+ // while there are active SequencedWorkerPools is not supported. This is not
+ // thread-safe; proper synchronization is required to use any
+ // SequencedWorkerPool method after calling this.
+ static void DisableForProcessForTesting();
+
+ // Returns true if posting tasks to this process' SequencedWorkerPool is
+ // enabled (with or without redirection to TaskScheduler).
+ static bool IsEnabled();
// When constructing a SequencedWorkerPool, there must be a
// ThreadTaskRunnerHandle on the current thread unless you plan to
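A brief usage sketch of the process-wide switches declared above; the call site and priority value are illustrative, and in practice this happens once, early in process startup, before any task is posted:

  // Plain worker-pool mode:
  base::SequencedWorkerPool::EnableForProcess();
  DCHECK(base::SequencedWorkerPool::IsEnabled());

  // Or, once a TaskScheduler has been registered, redirect everything to it
  // and cap the priority of redirected tasks:
  // base::SequencedWorkerPool::EnableWithRedirectionToTaskSchedulerForProcess(
  //     base::TaskPriority::USER_VISIBLE);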
diff --git a/chromium/base/threading/sequenced_worker_pool_unittest.cc b/chromium/base/threading/sequenced_worker_pool_unittest.cc
index d2007f8be52..6e203390c07 100644
--- a/chromium/base/threading/sequenced_worker_pool_unittest.cc
+++ b/chromium/base/threading/sequenced_worker_pool_unittest.cc
@@ -248,14 +248,17 @@ class SequencedWorkerPoolTest
std::vector<SchedulerWorkerPoolParams> worker_pool_params;
worker_pool_params.emplace_back(
"SchedulerWorkerPoolName", ThreadPriority::NORMAL,
- SchedulerWorkerPoolParams::IORestriction::ALLOWED,
SchedulerWorkerPoolParams::StandbyThreadPolicy::LAZY,
kNumWorkerThreads, TimeDelta::Max());
TaskScheduler::CreateAndSetDefaultTaskScheduler(
std::move(worker_pool_params),
base::Bind([](const TaskTraits&) -> size_t { return 0U; }));
- SequencedWorkerPool::ResetRedirectToTaskSchedulerForProcessForTesting();
- SequencedWorkerPool::RedirectToTaskSchedulerForProcess();
+
+ // Unit tests run in an environment where SequencedWorkerPool is enabled
+ // without redirection to TaskScheduler. For the current unit test,
+ // disable it and re-enable it with redirection to TaskScheduler.
+ SequencedWorkerPool::DisableForProcessForTesting();
+ SequencedWorkerPool::EnableWithRedirectionToTaskSchedulerForProcess();
}
}
@@ -267,7 +270,12 @@ class SequencedWorkerPoolTest
DeletePool();
if (RedirectedToTaskScheduler()) {
- SequencedWorkerPool::ResetRedirectToTaskSchedulerForProcessForTesting();
+ // Reset SequencedWorkerPool to its original state (i.e. enabled without
+ // redirection to TaskScheduler).
+ SequencedWorkerPool::DisableForProcessForTesting();
+ SequencedWorkerPool::EnableForProcess();
+
+ // Delete the registered TaskScheduler.
DeleteTaskScheduler();
}
}
diff --git a/chromium/base/threading/thread.cc b/chromium/base/threading/thread.cc
index 90b44f8e19a..ec2f98dd4b3 100644
--- a/chromium/base/threading/thread.cc
+++ b/chromium/base/threading/thread.cc
@@ -261,12 +261,7 @@ bool Thread::GetThreadWasQuitProperly() {
void Thread::SetMessageLoop(MessageLoop* message_loop) {
DCHECK(owning_sequence_checker_.CalledOnValidSequence());
-
- // TODO(gab): Figure out why some callers pass in a null |message_loop|...
- // https://crbug.com/629139#c15
- // DCHECK(message_loop);
- if (!message_loop)
- return;
+ DCHECK(message_loop);
// Setting |message_loop_| should suffice for this thread to be considered
// as "running", until Stop() is invoked.
diff --git a/chromium/base/threading/thread_restrictions.h b/chromium/base/threading/thread_restrictions.h
index 07b78aa9641..8f3beb1d1a2 100644
--- a/chromium/base/threading/thread_restrictions.h
+++ b/chromium/base/threading/thread_restrictions.h
@@ -14,9 +14,6 @@ class HistogramSynchronizer;
class NativeBackendKWallet;
class ScopedAllowWaitForLegacyWebViewApi;
-namespace blimp {
-class BlimpBrowserTest;
-}
namespace cc {
class CompletionEvent;
class SingleThreadTaskGraphRunner;
@@ -55,6 +52,9 @@ class GpuChannelHost;
}
namespace mojo {
class SyncCallRestrictions;
+namespace edk {
+class ScopedIPCSupport;
+}
}
namespace ui {
class CommandBufferClientImpl;
@@ -135,20 +135,6 @@ class BASE_EXPORT ThreadRestrictions {
DISALLOW_COPY_AND_ASSIGN(ScopedAllowIO);
};
- // Constructing a ScopedAllowSingleton temporarily allows accessing for the
- // current thread. Doing this is almost always incorrect.
- class BASE_EXPORT ScopedAllowSingleton {
- public:
- ScopedAllowSingleton() { previous_value_ = SetSingletonAllowed(true); }
- ~ScopedAllowSingleton() { SetSingletonAllowed(previous_value_); }
- private:
- // Whether singleton use is allowed when the ScopedAllowSingleton was
- // constructed.
- bool previous_value_;
-
- DISALLOW_COPY_AND_ASSIGN(ScopedAllowSingleton);
- };
-
#if DCHECK_IS_ON()
// Set whether the current thread to make IO calls.
// Threads start out in the *allowed* state.
@@ -188,7 +174,6 @@ class BASE_EXPORT ThreadRestrictions {
private:
// DO NOT ADD ANY OTHER FRIEND STATEMENTS, talk to jam or brettw first.
// BEGIN ALLOWED USAGE.
- friend class blimp::BlimpBrowserTest;
friend class content::BrowserShutdownProfileDumper;
friend class content::BrowserSurfaceViewManager;
friend class content::BrowserTestBase;
@@ -211,6 +196,7 @@ class BASE_EXPORT ThreadRestrictions {
friend class PlatformThread;
friend class android::JavaHandlerThread;
friend class mojo::SyncCallRestrictions;
+ friend class mojo::edk::ScopedIPCSupport;
friend class ui::CommandBufferClientImpl;
friend class ui::CommandBufferLocal;
friend class ui::GpuState;
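Context for the friend-list change above: the wait allowances guarded by this list (such as ScopedAllowWait, used elsewhere in this patch) are intended to be reachable only from the classes named here, now including mojo::edk::ScopedIPCSupport. A hedged sketch of what such a friend can do; the function and event are illustrative:

  // Only meaningful inside a class that appears in the friend list above.
  void WaitForShutdownComplete(base::WaitableEvent* event) {
    base::ThreadRestrictions::ScopedAllowWait allow_wait;
    event->Wait();  // would otherwise trip the wait restriction on this thread
  }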
diff --git a/chromium/base/threading/worker_pool_posix.cc b/chromium/base/threading/worker_pool_posix.cc
index 7dd452b854f..afa9a5044f5 100644
--- a/chromium/base/threading/worker_pool_posix.cc
+++ b/chromium/base/threading/worker_pool_posix.cc
@@ -33,7 +33,10 @@ const int kIdleSecondsBeforeExit = 10 * 60;
class WorkerPoolImpl {
public:
WorkerPoolImpl();
- ~WorkerPoolImpl();
+
+ // WorkerPoolImpl is only instantiated as a leaky LazyInstance, so the
+ // destructor is never called.
+ ~WorkerPoolImpl() = delete;
void PostTask(const tracked_objects::Location& from_here,
const base::Closure& task,
@@ -47,10 +50,6 @@ WorkerPoolImpl::WorkerPoolImpl()
: pool_(new base::PosixDynamicThreadPool("WorkerPool",
kIdleSecondsBeforeExit)) {}
-WorkerPoolImpl::~WorkerPoolImpl() {
- pool_->Terminate();
-}
-
void WorkerPoolImpl::PostTask(const tracked_objects::Location& from_here,
const base::Closure& task,
bool task_is_slow) {
@@ -121,23 +120,13 @@ PosixDynamicThreadPool::PosixDynamicThreadPool(const std::string& name_prefix,
: name_prefix_(name_prefix),
idle_seconds_before_exit_(idle_seconds_before_exit),
pending_tasks_available_cv_(&lock_),
- num_idle_threads_(0),
- terminated_(false) {}
+ num_idle_threads_(0) {}
PosixDynamicThreadPool::~PosixDynamicThreadPool() {
while (!pending_tasks_.empty())
pending_tasks_.pop();
}
-void PosixDynamicThreadPool::Terminate() {
- {
- AutoLock locked(lock_);
- DCHECK(!terminated_) << "Thread pool is already terminated.";
- terminated_ = true;
- }
- pending_tasks_available_cv_.Broadcast();
-}
-
void PosixDynamicThreadPool::PostTask(
const tracked_objects::Location& from_here,
const base::Closure& task) {
@@ -147,8 +136,6 @@ void PosixDynamicThreadPool::PostTask(
void PosixDynamicThreadPool::AddTask(PendingTask* pending_task) {
AutoLock locked(lock_);
- DCHECK(!terminated_)
- << "This thread pool is already terminated. Do not post new tasks.";
pending_tasks_.push(std::move(*pending_task));
@@ -166,9 +153,6 @@ void PosixDynamicThreadPool::AddTask(PendingTask* pending_task) {
PendingTask PosixDynamicThreadPool::WaitForTask() {
AutoLock locked(lock_);
- if (terminated_)
- return PendingTask(FROM_HERE, base::Closure());
-
if (pending_tasks_.empty()) { // No work available, wait for work.
num_idle_threads_++;
if (num_idle_threads_cv_.get())
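The deleted destructor above relies on the leaky-LazyInstance pattern: the pool singleton is created on first use and deliberately never destroyed. A sketch of what such a declaration typically looks like; the variable name is illustrative, not necessarily the one used in this file:

  base::LazyInstance<WorkerPoolImpl>::Leaky g_lazy_worker_pool =
      LAZY_INSTANCE_INITIALIZER;
  // g_lazy_worker_pool.Get() constructs WorkerPoolImpl exactly once; because
  // the instance is intentionally leaked at shutdown, ~WorkerPoolImpl() is
  // never invoked and can be declared deleted.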
diff --git a/chromium/base/threading/worker_pool_posix.h b/chromium/base/threading/worker_pool_posix.h
index 0598d706a37..d65ae8f8cf6 100644
--- a/chromium/base/threading/worker_pool_posix.h
+++ b/chromium/base/threading/worker_pool_posix.h
@@ -50,10 +50,6 @@ class BASE_EXPORT PosixDynamicThreadPool
PosixDynamicThreadPool(const std::string& name_prefix,
int idle_seconds_before_exit);
- // Indicates that the thread pool is going away. Stops handing out tasks to
- // worker threads. Wakes up all the idle threads to let them exit.
- void Terminate();
-
// Adds |task| to the thread pool.
void PostTask(const tracked_objects::Location& from_here,
const Closure& task);
@@ -83,7 +79,6 @@ class BASE_EXPORT PosixDynamicThreadPool
ConditionVariable pending_tasks_available_cv_;
int num_idle_threads_;
TaskQueue pending_tasks_;
- bool terminated_;
// Only used for tests to ensure correct thread ordering. It will always be
// NULL in non-test code.
std::unique_ptr<ConditionVariable> num_idle_threads_cv_;
diff --git a/chromium/base/threading/worker_pool_posix_unittest.cc b/chromium/base/threading/worker_pool_posix_unittest.cc
index 6cefeed34e5..b4e8b58520c 100644
--- a/chromium/base/threading/worker_pool_posix_unittest.cc
+++ b/chromium/base/threading/worker_pool_posix_unittest.cc
@@ -103,12 +103,6 @@ class PosixDynamicThreadPoolTest : public testing::Test {
peer_.set_num_idle_threads_cv(new ConditionVariable(peer_.lock()));
}
- void TearDown() override {
- // Wake up the idle threads so they can terminate.
- if (pool_.get())
- pool_->Terminate();
- }
-
void WaitForTasksToStart(int num_tasks) {
base::AutoLock num_waiting_to_start_locked(num_waiting_to_start_lock_);
while (num_waiting_to_start_ < num_tasks) {
diff --git a/chromium/base/time/time.cc b/chromium/base/time/time.cc
index 4e942015fcf..f5cefd49e0a 100644
--- a/chromium/base/time/time.cc
+++ b/chromium/base/time/time.cc
@@ -126,7 +126,7 @@ int64_t SaturatedSub(TimeDelta delta, int64_t value) {
} // namespace time_internal
std::ostream& operator<<(std::ostream& os, TimeDelta time_delta) {
- return os << time_delta.InSecondsF() << "s";
+ return os << time_delta.InSecondsF() << " s";
}
// Time -----------------------------------------------------------------------
@@ -203,6 +203,11 @@ double Time::ToJsTime() const {
kMicrosecondsPerMillisecond);
}
+Time Time::FromJavaTime(int64_t ms_since_epoch) {
+ return base::Time::UnixEpoch() +
+ base::TimeDelta::FromMilliseconds(ms_since_epoch);
+}
+
int64_t Time::ToJavaTime() const {
if (is_null()) {
// Preserve 0 so the invalid result doesn't depend on the platform.
@@ -230,7 +235,12 @@ Time Time::LocalMidnight() const {
exploded.minute = 0;
exploded.second = 0;
exploded.millisecond = 0;
- return FromLocalExploded(exploded);
+ Time out_time;
+ if (FromLocalExploded(exploded, &out_time))
+ return out_time;
+ // This function must not fail.
+ NOTREACHED();
+ return Time();
}
// static
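A round-trip sketch for the Time::FromJavaTime() helper added above; the timestamp is an arbitrary illustrative value:

  const int64_t kJavaMs = 1491393600000;             // milliseconds since Unix epoch
  base::Time t = base::Time::FromJavaTime(kJavaMs);
  DCHECK_EQ(kJavaMs, t.ToJavaTime());                // converts back losslessly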
diff --git a/chromium/base/time/time.h b/chromium/base/time/time.h
index 1084e951794..75b690eef22 100644
--- a/chromium/base/time/time.h
+++ b/chromium/base/time/time.h
@@ -59,6 +59,7 @@
#include "base/base_export.h"
#include "base/compiler_specific.h"
+#include "base/logging.h"
#include "base/numerics/safe_math.h"
#include "build/build_config.h"
@@ -494,8 +495,9 @@ class BASE_EXPORT Time : public time_internal::TimeBase<Time> {
static Time FromJsTime(double ms_since_epoch);
double ToJsTime() const;
- // Converts to Java convention for times, a number of
+ // Converts to/from Java convention for times, a number of
// milliseconds since the epoch.
+ static Time FromJavaTime(int64_t ms_since_epoch);
int64_t ToJavaTime() const;
#if defined(OS_POSIX)
@@ -536,21 +538,6 @@ class BASE_EXPORT Time : public time_internal::TimeBase<Time> {
#endif
// Converts an exploded structure representing either the local time or UTC
- // into a Time class.
- // TODO(maksims): Get rid of these in favor of the methods below when
- // all the callers stop using these ones.
- static Time FromUTCExploded(const Exploded& exploded) {
- base::Time time;
- ignore_result(FromUTCExploded(exploded, &time));
- return time;
- }
- static Time FromLocalExploded(const Exploded& exploded) {
- base::Time time;
- ignore_result(FromLocalExploded(exploded, &time));
- return time;
- }
-
- // Converts an exploded structure representing either the local time or UTC
// into a Time class. Returns false on a failure when, for example, a day of
// month is set to 31 on a 28-30 day month. Returns Time(0) on overflow.
static bool FromUTCExploded(const Exploded& exploded,
@@ -570,10 +557,12 @@ class BASE_EXPORT Time : public time_internal::TimeBase<Time> {
// specified in RFC822) is treated as if the timezone is not specified.
// TODO(iyengar) Move the FromString/FromTimeT/ToTimeT/FromFileTime to
// a new time converter class.
- static bool FromString(const char* time_string, Time* parsed_time) {
+ static bool FromString(const char* time_string,
+ Time* parsed_time) WARN_UNUSED_RESULT {
return FromStringInternal(time_string, true, parsed_time);
}
- static bool FromUTCString(const char* time_string, Time* parsed_time) {
+ static bool FromUTCString(const char* time_string,
+ Time* parsed_time) WARN_UNUSED_RESULT {
return FromStringInternal(time_string, false, parsed_time);
}
@@ -616,10 +605,11 @@ class BASE_EXPORT Time : public time_internal::TimeBase<Time> {
// timezone is not specified.
static bool FromStringInternal(const char* time_string,
bool is_local,
- Time* parsed_time);
+ Time* parsed_time) WARN_UNUSED_RESULT;
// Comparison does not consider |day_of_week| when doing the operation.
- static bool ExplodedMostlyEquals(const Exploded& lhs, const Exploded& rhs);
+ static bool ExplodedMostlyEquals(const Exploded& lhs,
+ const Exploded& rhs) WARN_UNUSED_RESULT;
};
// static
@@ -726,14 +716,14 @@ class BASE_EXPORT TimeTicks : public time_internal::TimeBase<TimeTicks> {
// Now() will return high resolution values. Note that, on systems where the
// high resolution clock works but is deemed inefficient, the low resolution
// clock will be used instead.
- static bool IsHighResolution();
+ static bool IsHighResolution() WARN_UNUSED_RESULT;
// Returns true if TimeTicks is consistent across processes, meaning that
// timestamps taken on different processes can be safely compared with one
// another. (Note that, even on platforms where this returns true, time values
// from different threads that are within one tick of each other must be
// considered to have an ambiguous ordering.)
- static bool IsConsistentAcrossProcesses();
+ static bool IsConsistentAcrossProcesses() WARN_UNUSED_RESULT;
#if defined(OS_WIN)
// Translates an absolute QPC timestamp into a TimeTicks value. The returned
@@ -794,7 +784,7 @@ class BASE_EXPORT ThreadTicks : public time_internal::TimeBase<ThreadTicks> {
}
// Returns true if ThreadTicks::Now() is supported on this system.
- static bool IsSupported() {
+ static bool IsSupported() WARN_UNUSED_RESULT {
#if (defined(_POSIX_THREAD_CPUTIME) && (_POSIX_THREAD_CPUTIME >= 0)) || \
(defined(OS_MACOSX) && !defined(OS_IOS)) || defined(OS_ANDROID)
return true;
@@ -845,7 +835,7 @@ class BASE_EXPORT ThreadTicks : public time_internal::TimeBase<ThreadTicks> {
// allow testing.
static double TSCTicksPerSecond();
- static bool IsSupportedWin();
+ static bool IsSupportedWin() WARN_UNUSED_RESULT;
static void WaitUntilInitializedWin();
#endif
};
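With WARN_UNUSED_RESULT added to the parsing helpers above, callers are expected to check the return value. A minimal sketch, using an RFC822-style string of the kind FromString/FromUTCString accept; the string itself is illustrative:

  base::Time parsed;
  if (!base::Time::FromUTCString("Tue, 11 Apr 2017 07:46:53 GMT", &parsed)) {
    LOG(ERROR) << "could not parse time string";
    return;  // or otherwise handle the parse failure
  }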
diff --git a/chromium/base/time/time_posix.cc b/chromium/base/time/time_posix.cc
index 4caf3866c52..963c15b6536 100644
--- a/chromium/base/time/time_posix.cc
+++ b/chromium/base/time/time_posix.cc
@@ -16,6 +16,7 @@
#include <ostream>
#include "base/logging.h"
+#include "base/numerics/safe_math.h"
#include "build/build_config.h"
#if defined(OS_ANDROID)
@@ -227,19 +228,28 @@ void Time::Explode(bool is_local, Exploded* exploded) const {
// static
bool Time::FromExploded(bool is_local, const Exploded& exploded, Time* time) {
+ CheckedNumeric<int> month = exploded.month;
+ month--;
+ CheckedNumeric<int> year = exploded.year;
+ year -= 1900;
+ if (!month.IsValid() || !year.IsValid()) {
+ *time = Time(0);
+ return false;
+ }
+
struct tm timestruct;
- timestruct.tm_sec = exploded.second;
- timestruct.tm_min = exploded.minute;
- timestruct.tm_hour = exploded.hour;
- timestruct.tm_mday = exploded.day_of_month;
- timestruct.tm_mon = exploded.month - 1;
- timestruct.tm_year = exploded.year - 1900;
- timestruct.tm_wday = exploded.day_of_week; // mktime/timegm ignore this
- timestruct.tm_yday = 0; // mktime/timegm ignore this
- timestruct.tm_isdst = -1; // attempt to figure it out
+ timestruct.tm_sec = exploded.second;
+ timestruct.tm_min = exploded.minute;
+ timestruct.tm_hour = exploded.hour;
+ timestruct.tm_mday = exploded.day_of_month;
+ timestruct.tm_mon = month.ValueOrDie();
+ timestruct.tm_year = year.ValueOrDie();
+ timestruct.tm_wday = exploded.day_of_week; // mktime/timegm ignore this
+ timestruct.tm_yday = 0; // mktime/timegm ignore this
+ timestruct.tm_isdst = -1; // attempt to figure it out
#if !defined(OS_NACL) && !defined(OS_SOLARIS)
- timestruct.tm_gmtoff = 0; // not a POSIX field, so mktime/timegm ignore
- timestruct.tm_zone = NULL; // not a POSIX field, so mktime/timegm ignore
+ timestruct.tm_gmtoff = 0; // not a POSIX field, so mktime/timegm ignore
+ timestruct.tm_zone = NULL; // not a POSIX field, so mktime/timegm ignore
#endif
SysTime seconds;
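The CheckedNumeric guard added above rejects exploded values whose adjustment would overflow before they reach struct tm. The same idiom in isolation; the starting value is deliberately extreme to show the invalid case:

  base::CheckedNumeric<int> month = std::numeric_limits<int>::min();
  month--;  // underflows INT_MIN, so the checked value becomes invalid
  if (!month.IsValid()) {
    // Bail out instead of handing an undefined value to mktime()/timegm().
  }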
diff --git a/chromium/base/time/time_unittest.cc b/chromium/base/time/time_unittest.cc
index a42d701f25a..8906c3bee19 100644
--- a/chromium/base/time/time_unittest.cc
+++ b/chromium/base/time/time_unittest.cc
@@ -59,6 +59,11 @@ TEST(TimeTestOutOfBounds, FromExplodedOutOfBoundsTime) {
{{9840633, 1, 0, 1, 1, 1, 0, 0}, true},
// Underflow will fail as well.
{{-9840633, 1, 0, 1, 1, 1, 0, 0}, true},
+ // Test integer overflow and underflow cases for the values themselves.
+ {{std::numeric_limits<int>::min(), 1, 0, 1, 1, 1, 0, 0}, true},
+ {{std::numeric_limits<int>::max(), 1, 0, 1, 1, 1, 0, 0}, true},
+ {{2016, std::numeric_limits<int>::min(), 0, 1, 1, 1, 0, 0}, false},
+ {{2016, std::numeric_limits<int>::max(), 0, 1, 1, 1, 0, 0}, false},
};
for (const auto& test : kDateTestData) {
@@ -1111,17 +1116,17 @@ TEST(TimeDeltaLogging, DCheckEqCompiles) {
TEST(TimeDeltaLogging, EmptyIsZero) {
TimeDelta zero;
- EXPECT_EQ("0s", AnyToString(zero));
+ EXPECT_EQ("0 s", AnyToString(zero));
}
TEST(TimeDeltaLogging, FiveHundredMs) {
TimeDelta five_hundred_ms = TimeDelta::FromMilliseconds(500);
- EXPECT_EQ("0.5s", AnyToString(five_hundred_ms));
+ EXPECT_EQ("0.5 s", AnyToString(five_hundred_ms));
}
TEST(TimeDeltaLogging, MinusTenSeconds) {
TimeDelta minus_ten_seconds = TimeDelta::FromSeconds(-10);
- EXPECT_EQ("-10s", AnyToString(minus_ten_seconds));
+ EXPECT_EQ("-10 s", AnyToString(minus_ten_seconds));
}
TEST(TimeDeltaLogging, DoesNotMessUpFormattingFlags) {
diff --git a/chromium/base/timer/timer.cc b/chromium/base/timer/timer.cc
index aba8fc8d9fb..6ec18f18148 100644
--- a/chromium/base/timer/timer.cc
+++ b/chromium/base/timer/timer.cc
@@ -181,10 +181,8 @@ void Timer::PostNewScheduledTask(TimeDelta delay) {
}
// Remember the thread ID that posts the first task -- this will be verified
// later when the task is abandoned to detect misuse from multiple threads.
- if (!thread_id_) {
- DCHECK(GetTaskRunner()->BelongsToCurrentThread());
+ if (!thread_id_)
thread_id_ = static_cast<int>(PlatformThread::CurrentId());
- }
}
scoped_refptr<SingleThreadTaskRunner> Timer::GetTaskRunner() {
diff --git a/chromium/base/timer/timer.h b/chromium/base/timer/timer.h
index 50aedbd4cec..8aac279def6 100644
--- a/chromium/base/timer/timer.h
+++ b/chromium/base/timer/timer.h
@@ -163,8 +163,10 @@ class BASE_EXPORT Timer {
// Stop running task (if any) and abandon scheduled task (if any).
void StopAndAbandon() {
- Stop();
AbandonScheduledTask();
+
+ Stop();
+ // No more member accesses here: |this| could be deleted at this point.
}
// When non-NULL, the scheduled_task_ is waiting in the MessageLoop to call
diff --git a/chromium/base/timer/timer_unittest.cc b/chromium/base/timer/timer_unittest.cc
index b34da20ba3f..69338eb211b 100644
--- a/chromium/base/timer/timer_unittest.cc
+++ b/chromium/base/timer/timer_unittest.cc
@@ -10,27 +10,35 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
+#include "base/callback.h"
#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/ref_counted.h"
#include "base/message_loop/message_loop.h"
#include "base/run_loop.h"
+#include "base/sequenced_task_runner.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/test/sequenced_worker_pool_owner.h"
#include "base/test/test_mock_time_task_runner.h"
-#include "base/test/test_simple_task_runner.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/sequenced_task_runner_handle.h"
+#include "base/threading/thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/time/tick_clock.h"
+#include "base/time/time.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
-using base::TimeDelta;
-using base::SingleThreadTaskRunner;
+namespace base {
namespace {
// The message loops on which each timer should be tested.
-const base::MessageLoop::Type testing_message_loops[] = {
- base::MessageLoop::TYPE_DEFAULT,
- base::MessageLoop::TYPE_IO,
+const MessageLoop::Type testing_message_loops[] = {
+ MessageLoop::TYPE_DEFAULT, MessageLoop::TYPE_IO,
#if !defined(OS_IOS) // iOS does not allow direct running of the UI loop.
- base::MessageLoop::TYPE_UI,
+ MessageLoop::TYPE_UI,
#endif
};
@@ -47,165 +55,227 @@ class Receiver {
int count_;
};
-class OneShotTimerTester {
+// A basic helper class that can start a one-shot timer and signal a
+// WaitableEvent when this timer fires.
+class OneShotTimerTesterBase {
public:
- explicit OneShotTimerTester(bool* did_run, unsigned milliseconds = 10)
- : did_run_(did_run),
- delay_ms_(milliseconds),
- quit_message_loop_(true) {
- }
+ // |did_run|, if provided, will be signaled when Run() fires.
+ explicit OneShotTimerTesterBase(
+ WaitableEvent* did_run = nullptr,
+ const TimeDelta& delay = TimeDelta::FromMilliseconds(10))
+ : did_run_(did_run), delay_(delay) {}
+
+ virtual ~OneShotTimerTesterBase() = default;
void Start() {
- timer_.Start(FROM_HERE, TimeDelta::FromMilliseconds(delay_ms_), this,
- &OneShotTimerTester::Run);
+ started_time_ = TimeTicks::Now();
+ timer_->Start(FROM_HERE, delay_, this, &OneShotTimerTesterBase::Run);
}
- void SetTaskRunner(scoped_refptr<SingleThreadTaskRunner> task_runner) {
- quit_message_loop_ = false;
- timer_.SetTaskRunner(task_runner);
- }
+ bool IsRunning() { return timer_->IsRunning(); }
- private:
- void Run() {
- *did_run_ = true;
- if (quit_message_loop_) {
- base::MessageLoop::current()->QuitWhenIdle();
+ TimeTicks started_time() const { return started_time_; }
+ TimeDelta delay() const { return delay_; }
+
+ protected:
+ virtual void Run() {
+ if (did_run_) {
+ EXPECT_FALSE(did_run_->IsSignaled());
+ did_run_->Signal();
}
}
- bool* did_run_;
- base::OneShotTimer timer_;
- const unsigned delay_ms_;
- bool quit_message_loop_;
+ std::unique_ptr<OneShotTimer> timer_ = MakeUnique<OneShotTimer>();
+
+ private:
+ WaitableEvent* const did_run_;
+ const TimeDelta delay_;
+ TimeTicks started_time_;
+
+ DISALLOW_COPY_AND_ASSIGN(OneShotTimerTesterBase);
};
-class OneShotSelfDeletingTimerTester {
+// Extends functionality of OneShotTimerTesterBase with the abilities to wait
+// until the timer fires and to change task runner for the timer.
+class OneShotTimerTester : public OneShotTimerTesterBase {
public:
- explicit OneShotSelfDeletingTimerTester(bool* did_run)
- : did_run_(did_run), timer_(new base::OneShotTimer()) {}
+ // |did_run|, if provided, will be signaled when Run() fires.
+ explicit OneShotTimerTester(
+ WaitableEvent* did_run = nullptr,
+ const TimeDelta& delay = TimeDelta::FromMilliseconds(10))
+ : OneShotTimerTesterBase(did_run, delay),
+ quit_closure_(run_loop_.QuitClosure()) {}
- void Start() {
- timer_->Start(FROM_HERE, TimeDelta::FromMilliseconds(10), this,
- &OneShotSelfDeletingTimerTester::Run);
+ ~OneShotTimerTester() override = default;
+
+ void SetTaskRunner(scoped_refptr<SingleThreadTaskRunner> task_runner) {
+ timer_->SetTaskRunner(std::move(task_runner));
+
+ // Run() will be invoked on |task_runner| but |run_loop_|'s QuitClosure
+ // needs to run on this thread (where the MessageLoop lives).
+ quit_closure_ =
+ Bind(IgnoreResult(&SingleThreadTaskRunner::PostTask),
+ ThreadTaskRunnerHandle::Get(), FROM_HERE, run_loop_.QuitClosure());
}
+ // Blocks until Run() executes and confirms that Run() didn't fire before
+ // |delay_| expired.
+ void WaitAndConfirmTimerFiredAfterDelay() {
+ run_loop_.Run();
+
+ EXPECT_NE(TimeTicks(), started_time());
+ EXPECT_GE(TimeTicks::Now() - started_time(), delay());
+ }
+
+ protected:
+ // Overridable method to do things on Run() before signaling events/closures
+ // managed by this helper.
+ virtual void OnRun() {}
+
private:
- void Run() {
- *did_run_ = true;
- timer_.reset();
- base::MessageLoop::current()->QuitWhenIdle();
+ void Run() override {
+ OnRun();
+ OneShotTimerTesterBase::Run();
+ quit_closure_.Run();
}
- bool* did_run_;
- std::unique_ptr<base::OneShotTimer> timer_;
+ RunLoop run_loop_;
+ Closure quit_closure_;
+
+ DISALLOW_COPY_AND_ASSIGN(OneShotTimerTester);
};
+class OneShotSelfDeletingTimerTester : public OneShotTimerTester {
+ protected:
+ void OnRun() override { timer_.reset(); }
+};
+
+constexpr int kNumRepeats = 10;
+
class RepeatingTimerTester {
public:
- explicit RepeatingTimerTester(bool* did_run, const TimeDelta& delay)
- : did_run_(did_run), counter_(10), delay_(delay) {
- }
+ explicit RepeatingTimerTester(WaitableEvent* did_run, const TimeDelta& delay)
+ : counter_(kNumRepeats),
+ quit_closure_(run_loop_.QuitClosure()),
+ did_run_(did_run),
+ delay_(delay) {}
void Start() {
+ started_time_ = TimeTicks::Now();
timer_.Start(FROM_HERE, delay_, this, &RepeatingTimerTester::Run);
}
+ void WaitAndConfirmTimerFiredRepeatedlyAfterDelay() {
+ run_loop_.Run();
+
+ EXPECT_NE(TimeTicks(), started_time_);
+ EXPECT_GE(TimeTicks::Now() - started_time_, kNumRepeats * delay_);
+ }
+
private:
void Run() {
if (--counter_ == 0) {
- *did_run_ = true;
+ if (did_run_) {
+ EXPECT_FALSE(did_run_->IsSignaled());
+ did_run_->Signal();
+ }
timer_.Stop();
- base::MessageLoop::current()->QuitWhenIdle();
+ quit_closure_.Run();
}
}
- bool* did_run_;
+ RepeatingTimer timer_;
int counter_;
- TimeDelta delay_;
- base::RepeatingTimer timer_;
+
+ RunLoop run_loop_;
+ Closure quit_closure_;
+ WaitableEvent* const did_run_;
+
+ const TimeDelta delay_;
+ TimeTicks started_time_;
+
+ DISALLOW_COPY_AND_ASSIGN(RepeatingTimerTester);
};
-void RunTest_OneShotTimer(base::MessageLoop::Type message_loop_type) {
- base::MessageLoop loop(message_loop_type);
+// Basic test with same setup as RunTest_OneShotTimers_Cancel below to confirm
+// that |did_run_a| would be signaled in that test if it wasn't for the
+// deletion.
+void RunTest_OneShotTimers(MessageLoop::Type message_loop_type) {
+ MessageLoop loop(message_loop_type);
- bool did_run = false;
- OneShotTimerTester f(&did_run);
- f.Start();
+ WaitableEvent did_run_a(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ OneShotTimerTester a(&did_run_a);
+ a.Start();
+
+ OneShotTimerTester b;
+ b.Start();
- base::RunLoop().Run();
+ b.WaitAndConfirmTimerFiredAfterDelay();
- EXPECT_TRUE(did_run);
+ EXPECT_TRUE(did_run_a.IsSignaled());
}
-void RunTest_OneShotTimer_Cancel(base::MessageLoop::Type message_loop_type) {
- base::MessageLoop loop(message_loop_type);
+void RunTest_OneShotTimers_Cancel(MessageLoop::Type message_loop_type) {
+ MessageLoop loop(message_loop_type);
- bool did_run_a = false;
+ WaitableEvent did_run_a(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
OneShotTimerTester* a = new OneShotTimerTester(&did_run_a);
// This should run before the timer expires.
- base::ThreadTaskRunnerHandle::Get()->DeleteSoon(FROM_HERE, a);
+ SequencedTaskRunnerHandle::Get()->DeleteSoon(FROM_HERE, a);
// Now start the timer.
a->Start();
- bool did_run_b = false;
- OneShotTimerTester b(&did_run_b);
+ OneShotTimerTester b;
b.Start();
- base::RunLoop().Run();
+ b.WaitAndConfirmTimerFiredAfterDelay();
- EXPECT_FALSE(did_run_a);
- EXPECT_TRUE(did_run_b);
+ EXPECT_FALSE(did_run_a.IsSignaled());
}
-void RunTest_OneShotSelfDeletingTimer(
- base::MessageLoop::Type message_loop_type) {
- base::MessageLoop loop(message_loop_type);
+void RunTest_OneShotSelfDeletingTimer(MessageLoop::Type message_loop_type) {
+ MessageLoop loop(message_loop_type);
- bool did_run = false;
- OneShotSelfDeletingTimerTester f(&did_run);
+ OneShotSelfDeletingTimerTester f;
f.Start();
-
- base::RunLoop().Run();
-
- EXPECT_TRUE(did_run);
+ f.WaitAndConfirmTimerFiredAfterDelay();
}
-void RunTest_RepeatingTimer(base::MessageLoop::Type message_loop_type,
+void RunTest_RepeatingTimer(MessageLoop::Type message_loop_type,
const TimeDelta& delay) {
- base::MessageLoop loop(message_loop_type);
+ MessageLoop loop(message_loop_type);
- bool did_run = false;
- RepeatingTimerTester f(&did_run, delay);
+ RepeatingTimerTester f(nullptr, delay);
f.Start();
-
- base::RunLoop().Run();
-
- EXPECT_TRUE(did_run);
+ f.WaitAndConfirmTimerFiredRepeatedlyAfterDelay();
}
-void RunTest_RepeatingTimer_Cancel(base::MessageLoop::Type message_loop_type,
+void RunTest_RepeatingTimer_Cancel(MessageLoop::Type message_loop_type,
const TimeDelta& delay) {
- base::MessageLoop loop(message_loop_type);
+ MessageLoop loop(message_loop_type);
- bool did_run_a = false;
+ WaitableEvent did_run_a(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
RepeatingTimerTester* a = new RepeatingTimerTester(&did_run_a, delay);
// This should run before the timer expires.
- base::ThreadTaskRunnerHandle::Get()->DeleteSoon(FROM_HERE, a);
+ SequencedTaskRunnerHandle::Get()->DeleteSoon(FROM_HERE, a);
// Now start the timer.
a->Start();
- bool did_run_b = false;
- RepeatingTimerTester b(&did_run_b, delay);
+ RepeatingTimerTester b(nullptr, delay);
b.Start();
- base::RunLoop().Run();
+ b.WaitAndConfirmTimerFiredRepeatedlyAfterDelay();
- EXPECT_FALSE(did_run_a);
- EXPECT_TRUE(did_run_b);
+ // |a| should not have fired despite |b| starting after it on the same
+ // sequence and being complete by now.
+ EXPECT_FALSE(did_run_a.IsSignaled());
}
class DelayTimerTarget {
@@ -221,40 +291,38 @@ class DelayTimerTarget {
bool signaled_ = false;
};
-void RunTest_DelayTimer_NoCall(base::MessageLoop::Type message_loop_type) {
- base::MessageLoop loop(message_loop_type);
+void RunTest_DelayTimer_NoCall(MessageLoop::Type message_loop_type) {
+ MessageLoop loop(message_loop_type);
// If Delay is never called, the timer shouldn't go off.
DelayTimerTarget target;
- base::DelayTimer timer(FROM_HERE, TimeDelta::FromMilliseconds(1), &target,
- &DelayTimerTarget::Signal);
+ DelayTimer timer(FROM_HERE, TimeDelta::FromMilliseconds(1), &target,
+ &DelayTimerTarget::Signal);
- bool did_run = false;
- OneShotTimerTester tester(&did_run);
+ OneShotTimerTester tester;
tester.Start();
- base::RunLoop().Run();
+ tester.WaitAndConfirmTimerFiredAfterDelay();
ASSERT_FALSE(target.signaled());
}
-void RunTest_DelayTimer_OneCall(base::MessageLoop::Type message_loop_type) {
- base::MessageLoop loop(message_loop_type);
+void RunTest_DelayTimer_OneCall(MessageLoop::Type message_loop_type) {
+ MessageLoop loop(message_loop_type);
DelayTimerTarget target;
- base::DelayTimer timer(FROM_HERE, TimeDelta::FromMilliseconds(1), &target,
- &DelayTimerTarget::Signal);
+ DelayTimer timer(FROM_HERE, TimeDelta::FromMilliseconds(1), &target,
+ &DelayTimerTarget::Signal);
timer.Reset();
- bool did_run = false;
- OneShotTimerTester tester(&did_run, 100 /* milliseconds */);
+ OneShotTimerTester tester(nullptr, TimeDelta::FromMilliseconds(100));
tester.Start();
- base::RunLoop().Run();
+ tester.WaitAndConfirmTimerFiredAfterDelay();
ASSERT_TRUE(target.signaled());
}
struct ResetHelper {
- ResetHelper(base::DelayTimer* timer, DelayTimerTarget* target)
+ ResetHelper(DelayTimer* timer, DelayTimerTarget* target)
: timer_(timer), target_(target) {}
void Reset() {
@@ -263,31 +331,30 @@ struct ResetHelper {
}
private:
- base::DelayTimer* const timer_;
+ DelayTimer* const timer_;
DelayTimerTarget* const target_;
};
-void RunTest_DelayTimer_Reset(base::MessageLoop::Type message_loop_type) {
- base::MessageLoop loop(message_loop_type);
+void RunTest_DelayTimer_Reset(MessageLoop::Type message_loop_type) {
+ MessageLoop loop(message_loop_type);
// If Delay is never called, the timer shouldn't go off.
DelayTimerTarget target;
- base::DelayTimer timer(FROM_HERE, TimeDelta::FromMilliseconds(50), &target,
- &DelayTimerTarget::Signal);
+ DelayTimer timer(FROM_HERE, TimeDelta::FromMilliseconds(50), &target,
+ &DelayTimerTarget::Signal);
timer.Reset();
ResetHelper reset_helper(&timer, &target);
- base::OneShotTimer timers[20];
+ OneShotTimer timers[20];
for (size_t i = 0; i < arraysize(timers); ++i) {
timers[i].Start(FROM_HERE, TimeDelta::FromMilliseconds(i * 10),
&reset_helper, &ResetHelper::Reset);
}
- bool did_run = false;
- OneShotTimerTester tester(&did_run, 300);
+ OneShotTimerTester tester(nullptr, TimeDelta::FromMilliseconds(300));
tester.Start();
- base::RunLoop().Run();
+ tester.WaitAndConfirmTimerFiredAfterDelay();
ASSERT_TRUE(target.signaled());
}
@@ -299,21 +366,20 @@ class DelayTimerFatalTarget {
}
};
-
-void RunTest_DelayTimer_Deleted(base::MessageLoop::Type message_loop_type) {
- base::MessageLoop loop(message_loop_type);
+void RunTest_DelayTimer_Deleted(MessageLoop::Type message_loop_type) {
+ MessageLoop loop(message_loop_type);
DelayTimerFatalTarget target;
{
- base::DelayTimer timer(FROM_HERE, TimeDelta::FromMilliseconds(50), &target,
- &DelayTimerFatalTarget::Signal);
+ DelayTimer timer(FROM_HERE, TimeDelta::FromMilliseconds(50), &target,
+ &DelayTimerFatalTarget::Signal);
timer.Reset();
}
// When the timer is deleted, the DelayTimerFatalTarget should never be
// called.
- base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(100));
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(100));
}
} // namespace
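The rewritten testers above replace the old bool-flag-plus-QuitWhenIdle handshake with a WaitableEvent, which also works when the timer fires on another thread. The core of that handshake, condensed (names illustrative):

  base::WaitableEvent did_run(base::WaitableEvent::ResetPolicy::MANUAL,
                              base::WaitableEvent::InitialState::NOT_SIGNALED);
  // The timer's Run() callback calls did_run.Signal() exactly once.
  EXPECT_FALSE(did_run.IsSignaled());  // not yet fired
  // ... run the loop / wait for the delay ...
  EXPECT_TRUE(did_run.IsSignaled());   // fired after the delay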
@@ -322,15 +388,15 @@ void RunTest_DelayTimer_Deleted(base::MessageLoop::Type message_loop_type) {
// Each test is run against each type of MessageLoop. That way we are sure
// that timers work properly in all configurations.
-TEST(TimerTest, OneShotTimer) {
+TEST(TimerTest, OneShotTimers) {
for (int i = 0; i < kNumTestingMessageLoops; i++) {
- RunTest_OneShotTimer(testing_message_loops[i]);
+ RunTest_OneShotTimers(testing_message_loops[i]);
}
}
-TEST(TimerTest, OneShotTimer_Cancel) {
+TEST(TimerTest, OneShotTimers_Cancel) {
for (int i = 0; i < kNumTestingMessageLoops; i++) {
- RunTest_OneShotTimer_Cancel(testing_message_loops[i]);
+ RunTest_OneShotTimers_Cancel(testing_message_loops[i]);
}
}
@@ -343,31 +409,41 @@ TEST(TimerTest, OneShotSelfDeletingTimer) {
}
TEST(TimerTest, OneShotTimer_CustomTaskRunner) {
- scoped_refptr<base::TestSimpleTaskRunner> task_runner =
- new base::TestSimpleTaskRunner();
+ // A MessageLoop is required for the timer events on the other thread to
+ // communicate back to the Timer under test.
+ MessageLoop loop;
- bool did_run = false;
+ Thread other_thread("OneShotTimer_CustomTaskRunner");
+ other_thread.Start();
+
+ WaitableEvent did_run(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
OneShotTimerTester f(&did_run);
- f.SetTaskRunner(task_runner);
+ f.SetTaskRunner(other_thread.task_runner());
f.Start();
+ EXPECT_TRUE(f.IsRunning());
+
+ f.WaitAndConfirmTimerFiredAfterDelay();
+ EXPECT_TRUE(did_run.IsSignaled());
- EXPECT_FALSE(did_run);
- task_runner->RunUntilIdle();
- EXPECT_TRUE(did_run);
+ // |f| should already have communicated back to this |loop| before invoking
+ // Run() and as such this thread should already be aware that |f| is no longer
+ // running.
+ EXPECT_TRUE(loop.IsIdleForTesting());
+ EXPECT_FALSE(f.IsRunning());
}
TEST(TimerTest, OneShotTimerWithTickClock) {
- scoped_refptr<base::TestMockTimeTaskRunner> task_runner(
- new base::TestMockTimeTaskRunner(base::Time::Now(),
- base::TimeTicks::Now()));
- std::unique_ptr<base::TickClock> tick_clock(task_runner->GetMockTickClock());
- base::MessageLoop message_loop;
+ scoped_refptr<TestMockTimeTaskRunner> task_runner(
+ new TestMockTimeTaskRunner(Time::Now(), TimeTicks::Now()));
+ std::unique_ptr<TickClock> tick_clock(task_runner->GetMockTickClock());
+ MessageLoop message_loop;
message_loop.SetTaskRunner(task_runner);
Receiver receiver;
- base::OneShotTimer timer(tick_clock.get());
- timer.Start(FROM_HERE, base::TimeDelta::FromSeconds(1),
- base::Bind(&Receiver::OnCalled, base::Unretained(&receiver)));
- task_runner->FastForwardBy(base::TimeDelta::FromSeconds(1));
+ OneShotTimer timer(tick_clock.get());
+ timer.Start(FROM_HERE, TimeDelta::FromSeconds(1),
+ Bind(&Receiver::OnCalled, Unretained(&receiver)));
+ task_runner->FastForwardBy(TimeDelta::FromSeconds(1));
EXPECT_TRUE(receiver.WasCalled());
}
@@ -400,19 +476,17 @@ TEST(TimerTest, RepeatingTimerZeroDelay_Cancel) {
}
TEST(TimerTest, RepeatingTimerWithTickClock) {
- scoped_refptr<base::TestMockTimeTaskRunner> task_runner(
- new base::TestMockTimeTaskRunner(base::Time::Now(),
- base::TimeTicks::Now()));
- std::unique_ptr<base::TickClock> tick_clock(task_runner->GetMockTickClock());
- base::MessageLoop message_loop;
+ scoped_refptr<TestMockTimeTaskRunner> task_runner(
+ new TestMockTimeTaskRunner(Time::Now(), TimeTicks::Now()));
+ std::unique_ptr<TickClock> tick_clock(task_runner->GetMockTickClock());
+ MessageLoop message_loop;
message_loop.SetTaskRunner(task_runner);
Receiver receiver;
const int expected_times_called = 10;
- base::RepeatingTimer timer(tick_clock.get());
- timer.Start(FROM_HERE, base::TimeDelta::FromSeconds(1),
- base::Bind(&Receiver::OnCalled, base::Unretained(&receiver)));
- task_runner->FastForwardBy(
- base::TimeDelta::FromSeconds(expected_times_called));
+ RepeatingTimer timer(tick_clock.get());
+ timer.Start(FROM_HERE, TimeDelta::FromSeconds(1),
+ Bind(&Receiver::OnCalled, Unretained(&receiver)));
+ task_runner->FastForwardBy(TimeDelta::FromSeconds(expected_times_called));
timer.Stop();
EXPECT_EQ(expected_times_called, receiver.TimesCalled());
}
@@ -443,22 +517,21 @@ TEST(TimerTest, DelayTimer_Deleted) {
}
TEST(TimerTest, DelayTimerWithTickClock) {
- scoped_refptr<base::TestMockTimeTaskRunner> task_runner(
- new base::TestMockTimeTaskRunner(base::Time::Now(),
- base::TimeTicks::Now()));
- std::unique_ptr<base::TickClock> tick_clock(task_runner->GetMockTickClock());
- base::MessageLoop message_loop;
+ scoped_refptr<TestMockTimeTaskRunner> task_runner(
+ new TestMockTimeTaskRunner(Time::Now(), TimeTicks::Now()));
+ std::unique_ptr<TickClock> tick_clock(task_runner->GetMockTickClock());
+ MessageLoop message_loop;
message_loop.SetTaskRunner(task_runner);
Receiver receiver;
- base::DelayTimer timer(FROM_HERE, base::TimeDelta::FromSeconds(1), &receiver,
- &Receiver::OnCalled, tick_clock.get());
- task_runner->FastForwardBy(base::TimeDelta::FromMilliseconds(999));
+ DelayTimer timer(FROM_HERE, TimeDelta::FromSeconds(1), &receiver,
+ &Receiver::OnCalled, tick_clock.get());
+ task_runner->FastForwardBy(TimeDelta::FromMilliseconds(999));
EXPECT_FALSE(receiver.WasCalled());
timer.Reset();
- task_runner->FastForwardBy(base::TimeDelta::FromMilliseconds(999));
+ task_runner->FastForwardBy(TimeDelta::FromMilliseconds(999));
EXPECT_FALSE(receiver.WasCalled());
timer.Reset();
- task_runner->FastForwardBy(base::TimeDelta::FromSeconds(1));
+ task_runner->FastForwardBy(TimeDelta::FromSeconds(1));
EXPECT_TRUE(receiver.WasCalled());
}
@@ -467,20 +540,65 @@ TEST(TimerTest, MessageLoopShutdown) {
// message loop does not cause crashes if there were pending
// timers not yet fired. It may only trigger exceptions
// if debug heap checking is enabled.
- bool did_run = false;
+ WaitableEvent did_run(WaitableEvent::ResetPolicy::MANUAL,
+ WaitableEvent::InitialState::NOT_SIGNALED);
{
- OneShotTimerTester a(&did_run);
- OneShotTimerTester b(&did_run);
- OneShotTimerTester c(&did_run);
- OneShotTimerTester d(&did_run);
+ OneShotTimerTesterBase a(&did_run);
+ OneShotTimerTesterBase b(&did_run);
+ OneShotTimerTesterBase c(&did_run);
+ OneShotTimerTesterBase d(&did_run);
{
- base::MessageLoop loop;
+ MessageLoop loop;
a.Start();
b.Start();
} // MessageLoop destructs by falling out of scope.
} // OneShotTimers destruct. SHOULD NOT CRASH, of course.
- EXPECT_FALSE(did_run);
+ EXPECT_FALSE(did_run.IsSignaled());
+}
+
+// Ref-counted class which owns a Timer. The class passes a reference to itself
+// via the |user_task| parameter in Timer::Start(). |Timer::user_task_| might
+// end up holding the last reference to the class.
+class OneShotSelfOwningTimerTester
+ : public RefCounted<OneShotSelfOwningTimerTester> {
+ public:
+ OneShotSelfOwningTimerTester() = default;
+
+ void StartTimer() {
+ // Start timer with long delay in order to test the timer getting destroyed
+ // while a timer task is still pending.
+ timer_.Start(FROM_HERE, TimeDelta::FromDays(1),
+ base::Bind(&OneShotSelfOwningTimerTester::Run, this));
+ }
+
+ private:
+ friend class RefCounted<OneShotSelfOwningTimerTester>;
+ ~OneShotSelfOwningTimerTester() = default;
+
+ void Run() {
+ ADD_FAILURE() << "Timer unexpectedly fired.";
+ }
+
+ OneShotTimer timer_;
+
+ DISALLOW_COPY_AND_ASSIGN(OneShotSelfOwningTimerTester);
+};
+
+TEST(TimerTest, MessageLoopShutdownSelfOwningTimer) {
+ // This test verifies that shutdown of the message loop does not cause crashes
+ // if there is a pending timer not yet fired and |Timer::user_task_| owns the
+ // timer. The test may only trigger exceptions if debug heap checking is
+ // enabled.
+
+ MessageLoop loop;
+ scoped_refptr<OneShotSelfOwningTimerTester> tester =
+ new OneShotSelfOwningTimerTester();
+
+ std::move(tester)->StartTimer();
+ // |Timer::user_task_| owns sole reference to |tester|.
+
+ // MessageLoop destructs by falling out of scope. SHOULD NOT CRASH.
}
void TimerTestCallback() {
@@ -488,11 +606,10 @@ void TimerTestCallback() {
TEST(TimerTest, NonRepeatIsRunning) {
{
- base::MessageLoop loop;
- base::Timer timer(false, false);
+ MessageLoop loop;
+ Timer timer(false, false);
EXPECT_FALSE(timer.IsRunning());
- timer.Start(FROM_HERE, TimeDelta::FromDays(1),
- base::Bind(&TimerTestCallback));
+ timer.Start(FROM_HERE, TimeDelta::FromDays(1), Bind(&TimerTestCallback));
EXPECT_TRUE(timer.IsRunning());
timer.Stop();
EXPECT_FALSE(timer.IsRunning());
@@ -500,11 +617,10 @@ TEST(TimerTest, NonRepeatIsRunning) {
}
{
- base::Timer timer(true, false);
- base::MessageLoop loop;
+ Timer timer(true, false);
+ MessageLoop loop;
EXPECT_FALSE(timer.IsRunning());
- timer.Start(FROM_HERE, TimeDelta::FromDays(1),
- base::Bind(&TimerTestCallback));
+ timer.Start(FROM_HERE, TimeDelta::FromDays(1), Bind(&TimerTestCallback));
EXPECT_TRUE(timer.IsRunning());
timer.Stop();
EXPECT_FALSE(timer.IsRunning());
@@ -515,12 +631,11 @@ TEST(TimerTest, NonRepeatIsRunning) {
}
TEST(TimerTest, NonRepeatMessageLoopDeath) {
- base::Timer timer(false, false);
+ Timer timer(false, false);
{
- base::MessageLoop loop;
+ MessageLoop loop;
EXPECT_FALSE(timer.IsRunning());
- timer.Start(FROM_HERE, TimeDelta::FromDays(1),
- base::Bind(&TimerTestCallback));
+ timer.Start(FROM_HERE, TimeDelta::FromDays(1), Bind(&TimerTestCallback));
EXPECT_TRUE(timer.IsRunning());
}
EXPECT_FALSE(timer.IsRunning());
@@ -528,9 +643,9 @@ TEST(TimerTest, NonRepeatMessageLoopDeath) {
}
TEST(TimerTest, RetainRepeatIsRunning) {
- base::MessageLoop loop;
- base::Timer timer(FROM_HERE, TimeDelta::FromDays(1),
- base::Bind(&TimerTestCallback), true);
+ MessageLoop loop;
+ Timer timer(FROM_HERE, TimeDelta::FromDays(1), Bind(&TimerTestCallback),
+ true);
EXPECT_FALSE(timer.IsRunning());
timer.Reset();
EXPECT_TRUE(timer.IsRunning());
@@ -541,9 +656,9 @@ TEST(TimerTest, RetainRepeatIsRunning) {
}
TEST(TimerTest, RetainNonRepeatIsRunning) {
- base::MessageLoop loop;
- base::Timer timer(FROM_HERE, TimeDelta::FromDays(1),
- base::Bind(&TimerTestCallback), false);
+ MessageLoop loop;
+ Timer timer(FROM_HERE, TimeDelta::FromDays(1), Bind(&TimerTestCallback),
+ false);
EXPECT_FALSE(timer.IsRunning());
timer.Reset();
EXPECT_TRUE(timer.IsRunning());
@@ -565,25 +680,27 @@ void ClearAllCallbackHappened() {
void SetCallbackHappened1() {
g_callback_happened1 = true;
- base::MessageLoop::current()->QuitWhenIdle();
+ MessageLoop::current()->QuitWhenIdle();
}
void SetCallbackHappened2() {
g_callback_happened2 = true;
- base::MessageLoop::current()->QuitWhenIdle();
+ MessageLoop::current()->QuitWhenIdle();
}
+} // namespace
+
TEST(TimerTest, ContinuationStopStart) {
{
ClearAllCallbackHappened();
- base::MessageLoop loop;
- base::Timer timer(false, false);
+ MessageLoop loop;
+ Timer timer(false, false);
timer.Start(FROM_HERE, TimeDelta::FromMilliseconds(10),
- base::Bind(&SetCallbackHappened1));
+ Bind(&SetCallbackHappened1));
timer.Stop();
timer.Start(FROM_HERE, TimeDelta::FromMilliseconds(40),
- base::Bind(&SetCallbackHappened2));
- base::RunLoop().Run();
+ Bind(&SetCallbackHappened2));
+ RunLoop().Run();
EXPECT_FALSE(g_callback_happened1);
EXPECT_TRUE(g_callback_happened2);
}
@@ -592,16 +709,16 @@ TEST(TimerTest, ContinuationStopStart) {
TEST(TimerTest, ContinuationReset) {
{
ClearAllCallbackHappened();
- base::MessageLoop loop;
- base::Timer timer(false, false);
+ MessageLoop loop;
+ Timer timer(false, false);
timer.Start(FROM_HERE, TimeDelta::FromMilliseconds(10),
- base::Bind(&SetCallbackHappened1));
+ Bind(&SetCallbackHappened1));
timer.Reset();
// Since Reset happened before task ran, the user_task must not be cleared:
ASSERT_FALSE(timer.user_task().is_null());
- base::RunLoop().Run();
+ RunLoop().Run();
EXPECT_TRUE(g_callback_happened1);
}
}
-} // namespace
+} // namespace base
diff --git a/chromium/base/trace_event/category_registry.cc b/chromium/base/trace_event/category_registry.cc
index 87715fc806a..e7c14606d66 100644
--- a/chromium/base/trace_event/category_registry.cc
+++ b/chromium/base/trace_event/category_registry.cc
@@ -10,9 +10,7 @@
#include "base/atomicops.h"
#include "base/debug/leak_annotations.h"
-#include "base/lazy_instance.h"
#include "base/logging.h"
-#include "base/synchronization/lock.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/trace_event/trace_category.h"
@@ -30,16 +28,13 @@ static_assert(std::is_pod<TraceCategory>::value, "TraceCategory must be POD");
// These entries must be kept consistent with the kCategory* consts below.
TraceCategory g_categories[kMaxCategories] = {
{0, 0, "tracing categories exhausted; must increase kMaxCategories"},
- {0, 0, "tracing already shutdown"}, // See kCategoryAlreadyShutdown below.
- {0, 0, "__metadata"}, // See kCategoryMetadata below.
- {0, 0, "toplevel"}, // Warmup the toplevel category.
+ {0, 0, "tracing already shutdown"}, // See kCategoryAlreadyShutdown below.
+ {0, 0, "__metadata"}, // See kCategoryMetadata below.
+ {0, 0, "toplevel"}, // Warmup the toplevel category.
};
base::subtle::AtomicWord g_category_index = kNumBuiltinCategories;
-base::LazyInstance<base::Lock>::Leaky g_category_lock =
- LAZY_INSTANCE_INITIALIZER;
-
bool IsValidCategoryPtr(const TraceCategory* category) {
// If any of these are hit, something has cached a corrupt category pointer.
uintptr_t ptr = reinterpret_cast<uintptr_t>(category);
@@ -73,14 +68,15 @@ void CategoryRegistry::Initialize() {
// static
void CategoryRegistry::ResetForTesting() {
- AutoLock lock(g_category_lock.Get());
+ // reset_for_testing clears only the enabled state and filters. The
+ // categories themselves cannot be cleared because the static pointers
+ // injected by the macros still point to them and cannot be reset.
for (size_t i = 0; i < kMaxCategories; ++i)
g_categories[i].reset_for_testing();
}
// static
-bool CategoryRegistry::GetOrCreateCategoryByName(const char* category_name,
- TraceCategory** category) {
+TraceCategory* CategoryRegistry::GetCategoryByName(const char* category_name) {
DCHECK(!strchr(category_name, '"'))
<< "Category names may not contain double quote";
@@ -90,28 +86,25 @@ bool CategoryRegistry::GetOrCreateCategoryByName(const char* category_name,
// Search for pre-existing category group.
for (size_t i = 0; i < category_index; ++i) {
if (strcmp(g_categories[i].name(), category_name) == 0) {
- *category = &g_categories[i];
- return false;
+ return &g_categories[i];
}
}
+ return nullptr;
+}
- // This is the slow path: the lock is not held in the case above, so more
- // than one thread could have reached here trying to add the same category.
- // Only hold the lock when actually appending a new category, and check the
- // categories groups again.
- // TODO(primiano): there should be no need for the acquire/release semantics
- // on g_category_index below, the outer lock implies that. Remove once the
- // tracing refactoring reaches a quieter state and we can afford the risk.
- AutoLock lock(g_category_lock.Get());
- category_index = base::subtle::Acquire_Load(&g_category_index);
- for (size_t i = 0; i < category_index; ++i) {
- if (strcmp(g_categories[i].name(), category_name) == 0) {
- *category = &g_categories[i];
- return false;
- }
- }
+bool CategoryRegistry::GetOrCreateCategoryLocked(
+ const char* category_name,
+ CategoryInitializerFn category_initializer_fn,
+ TraceCategory** category) {
+ // This is the slow path: the lock is not held in the fast path
+ // (GetCategoryByName), so more than one thread could have reached here trying
+ // to add the same category.
+ *category = GetCategoryByName(category_name);
+ if (*category)
+ return false;
// Create a new category.
+ size_t category_index = base::subtle::Acquire_Load(&g_category_index);
if (category_index >= kMaxCategories) {
NOTREACHED() << "must increase kMaxCategories";
*category = kCategoryExhausted;
@@ -128,6 +121,7 @@ bool CategoryRegistry::GetOrCreateCategoryByName(const char* category_name,
DCHECK(!(*category)->is_valid());
DCHECK(!(*category)->is_enabled());
(*category)->set_name(category_name_copy);
+ category_initializer_fn(*category);
// Update the max index now.
base::subtle::Release_Store(&g_category_index, category_index + 1);
diff --git a/chromium/base/trace_event/category_registry.h b/chromium/base/trace_event/category_registry.h
index da998993c4e..9c08efa3e14 100644
--- a/chromium/base/trace_event/category_registry.h
+++ b/chromium/base/trace_event/category_registry.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef BASE_TRACE_EVENT_CATEGORY_H_
-#define BASE_TRACE_EVENT_CATEGORY_H_
+#ifndef BASE_TRACE_EVENT_CATEGORY_REGISTRY_H_
+#define BASE_TRACE_EVENT_CATEGORY_REGISTRY_H_
#include <stddef.h>
#include <stdint.h>
@@ -18,10 +18,12 @@ struct TraceCategory;
class TraceCategoryTest;
class TraceLog;
-// Keeps track of the state of all tracing categories. The reason why this
-// is a fully static class with global state is to allow to statically define
-// known categories as global linker-initialized structs, without requiring
-// static initializers.
+// Allows fast and thread-safe access to the state of all tracing categories.
+// All the methods in this class can be concurrently called on multiple threads,
+// unless otherwise noted (e.g., GetOrCreateCategoryLocked).
+// The reason why this is a fully static class with global state is to allow
+// known categories to be statically defined as global linker-initialized
+// structs, without requiring static initializers.
class BASE_EXPORT CategoryRegistry {
public:
// Allows for-each iterations over a slice of the categories array.
@@ -52,11 +54,18 @@ class BASE_EXPORT CategoryRegistry {
static const TraceCategory* GetCategoryByStatePtr(
const uint8_t* category_state);
+ // Returns a category from its name or nullptr if not found.
+ // The returned pointer is an indefinitely lived pointer to the
+ // TraceCategory owned by the registry. TRACE_EVENTx macros will cache this
+ // pointer and use it for checks in their fast-paths.
+ static TraceCategory* GetCategoryByName(const char* category_name);
+
static bool IsBuiltinCategory(const TraceCategory*);
private:
friend class TraceCategoryTest;
friend class TraceLog;
+ using CategoryInitializerFn = void (*)(TraceCategory*);
// Only for debugging/testing purposes, is a no-op on release builds.
static void Initialize();
@@ -64,13 +73,14 @@ class BASE_EXPORT CategoryRegistry {
// Resets the state of all categories, to clear up the state between tests.
static void ResetForTesting();
- // The output |category| argument is an undefinitely lived pointer to the
- // TraceCategory owned by the registry. TRACE_EVENTx macros will cache this
- // pointer and use it for checks in their fast-paths.
- // Returns false if the category was already present, true if the category
- // has just been added and hence requires initialization.
- static bool GetOrCreateCategoryByName(const char* category_name,
- TraceCategory** category);
+ // Used to get/create a category in the slow path. If the category exists
+ // already, this has the same effect as GetCategoryByName and returns false.
+ // If not, a new category is created and the CategoryInitializerFn is invoked
+ // before returning true. The caller must guarantee serialization: either call
+ // this method from a single thread or hold a lock when calling this.
+ static bool GetOrCreateCategoryLocked(const char* category_name,
+ CategoryInitializerFn,
+ TraceCategory**);
// Allows to iterate over the valid categories in a for-each loop.
// This includes builtin categories such as __metadata.
@@ -80,4 +90,4 @@ class BASE_EXPORT CategoryRegistry {
} // namespace trace_event
} // namespace base
-#endif // BASE_TRACE_EVENT_CATEGORY_H_
+#endif // BASE_TRACE_EVENT_CATEGORY_REGISTRY_H_
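The header above splits the old GetOrCreateCategoryByName() into a lock-free fast path and a serialized slow path. Below is a minimal sketch of the calling pattern, assuming a caller-owned base::Lock and a no-op initializer; GetOrCreateCategoryLocked() is private to TraceLog and tests, so this only mirrors TraceLog's internal usage and would not compile outside it.

#include "base/synchronization/lock.h"
#include "base/trace_event/category_registry.h"
#include "base/trace_event/trace_category.h"

namespace base {
namespace trace_event {

// Sketch only: mirrors the two-step lookup TraceLog performs. The lock and the
// initializer lambda are illustrative assumptions, not part of this patch.
TraceCategory* GetOrCreateCategoryExample(const char* name, Lock* lock) {
  // Fast path: lock-free, safe to call concurrently from any thread.
  TraceCategory* category = CategoryRegistry::GetCategoryByName(name);
  if (category)
    return category;
  // Slow path: the caller must serialize, e.g. with its own lock.
  AutoLock auto_lock(*lock);
  CategoryRegistry::GetOrCreateCategoryLocked(
      name, [](TraceCategory*) { /* hypothetical initializer */ }, &category);
  return category;
}

}  // namespace trace_event
}  // namespace base

Note that the initializer runs before the new index is published with Release_Store(), so other threads never observe a half-initialized category through the fast path.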
diff --git a/chromium/base/trace_event/event_name_filter.cc b/chromium/base/trace_event/event_name_filter.cc
new file mode 100644
index 00000000000..8d0058c1474
--- /dev/null
+++ b/chromium/base/trace_event/event_name_filter.cc
@@ -0,0 +1,26 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/event_name_filter.h"
+
+#include "base/trace_event/trace_event_impl.h"
+
+namespace base {
+namespace trace_event {
+
+// static
+const char EventNameFilter::kName[] = "event_whitelist_predicate";
+
+EventNameFilter::EventNameFilter(
+ std::unique_ptr<EventNamesWhitelist> event_names_whitelist)
+ : event_names_whitelist_(std::move(event_names_whitelist)) {}
+
+EventNameFilter::~EventNameFilter() {}
+
+bool EventNameFilter::FilterTraceEvent(const TraceEvent& trace_event) const {
+ return event_names_whitelist_->count(trace_event.name()) != 0;
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/chromium/base/trace_event/event_name_filter.h b/chromium/base/trace_event/event_name_filter.h
new file mode 100644
index 00000000000..19333b3e033
--- /dev/null
+++ b/chromium/base/trace_event/event_name_filter.h
@@ -0,0 +1,46 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_EVENT_NAME_FILTER_H_
+#define BASE_TRACE_EVENT_EVENT_NAME_FILTER_H_
+
+#include <memory>
+#include <string>
+#include <unordered_set>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/trace_event/trace_event_filter.h"
+
+namespace base {
+namespace trace_event {
+
+class TraceEvent;
+
+// Filters trace events by checking the full name against a whitelist.
+// The current implementation is quite simple and dumb and just uses a
+// hashtable which requires char* to std::string conversion. It could be smarter
+// and use a bloom filter trie. However, today this is used too rarely to
+// justify that cost.
+class BASE_EXPORT EventNameFilter : public TraceEventFilter {
+ public:
+ using EventNamesWhitelist = std::unordered_set<std::string>;
+ static const char kName[];
+
+ EventNameFilter(std::unique_ptr<EventNamesWhitelist>);
+ ~EventNameFilter() override;
+
+ // TraceEventFilter implementation.
+ bool FilterTraceEvent(const TraceEvent&) const override;
+
+ private:
+ std::unique_ptr<const EventNamesWhitelist> event_names_whitelist_;
+
+ DISALLOW_COPY_AND_ASSIGN(EventNameFilter);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_EVENT_NAME_FILTER_H_
diff --git a/chromium/base/trace_event/event_name_filter_unittest.cc b/chromium/base/trace_event/event_name_filter_unittest.cc
new file mode 100644
index 00000000000..0bc2a4dafcf
--- /dev/null
+++ b/chromium/base/trace_event/event_name_filter_unittest.cc
@@ -0,0 +1,41 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/event_name_filter.h"
+
+#include "base/memory/ptr_util.h"
+#include "base/trace_event/trace_event_impl.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace trace_event {
+
+const TraceEvent& MakeTraceEvent(const char* name) {
+ static TraceEvent event;
+ event.Reset();
+ event.Initialize(0, TimeTicks(), ThreadTicks(), 'b', nullptr, name, "", 0, 0,
+ 0, nullptr, nullptr, nullptr, nullptr, 0);
+ return event;
+}
+
+TEST(TraceEventNameFilterTest, Whitelist) {
+ auto empty_whitelist = MakeUnique<EventNameFilter::EventNamesWhitelist>();
+ auto filter = MakeUnique<EventNameFilter>(std::move(empty_whitelist));
+
+ // No events should be filtered if the whitelist is empty.
+ EXPECT_FALSE(filter->FilterTraceEvent(MakeTraceEvent("foo")));
+
+ auto whitelist = MakeUnique<EventNameFilter::EventNamesWhitelist>();
+ whitelist->insert("foo");
+ whitelist->insert("bar");
+ filter = MakeUnique<EventNameFilter>(std::move(whitelist));
+ EXPECT_TRUE(filter->FilterTraceEvent(MakeTraceEvent("foo")));
+ EXPECT_FALSE(filter->FilterTraceEvent(MakeTraceEvent("fooz")));
+ EXPECT_FALSE(filter->FilterTraceEvent(MakeTraceEvent("afoo")));
+ EXPECT_TRUE(filter->FilterTraceEvent(MakeTraceEvent("bar")));
+ EXPECT_FALSE(filter->FilterTraceEvent(MakeTraceEvent("foobar")));
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc b/chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc
index f3a03fe90d0..31c3fd41b2e 100644
--- a/chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc
+++ b/chromium/base/trace_event/heap_profiler_allocation_context_tracker.cc
@@ -29,7 +29,6 @@ const size_t kMaxStackDepth = 128u;
const size_t kMaxTaskDepth = 16u;
AllocationContextTracker* const kInitializingSentinel =
reinterpret_cast<AllocationContextTracker*>(-1);
-const char kTracingOverhead[] = "tracing_overhead";
ThreadLocalStorage::StaticSlot g_tls_alloc_ctx_tracker = TLS_INITIALIZER;
@@ -157,21 +156,15 @@ void AllocationContextTracker::PopCurrentTaskContext(const char* context) {
}
// static
-AllocationContext AllocationContextTracker::GetContextSnapshot() {
- AllocationContext ctx;
-
- if (ignore_scope_depth_) {
- ctx.backtrace.frames[0] = StackFrame::FromTraceEventName(kTracingOverhead);
- ctx.type_name = kTracingOverhead;
- ctx.backtrace.frame_count = 1;
- return ctx;
- }
+bool AllocationContextTracker::GetContextSnapshot(AllocationContext* ctx) {
+ if (ignore_scope_depth_)
+ return false;
CaptureMode mode = static_cast<CaptureMode>(
subtle::NoBarrier_Load(&capture_mode_));
- auto* backtrace = std::begin(ctx.backtrace.frames);
- auto* backtrace_end = std::end(ctx.backtrace.frames);
+ auto* backtrace = std::begin(ctx->backtrace.frames);
+ auto* backtrace_end = std::end(ctx->backtrace.frames);
if (!thread_name_) {
// Ignore the string allocation made by GetAndLeakThreadName to avoid
@@ -236,19 +229,21 @@ AllocationContext AllocationContextTracker::GetContextSnapshot() {
}
}
- ctx.backtrace.frame_count = backtrace - std::begin(ctx.backtrace.frames);
+ ctx->backtrace.frame_count = backtrace - std::begin(ctx->backtrace.frames);
// TODO(ssid): Fix crbug.com/594803 to add file name as 3rd dimension
// (component name) in the heap profiler and not piggy back on the type name.
if (!task_contexts_.empty()) {
- ctx.type_name = task_contexts_.back();
+ ctx->type_name = task_contexts_.back();
} else if (!pseudo_stack_.empty()) {
// If task context was unavailable, then the category names are taken from
// trace events.
- ctx.type_name = pseudo_stack_.back().trace_event_category;
+ ctx->type_name = pseudo_stack_.back().trace_event_category;
+ } else {
+ ctx->type_name = nullptr;
}
- return ctx;
+ return true;
}
} // namespace trace_event
diff --git a/chromium/base/trace_event/heap_profiler_allocation_context_tracker.h b/chromium/base/trace_event/heap_profiler_allocation_context_tracker.h
index ae5f73af432..4f2a8c95026 100644
--- a/chromium/base/trace_event/heap_profiler_allocation_context_tracker.h
+++ b/chromium/base/trace_event/heap_profiler_allocation_context_tracker.h
@@ -70,8 +70,8 @@ class BASE_EXPORT AllocationContextTracker {
static void SetCurrentThreadName(const char* name);
// Starts and ends a new ignore scope between which the allocations are
- // ignored in the heap profiler. A dummy context that short circuits to
- // "tracing_overhead" is returned for these allocations.
+ // ignored by the heap profiler. GetContextSnapshot() returns false when
+ // allocations are ignored.
void begin_ignore_scope() { ignore_scope_depth_++; }
void end_ignore_scope() {
if (ignore_scope_depth_)
@@ -89,8 +89,9 @@ class BASE_EXPORT AllocationContextTracker {
void PushCurrentTaskContext(const char* context);
void PopCurrentTaskContext(const char* context);
- // Returns a snapshot of the current thread-local context.
- AllocationContext GetContextSnapshot();
+ // Fills a snapshot of the current thread-local context. Doesn't fill and
+ // returns false if allocations are being ignored.
+ bool GetContextSnapshot(AllocationContext* snapshot);
~AllocationContextTracker();
diff --git a/chromium/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc b/chromium/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
index 056aa2c001d..577f50043da 100644
--- a/chromium/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
+++ b/chromium/base/trace_event/heap_profiler_allocation_context_tracker_unittest.cc
@@ -43,9 +43,9 @@ const char kFilteringTraceConfig[] =
// in |AllocationContextTracker::GetContextSnapshot|.
template <size_t N>
void AssertBacktraceEquals(const StackFrame(&expected_backtrace)[N]) {
- AllocationContext ctx =
- AllocationContextTracker::GetInstanceForCurrentThread()
- ->GetContextSnapshot();
+ AllocationContext ctx;
+ ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
+ ->GetContextSnapshot(&ctx));
auto* actual = std::begin(ctx.backtrace.frames);
auto* actual_bottom = actual + ctx.backtrace.frame_count;
@@ -65,9 +65,9 @@ void AssertBacktraceEquals(const StackFrame(&expected_backtrace)[N]) {
void AssertBacktraceContainsOnlyThreadName() {
StackFrame t = StackFrame::FromThreadName(kThreadName);
- AllocationContext ctx =
- AllocationContextTracker::GetInstanceForCurrentThread()
- ->GetContextSnapshot();
+ AllocationContext ctx;
+ ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
+ ->GetContextSnapshot(&ctx));
ASSERT_EQ(1u, ctx.backtrace.frame_count);
ASSERT_EQ(t, ctx.backtrace.frames[0]);
@@ -243,9 +243,9 @@ TEST_F(AllocationContextTrackerTest, BacktraceTakesTop) {
{
TRACE_EVENT0("Testing", kGingerbread);
- AllocationContext ctx =
- AllocationContextTracker::GetInstanceForCurrentThread()
- ->GetContextSnapshot();
+ AllocationContext ctx;
+ ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
+ ->GetContextSnapshot(&ctx));
// The pseudo stack relies on pointer equality, not deep string comparisons.
ASSERT_EQ(t, ctx.backtrace.frames[0]);
@@ -254,9 +254,9 @@ TEST_F(AllocationContextTrackerTest, BacktraceTakesTop) {
}
{
- AllocationContext ctx =
- AllocationContextTracker::GetInstanceForCurrentThread()
- ->GetContextSnapshot();
+ AllocationContext ctx;
+ ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
+ ->GetContextSnapshot(&ctx));
ASSERT_EQ(t, ctx.backtrace.frames[0]);
ASSERT_EQ(c, ctx.backtrace.frames[1]);
ASSERT_EQ(f, ctx.backtrace.frames[11]);
@@ -269,39 +269,39 @@ TEST_F(AllocationContextTrackerTest, TrackCategoryName) {
{
// The context from the scoped task event should be used as type name.
TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION event1(kContext1);
- AllocationContext ctx1 =
- AllocationContextTracker::GetInstanceForCurrentThread()
- ->GetContextSnapshot();
+ AllocationContext ctx1;
+ ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
+ ->GetContextSnapshot(&ctx1));
ASSERT_EQ(kContext1, ctx1.type_name);
// In case of nested events, the last event's context should be used.
TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION event2(kContext2);
- AllocationContext ctx2 =
- AllocationContextTracker::GetInstanceForCurrentThread()
- ->GetContextSnapshot();
+ AllocationContext ctx2;
+ ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
+ ->GetContextSnapshot(&ctx2));
ASSERT_EQ(kContext2, ctx2.type_name);
}
{
// Type should be category name of the last seen trace event.
TRACE_EVENT0("Testing", kCupcake);
- AllocationContext ctx1 =
- AllocationContextTracker::GetInstanceForCurrentThread()
- ->GetContextSnapshot();
+ AllocationContext ctx1;
+ ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
+ ->GetContextSnapshot(&ctx1));
ASSERT_EQ("Testing", std::string(ctx1.type_name));
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("Testing"), kCupcake);
- AllocationContext ctx2 =
- AllocationContextTracker::GetInstanceForCurrentThread()
- ->GetContextSnapshot();
+ AllocationContext ctx2;
+ ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
+ ->GetContextSnapshot(&ctx2));
ASSERT_EQ(TRACE_DISABLED_BY_DEFAULT("Testing"),
std::string(ctx2.type_name));
}
// Type should be nullptr without task event.
- AllocationContext ctx =
- AllocationContextTracker::GetInstanceForCurrentThread()
- ->GetContextSnapshot();
+ AllocationContext ctx;
+ ASSERT_TRUE(AllocationContextTracker::GetInstanceForCurrentThread()
+ ->GetContextSnapshot(&ctx));
ASSERT_FALSE(ctx.type_name);
}
@@ -309,13 +309,9 @@ TEST_F(AllocationContextTrackerTest, IgnoreAllocationTest) {
TRACE_EVENT0("Testing", kCupcake);
TRACE_EVENT0("Testing", kDonut);
HEAP_PROFILER_SCOPED_IGNORE;
- AllocationContext ctx =
- AllocationContextTracker::GetInstanceForCurrentThread()
- ->GetContextSnapshot();
- const StringPiece kTracingOverhead("tracing_overhead");
- ASSERT_EQ(kTracingOverhead,
- static_cast<const char*>(ctx.backtrace.frames[0].value));
- ASSERT_EQ(1u, ctx.backtrace.frame_count);
+ AllocationContext ctx;
+ ASSERT_FALSE(AllocationContextTracker::GetInstanceForCurrentThread()
+ ->GetContextSnapshot(&ctx));
}
} // namespace trace_event
diff --git a/chromium/base/trace_event/heap_profiler_allocation_register.cc b/chromium/base/trace_event/heap_profiler_allocation_register.cc
index 2c2cd378bbd..63d40611a6f 100644
--- a/chromium/base/trace_event/heap_profiler_allocation_register.cc
+++ b/chromium/base/trace_event/heap_profiler_allocation_register.cc
@@ -60,12 +60,12 @@ size_t AllocationRegister::AddressHasher::operator () (
// The multiplicative hashing scheme from [Knuth 1998]. The value of |a| has
// been chosen carefully based on measurements with real-word data (addresses
// recorded from a Chrome trace run). It is the first prime after 2^17. For
- // |shift|, 13, 14 and 15 yield good results. These values are tuned to 2^18
- // buckets. Microbenchmarks show that this simple scheme outperforms fancy
- // hashes like Murmur3 by 20 to 40 percent.
+ // |shift|, 15 yields good results for both 2^18 and 2^19 bucket sizes.
+ // Microbenchmarks show that this simple scheme outperforms fancy hashes like
+ // Murmur3 by 20 to 40 percent.
const uintptr_t key = reinterpret_cast<uintptr_t>(address);
const uintptr_t a = 131101;
- const uintptr_t shift = 14;
+ const uintptr_t shift = 15;
const uintptr_t h = (key * a) >> shift;
return h;
}
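For reference, here is a standalone sketch of the multiplicative hash with the new shift of 15; the sample address and the modulo reduction to a bucket index are illustrative assumptions, not taken from the patch.

#include <cstdint>
#include <cstdio>

// Standalone illustration of AddressHasher's scheme: h = (key * a) >> shift,
// with a = 131101 (first prime after 2^17) and shift = 15 as in the patch.
int main() {
  const uint64_t a = 131101;
  const uint64_t shift = 15;
  const uint64_t key = 0x7ffd1234abcdULL;  // Made-up allocation address.
  const uint64_t h = (key * a) >> shift;
  std::printf("h = %llu, h mod 2^19 = %llu\n",
              static_cast<unsigned long long>(h),
              static_cast<unsigned long long>(h % (1ULL << 19)));
  return 0;
}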
diff --git a/chromium/base/trace_event/heap_profiler_allocation_register.h b/chromium/base/trace_event/heap_profiler_allocation_register.h
index 873aebfc0cb..d6a02faeaea 100644
--- a/chromium/base/trace_event/heap_profiler_allocation_register.h
+++ b/chromium/base/trace_event/heap_profiler_allocation_register.h
@@ -16,6 +16,7 @@
#include "base/process/process_metrics.h"
#include "base/template_util.h"
#include "base/trace_event/heap_profiler_allocation_context.h"
+#include "build/build_config.h"
namespace base {
namespace trace_event {
@@ -198,7 +199,9 @@ class FixedHashMap {
// the simplest solution is to just allocate a humongous chunk of address
// space.
- DCHECK_LT(next_unused_cell_, num_cells_ + 1);
+ CHECK_LT(next_unused_cell_, num_cells_ + 1)
+ << "Allocation Register hash table has too little capacity. Increase "
+ "the capacity to run the heap profiler in large sessions.";
return &cells_[idx];
}
@@ -299,10 +302,16 @@ class BASE_EXPORT AllocationRegister {
private:
friend AllocationRegisterTest;
- // Expect max 1.5M allocations. Number of buckets is 2^18 for optimal
- // hashing and should be changed together with AddressHasher.
+// Expect a lower number of allocations on mobile platforms. Load factor
+// (capacity / bucket count) is kept less than 10 for optimal hashing. The
+// number of buckets should be changed together with AddressHasher.
+#if defined(OS_ANDROID) || defined(OS_IOS)
static const size_t kAllocationBuckets = 1 << 18;
static const size_t kAllocationCapacity = 1500000;
+#else
+ static const size_t kAllocationBuckets = 1 << 19;
+ static const size_t kAllocationCapacity = 5000000;
+#endif
// 2^16 works well with BacktraceHasher. When increasing this number make
// sure BacktraceHasher still produces low number of collisions.
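The constants above keep the load factor (capacity / bucket count) below 10 on both configurations: 1,500,000 / 2^18 ≈ 5.7 on mobile and 5,000,000 / 2^19 ≈ 9.5 elsewhere. A compile-time restatement of that arithmetic (a sketch; these asserts are not part of the patch):

#include <cstddef>

// Illustrative only: the constants mirror the patch, the asserts do not.
constexpr std::size_t kMobileBuckets = 1 << 18;    // Android / iOS
constexpr std::size_t kMobileCapacity = 1500000;   // ~5.7 entries per bucket
constexpr std::size_t kDesktopBuckets = 1 << 19;   // other platforms
constexpr std::size_t kDesktopCapacity = 5000000;  // ~9.5 entries per bucket
static_assert(kMobileCapacity / kMobileBuckets < 10, "load factor under 10");
static_assert(kDesktopCapacity / kDesktopBuckets < 10, "load factor under 10");

int main() { return 0; }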
diff --git a/chromium/base/trace_event/heap_profiler_event_filter.cc b/chromium/base/trace_event/heap_profiler_event_filter.cc
new file mode 100644
index 00000000000..6c91c91b136
--- /dev/null
+++ b/chromium/base/trace_event/heap_profiler_event_filter.cc
@@ -0,0 +1,67 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/heap_profiler_event_filter.h"
+
+#include "base/trace_event/category_registry.h"
+#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
+#include "base/trace_event/trace_category.h"
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_event_impl.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+inline bool IsPseudoStackEnabled() {
+ return AllocationContextTracker::capture_mode() ==
+ AllocationContextTracker::CaptureMode::PSEUDO_STACK;
+}
+
+inline AllocationContextTracker* GetThreadLocalTracker() {
+ return AllocationContextTracker::GetInstanceForCurrentThread();
+}
+
+} // namespace
+
+// static
+const char HeapProfilerEventFilter::kName[] = "heap_profiler_predicate";
+
+HeapProfilerEventFilter::HeapProfilerEventFilter() {}
+HeapProfilerEventFilter::~HeapProfilerEventFilter() {}
+
+bool HeapProfilerEventFilter::FilterTraceEvent(
+ const TraceEvent& trace_event) const {
+ if (!IsPseudoStackEnabled())
+ return true;
+
+ // TODO(primiano): Add support for events with copied name crbug.com/581079.
+ if (trace_event.flags() & TRACE_EVENT_FLAG_COPY)
+ return true;
+
+ const auto* category = CategoryRegistry::GetCategoryByStatePtr(
+ trace_event.category_group_enabled());
+ AllocationContextTracker::PseudoStackFrame frame = {category->name(),
+ trace_event.name()};
+ if (trace_event.phase() == TRACE_EVENT_PHASE_BEGIN ||
+ trace_event.phase() == TRACE_EVENT_PHASE_COMPLETE) {
+ GetThreadLocalTracker()->PushPseudoStackFrame(frame);
+ } else if (trace_event.phase() == TRACE_EVENT_PHASE_END) {
+ // The pop for |TRACE_EVENT_PHASE_COMPLETE| events is in |EndEvent|.
+ GetThreadLocalTracker()->PopPseudoStackFrame(frame);
+ }
+ // Do not filter out any events and always return true. TraceLog adds the
+ // event only if it is enabled for recording.
+ return true;
+}
+
+void HeapProfilerEventFilter::EndEvent(const char* category_name,
+ const char* event_name) const {
+ if (IsPseudoStackEnabled())
+ GetThreadLocalTracker()->PopPseudoStackFrame({category_name, event_name});
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/chromium/base/trace_event/heap_profiler_event_filter.h b/chromium/base/trace_event/heap_profiler_event_filter.h
new file mode 100644
index 00000000000..47368a1b070
--- /dev/null
+++ b/chromium/base/trace_event/heap_profiler_event_filter.h
@@ -0,0 +1,40 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_HEAP_PROFILER_EVENT_FILTER_H_
+#define BASE_TRACE_EVENT_HEAP_PROFILER_EVENT_FILTER_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/trace_event/trace_event_filter.h"
+
+namespace base {
+namespace trace_event {
+
+class TraceEvent;
+
+// This filter unconditionally accepts all events and pushes/pops them from the
+// thread-local AllocationContextTracker instance as they are seen.
+// This is used to cheaply construct the heap profiler pseudo stack without
+// having to actually record all events.
+class BASE_EXPORT HeapProfilerEventFilter : public TraceEventFilter {
+ public:
+ static const char kName[];
+
+ HeapProfilerEventFilter();
+ ~HeapProfilerEventFilter() override;
+
+ // TraceEventFilter implementation.
+ bool FilterTraceEvent(const TraceEvent& trace_event) const override;
+ void EndEvent(const char* category_name,
+ const char* event_name) const override;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HeapProfilerEventFilter);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_HEAP_PROFILER_EVENT_FILTER_H_
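For context, the filter is selected by its kName when a filtering trace config is built. A condensed sketch follows, assuming only the TraceConfig::EventFilterConfig calls that appear in the memory_dump_manager.cc hunk later in this patch; the function name is illustrative.

#include "base/trace_event/heap_profiler_event_filter.h"
#include "base/trace_event/trace_config.h"

namespace base {
namespace trace_event {

// Sketch: builds a filter config that names HeapProfilerEventFilter, mirroring
// MemoryDumpManager::Initialize() in this patch.
void ConfigureHeapProfilerFilterExample() {
  TraceConfig::EventFilterConfig filter_config(HeapProfilerEventFilter::kName);
  // Apply the filter to every category; the real code also adds the
  // memory-infra category explicitly.
  filter_config.AddIncludedCategory("*");
  // The config is then attached to a TraceConfig and tracing is enabled in
  // TraceLog::FILTERING_MODE so FilterTraceEvent()/EndEvent() get invoked.
}

}  // namespace trace_event
}  // namespace base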
diff --git a/chromium/base/trace_event/heap_profiler_stack_frame_deduplicator.cc b/chromium/base/trace_event/heap_profiler_stack_frame_deduplicator.cc
index 49a235051c0..fc5da0d1dde 100644
--- a/chromium/base/trace_event/heap_profiler_stack_frame_deduplicator.cc
+++ b/chromium/base/trace_event/heap_profiler_stack_frame_deduplicator.cc
@@ -11,6 +11,7 @@
#include <utility>
#include "base/strings/stringprintf.h"
+#include "base/trace_event/memory_usage_estimator.h"
#include "base/trace_event/trace_event_argument.h"
#include "base/trace_event/trace_event_memory_overhead.h"
@@ -23,6 +24,10 @@ StackFrameDeduplicator::FrameNode::FrameNode(StackFrame frame,
StackFrameDeduplicator::FrameNode::FrameNode(const FrameNode& other) = default;
StackFrameDeduplicator::FrameNode::~FrameNode() {}
+size_t StackFrameDeduplicator::FrameNode::EstimateMemoryUsage() const {
+ return base::trace_event::EstimateMemoryUsage(children);
+}
+
StackFrameDeduplicator::StackFrameDeduplicator() {}
StackFrameDeduplicator::~StackFrameDeduplicator() {}
@@ -116,19 +121,10 @@ void StackFrameDeduplicator::AppendAsTraceFormat(std::string* out) const {
void StackFrameDeduplicator::EstimateTraceMemoryOverhead(
TraceEventMemoryOverhead* overhead) {
- // The sizes here are only estimates; they fail to take into account the
- // overhead of the tree nodes for the map, but as an estimate this should be
- // fine.
- size_t maps_size = roots_.size() * sizeof(std::pair<StackFrame, int>);
- size_t frames_allocated = frames_.capacity() * sizeof(FrameNode);
- size_t frames_resident = frames_.size() * sizeof(FrameNode);
-
- for (const FrameNode& node : frames_)
- maps_size += node.children.size() * sizeof(std::pair<StackFrame, int>);
-
+ size_t memory_usage =
+ EstimateMemoryUsage(frames_) + EstimateMemoryUsage(roots_);
overhead->Add("StackFrameDeduplicator",
- sizeof(StackFrameDeduplicator) + maps_size + frames_allocated,
- sizeof(StackFrameDeduplicator) + maps_size + frames_resident);
+ sizeof(StackFrameDeduplicator) + memory_usage);
}
} // namespace trace_event
diff --git a/chromium/base/trace_event/heap_profiler_stack_frame_deduplicator.h b/chromium/base/trace_event/heap_profiler_stack_frame_deduplicator.h
index 4932534e1d7..66d430f2ee3 100644
--- a/chromium/base/trace_event/heap_profiler_stack_frame_deduplicator.h
+++ b/chromium/base/trace_event/heap_profiler_stack_frame_deduplicator.h
@@ -34,6 +34,8 @@ class BASE_EXPORT StackFrameDeduplicator : public ConvertableToTraceFormat {
FrameNode(const FrameNode& other);
~FrameNode();
+ size_t EstimateMemoryUsage() const;
+
StackFrame frame;
// The index of the parent stack frame in |frames_|, or -1 if there is no
diff --git a/chromium/base/trace_event/heap_profiler_type_name_deduplicator.cc b/chromium/base/trace_event/heap_profiler_type_name_deduplicator.cc
index 9d56ce7cf7b..a6dab51ad2a 100644
--- a/chromium/base/trace_event/heap_profiler_type_name_deduplicator.cc
+++ b/chromium/base/trace_event/heap_profiler_type_name_deduplicator.cc
@@ -12,6 +12,7 @@
#include "base/json/string_escape.h"
#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
+#include "base/trace_event/memory_usage_estimator.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_memory_overhead.h"
@@ -105,12 +106,9 @@ void TypeNameDeduplicator::AppendAsTraceFormat(std::string* out) const {
void TypeNameDeduplicator::EstimateTraceMemoryOverhead(
TraceEventMemoryOverhead* overhead) {
- // The size here is only an estimate; it fails to take into account the size
- // of the tree nodes for the map, but as an estimate this should be fine.
- size_t map_size = type_ids_.size() * sizeof(std::pair<const char*, int>);
-
+ size_t memory_usage = EstimateMemoryUsage(type_ids_);
overhead->Add("TypeNameDeduplicator",
- sizeof(TypeNameDeduplicator) + map_size);
+ sizeof(TypeNameDeduplicator) + memory_usage);
}
} // namespace trace_event
diff --git a/chromium/base/trace_event/malloc_dump_provider.cc b/chromium/base/trace_event/malloc_dump_provider.cc
index 7d0cb579315..4683694d6bb 100644
--- a/chromium/base/trace_event/malloc_dump_provider.cc
+++ b/chromium/base/trace_event/malloc_dump_provider.cc
@@ -96,98 +96,37 @@ AllocatorDispatch g_allocator_hooks = {
#if defined(OS_WIN)
// A structure containing some information about a given heap.
struct WinHeapInfo {
- HANDLE heap_id;
size_t committed_size;
size_t uncommitted_size;
size_t allocated_size;
size_t block_count;
};
-bool GetHeapInformation(WinHeapInfo* heap_info,
- const std::set<void*>& block_to_skip) {
- // NOTE: crbug.com/464430
- // As a part of the Client/Server Runtine Subsystem (CSRSS) lockdown in the
- // referenced bug, it will invalidate the heap used by CSRSS. The author has
- // not found a way to clean up an invalid heap handle, so it will be left in
- // the process's heap list. Therefore we need to support when there is this
- // invalid heap handle in the heap list.
- // HeapLock implicitly checks certain aspects of the HEAP structure, such as
- // the signature. If this passes, we assume that this heap is valid and is
- // not the one owned by CSRSS.
- if (!::HeapLock(heap_info->heap_id)) {
- return false;
- }
- PROCESS_HEAP_ENTRY heap_entry;
- heap_entry.lpData = nullptr;
- // Walk over all the entries in this heap.
- while (::HeapWalk(heap_info->heap_id, &heap_entry) != FALSE) {
- if (block_to_skip.count(heap_entry.lpData) == 1)
- continue;
- if ((heap_entry.wFlags & PROCESS_HEAP_ENTRY_BUSY) != 0) {
- heap_info->allocated_size += heap_entry.cbData;
- heap_info->block_count++;
- } else if ((heap_entry.wFlags & PROCESS_HEAP_REGION) != 0) {
- heap_info->committed_size += heap_entry.Region.dwCommittedSize;
- heap_info->uncommitted_size += heap_entry.Region.dwUnCommittedSize;
- }
- }
- CHECK(::HeapUnlock(heap_info->heap_id) == TRUE);
- return true;
-}
-
-void WinHeapMemoryDumpImpl(WinHeapInfo* all_heap_info) {
-// This method might be flaky for 2 reasons:
-// - GetProcessHeaps is racy by design. It returns a snapshot of the
-// available heaps, but there's no guarantee that that snapshot remains
-// valid. If a heap disappears between GetProcessHeaps() and HeapWalk()
-// then chaos should be assumed. This flakyness is acceptable for tracing.
-// - The MSDN page for HeapLock says: "If the HeapLock function is called on
-// a heap created with the HEAP_NO_SERIALIZATION flag, the results are
-// undefined."
-// - Note that multiple heaps occur on Windows primarily because system and
-// 3rd party DLLs will each create their own private heap. It's possible to
-// retrieve the heap the CRT allocates from and report specifically on that
-// heap. It's interesting to report all heaps, as e.g. loading or invoking
-// on a Windows API may consume memory from a private heap.
+// NOTE: crbug.com/665516
+// Unfortunately, there is no safe way to collect information from secondary
+// heaps due to limitations and racy nature of this piece of WinAPI.
+void WinHeapMemoryDumpImpl(WinHeapInfo* crt_heap_info) {
#if defined(SYZYASAN)
if (base::debug::IsBinaryInstrumented())
return;
#endif
- // Retrieves the number of heaps in the current process.
- DWORD number_of_heaps = ::GetProcessHeaps(0, NULL);
-
- // Try to retrieve a handle to all the heaps owned by this process. Returns
- // false if the number of heaps has changed.
- //
- // This is inherently racy as is, but it's not something that we observe a lot
- // in Chrome, the heaps tend to be created at startup only.
- std::unique_ptr<HANDLE[]> all_heaps(new HANDLE[number_of_heaps]);
- if (::GetProcessHeaps(number_of_heaps, all_heaps.get()) != number_of_heaps)
- return;
-
- // Skip the pointer to the heap array to avoid accounting the memory used by
- // this dump provider.
- std::set<void*> block_to_skip;
- block_to_skip.insert(all_heaps.get());
-
- // Retrieves some metrics about each heap.
- size_t heap_info_errors = 0;
- for (size_t i = 0; i < number_of_heaps; ++i) {
- WinHeapInfo heap_info = {0};
- heap_info.heap_id = all_heaps[i];
- if (GetHeapInformation(&heap_info, block_to_skip)) {
- all_heap_info->allocated_size += heap_info.allocated_size;
- all_heap_info->committed_size += heap_info.committed_size;
- all_heap_info->uncommitted_size += heap_info.uncommitted_size;
- all_heap_info->block_count += heap_info.block_count;
- } else {
- ++heap_info_errors;
- // See notes in GetHeapInformation() but we only expect 1 heap to not be
- // able to be read.
- CHECK_EQ(1u, heap_info_errors);
+ // Iterate through whichever heap our CRT is using.
+ HANDLE crt_heap = reinterpret_cast<HANDLE>(_get_heap_handle());
+ ::HeapLock(crt_heap);
+ PROCESS_HEAP_ENTRY heap_entry;
+ heap_entry.lpData = nullptr;
+ // Walk over all the entries in the main heap.
+ while (::HeapWalk(crt_heap, &heap_entry) != FALSE) {
+ if ((heap_entry.wFlags & PROCESS_HEAP_ENTRY_BUSY) != 0) {
+ crt_heap_info->allocated_size += heap_entry.cbData;
+ crt_heap_info->block_count++;
+ } else if ((heap_entry.wFlags & PROCESS_HEAP_REGION) != 0) {
+ crt_heap_info->committed_size += heap_entry.Region.dwCommittedSize;
+ crt_heap_info->uncommitted_size += heap_entry.Region.dwUnCommittedSize;
}
}
+ CHECK(::HeapUnlock(crt_heap) == TRUE);
}
#endif // defined(OS_WIN)
} // namespace
@@ -237,17 +176,17 @@ bool MallocDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
// See crrev.com/1531463004 for detailed explanation.
resident_size = stats.max_size_in_use;
#elif defined(OS_WIN)
- WinHeapInfo all_heap_info = {};
- WinHeapMemoryDumpImpl(&all_heap_info);
+ WinHeapInfo main_heap_info = {};
+ WinHeapMemoryDumpImpl(&main_heap_info);
total_virtual_size =
- all_heap_info.committed_size + all_heap_info.uncommitted_size;
+ main_heap_info.committed_size + main_heap_info.uncommitted_size;
// Resident size is approximated with committed heap size. Note that it is
// possible to do this with better accuracy on windows by intersecting the
// working set with the virtual memory ranges occupied by the heap. It's not
// clear that this is worth it, as it's fairly expensive to do.
- resident_size = all_heap_info.committed_size;
- allocated_objects_size = all_heap_info.allocated_size;
- allocated_objects_count = all_heap_info.block_count;
+ resident_size = main_heap_info.committed_size;
+ allocated_objects_size = main_heap_info.allocated_size;
+ allocated_objects_count = main_heap_info.block_count;
#else
struct mallinfo info = mallinfo();
DCHECK_GE(info.arena + info.hblkhd, info.uordblks);
@@ -358,7 +297,10 @@ void MallocDumpProvider::InsertAllocation(void* address, size_t size) {
auto* tracker = AllocationContextTracker::GetInstanceForCurrentThread();
if (!tracker)
return;
- AllocationContext context = tracker->GetContextSnapshot();
+
+ AllocationContext context;
+ if (!tracker->GetContextSnapshot(&context))
+ return;
AutoLock lock(allocation_register_lock_);
if (!allocation_register_)
diff --git a/chromium/base/trace_event/memory_allocator_dump.h b/chromium/base/trace_event/memory_allocator_dump.h
index 2e6b08ab32f..c781f071bba 100644
--- a/chromium/base/trace_event/memory_allocator_dump.h
+++ b/chromium/base/trace_event/memory_allocator_dump.h
@@ -69,11 +69,6 @@ class BASE_EXPORT MemoryAllocatorDump {
// Called at trace generation time to populate the TracedValue.
void AsValueInto(TracedValue* value) const;
- // Get the ProcessMemoryDump instance that owns this.
- ProcessMemoryDump* process_memory_dump() const {
- return process_memory_dump_;
- }
-
// Use enum Flags to set values.
void set_flags(int flags) { flags_ |= flags; }
void clear_flags(int flags) { flags_ &= ~flags; }
diff --git a/chromium/base/trace_event/memory_dump_manager.cc b/chromium/base/trace_event/memory_dump_manager.cc
index 28d5d56c59d..18b255ab8b1 100644
--- a/chromium/base/trace_event/memory_dump_manager.cc
+++ b/chromium/base/trace_event/memory_dump_manager.cc
@@ -19,6 +19,7 @@
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/heap_profiler.h"
#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
+#include "base/trace_event/heap_profiler_event_filter.h"
#include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
#include "base/trace_event/heap_profiler_type_name_deduplicator.h"
#include "base/trace_event/malloc_dump_provider.h"
@@ -46,6 +47,38 @@ const unsigned char kTraceEventArgTypes[] = {TRACE_VALUE_TYPE_CONVERTABLE};
StaticAtomicSequenceNumber g_next_guid;
MemoryDumpManager* g_instance_for_testing = nullptr;
+// The list of names of dump providers that are blacklisted from the strict
+// thread affinity check on unregistration. These providers could potentially
+// cause crashes on build bots if they do not unregister on the right thread.
+// TODO(ssid): Fix all the dump providers to unregister if needed and clear the
+// blacklist, crbug.com/643438.
+const char* const kStrictThreadCheckBlacklist[] = {
+ "AndroidGraphics",
+ "BrowserGpuMemoryBufferManager",
+ "ClientDiscardableSharedMemoryManager",
+ "ContextProviderCommandBuffer",
+ "DiscardableSharedMemoryManager",
+ "FontCaches",
+ "GpuMemoryBufferVideoFramePool",
+ "IndexedDBBackingStore",
+ "Sql",
+ "ThreadLocalEventBuffer",
+ "TraceLog",
+ "URLRequestContext",
+ "V8Isolate",
+ "VpxVideoDecoder",
+ "cc::ResourcePool",
+ "cc::ResourceProvider",
+ "cc::SoftwareImageDecodeCache",
+ "cc::StagingBufferPool",
+ "gpu::BufferManager",
+ "gpu::MappedMemoryManager",
+ "gpu::RenderbufferManager",
+ "gpu::TransferBufferManager",
+ "sql::Connection",
+ "BlacklistTestDumpProvider" // for testing
+};
+
// Callback wrapper to hook upon the completion of RequestGlobalDump() and
// inject trace markers.
void OnGlobalDumpDone(MemoryDumpCallback wrapped_callback,
@@ -137,6 +170,9 @@ MemoryDumpManager::MemoryDumpManager()
// At this point the command line may not be initialized but we try to
// enable the heap profiler to capture allocations as soon as possible.
EnableHeapProfilingIfNeeded();
+
+ strict_thread_check_blacklist_.insert(std::begin(kStrictThreadCheckBlacklist),
+ std::end(kStrictThreadCheckBlacklist));
}
MemoryDumpManager::~MemoryDumpManager() {
@@ -210,7 +246,7 @@ void MemoryDumpManager::Initialize(MemoryDumpManagerDelegate* delegate,
!(TraceLog::GetInstance()->enabled_modes() & TraceLog::FILTERING_MODE)) {
// Create trace config with heap profiling filter.
TraceConfig::EventFilterConfig heap_profiler_filter_config(
- TraceLog::TraceEventFilter::kHeapProfilerPredicate);
+ HeapProfilerEventFilter::kName);
heap_profiler_filter_config.AddIncludedCategory("*");
heap_profiler_filter_config.AddIncludedCategory(
MemoryDumpManager::kTraceCategory);
@@ -276,6 +312,11 @@ void MemoryDumpManager::RegisterDumpProviderInternal(
new MemoryDumpProviderInfo(mdp, name, std::move(task_runner), options,
whitelisted_for_background_mode);
+ if (options.is_fast_polling_supported) {
+ DCHECK(!mdpinfo->task_runner) << "MemoryDumpProviders capable of fast "
+ "polling must NOT be thread bound.";
+ }
+
{
AutoLock lock(lock_);
bool already_registered = !dump_providers_.insert(mdpinfo).second;
@@ -283,6 +324,15 @@ void MemoryDumpManager::RegisterDumpProviderInternal(
// path for RenderThreadImpl::Init().
if (already_registered)
return;
+
+ // The list of polling MDPs is populated in OnTraceLogEnabled(). This code
+ // deals with the case of an MDP capable of fast polling that is registered
+ // after OnTraceLogEnabled().
+ if (options.is_fast_polling_supported && dump_thread_) {
+ dump_thread_->task_runner()->PostTask(
+ FROM_HERE, Bind(&MemoryDumpManager::RegisterPollingMDPOnDumpThread,
+ Unretained(this), mdpinfo));
+ }
}
if (heap_profiling_enabled_)
@@ -321,9 +371,18 @@ void MemoryDumpManager::UnregisterDumpProviderInternal(
// - At the end of this function, if no dump is in progress.
// - Either in SetupNextMemoryDump() or InvokeOnMemoryDump() when MDPInfo is
// removed from |pending_dump_providers|.
+ // - When the provider is removed from |dump_providers_for_polling_|.
DCHECK(!(*mdp_iter)->owned_dump_provider);
(*mdp_iter)->owned_dump_provider = std::move(owned_mdp);
- } else if (subtle::NoBarrier_Load(&memory_tracing_enabled_)) {
+ } else if (strict_thread_check_blacklist_.count((*mdp_iter)->name) == 0 ||
+ subtle::NoBarrier_Load(&memory_tracing_enabled_)) {
+ // If dump provider's name is on |strict_thread_check_blacklist_|, then the
+ // DCHECK is fired only when tracing is enabled. Otherwise the DCHECK is
+ // fired even when tracing is not enabled (stricter).
+ // TODO(ssid): Remove this condition after removing all the dump providers
+ // in the blacklist and the buildbots are no longer flakily hitting the
+ // DCHECK, crbug.com/643438.
+
// If you hit this DCHECK, your dump provider has a bug.
// Unregistration of a MemoryDumpProvider is safe only if:
// - The MDP has specified a sequenced task runner affinity AND the
@@ -339,6 +398,13 @@ void MemoryDumpManager::UnregisterDumpProviderInternal(
<< "unregister itself in a racy way. Please file a crbug.";
}
+ if ((*mdp_iter)->options.is_fast_polling_supported && dump_thread_) {
+ DCHECK(take_mdp_ownership_and_delete_async);
+ dump_thread_->task_runner()->PostTask(
+ FROM_HERE, Bind(&MemoryDumpManager::UnregisterPollingMDPOnDumpThread,
+ Unretained(this), *mdp_iter));
+ }
+
// The MDPInfo instance can still be referenced by the
// |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason
// the MDPInfo is flagged as disabled. It will cause InvokeOnMemoryDump()
@@ -348,6 +414,20 @@ void MemoryDumpManager::UnregisterDumpProviderInternal(
dump_providers_.erase(mdp_iter);
}
+void MemoryDumpManager::RegisterPollingMDPOnDumpThread(
+ scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) {
+ AutoLock lock(lock_);
+ dump_providers_for_polling_.insert(mdpinfo);
+}
+
+void MemoryDumpManager::UnregisterPollingMDPOnDumpThread(
+ scoped_refptr<MemoryDumpManager::MemoryDumpProviderInfo> mdpinfo) {
+ mdpinfo->dump_provider->SuspendFastMemoryPolling();
+
+ AutoLock lock(lock_);
+ dump_providers_for_polling_.erase(mdpinfo);
+}
+
void MemoryDumpManager::RequestGlobalDump(
MemoryDumpType dump_type,
MemoryDumpLevelOfDetail level_of_detail,
@@ -601,6 +681,18 @@ void MemoryDumpManager::InvokeOnMemoryDump(
SetupNextMemoryDump(std::move(pmd_async_state));
}
+void MemoryDumpManager::PollFastMemoryTotal(uint64_t* memory_total) {
+ *memory_total = 0;
+ // Note that we call PollFastMemoryTotal() even if the dump provider is
+ // disabled (unregistered). This is to avoid taking lock while polling.
+ for (const auto& mdpinfo : dump_providers_for_polling_) {
+ uint64_t value = 0;
+ mdpinfo->dump_provider->PollFastMemoryTotal(&value);
+ *memory_total += value;
+ }
+ return;
+}
+
// static
void MemoryDumpManager::FinalizeDumpAndAddToTrace(
std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
@@ -714,16 +806,16 @@ void MemoryDumpManager::OnTraceLogEnabled() {
DCHECK(!dump_thread_);
dump_thread_ = std::move(dump_thread);
+ dump_providers_for_polling_.clear();
+ for (const auto& mdpinfo : dump_providers_) {
+ if (mdpinfo->options.is_fast_polling_supported)
+ dump_providers_for_polling_.insert(mdpinfo);
+ }
+
subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);
- // TODO(primiano): This is a temporary hack to disable periodic memory dumps
- // when running memory benchmarks until telemetry uses TraceConfig to
- // enable/disable periodic dumps. See crbug.com/529184 .
- if (!is_coordinator_ ||
- CommandLine::ForCurrentProcess()->HasSwitch(
- "enable-memory-benchmarking")) {
+ if (!is_coordinator_)
return;
- }
}
// Enable periodic dumps if necessary.
@@ -734,6 +826,8 @@ void MemoryDumpManager::OnTraceLogDisabled() {
// There might be a memory dump in progress while this happens. Therefore,
// ensure that the MDM state which depends on the tracing enabled / disabled
// state is always accessed by the dumping methods holding the |lock_|.
+ if (!subtle::NoBarrier_Load(&memory_tracing_enabled_))
+ return;
subtle::NoBarrier_Store(&memory_tracing_enabled_, 0);
std::unique_ptr<Thread> dump_thread;
{
@@ -747,6 +841,15 @@ void MemoryDumpManager::OnTraceLogDisabled() {
periodic_dump_timer_.Stop();
if (dump_thread)
dump_thread->Stop();
+
+ // |dump_providers_for_polling_| must be cleared only after the dump thread is
+ // stopped (polling tasks are done).
+ {
+ AutoLock lock(lock_);
+ for (const auto& mdpinfo : dump_providers_for_polling_)
+ mdpinfo->dump_provider->SuspendFastMemoryPolling();
+ dump_providers_for_polling_.clear();
+ }
}
bool MemoryDumpManager::IsDumpModeAllowed(MemoryDumpLevelOfDetail dump_mode) {
@@ -841,7 +944,9 @@ void MemoryDumpManager::PeriodicGlobalDumpTimer::Start(
DCHECK_LE(triggers_list.size(), 3u);
auto* mdm = MemoryDumpManager::GetInstance();
for (const TraceConfig::MemoryDumpConfig::Trigger& config : triggers_list) {
- DCHECK_NE(0u, config.periodic_interval_ms);
+ DCHECK_NE(0u, config.min_time_between_dumps_ms);
+ DCHECK_EQ(MemoryDumpType::PERIODIC_INTERVAL, config.trigger_type)
+ << "Only periodic_interval triggers are suppported";
switch (config.level_of_detail) {
case MemoryDumpLevelOfDetail::BACKGROUND:
DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::BACKGROUND));
@@ -849,16 +954,16 @@ void MemoryDumpManager::PeriodicGlobalDumpTimer::Start(
case MemoryDumpLevelOfDetail::LIGHT:
DCHECK_EQ(0u, light_dump_period_ms);
DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::LIGHT));
- light_dump_period_ms = config.periodic_interval_ms;
+ light_dump_period_ms = config.min_time_between_dumps_ms;
break;
case MemoryDumpLevelOfDetail::DETAILED:
DCHECK_EQ(0u, heavy_dump_period_ms);
DCHECK(mdm->IsDumpModeAllowed(MemoryDumpLevelOfDetail::DETAILED));
- heavy_dump_period_ms = config.periodic_interval_ms;
+ heavy_dump_period_ms = config.min_time_between_dumps_ms;
break;
}
min_timer_period_ms =
- std::min(min_timer_period_ms, config.periodic_interval_ms);
+ std::min(min_timer_period_ms, config.min_time_between_dumps_ms);
}
DCHECK_EQ(0u, light_dump_period_ms % min_timer_period_ms);
diff --git a/chromium/base/trace_event/memory_dump_manager.h b/chromium/base/trace_event/memory_dump_manager.h
index 06b772c6e4b..61d1dd085da 100644
--- a/chromium/base/trace_event/memory_dump_manager.h
+++ b/chromium/base/trace_event/memory_dump_manager.h
@@ -94,7 +94,8 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
// This method takes ownership of the dump provider and guarantees that:
// - The |mdp| will be deleted at some point in the near future.
// - Its deletion will not happen concurrently with the OnMemoryDump() call.
- // Note that OnMemoryDump() calls can still happen after this method returns.
+ // Note that OnMemoryDump() and PollFastMemoryTotal() calls can still happen
+ // after this method returns.
void UnregisterAndDeleteDumpProviderSoon(
std::unique_ptr<MemoryDumpProvider> mdp);
@@ -329,6 +330,13 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
// runner.
void InvokeOnMemoryDump(ProcessMemoryDumpAsyncState* owned_pmd_async_state);
+ // Records a quick total memory usage in |memory_total|. This is used to track
+ // and detect peaks in the memory usage of the process without having to
+ // record all data from dump providers. The value is approximate, trading
+ // accuracy for speed, and is not consistent with the rest of the memory-infra
+ // metrics. Must be called on the dump thread.
+ void PollFastMemoryTotal(uint64_t* memory_total);
+
// Helper for RegisterDumpProvider* functions.
void RegisterDumpProviderInternal(
MemoryDumpProvider* mdp,
@@ -340,13 +348,29 @@ class BASE_EXPORT MemoryDumpManager : public TraceLog::EnabledStateObserver {
void UnregisterDumpProviderInternal(MemoryDumpProvider* mdp,
bool take_mdp_ownership_and_delete_async);
+ // Adds / removes a provider that supports polling to / from
+ // |dump_providers_for_polling_|.
+ void RegisterPollingMDPOnDumpThread(
+ scoped_refptr<MemoryDumpProviderInfo> mdpinfo);
+ void UnregisterPollingMDPOnDumpThread(
+ scoped_refptr<MemoryDumpProviderInfo> mdpinfo);
+
// An ordered set of registered MemoryDumpProviderInfo(s), sorted by task
// runner affinity (MDPs belonging to the same task runners are adjacent).
MemoryDumpProviderInfo::OrderedSet dump_providers_;
+ // A copy of the mdpinfo entries that support polling. It must be accessed
+ // only on the dump thread, if a dump thread exists.
+ MemoryDumpProviderInfo::OrderedSet dump_providers_for_polling_;
+
// Shared among all the PMDs to keep state scoped to the tracing session.
scoped_refptr<MemoryDumpSessionState> session_state_;
+ // The names of dump providers that are blacklisted from the strict
+ // thread-affinity check on unregistration.
+ std::unordered_set<StringPiece, StringPieceHash>
+ strict_thread_check_blacklist_;
+
MemoryDumpManagerDelegate* delegate_; // Not owned.
// When true, this instance is in charge of coordinating periodic dumps.
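
A minimal sketch of how the polling path declared above plausibly fits together, reconstructed from the header comments rather than copied from this patch: PollFastMemoryTotal() runs on the dump thread and sums the fast estimates of every entry in |dump_providers_for_polling_|.

    void MemoryDumpManager::PollFastMemoryTotal(uint64_t* memory_total) {
      // Illustrative only; the real definition lives in memory_dump_manager.cc.
      *memory_total = 0;
      for (const auto& mdpinfo : dump_providers_for_polling_) {
        uint64_t value = 0;
        mdpinfo->dump_provider->PollFastMemoryTotal(&value);
        *memory_total += value;
      }
    }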
diff --git a/chromium/base/trace_event/memory_dump_manager_unittest.cc b/chromium/base/trace_event/memory_dump_manager_unittest.cc
index 3ea8ac28fb8..e8c33af3e06 100644
--- a/chromium/base/trace_event/memory_dump_manager_unittest.cc
+++ b/chromium/base/trace_event/memory_dump_manager_unittest.cc
@@ -72,8 +72,10 @@ void RegisterDumpProvider(
mdm->set_dumper_registrations_ignored_for_testing(true);
}
-void RegisterDumpProvider(MemoryDumpProvider* mdp) {
- RegisterDumpProvider(mdp, nullptr, MemoryDumpProvider::Options());
+void RegisterDumpProvider(
+ MemoryDumpProvider* mdp,
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
+ RegisterDumpProvider(mdp, task_runner, MemoryDumpProvider::Options());
}
void RegisterDumpProviderWithSequencedTaskRunner(
@@ -140,6 +142,8 @@ class MockMemoryDumpProvider : public MemoryDumpProvider {
MOCK_METHOD0(Destructor, void());
MOCK_METHOD2(OnMemoryDump,
bool(const MemoryDumpArgs& args, ProcessMemoryDump* pmd));
+ MOCK_METHOD1(PollFastMemoryTotal, void(uint64_t* memory_total));
+ MOCK_METHOD0(SuspendFastMemoryPolling, void());
MockMemoryDumpProvider() : enable_mock_destructor(false) {
ON_CALL(*this, OnMemoryDump(_, _))
@@ -151,6 +155,10 @@ class MockMemoryDumpProvider : public MemoryDumpProvider {
EXPECT_TRUE(pmd->session_state().get() != nullptr);
return true;
}));
+
+ ON_CALL(*this, PollFastMemoryTotal(_))
+ .WillByDefault(
+ Invoke([](uint64_t* memory_total) -> void { NOTREACHED(); }));
}
~MockMemoryDumpProvider() override {
if (enable_mock_destructor)
@@ -232,6 +240,10 @@ class MemoryDumpManagerTest : public testing::Test {
task_runner->PostTask(FROM_HERE, closure);
}
+ void PollFastMemoryTotal(uint64_t* memory_total) {
+ mdm_->PollFastMemoryTotal(memory_total);
+ }
+
protected:
void InitializeMemoryDumpManager(bool is_coordinator) {
mdm_->set_dumper_registrations_ignored_for_testing(true);
@@ -268,6 +280,10 @@ class MemoryDumpManagerTest : public testing::Test {
return MemoryDumpManager::kMaxConsecutiveFailuresCount;
}
+ scoped_refptr<SequencedTaskRunner> GetPollingTaskRunnerUnsafe() {
+ return mdm_->dump_thread_->task_runner();
+ }
+
const MemoryDumpProvider::Options kDefaultOptions;
std::unique_ptr<MemoryDumpManager> mdm_;
std::unique_ptr<MemoryDumpManagerDelegateForTesting> delegate_;
@@ -285,7 +301,7 @@ class MemoryDumpManagerTest : public testing::Test {
TEST_F(MemoryDumpManagerTest, SingleDumper) {
InitializeMemoryDumpManager(false /* is_coordinator */);
MockMemoryDumpProvider mdp;
- RegisterDumpProvider(&mdp);
+ RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
// Check that the dumper is not called if the memory category is not enabled.
EnableTracingWithLegacyCategories("foobar-but-not-memory");
@@ -326,7 +342,7 @@ TEST_F(MemoryDumpManagerTest, CheckMemoryDumpArgs) {
InitializeMemoryDumpManager(false /* is_coordinator */);
MockMemoryDumpProvider mdp;
- RegisterDumpProvider(&mdp);
+ RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
EXPECT_CALL(mdp, OnMemoryDump(IsDetailedDump(), _)).WillOnce(Return(true));
@@ -337,7 +353,7 @@ TEST_F(MemoryDumpManagerTest, CheckMemoryDumpArgs) {
// Check that requesting dumps with low level of detail actually propagates to
// OnMemoryDump() call on dump providers.
- RegisterDumpProvider(&mdp);
+ RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
EXPECT_CALL(mdp, OnMemoryDump(IsLightDump(), _)).WillOnce(Return(true));
@@ -352,8 +368,8 @@ TEST_F(MemoryDumpManagerTest, SharedSessionState) {
InitializeMemoryDumpManager(false /* is_coordinator */);
MockMemoryDumpProvider mdp1;
MockMemoryDumpProvider mdp2;
- RegisterDumpProvider(&mdp1);
- RegisterDumpProvider(&mdp2);
+ RegisterDumpProvider(&mdp1, nullptr);
+ RegisterDumpProvider(&mdp2, nullptr);
EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
const MemoryDumpSessionState* session_state =
@@ -389,7 +405,7 @@ TEST_F(MemoryDumpManagerTest, MultipleDumpers) {
MockMemoryDumpProvider mdp2;
// Enable only mdp1.
- RegisterDumpProvider(&mdp1);
+ RegisterDumpProvider(&mdp1, ThreadTaskRunnerHandle::Get());
EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
EXPECT_CALL(mdp1, OnMemoryDump(_, _)).WillOnce(Return(true));
@@ -400,7 +416,7 @@ TEST_F(MemoryDumpManagerTest, MultipleDumpers) {
// Invert: enable mdp1 and disable mdp2.
mdm_->UnregisterDumpProvider(&mdp1);
- RegisterDumpProvider(&mdp2);
+ RegisterDumpProvider(&mdp2, nullptr);
EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
EXPECT_CALL(mdp1, OnMemoryDump(_, _)).Times(0);
@@ -410,7 +426,7 @@ TEST_F(MemoryDumpManagerTest, MultipleDumpers) {
DisableTracing();
// Enable both mdp1 and mdp2.
- RegisterDumpProvider(&mdp1);
+ RegisterDumpProvider(&mdp1, nullptr);
EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
EXPECT_CALL(mdp1, OnMemoryDump(_, _)).WillOnce(Return(true));
@@ -426,7 +442,7 @@ TEST_F(MemoryDumpManagerTest, RegistrationConsistency) {
InitializeMemoryDumpManager(false /* is_coordinator */);
MockMemoryDumpProvider mdp;
- RegisterDumpProvider(&mdp);
+ RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
{
EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
@@ -448,7 +464,7 @@ TEST_F(MemoryDumpManagerTest, RegistrationConsistency) {
DisableTracing();
}
- RegisterDumpProvider(&mdp);
+ RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
mdm_->UnregisterDumpProvider(&mdp);
{
@@ -460,9 +476,9 @@ TEST_F(MemoryDumpManagerTest, RegistrationConsistency) {
DisableTracing();
}
- RegisterDumpProvider(&mdp);
+ RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
mdm_->UnregisterDumpProvider(&mdp);
- RegisterDumpProvider(&mdp);
+ RegisterDumpProvider(&mdp, ThreadTaskRunnerHandle::Get());
{
EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(1);
@@ -584,8 +600,8 @@ TEST_F(MemoryDumpManagerTest, DisableFailingDumpers) {
MockMemoryDumpProvider mdp1;
MockMemoryDumpProvider mdp2;
- RegisterDumpProvider(&mdp1);
- RegisterDumpProvider(&mdp2);
+ RegisterDumpProvider(&mdp1, nullptr);
+ RegisterDumpProvider(&mdp2, nullptr);
EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
const int kNumDumps = 2 * GetMaxConsecutiveFailuresCount();
@@ -618,7 +634,7 @@ TEST_F(MemoryDumpManagerTest, RegisterDumperWhileDumping) {
MockMemoryDumpProvider mdp1;
MockMemoryDumpProvider mdp2;
- RegisterDumpProvider(&mdp1);
+ RegisterDumpProvider(&mdp1, nullptr);
EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(4);
@@ -628,7 +644,7 @@ TEST_F(MemoryDumpManagerTest, RegisterDumperWhileDumping) {
.WillOnce(Return(true))
.WillOnce(
Invoke([&mdp2](const MemoryDumpArgs&, ProcessMemoryDump*) -> bool {
- RegisterDumpProvider(&mdp2);
+ RegisterDumpProvider(&mdp2, nullptr);
return true;
}))
.WillRepeatedly(Return(true));
@@ -736,6 +752,64 @@ TEST_F(MemoryDumpManagerTest, UnregisterDumperFromThreadWhileDumping) {
DisableTracing();
}
+TEST_F(MemoryDumpManagerTest, TestPollingOnDumpThread) {
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ std::unique_ptr<MockMemoryDumpProvider> mdp1(new MockMemoryDumpProvider());
+ std::unique_ptr<MockMemoryDumpProvider> mdp2(new MockMemoryDumpProvider());
+ mdp1->enable_mock_destructor = true;
+ mdp2->enable_mock_destructor = true;
+
+ EXPECT_CALL(*mdp1, SuspendFastMemoryPolling()).Times(1);
+ EXPECT_CALL(*mdp2, SuspendFastMemoryPolling()).Times(1);
+ EXPECT_CALL(*mdp1, Destructor());
+ EXPECT_CALL(*mdp2, Destructor());
+
+ RunLoop run_loop;
+ scoped_refptr<SingleThreadTaskRunner> test_task_runner =
+ ThreadTaskRunnerHandle::Get();
+ auto quit_closure = run_loop.QuitClosure();
+
+ int call_count = 0;
+ EXPECT_CALL(*mdp1, PollFastMemoryTotal(_))
+ .Times(4)
+ .WillRepeatedly(Invoke([&call_count, &test_task_runner,
+ quit_closure](uint64_t* total) -> void {
+ ++call_count;
+ if (call_count == 4)
+ test_task_runner->PostTask(FROM_HERE, quit_closure);
+ }));
+
+ // Depending on the order of the PostTask calls, mdp2 might be registered
+ // after all polls or in between polls.
+ EXPECT_CALL(*mdp2, PollFastMemoryTotal(_))
+ .Times(Between(0, 4))
+ .WillRepeatedly(Return());
+
+ MemoryDumpProvider::Options options;
+ options.is_fast_polling_supported = true;
+ RegisterDumpProvider(mdp1.get(), nullptr, options);
+ EnableTracingWithTraceConfig(
+ TraceConfigMemoryTestUtil::GetTraceConfig_PeakDetectionTrigger(1));
+ scoped_refptr<SequencedTaskRunner> polling_task_runner =
+ GetPollingTaskRunnerUnsafe().get();
+ ASSERT_TRUE(polling_task_runner);
+
+ uint64_t value = 0;
+ for (int i = 0; i < 4; i++) {
+ if (i == 0)
+ RegisterDumpProvider(mdp2.get(), nullptr, options);
+ if (i == 2)
+ mdm_->UnregisterAndDeleteDumpProviderSoon(std::move(mdp2));
+ polling_task_runner->PostTask(
+ FROM_HERE, Bind(&MemoryDumpManagerTest::PollFastMemoryTotal,
+ Unretained(this), &value));
+ }
+
+ run_loop.Run();
+ DisableTracing();
+ mdm_->UnregisterAndDeleteDumpProviderSoon(std::move(mdp1));
+}
+
// If a thread (with a dump provider living on it) is torn down during a dump
// its dump provider should be skipped but the dump itself should succeed.
TEST_F(MemoryDumpManagerTest, TearDownThreadWhileDumping) {
@@ -793,7 +867,7 @@ TEST_F(MemoryDumpManagerTest, TearDownThreadWhileDumping) {
TEST_F(MemoryDumpManagerTest, CallbackCalledOnFailure) {
InitializeMemoryDumpManager(false /* is_coordinator */);
MockMemoryDumpProvider mdp1;
- RegisterDumpProvider(&mdp1);
+ RegisterDumpProvider(&mdp1, nullptr);
EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(0);
EXPECT_CALL(mdp1, OnMemoryDump(_, _)).Times(0);
@@ -808,7 +882,7 @@ TEST_F(MemoryDumpManagerTest, CallbackCalledOnFailure) {
// began, it will still late-join the party (real use case: startup tracing).
TEST_F(MemoryDumpManagerTest, InitializedAfterStartOfTracing) {
MockMemoryDumpProvider mdp;
- RegisterDumpProvider(&mdp);
+ RegisterDumpProvider(&mdp, nullptr);
EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
// First check that a RequestGlobalDump() issued before the MemoryDumpManager
@@ -991,7 +1065,7 @@ TEST_F(MemoryDumpManagerTest, DisableTracingRightBeforeStartOfDump) {
// Create both same-thread MDP and another MDP with dedicated thread
MockMemoryDumpProvider mdp1;
- RegisterDumpProvider(&mdp1);
+ RegisterDumpProvider(&mdp1, nullptr);
MockMemoryDumpProvider mdp2;
RegisterDumpProvider(&mdp2, mdp_thread->task_runner(), kDefaultOptions);
EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
@@ -1141,7 +1215,7 @@ TEST_F(MemoryDumpManagerTest, TestWhitelistingMDP) {
InitializeMemoryDumpManager(false /* is_coordinator */);
SetDumpProviderWhitelistForTesting(kTestMDPWhitelist);
std::unique_ptr<MockMemoryDumpProvider> mdp1(new MockMemoryDumpProvider);
- RegisterDumpProvider(mdp1.get());
+ RegisterDumpProvider(mdp1.get(), nullptr);
std::unique_ptr<MockMemoryDumpProvider> mdp2(new MockMemoryDumpProvider);
RegisterDumpProvider(mdp2.get(), nullptr, kDefaultOptions,
kWhitelistedMDPName);
@@ -1192,5 +1266,22 @@ TEST_F(MemoryDumpManagerTest, TestBackgroundTracingSetup) {
DisableTracing();
}
+TEST_F(MemoryDumpManagerTest, TestBlacklistedUnsafeUnregistration) {
+ InitializeMemoryDumpManager(false /* is_coordinator */);
+ MockMemoryDumpProvider mdp1;
+ RegisterDumpProvider(&mdp1, nullptr, kDefaultOptions,
+ "BlacklistTestDumpProvider");
+ // Not calling UnregisterAndDeleteDumpProviderSoon() should not crash.
+ mdm_->UnregisterDumpProvider(&mdp1);
+
+ Thread thread("test thread");
+ thread.Start();
+ RegisterDumpProvider(&mdp1, thread.task_runner(), kDefaultOptions,
+ "BlacklistTestDumpProvider");
+ // Unregistering on wrong thread should not crash.
+ mdm_->UnregisterDumpProvider(&mdp1);
+ thread.Stop();
+}
+
} // namespace trace_event
} // namespace base
diff --git a/chromium/base/trace_event/memory_dump_provider.h b/chromium/base/trace_event/memory_dump_provider.h
index c899ea9c346..244319efa7b 100644
--- a/chromium/base/trace_event/memory_dump_provider.h
+++ b/chromium/base/trace_event/memory_dump_provider.h
@@ -22,7 +22,8 @@ class BASE_EXPORT MemoryDumpProvider {
struct Options {
Options()
: target_pid(kNullProcessId),
- dumps_on_single_thread_task_runner(false) {}
+ dumps_on_single_thread_task_runner(false),
+ is_fast_polling_supported(false) {}
// If the dump provider generates dumps on behalf of another process,
// |target_pid| contains the pid of that process.
@@ -34,6 +35,11 @@ class BASE_EXPORT MemoryDumpProvider {
// a SingleThreadTaskRunner, which is usually the case. It is faster to run
// all providers that run on the same thread together without thread hops.
bool dumps_on_single_thread_task_runner;
+
+ // Set to true if the dump provider implementation supports high frequency
+ // polling. Only providers running without task runner affinity are
+ // supported.
+ bool is_fast_polling_supported;
};
virtual ~MemoryDumpProvider() {}
@@ -52,6 +58,18 @@ class BASE_EXPORT MemoryDumpProvider {
// collecting extensive allocation data, if supported.
virtual void OnHeapProfilingEnabled(bool enabled) {}
+ // Quickly records the total memory usage in |memory_total|. This method is
+ // called only when the dump provider registration has
+ // |is_fast_polling_supported| set to true, and is used for high-frequency
+ // polling to detect peaks. See the comment on the |is_fast_polling_supported|
+ // option if you need to override this method.
+ virtual void PollFastMemoryTotal(uint64_t* memory_total) {}
+
+ // Indicates that fast memory polling is not going to be used in the near
+ // future and the MDP can tear down any resource kept around for fast memory
+ // polling.
+ virtual void SuspendFastMemoryPolling() {}
+
protected:
MemoryDumpProvider() {}
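
For context, a hypothetical provider that opts into the new fast-polling hooks might look like the sketch below. The class name, the atomic counter, and the "Foo" registration name are made up for illustration, and the registration call assumes the usual (mdp, name, task_runner, options) overload of RegisterDumpProvider().

    #include <atomic>

    #include "base/trace_event/memory_dump_manager.h"
    #include "base/trace_event/memory_dump_provider.h"
    #include "base/trace_event/process_memory_dump.h"

    class FooMemoryDumpProvider : public base::trace_event::MemoryDumpProvider {
     public:
      bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
                        base::trace_event::ProcessMemoryDump* pmd) override {
        return true;  // Detailed dump assembly would go here.
      }

      // Called at high frequency on the dump thread; must be cheap and
      // thread-safe, hence the relaxed atomic read.
      void PollFastMemoryTotal(uint64_t* memory_total) override {
        *memory_total = allocated_bytes_.load(std::memory_order_relaxed);
      }

      // Polling is over for this session; drop any polling-only bookkeeping.
      void SuspendFastMemoryPolling() override {}

     private:
      std::atomic<uint64_t> allocated_bytes_{0};
    };

    void RegisterFooProvider(FooMemoryDumpProvider* mdp) {
      base::trace_event::MemoryDumpProvider::Options options;
      options.is_fast_polling_supported = true;
      // Polling providers must be registered without task runner affinity.
      base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
          mdp, "Foo", nullptr, options);
    }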
diff --git a/chromium/base/trace_event/memory_dump_request_args.cc b/chromium/base/trace_event/memory_dump_request_args.cc
index e6c5b87b226..bf72bef5e4d 100644
--- a/chromium/base/trace_event/memory_dump_request_args.cc
+++ b/chromium/base/trace_event/memory_dump_request_args.cc
@@ -12,19 +12,28 @@ namespace trace_event {
// static
const char* MemoryDumpTypeToString(const MemoryDumpType& dump_type) {
switch (dump_type) {
- case MemoryDumpType::TASK_BEGIN:
- return "task_begin";
- case MemoryDumpType::TASK_END:
- return "task_end";
case MemoryDumpType::PERIODIC_INTERVAL:
return "periodic_interval";
case MemoryDumpType::EXPLICITLY_TRIGGERED:
return "explicitly_triggered";
+ case MemoryDumpType::PEAK_MEMORY_USAGE:
+ return "peak_memory_usage";
}
NOTREACHED();
return "unknown";
}
+MemoryDumpType StringToMemoryDumpType(const std::string& str) {
+ if (str == "periodic_interval")
+ return MemoryDumpType::PERIODIC_INTERVAL;
+ if (str == "explicitly_triggered")
+ return MemoryDumpType::EXPLICITLY_TRIGGERED;
+ if (str == "peak_memory_usage")
+ return MemoryDumpType::PEAK_MEMORY_USAGE;
+ NOTREACHED();
+ return MemoryDumpType::LAST;
+}
+
const char* MemoryDumpLevelOfDetailToString(
const MemoryDumpLevelOfDetail& level_of_detail) {
switch (level_of_detail) {
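
The new StringToMemoryDumpType() is the inverse of MemoryDumpTypeToString() for the three remaining dump types. A small sanity check, illustrative rather than part of the patch:

    #include "base/logging.h"
    #include "base/trace_event/memory_dump_request_args.h"

    void CheckDumpTypeRoundTrip() {
      using base::trace_event::MemoryDumpType;
      const MemoryDumpType kTypes[] = {MemoryDumpType::PERIODIC_INTERVAL,
                                       MemoryDumpType::EXPLICITLY_TRIGGERED,
                                       MemoryDumpType::PEAK_MEMORY_USAGE};
      for (MemoryDumpType type : kTypes) {
        // String -> enum must undo enum -> string for every supported value.
        DCHECK(base::trace_event::StringToMemoryDumpType(
                   base::trace_event::MemoryDumpTypeToString(type)) == type);
      }
    }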
diff --git a/chromium/base/trace_event/memory_dump_request_args.h b/chromium/base/trace_event/memory_dump_request_args.h
index f3ff9d8e3b5..69aa3ec570e 100644
--- a/chromium/base/trace_event/memory_dump_request_args.h
+++ b/chromium/base/trace_event/memory_dump_request_args.h
@@ -18,16 +18,19 @@ namespace base {
namespace trace_event {
// Captures the reason why a memory dump is being requested. This is to allow
-// selective enabling of dumps, filtering and post-processing.
+// selective enabling of dumps, filtering and post-processing. Important: this
+// must be kept consistent with
+// services/memory_infra/public/cpp/memory_infra_traits.cc.
enum class MemoryDumpType {
- TASK_BEGIN, // Dumping memory at the beginning of a message-loop task.
- TASK_END, // Dumping memory at the ending of a message-loop task.
- PERIODIC_INTERVAL, // Dumping memory at periodic intervals.
+ PERIODIC_INTERVAL, // Dumping memory at periodic intervals.
EXPLICITLY_TRIGGERED, // Non maskable dump request.
- LAST = EXPLICITLY_TRIGGERED // For IPC macros.
+ PEAK_MEMORY_USAGE, // Dumping memory at detected peak total memory usage.
+ LAST = PEAK_MEMORY_USAGE // For IPC macros.
};
// Tells the MemoryDumpProvider(s) how much detailed their dumps should be.
+// Important: this must be kept consistent with
+// services/memory_infra/public/cpp/memory_infra_traits.cc.
enum class MemoryDumpLevelOfDetail : uint32_t {
FIRST,
@@ -50,7 +53,8 @@ enum class MemoryDumpLevelOfDetail : uint32_t {
};
// Initial request arguments for a global memory dump. (see
-// MemoryDumpManager::RequestGlobalMemoryDump()).
+// MemoryDumpManager::RequestGlobalMemoryDump()). Important: this must be kept
+// consistent with services/memory_infra/public/cpp/memory_infra_traits.cc.
struct BASE_EXPORT MemoryDumpRequestArgs {
// Globally unique identifier. In multi-process dumps, all processes issue a
// local dump with the same guid. This allows the trace importers to
@@ -72,6 +76,8 @@ using MemoryDumpCallback = Callback<void(uint64_t dump_guid, bool success)>;
BASE_EXPORT const char* MemoryDumpTypeToString(const MemoryDumpType& dump_type);
+BASE_EXPORT MemoryDumpType StringToMemoryDumpType(const std::string& str);
+
BASE_EXPORT const char* MemoryDumpLevelOfDetailToString(
const MemoryDumpLevelOfDetail& level_of_detail);
diff --git a/chromium/base/trace_event/memory_dump_session_state.cc b/chromium/base/trace_event/memory_dump_session_state.cc
index b3d9a8ccfc8..3e3bedbb298 100644
--- a/chromium/base/trace_event/memory_dump_session_state.cc
+++ b/chromium/base/trace_event/memory_dump_session_state.cc
@@ -7,7 +7,7 @@
namespace base {
namespace trace_event {
-MemoryDumpSessionState::MemoryDumpSessionState() {}
+MemoryDumpSessionState::MemoryDumpSessionState() : is_polling_enabled_(false) {}
MemoryDumpSessionState::~MemoryDumpSessionState() {}
@@ -26,6 +26,10 @@ void MemoryDumpSessionState::SetTypeNameDeduplicator(
void MemoryDumpSessionState::SetMemoryDumpConfig(
const TraceConfig::MemoryDumpConfig& config) {
memory_dump_config_ = config;
+ for (const auto& trigger : config.triggers) {
+ if (trigger.trigger_type == MemoryDumpType::PEAK_MEMORY_USAGE)
+ is_polling_enabled_ = true;
+ }
}
} // namespace trace_event
diff --git a/chromium/base/trace_event/memory_dump_session_state.h b/chromium/base/trace_event/memory_dump_session_state.h
index f199ec1a2f9..29a2da9b9f5 100644
--- a/chromium/base/trace_event/memory_dump_session_state.h
+++ b/chromium/base/trace_event/memory_dump_session_state.h
@@ -46,6 +46,8 @@ class BASE_EXPORT MemoryDumpSessionState
void SetMemoryDumpConfig(const TraceConfig::MemoryDumpConfig& config);
+ bool is_polling_enabled() { return is_polling_enabled_; }
+
private:
friend class RefCountedThreadSafe<MemoryDumpSessionState>;
~MemoryDumpSessionState();
@@ -61,6 +63,9 @@ class BASE_EXPORT MemoryDumpSessionState
// The memory dump config, copied at the time when the tracing session was
// started.
TraceConfig::MemoryDumpConfig memory_dump_config_;
+
+ // True if memory polling is enabled by the config in the tracing session.
+ bool is_polling_enabled_;
};
} // namespace trace_event
diff --git a/chromium/base/trace_event/memory_infra_background_whitelist.cc b/chromium/base/trace_event/memory_infra_background_whitelist.cc
index 21bff690456..ea37739430c 100644
--- a/chromium/base/trace_event/memory_infra_background_whitelist.cc
+++ b/chromium/base/trace_event/memory_infra_background_whitelist.cc
@@ -34,6 +34,7 @@ const char* const kDumpProviderWhitelist[] = {
"Sql",
"V8Isolate",
"WinHeap",
+ "SyncDirectory",
nullptr // End of list marker.
};
@@ -50,12 +51,6 @@ const char* const kAllocatorDumpNameWhitelist[] = {
"java_heap/allocated_objects",
"leveldb/index_db/0x?",
"leveldb/leveldb_proto/0x?",
- "leveldb/leveldb_proto/BudgetManager/0x?",
- "leveldb/leveldb_proto/DomDistillerStore/0x?",
- "leveldb/leveldb_proto/GCMKeyStore/0x?",
- "leveldb/leveldb_proto/ImageManager/0x?",
- "leveldb/leveldb_proto/NTPSnippetImages/0x?",
- "leveldb/leveldb_proto/NTPSnippets/0x?",
"leveldb/value_store/Extensions.Database.Open.Settings/0x?",
"leveldb/value_store/Extensions.Database.Open.Rules/0x?",
"leveldb/value_store/Extensions.Database.Open.State/0x?",
@@ -91,6 +86,8 @@ const char* const kAllocatorDumpNameWhitelist[] = {
"v8/isolate_0x?/zapped_for_debug",
"winheap",
"winheap/allocated_objects",
+ "sync/0x?/kernel",
+ "sync/0x?/store",
nullptr // End of list marker.
};
diff --git a/chromium/base/trace_event/memory_usage_estimator.h b/chromium/base/trace_event/memory_usage_estimator.h
index c089b0ee614..4f230c7b389 100644
--- a/chromium/base/trace_event/memory_usage_estimator.h
+++ b/chromium/base/trace_event/memory_usage_estimator.h
@@ -92,6 +92,11 @@ template <class T, class D>
size_t EstimateMemoryUsage(const std::unique_ptr<T[], D>& array,
size_t array_length);
+// std::shared_ptr
+
+template <class T>
+size_t EstimateMemoryUsage(const std::shared_ptr<T>& ptr);
+
// Containers
template <class F, class S>
@@ -276,6 +281,28 @@ size_t EstimateMemoryUsage(const std::unique_ptr<T[], D>& array,
return EstimateMemoryUsage(array.get(), array_length);
}
+// std::shared_ptr
+
+template <class T>
+size_t EstimateMemoryUsage(const std::shared_ptr<T>& ptr) {
+ auto use_count = ptr.use_count();
+ if (use_count == 0) {
+ return 0;
+ }
+ // Model shared_ptr after libc++,
+ // see __shared_ptr_pointer from include/memory
+ struct SharedPointer {
+ void* vtbl;
+ long shared_owners;
+ long shared_weak_owners;
+ T* value;
+ };
+ // If an object of size S is shared N > S times, we prefer to (potentially)
+ // overestimate rather than return 0.
+ return sizeof(SharedPointer) +
+ (EstimateItemMemoryUsage(*ptr) + (use_count - 1)) / use_count;
+}
+
// std::pair
template <class F, class S>
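
As a worked example of the amortization above (with made-up numbers): if the pointee is estimated at 100 bytes and use_count() is 3, each copy reports sizeof(SharedPointer) + (100 + 2) / 3 = sizeof(SharedPointer) + 34 bytes, so summing the estimate over all owners covers the whole pointee once instead of triple-counting or dropping it.

    #include <memory>
    #include <string>

    #include "base/trace_event/memory_usage_estimator.h"

    size_t EstimateAllOwners() {
      auto shared = std::make_shared<std::string>(100, 'x');
      auto copy1 = shared;
      auto copy2 = shared;  // use_count() == 3 from here on.
      // Each call divides the pointee estimate by the use count, so the sum of
      // the three calls is roughly one pointee plus three control blocks.
      return base::trace_event::EstimateMemoryUsage(shared) +
             base::trace_event::EstimateMemoryUsage(copy1) +
             base::trace_event::EstimateMemoryUsage(copy2);
    }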
diff --git a/chromium/base/trace_event/trace_category_unittest.cc b/chromium/base/trace_event/trace_category_unittest.cc
index 6fc9bb3dc5d..a33924f4ffe 100644
--- a/chromium/base/trace_event/trace_category_unittest.cc
+++ b/chromium/base/trace_event/trace_category_unittest.cc
@@ -7,6 +7,8 @@
#include <memory>
#include "base/bind.h"
+#include "base/lazy_instance.h"
+#include "base/synchronization/lock.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread.h"
#include "base/trace_event/category_registry.h"
@@ -33,7 +35,15 @@ class TraceCategoryTest : public testing::Test {
void TearDown() override { CategoryRegistry::ResetForTesting(); }
static bool GetOrCreateCategoryByName(const char* name, TraceCategory** cat) {
- return CategoryRegistry::GetOrCreateCategoryByName(name, cat);
+ static LazyInstance<Lock>::Leaky g_lock = LAZY_INSTANCE_INITIALIZER;
+ bool is_new_cat = false;
+ *cat = CategoryRegistry::GetCategoryByName(name);
+ if (!*cat) {
+ AutoLock lock(g_lock.Get());
+ is_new_cat = CategoryRegistry::GetOrCreateCategoryLocked(
+ name, [](TraceCategory*) {}, cat);
+ }
+ return is_new_cat;
};
static CategoryRegistry::Range GetAllCategories() {
@@ -60,7 +70,7 @@ TEST_F(TraceCategoryTest, Basic) {
ASSERT_EQ(CategoryRegistry::kCategoryMetadata, cat_meta);
TraceCategory* cat_1 = nullptr;
- ASSERT_TRUE(GetOrCreateCategoryByName("__test_ab", &cat_1));
+ ASSERT_TRUE(GetOrCreateCategoryByName("__test_basic_ab", &cat_1));
ASSERT_FALSE(cat_1->is_enabled());
ASSERT_EQ(0u, cat_1->enabled_filters());
cat_1->set_state_flag(TraceCategory::ENABLED_FOR_RECORDING);
@@ -79,16 +89,17 @@ TEST_F(TraceCategoryTest, Basic) {
ASSERT_TRUE(cat_1->is_enabled());
TraceCategory* cat_2 = nullptr;
- ASSERT_TRUE(GetOrCreateCategoryByName("__test_a", &cat_2));
+ ASSERT_TRUE(GetOrCreateCategoryByName("__test_basic_a", &cat_2));
ASSERT_FALSE(cat_2->is_enabled());
cat_2->set_state_flag(TraceCategory::ENABLED_FOR_RECORDING);
TraceCategory* cat_2_copy = nullptr;
- ASSERT_FALSE(GetOrCreateCategoryByName("__test_a", &cat_2_copy));
+ ASSERT_FALSE(GetOrCreateCategoryByName("__test_basic_a", &cat_2_copy));
ASSERT_EQ(cat_2, cat_2_copy);
TraceCategory* cat_3 = nullptr;
- ASSERT_TRUE(GetOrCreateCategoryByName("__test_ab,__test_a", &cat_3));
+ ASSERT_TRUE(
+ GetOrCreateCategoryByName("__test_basic_ab,__test_basic_a", &cat_3));
ASSERT_FALSE(cat_3->is_enabled());
ASSERT_EQ(0u, cat_3->enabled_filters());
@@ -97,7 +108,7 @@ TEST_F(TraceCategoryTest, Basic) {
if (strcmp(cat.name(), kMetadataName) == 0)
ASSERT_TRUE(CategoryRegistry::IsBuiltinCategory(&cat));
- if (strncmp(cat.name(), "__test", 6) == 0) {
+ if (strncmp(cat.name(), "__test_basic_", 13) == 0) {
ASSERT_FALSE(CategoryRegistry::IsBuiltinCategory(&cat));
num_test_categories_seen++;
}
diff --git a/chromium/base/trace_event/trace_config.cc b/chromium/base/trace_event/trace_config.cc
index 9a17adb969a..36de107bf8b 100644
--- a/chromium/base/trace_event/trace_config.cc
+++ b/chromium/base/trace_event/trace_config.cc
@@ -48,8 +48,10 @@ const char kSyntheticDelayCategoryFilterPrefix[] = "DELAY(";
const char kMemoryDumpConfigParam[] = "memory_dump_config";
const char kAllowedDumpModesParam[] = "allowed_dump_modes";
const char kTriggersParam[] = "triggers";
-const char kPeriodicIntervalParam[] = "periodic_interval_ms";
-const char kModeParam[] = "mode";
+const char kTriggerModeParam[] = "mode";
+const char kMinTimeBetweenDumps[] = "min_time_between_dumps_ms";
+const char kTriggerTypeParam[] = "type";
+const char kPeriodicIntervalLegacyParam[] = "periodic_interval_ms";
const char kHeapProfilerOptions[] = "heap_profiler_options";
const char kBreakdownThresholdBytes[] = "breakdown_threshold_bytes";
@@ -60,11 +62,11 @@ const char kFilterArgsParam[] = "filter_args";
// Default configuration of memory dumps.
const TraceConfig::MemoryDumpConfig::Trigger kDefaultHeavyMemoryDumpTrigger = {
- 2000, // periodic_interval_ms
- MemoryDumpLevelOfDetail::DETAILED};
+ 2000, // min_time_between_dumps_ms
+ MemoryDumpLevelOfDetail::DETAILED, MemoryDumpType::PERIODIC_INTERVAL};
const TraceConfig::MemoryDumpConfig::Trigger kDefaultLightMemoryDumpTrigger = {
- 250, // periodic_interval_ms
- MemoryDumpLevelOfDetail::LIGHT};
+ 250, // min_time_between_dumps_ms
+ MemoryDumpLevelOfDetail::LIGHT, MemoryDumpType::PERIODIC_INTERVAL};
class ConvertableTraceConfigToTraceFormat
: public base::trace_event::ConvertableToTraceFormat {
@@ -169,6 +171,20 @@ void TraceConfig::EventFilterConfig::SetArgs(
args_ = std::move(args);
}
+bool TraceConfig::EventFilterConfig::GetArgAsSet(
+ const char* key,
+ std::unordered_set<std::string>* out_set) const {
+ const ListValue* list = nullptr;
+ if (!args_->GetList(key, &list))
+ return false;
+ for (size_t i = 0; i < list->GetSize(); ++i) {
+ std::string value;
+ if (list->GetString(i, &value))
+ out_set->insert(value);
+ }
+ return true;
+}
+
bool TraceConfig::EventFilterConfig::IsCategoryGroupEnabled(
const char* category_group_name) const {
CStringTokenizer category_group_tokens(
@@ -592,17 +608,26 @@ void TraceConfig::SetMemoryDumpConfigFromConfigDict(
if (!trigger_list->GetDictionary(i, &trigger))
continue;
+ MemoryDumpConfig::Trigger dump_config;
int interval = 0;
- if (!trigger->GetInteger(kPeriodicIntervalParam, &interval))
- continue;
-
+ if (!trigger->GetInteger(kMinTimeBetweenDumps, &interval)) {
+ // If "min_time_between_dumps_ms" param was not given, then the trace
+ // config uses old format where only periodic dumps are supported.
+ trigger->GetInteger(kPeriodicIntervalLegacyParam, &interval);
+ dump_config.trigger_type = MemoryDumpType::PERIODIC_INTERVAL;
+ } else {
+ std::string trigger_type_str;
+ trigger->GetString(kTriggerTypeParam, &trigger_type_str);
+ dump_config.trigger_type = StringToMemoryDumpType(trigger_type_str);
+ }
DCHECK_GT(interval, 0);
- MemoryDumpConfig::Trigger dump_config;
- dump_config.periodic_interval_ms = static_cast<uint32_t>(interval);
+ dump_config.min_time_between_dumps_ms = static_cast<uint32_t>(interval);
+
std::string level_of_detail_str;
- trigger->GetString(kModeParam, &level_of_detail_str);
+ trigger->GetString(kTriggerModeParam, &level_of_detail_str);
dump_config.level_of_detail =
StringToMemoryDumpLevelOfDetail(level_of_detail_str);
+
memory_dump_config_.triggers.push_back(dump_config);
}
}
@@ -751,10 +776,14 @@ std::unique_ptr<DictionaryValue> TraceConfig::ToDict() const {
auto triggers_list = MakeUnique<ListValue>();
for (const auto& config : memory_dump_config_.triggers) {
auto trigger_dict = MakeUnique<DictionaryValue>();
- trigger_dict->SetInteger(kPeriodicIntervalParam,
- static_cast<int>(config.periodic_interval_ms));
+ trigger_dict->SetString(kTriggerTypeParam,
+ MemoryDumpTypeToString(config.trigger_type));
+ trigger_dict->SetInteger(
+ kMinTimeBetweenDumps,
+ static_cast<int>(config.min_time_between_dumps_ms));
trigger_dict->SetString(
- kModeParam, MemoryDumpLevelOfDetailToString(config.level_of_detail));
+ kTriggerModeParam,
+ MemoryDumpLevelOfDetailToString(config.level_of_detail));
triggers_list->Append(std::move(trigger_dict));
}
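
The net effect of the parsing changes above is a new trigger schema: "type" plus "min_time_between_dumps_ms", with the legacy "periodic_interval_ms" key still accepted and mapped to a periodic_interval trigger. A minimal peak-detection config built from a literal (field values are arbitrary, and other memory_dump_config keys are omitted):

    #include "base/trace_event/trace_config.h"

    base::trace_event::TraceConfig MakePeakDetectionConfig() {
      // Uses only the trigger keys parsed in SetMemoryDumpConfigFromConfigDict().
      const char kConfig[] =
          "{"
          "\"included_categories\":[\"disabled-by-default-memory-infra\"],"
          "\"memory_dump_config\":{"
          "\"triggers\":["
          "{"
          "\"min_time_between_dumps_ms\":1000,"
          "\"mode\":\"detailed\","
          "\"type\":\"peak_memory_usage\""
          "}"
          "]"
          "}"
          "}";
      return base::trace_event::TraceConfig(kConfig);
    }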
diff --git a/chromium/base/trace_event/trace_config.h b/chromium/base/trace_event/trace_config.h
index c10ed47f5be..717c2613169 100644
--- a/chromium/base/trace_event/trace_config.h
+++ b/chromium/base/trace_event/trace_config.h
@@ -7,8 +7,10 @@
#include <stdint.h>
+#include <memory>
#include <set>
#include <string>
+#include <unordered_set>
#include <vector>
#include "base/base_export.h"
@@ -51,8 +53,9 @@ class BASE_EXPORT TraceConfig {
// Specifies the triggers in the memory dump config.
struct Trigger {
- uint32_t periodic_interval_ms;
+ uint32_t min_time_between_dumps_ms;
MemoryDumpLevelOfDetail level_of_detail;
+ MemoryDumpType trigger_type;
};
// Specifies the configuration options for the heap profiler.
@@ -82,7 +85,7 @@ class BASE_EXPORT TraceConfig {
HeapProfiler heap_profiler_options;
};
- class EventFilterConfig {
+ class BASE_EXPORT EventFilterConfig {
public:
EventFilterConfig(const std::string& predicate_name);
EventFilterConfig(const EventFilterConfig& tc);
@@ -94,6 +97,7 @@ class BASE_EXPORT TraceConfig {
void AddIncludedCategory(const std::string& category);
void AddExcludedCategory(const std::string& category);
void SetArgs(std::unique_ptr<base::DictionaryValue> args);
+ bool GetArgAsSet(const char* key, std::unordered_set<std::string>*) const;
bool IsCategoryGroupEnabled(const char* category_group_name) const;
diff --git a/chromium/base/trace_event/trace_config_memory_test_util.h b/chromium/base/trace_event/trace_config_memory_test_util.h
index 0ecdb3c15ca..744e8a8acc1 100644
--- a/chromium/base/trace_event/trace_config_memory_test_util.h
+++ b/chromium/base/trace_event/trace_config_memory_test_util.h
@@ -13,83 +13,144 @@ namespace trace_event {
class TraceConfigMemoryTestUtil {
public:
+ static std::string GetTraceConfig_LegacyPeriodicTriggers(int light_period,
+ int heavy_period) {
+ return StringPrintf(
+ "{"
+ "\"enable_argument_filter\":false,"
+ "\"enable_systrace\":false,"
+ "\"included_categories\":["
+ "\"%s\""
+ "],"
+ "\"memory_dump_config\":{"
+ "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
+ "\"heap_profiler_options\":{"
+ "\"breakdown_threshold_bytes\":2048"
+ "},"
+ "\"triggers\":["
+ "{"
+ "\"mode\":\"light\","
+ "\"periodic_interval_ms\":%d"
+ "},"
+ "{"
+ "\"mode\":\"detailed\","
+ "\"periodic_interval_ms\":%d"
+ "}"
+ "]"
+ "},"
+ "\"record_mode\":\"record-until-full\""
+ "}",
+ MemoryDumpManager::kTraceCategory, light_period, heavy_period);
+ }
+
static std::string GetTraceConfig_PeriodicTriggers(int light_period,
int heavy_period) {
return StringPrintf(
"{"
- "\"enable_argument_filter\":false,"
- "\"enable_systrace\":false,"
- "\"included_categories\":["
- "\"%s\""
- "],"
- "\"memory_dump_config\":{"
- "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
- "\"heap_profiler_options\":{"
- "\"breakdown_threshold_bytes\":2048"
- "},"
- "\"triggers\":["
- "{"
- "\"mode\":\"light\","
- "\"periodic_interval_ms\":%d"
- "},"
- "{"
- "\"mode\":\"detailed\","
- "\"periodic_interval_ms\":%d"
- "}"
- "]"
- "},"
- "\"record_mode\":\"record-until-full\""
- "}", MemoryDumpManager::kTraceCategory, light_period, heavy_period);
+ "\"enable_argument_filter\":false,"
+ "\"enable_systrace\":false,"
+ "\"included_categories\":["
+ "\"%s\""
+ "],"
+ "\"memory_dump_config\":{"
+ "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
+ "\"heap_profiler_options\":{"
+ "\"breakdown_threshold_bytes\":2048"
+ "},"
+ "\"triggers\":["
+ "{"
+ "\"min_time_between_dumps_ms\":%d,"
+ "\"mode\":\"light\","
+ "\"type\":\"periodic_interval\""
+ "},"
+ "{"
+ "\"min_time_between_dumps_ms\":%d,"
+ "\"mode\":\"detailed\","
+ "\"type\":\"periodic_interval\""
+ "}"
+ "]"
+ "},"
+ "\"record_mode\":\"record-until-full\""
+ "}",
+ MemoryDumpManager::kTraceCategory, light_period, heavy_period);
}
static std::string GetTraceConfig_EmptyTriggers() {
return StringPrintf(
"{"
- "\"enable_argument_filter\":false,"
- "\"enable_systrace\":false,"
- "\"included_categories\":["
- "\"%s\""
- "],"
- "\"memory_dump_config\":{"
- "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
- "\"triggers\":["
- "]"
- "},"
- "\"record_mode\":\"record-until-full\""
- "}", MemoryDumpManager::kTraceCategory);
+ "\"enable_argument_filter\":false,"
+ "\"enable_systrace\":false,"
+ "\"included_categories\":["
+ "\"%s\""
+ "],"
+ "\"memory_dump_config\":{"
+ "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
+ "\"triggers\":["
+ "]"
+ "},"
+ "\"record_mode\":\"record-until-full\""
+ "}",
+ MemoryDumpManager::kTraceCategory);
}
static std::string GetTraceConfig_NoTriggers() {
return StringPrintf(
"{"
- "\"enable_argument_filter\":false,"
- "\"enable_systrace\":false,"
- "\"included_categories\":["
- "\"%s\""
- "],"
- "\"record_mode\":\"record-until-full\""
- "}", MemoryDumpManager::kTraceCategory);
+ "\"enable_argument_filter\":false,"
+ "\"enable_systrace\":false,"
+ "\"included_categories\":["
+ "\"%s\""
+ "],"
+ "\"record_mode\":\"record-until-full\""
+ "}",
+ MemoryDumpManager::kTraceCategory);
}
static std::string GetTraceConfig_BackgroundTrigger(int period_ms) {
return StringPrintf(
"{"
- "\"enable_argument_filter\":false,"
- "\"enable_systrace\":false,"
- "\"included_categories\":["
- "\"%s\""
- "],"
- "\"memory_dump_config\":{"
- "\"allowed_dump_modes\":[\"background\"],"
- "\"triggers\":["
- "{"
- "\"mode\":\"background\","
- "\"periodic_interval_ms\":%d"
- "}"
- "]"
- "},"
- "\"record_mode\":\"record-until-full\""
- "}", MemoryDumpManager::kTraceCategory, period_ms);
+ "\"enable_argument_filter\":false,"
+ "\"enable_systrace\":false,"
+ "\"included_categories\":["
+ "\"%s\""
+ "],"
+ "\"memory_dump_config\":{"
+ "\"allowed_dump_modes\":[\"background\"],"
+ "\"triggers\":["
+ "{"
+ "\"min_time_between_dumps_ms\":%d,"
+ "\"mode\":\"background\","
+ "\"type\":\"periodic_interval\""
+ "}"
+ "]"
+ "},"
+ "\"record_mode\":\"record-until-full\""
+ "}",
+ MemoryDumpManager::kTraceCategory, period_ms);
+ }
+
+ static std::string GetTraceConfig_PeakDetectionTrigger(int heavy_period) {
+ return StringPrintf(
+ "{"
+ "\"enable_argument_filter\":false,"
+ "\"enable_systrace\":false,"
+ "\"included_categories\":["
+ "\"%s\""
+ "],"
+ "\"memory_dump_config\":{"
+ "\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
+ "\"triggers\":["
+ "{"
+ "\"min_time_between_dumps_ms\":%d,"
+ "\"mode\":\"detailed\","
+ "\"type\":\"peak_memory_usage\""
+ "}"
+ "]"
+ "},"
+ "\"record_mode\":\"record-until-full\""
+ "}",
+ MemoryDumpManager::kTraceCategory, heavy_period);
}
};
diff --git a/chromium/base/trace_event/trace_config_unittest.cc b/chromium/base/trace_event/trace_config_unittest.cc
index c04029909cc..74aa7bdc63a 100644
--- a/chromium/base/trace_event/trace_config_unittest.cc
+++ b/chromium/base/trace_event/trace_config_unittest.cc
@@ -37,7 +37,8 @@ const char kCustomTraceConfigString[] =
"}"
"],"
"\"excluded_categories\":[\"excluded\",\"exc_pattern*\"],"
- "\"included_categories\":[\"included\","
+ "\"included_categories\":["
+ "\"included\","
"\"inc_pattern*\","
"\"disabled-by-default-cc\","
"\"disabled-by-default-memory-infra\"],"
@@ -47,8 +48,16 @@ const char kCustomTraceConfigString[] =
"\"breakdown_threshold_bytes\":10240"
"},"
"\"triggers\":["
- "{\"mode\":\"light\",\"periodic_interval_ms\":50},"
- "{\"mode\":\"detailed\",\"periodic_interval_ms\":1000}"
+ "{"
+ "\"min_time_between_dumps_ms\":50,"
+ "\"mode\":\"light\","
+ "\"type\":\"periodic_interval\""
+ "},"
+ "{"
+ "\"min_time_between_dumps_ms\":1000,"
+ "\"mode\":\"detailed\","
+ "\"type\":\"peak_memory_usage\""
+ "}"
"]"
"},"
"\"record_mode\":\"record-continuously\","
@@ -432,6 +441,11 @@ TEST(TraceConfigTest, TraceConfigFromValidString) {
base::JSONWriter::Write(*event_filter.filter_args(), &json_out);
EXPECT_STREQ(json_out.c_str(),
"{\"event_name_whitelist\":[\"a snake\",\"a dog\"]}");
+ std::unordered_set<std::string> filter_values;
+ EXPECT_TRUE(event_filter.GetArgAsSet("event_name_whitelist", &filter_values));
+ EXPECT_EQ(2u, filter_values.size());
+ EXPECT_EQ(1u, filter_values.count("a snake"));
+ EXPECT_EQ(1u, filter_values.count("a dog"));
const char config_string_2[] = "{\"included_categories\":[\"*\"]}";
TraceConfig tc2(config_string_2);
@@ -617,30 +631,47 @@ TEST(TraceConfigTest, TraceConfigFromMemoryConfigString) {
TraceConfigMemoryTestUtil::GetTraceConfig_PeriodicTriggers(200, 2000);
TraceConfig tc1(tc_str1);
EXPECT_EQ(tc_str1, tc1.ToString());
+ TraceConfig tc2(
+ TraceConfigMemoryTestUtil::GetTraceConfig_LegacyPeriodicTriggers(200,
+ 2000));
+ EXPECT_EQ(tc_str1, tc2.ToString());
+
EXPECT_TRUE(tc1.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
ASSERT_EQ(2u, tc1.memory_dump_config_.triggers.size());
- EXPECT_EQ(200u, tc1.memory_dump_config_.triggers[0].periodic_interval_ms);
+ EXPECT_EQ(200u,
+ tc1.memory_dump_config_.triggers[0].min_time_between_dumps_ms);
EXPECT_EQ(MemoryDumpLevelOfDetail::LIGHT,
tc1.memory_dump_config_.triggers[0].level_of_detail);
- EXPECT_EQ(2000u, tc1.memory_dump_config_.triggers[1].periodic_interval_ms);
+ EXPECT_EQ(2000u,
+ tc1.memory_dump_config_.triggers[1].min_time_between_dumps_ms);
EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
tc1.memory_dump_config_.triggers[1].level_of_detail);
EXPECT_EQ(
2048u,
tc1.memory_dump_config_.heap_profiler_options.breakdown_threshold_bytes);
- std::string tc_str2 =
+ std::string tc_str3 =
TraceConfigMemoryTestUtil::GetTraceConfig_BackgroundTrigger(
1 /* period_ms */);
- TraceConfig tc2(tc_str2);
- EXPECT_EQ(tc_str2, tc2.ToString());
- EXPECT_TRUE(tc2.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
- ASSERT_EQ(1u, tc2.memory_dump_config_.triggers.size());
- EXPECT_EQ(1u, tc2.memory_dump_config_.triggers[0].periodic_interval_ms);
+ TraceConfig tc3(tc_str3);
+ EXPECT_EQ(tc_str3, tc3.ToString());
+ EXPECT_TRUE(tc3.IsCategoryGroupEnabled(MemoryDumpManager::kTraceCategory));
+ ASSERT_EQ(1u, tc3.memory_dump_config_.triggers.size());
+ EXPECT_EQ(1u, tc3.memory_dump_config_.triggers[0].min_time_between_dumps_ms);
EXPECT_EQ(MemoryDumpLevelOfDetail::BACKGROUND,
- tc2.memory_dump_config_.triggers[0].level_of_detail);
+ tc3.memory_dump_config_.triggers[0].level_of_detail);
+
+ std::string tc_str4 =
+ TraceConfigMemoryTestUtil::GetTraceConfig_PeakDetectionTrigger(
+ 1 /*heavy_period */);
+ TraceConfig tc4(tc_str4);
+ EXPECT_EQ(tc_str4, tc4.ToString());
+ ASSERT_EQ(1u, tc4.memory_dump_config_.triggers.size());
+ EXPECT_EQ(1u, tc4.memory_dump_config_.triggers[0].min_time_between_dumps_ms);
+ EXPECT_EQ(MemoryDumpLevelOfDetail::DETAILED,
+ tc4.memory_dump_config_.triggers[0].level_of_detail);
}
TEST(TraceConfigTest, EmptyMemoryDumpConfigTest) {
diff --git a/chromium/base/trace_event/trace_event.h b/chromium/base/trace_event/trace_event.h
index 0299ddd0cb7..51e6927cbd5 100644
--- a/chromium/base/trace_event/trace_event.h
+++ b/chromium/base/trace_event/trace_event.h
@@ -43,9 +43,21 @@
trace_event_internal::TraceID::DontMangle(id)
// By default, trace IDs are eventually converted to a single 64-bit number. Use
-// this macro to add a scope string.
-#define TRACE_ID_WITH_SCOPE(scope, id) \
- trace_event_internal::TraceID::WithScope(scope, id)
+// this macro to add a scope string. For example,
+//
+// TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(
+// "network", "ResourceLoad",
+// TRACE_ID_WITH_SCOPE("BlinkResourceID", resourceID));
+//
+// Also, it is possible to prepend the ID with another number, like the process
+// ID. This is useful in creating IDs that are unique among all processes. To do
+// that, pass two numbers after the scope string instead of one. For example,
+//
+// TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(
+// "network", "ResourceLoad",
+// TRACE_ID_WITH_SCOPE("BlinkResourceID", pid, resourceID));
+#define TRACE_ID_WITH_SCOPE(scope, ...) \
+ trace_event_internal::TraceID::WithScope(scope, ##__VA_ARGS__)
#define TRACE_ID_GLOBAL(id) trace_event_internal::TraceID::GlobalId(id)
#define TRACE_ID_LOCAL(id) trace_event_internal::TraceID::LocalId(id)
@@ -55,14 +67,14 @@
#define INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE() \
UNLIKELY(*INTERNAL_TRACE_EVENT_UID(category_group_enabled) & \
- (base::trace_event::TraceCategory::ENABLED_FOR_RECORDING | \
- base::trace_event::TraceCategory::ENABLED_FOR_ETW_EXPORT | \
- base::trace_event::TraceCategory::ENABLED_FOR_FILTERING))
+ (base::trace_event::TraceCategory::ENABLED_FOR_RECORDING | \
+ base::trace_event::TraceCategory::ENABLED_FOR_ETW_EXPORT))
-#define INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_FILTERING_MODE( \
- category_group_enabled) \
- UNLIKELY(category_group_enabled& \
- base::trace_event::TraceCategory::ENABLED_FOR_FILTERING)
+#define INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED() \
+ UNLIKELY(*INTERNAL_TRACE_EVENT_UID(category_group_enabled) & \
+ (base::trace_event::TraceCategory::ENABLED_FOR_RECORDING | \
+ base::trace_event::TraceCategory::ENABLED_FOR_ETW_EXPORT | \
+ base::trace_event::TraceCategory::ENABLED_FOR_FILTERING))
////////////////////////////////////////////////////////////////////////////////
// Implementation specific tracing API definitions.
@@ -171,14 +183,6 @@
#define TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION \
base::trace_event::TraceLog::GetInstance()->UpdateTraceEventDuration
-// Call EndEvent on the filter for a filtered event.
-// void TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(
-// const unsigned char* category_group_enabled,
-// const char* name,
-// base::trace_event::TraceEventHandle id)
-#define TRACE_EVENT_API_END_FILTERED_EVENT \
- base::trace_event::TraceLog::GetInstance()->EndFilteredEvent
-
// Adds a metadata event to the trace log. The |AppendValueAsTraceFormat| method
// on the convertable value will be called at flush time.
// TRACE_EVENT_API_ADD_METADATA_EVENT(
@@ -236,43 +240,43 @@
// Implementation detail: internal macro to create static category and add
// event if the category is enabled.
-#define INTERNAL_TRACE_EVENT_ADD(phase, category_group, name, flags, ...) \
- do { \
- INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
- if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
- trace_event_internal::AddTraceEvent( \
- phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
- trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
- flags, trace_event_internal::kNoId, ##__VA_ARGS__); \
- } \
- } while (0)
+#define INTERNAL_TRACE_EVENT_ADD(phase, category_group, name, flags, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) { \
+ trace_event_internal::AddTraceEvent( \
+ phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
+ flags, trace_event_internal::kNoId, ##__VA_ARGS__); \
+ } \
+ } while (0)
// Implementation detail: internal macro to create static category and add begin
// event if the category is enabled. Also adds the end event when the scope
// ends.
-#define INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, ...) \
- INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
- trace_event_internal::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \
- if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
- base::trace_event::TraceEventHandle h = \
- trace_event_internal::AddTraceEvent( \
- TRACE_EVENT_PHASE_COMPLETE, \
- INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
- trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
- TRACE_EVENT_FLAG_NONE, trace_event_internal::kNoId, \
- ##__VA_ARGS__); \
- INTERNAL_TRACE_EVENT_UID(tracer).Initialize( \
- INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, h); \
- }
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, ...) \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ trace_event_internal::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) { \
+ base::trace_event::TraceEventHandle h = \
+ trace_event_internal::AddTraceEvent( \
+ TRACE_EVENT_PHASE_COMPLETE, \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
+ TRACE_EVENT_FLAG_NONE, trace_event_internal::kNoId, \
+ ##__VA_ARGS__); \
+ INTERNAL_TRACE_EVENT_UID(tracer).Initialize( \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, h); \
+ }
-#define INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW( \
- category_group, name, bind_id, flow_flags, ...) \
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, \
+ bind_id, flow_flags, ...) \
INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
trace_event_internal::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \
- if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) { \
trace_event_internal::TraceID trace_event_bind_id((bind_id)); \
- unsigned int trace_event_flags = flow_flags | \
- trace_event_bind_id.id_flags(); \
+ unsigned int trace_event_flags = \
+ flow_flags | trace_event_bind_id.id_flags(); \
base::trace_event::TraceEventHandle h = \
trace_event_internal::AddTraceEvent( \
TRACE_EVENT_PHASE_COMPLETE, \
@@ -285,20 +289,20 @@
// Implementation detail: internal macro to create static category and add
// event if the category is enabled.
-#define INTERNAL_TRACE_EVENT_ADD_WITH_ID(phase, category_group, name, id, \
- flags, ...) \
- do { \
- INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
- if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
- trace_event_internal::TraceID trace_event_trace_id((id)); \
- unsigned int trace_event_flags = flags | \
- trace_event_trace_id.id_flags(); \
- trace_event_internal::AddTraceEvent( \
- phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), \
- name, trace_event_trace_id.scope(), trace_event_trace_id.raw_id(), \
- trace_event_flags, trace_event_internal::kNoId, ##__VA_ARGS__); \
- } \
- } while (0)
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID(phase, category_group, name, id, \
+ flags, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) { \
+ trace_event_internal::TraceID trace_event_trace_id((id)); \
+ unsigned int trace_event_flags = \
+ flags | trace_event_trace_id.id_flags(); \
+ trace_event_internal::AddTraceEvent( \
+ phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ trace_event_trace_id.scope(), trace_event_trace_id.raw_id(), \
+ trace_event_flags, trace_event_internal::kNoId, ##__VA_ARGS__); \
+ } \
+ } while (0)
// Implementation detail: internal macro to create static category and add
// event if the category is enabled.
@@ -306,62 +310,62 @@
timestamp, flags, ...) \
do { \
INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
- if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) { \
trace_event_internal::AddTraceEventWithThreadIdAndTimestamp( \
phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
- TRACE_EVENT_API_CURRENT_THREAD_ID, \
- timestamp, flags | TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP, \
+ TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, \
+ flags | TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP, \
trace_event_internal::kNoId, ##__VA_ARGS__); \
} \
} while (0)
// Implementation detail: internal macro to create static category and add
// event if the category is enabled.
-#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
- phase, category_group, name, id, thread_id, timestamp, flags, ...) \
- do { \
- INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
- if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
- trace_event_internal::TraceID trace_event_trace_id((id)); \
- unsigned int trace_event_flags = flags | \
- trace_event_trace_id.id_flags(); \
- trace_event_internal::AddTraceEventWithThreadIdAndTimestamp( \
- phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
- trace_event_trace_id.scope(), trace_event_trace_id.raw_id(), \
- thread_id, timestamp, \
- trace_event_flags | TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP, \
- trace_event_internal::kNoId, ##__VA_ARGS__); \
- } \
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
+ phase, category_group, name, id, thread_id, timestamp, flags, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) { \
+ trace_event_internal::TraceID trace_event_trace_id((id)); \
+ unsigned int trace_event_flags = \
+ flags | trace_event_trace_id.id_flags(); \
+ trace_event_internal::AddTraceEventWithThreadIdAndTimestamp( \
+ phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ trace_event_trace_id.scope(), trace_event_trace_id.raw_id(), \
+ thread_id, timestamp, \
+ trace_event_flags | TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP, \
+ trace_event_internal::kNoId, ##__VA_ARGS__); \
+ } \
} while (0)
// The linked ID will not be mangled.
-#define INTERNAL_TRACE_EVENT_ADD_LINK_IDS(category_group, name, id1, id2) \
- do { \
- INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
- if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
- trace_event_internal::TraceID source_id((id1)); \
- unsigned int source_flags = source_id.id_flags(); \
- trace_event_internal::TraceID target_id((id2)); \
- trace_event_internal::AddTraceEvent( \
- TRACE_EVENT_PHASE_LINK_IDS, \
- INTERNAL_TRACE_EVENT_UID(category_group_enabled), \
- name, source_id.scope(), source_id.raw_id(), source_flags, \
- trace_event_internal::kNoId, \
- "linked_id", target_id.AsConvertableToTraceFormat()); \
- } \
- } while (0)
+#define INTERNAL_TRACE_EVENT_ADD_LINK_IDS(category_group, name, id1, id2) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) { \
+ trace_event_internal::TraceID source_id((id1)); \
+ unsigned int source_flags = source_id.id_flags(); \
+ trace_event_internal::TraceID target_id((id2)); \
+ trace_event_internal::AddTraceEvent( \
+ TRACE_EVENT_PHASE_LINK_IDS, \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ source_id.scope(), source_id.raw_id(), source_flags, \
+ trace_event_internal::kNoId, "linked_id", \
+ target_id.AsConvertableToTraceFormat()); \
+ } \
+ } while (0)
// Implementation detail: internal macro to create static category and add
// metadata event if the category is enabled.
-#define INTERNAL_TRACE_EVENT_METADATA_ADD(category_group, name, ...) \
- do { \
- INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
- if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
- TRACE_EVENT_API_ADD_METADATA_EVENT( \
- INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
- ##__VA_ARGS__); \
- } \
+#define INTERNAL_TRACE_EVENT_METADATA_ADD(category_group, name, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) { \
+ TRACE_EVENT_API_ADD_METADATA_EVENT( \
+ INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ ##__VA_ARGS__); \
+ } \
} while (0)
// Implementation detail: internal macro to enter and leave a
@@ -438,11 +442,27 @@ class BASE_EXPORT TraceID {
: scope_(scope), raw_id_(global_id.raw_id()) {
id_flags_ = TRACE_EVENT_FLAG_HAS_GLOBAL_ID;
}
+ WithScope(const char* scope,
+ unsigned long long prefix,
+ unsigned long long raw_id)
+ : scope_(scope), has_prefix_(true), prefix_(prefix), raw_id_(raw_id) {}
+ WithScope(const char* scope, unsigned long long prefix, GlobalId global_id)
+ : scope_(scope),
+ has_prefix_(true),
+ prefix_(prefix),
+ raw_id_(global_id.raw_id()) {
+ id_flags_ = TRACE_EVENT_FLAG_HAS_GLOBAL_ID;
+ }
unsigned long long raw_id() const { return raw_id_; }
const char* scope() const { return scope_; }
+ bool has_prefix() const { return has_prefix_; }
+ unsigned long long prefix() const { return prefix_; }
unsigned int id_flags() const { return id_flags_; }
+
private:
const char* scope_ = nullptr;
+ bool has_prefix_ = false;
+ unsigned long long prefix_;
unsigned long long raw_id_;
unsigned int id_flags_ = TRACE_EVENT_FLAG_HAS_ID;
};
@@ -525,11 +545,17 @@ class BASE_EXPORT TraceID {
TraceID(GlobalId raw_id) : raw_id_(raw_id.raw_id()) {
id_flags_ = TRACE_EVENT_FLAG_HAS_GLOBAL_ID;
}
- TraceID(WithScope scoped_id) : scope_(scoped_id.scope()),
- raw_id_(scoped_id.raw_id()), id_flags_(scoped_id.id_flags()) {}
+ TraceID(WithScope scoped_id)
+ : scope_(scoped_id.scope()),
+ has_prefix_(scoped_id.has_prefix()),
+ prefix_(scoped_id.prefix()),
+ raw_id_(scoped_id.raw_id()),
+ id_flags_(scoped_id.id_flags()) {}
unsigned long long raw_id() const { return raw_id_; }
const char* scope() const { return scope_; }
+ bool has_prefix() const { return has_prefix_; }
+ unsigned long long prefix() const { return prefix_; }
unsigned int id_flags() const { return id_flags_; }
std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
@@ -537,6 +563,8 @@ class BASE_EXPORT TraceID {
private:
const char* scope_ = nullptr;
+ bool has_prefix_ = false;
+ unsigned long long prefix_;
unsigned long long raw_id_;
unsigned int id_flags_ = TRACE_EVENT_FLAG_HAS_ID;
};
diff --git a/chromium/base/trace_event/trace_event_argument.cc b/chromium/base/trace_event/trace_event_argument.cc
index 336d964bff4..da33c6da004 100644
--- a/chromium/base/trace_event/trace_event_argument.cc
+++ b/chromium/base/trace_event/trace_event_argument.cc
@@ -244,36 +244,36 @@ void TracedValue::SetBaseValueWithCopiedName(base::StringPiece name,
const base::Value& value) {
DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
switch (value.GetType()) {
- case base::Value::TYPE_NULL:
- case base::Value::TYPE_BINARY:
+ case base::Value::Type::NONE:
+ case base::Value::Type::BINARY:
NOTREACHED();
break;
- case base::Value::TYPE_BOOLEAN: {
+ case base::Value::Type::BOOLEAN: {
bool bool_value;
value.GetAsBoolean(&bool_value);
SetBooleanWithCopiedName(name, bool_value);
} break;
- case base::Value::TYPE_INTEGER: {
+ case base::Value::Type::INTEGER: {
int int_value;
value.GetAsInteger(&int_value);
SetIntegerWithCopiedName(name, int_value);
} break;
- case base::Value::TYPE_DOUBLE: {
+ case base::Value::Type::DOUBLE: {
double double_value;
value.GetAsDouble(&double_value);
SetDoubleWithCopiedName(name, double_value);
} break;
- case base::Value::TYPE_STRING: {
+ case base::Value::Type::STRING: {
const StringValue* string_value;
value.GetAsString(&string_value);
SetStringWithCopiedName(name, string_value->GetString());
} break;
- case base::Value::TYPE_DICTIONARY: {
+ case base::Value::Type::DICTIONARY: {
const DictionaryValue* dict_value;
value.GetAsDictionary(&dict_value);
BeginDictionaryWithCopiedName(name);
@@ -284,7 +284,7 @@ void TracedValue::SetBaseValueWithCopiedName(base::StringPiece name,
EndDictionary();
} break;
- case base::Value::TYPE_LIST: {
+ case base::Value::Type::LIST: {
const ListValue* list_value;
value.GetAsList(&list_value);
BeginArrayWithCopiedName(name);
@@ -298,36 +298,36 @@ void TracedValue::SetBaseValueWithCopiedName(base::StringPiece name,
void TracedValue::AppendBaseValue(const base::Value& value) {
DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
switch (value.GetType()) {
- case base::Value::TYPE_NULL:
- case base::Value::TYPE_BINARY:
+ case base::Value::Type::NONE:
+ case base::Value::Type::BINARY:
NOTREACHED();
break;
- case base::Value::TYPE_BOOLEAN: {
+ case base::Value::Type::BOOLEAN: {
bool bool_value;
value.GetAsBoolean(&bool_value);
AppendBoolean(bool_value);
} break;
- case base::Value::TYPE_INTEGER: {
+ case base::Value::Type::INTEGER: {
int int_value;
value.GetAsInteger(&int_value);
AppendInteger(int_value);
} break;
- case base::Value::TYPE_DOUBLE: {
+ case base::Value::Type::DOUBLE: {
double double_value;
value.GetAsDouble(&double_value);
AppendDouble(double_value);
} break;
- case base::Value::TYPE_STRING: {
+ case base::Value::Type::STRING: {
const StringValue* string_value;
value.GetAsString(&string_value);
AppendString(string_value->GetString());
} break;
- case base::Value::TYPE_DICTIONARY: {
+ case base::Value::Type::DICTIONARY: {
const DictionaryValue* dict_value;
value.GetAsDictionary(&dict_value);
BeginDictionary();
@@ -338,7 +338,7 @@ void TracedValue::AppendBaseValue(const base::Value& value) {
EndDictionary();
} break;
- case base::Value::TYPE_LIST: {
+ case base::Value::Type::LIST: {
const ListValue* list_value;
value.GetAsList(&list_value);
BeginArray();
diff --git a/chromium/base/trace_event/trace_event_filter.cc b/chromium/base/trace_event/trace_event_filter.cc
new file mode 100644
index 00000000000..62652958647
--- /dev/null
+++ b/chromium/base/trace_event/trace_event_filter.cc
@@ -0,0 +1,17 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_event_filter.h"
+
+namespace base {
+namespace trace_event {
+
+TraceEventFilter::TraceEventFilter() {}
+TraceEventFilter::~TraceEventFilter() {}
+
+void TraceEventFilter::EndEvent(const char* category_name,
+ const char* event_name) const {}
+
+} // namespace trace_event
+} // namespace base
diff --git a/chromium/base/trace_event/trace_event_filter.h b/chromium/base/trace_event/trace_event_filter.h
new file mode 100644
index 00000000000..48c6711432f
--- /dev/null
+++ b/chromium/base/trace_event/trace_event_filter.h
@@ -0,0 +1,51 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_EVENT_FILTER_H_
+#define BASE_TRACE_EVENT_TRACE_EVENT_FILTER_H_
+
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+
+namespace base {
+namespace trace_event {
+
+class TraceEvent;
+
+// TraceEventFilter is like iptables for TRACE_EVENT macros. Filters can be
+// enabled on a per-category basis, hence a single filter instance can serve
+// more than one TraceCategory. There are two use cases for filters:
+// 1. Snooping TRACE_EVENT macros without adding them to the TraceLog. This is
+//    possible by setting the ENABLED_FOR_FILTERING flag on a category without
+//    ENABLED_FOR_RECORDING (see TraceConfig for user-facing configuration).
+// 2. Filtering TRACE_EVENT macros before they are added to the TraceLog. This
+//    requires both the ENABLED_FOR_FILTERING and ENABLED_FOR_RECORDING flags
+//    on the category.
+// In either case filters must be thread-safe: the FilterTraceEvent and
+// EndEvent methods can be called concurrently as trace macros are hit on
+// different threads.
+class BASE_EXPORT TraceEventFilter {
+ public:
+ TraceEventFilter();
+ virtual ~TraceEventFilter();
+
+  // If the category is ENABLED_FOR_RECORDING, the event is added iff all the
+  // filters enabled for the category return true; returning false causes the
+  // event to be discarded.
+ virtual bool FilterTraceEvent(const TraceEvent& trace_event) const = 0;
+
+ // Notifies the end of a duration event when the RAII macro goes out of scope.
+ virtual void EndEvent(const char* category_name,
+ const char* event_name) const;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TraceEventFilter);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_TRACE_EVENT_FILTER_H_
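
For reference, a minimal filter implementation might look like the sketch below. The class and its name-prefix check are illustrative only and not part of this patch:

#include <string.h>

#include "base/trace_event/trace_event_filter.h"
#include "base/trace_event/trace_event_impl.h"

namespace {

// Hypothetical filter: discards every event whose name starts with "Debug".
// For categories that are only ENABLED_FOR_FILTERING the return value does
// not affect recording; the filter merely observes the events.
class DebugEventDropper : public base::trace_event::TraceEventFilter {
 public:
  bool FilterTraceEvent(
      const base::trace_event::TraceEvent& trace_event) const override {
    return strncmp(trace_event.name(), "Debug", 5) != 0;
  }

  void EndEvent(const char* category_name,
                const char* event_name) const override {
    // No per-event bookkeeping needed for this example.
  }
};

}  // namespace
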
diff --git a/chromium/base/trace_event/trace_event_filter_test_utils.cc b/chromium/base/trace_event/trace_event_filter_test_utils.cc
new file mode 100644
index 00000000000..06548b049a2
--- /dev/null
+++ b/chromium/base/trace_event/trace_event_filter_test_utils.cc
@@ -0,0 +1,61 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_event_filter_test_utils.h"
+
+#include "base/logging.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+TestEventFilter::HitsCounter* g_hits_counter;
+}  // namespace
+
+// static
+const char TestEventFilter::kName[] = "testing_predicate";
+bool TestEventFilter::filter_return_value_;
+
+// static
+std::unique_ptr<TraceEventFilter> TestEventFilter::Factory(
+ const std::string& predicate_name) {
+ std::unique_ptr<TraceEventFilter> res;
+ if (predicate_name == kName)
+ res.reset(new TestEventFilter());
+ return res;
+}
+
+TestEventFilter::TestEventFilter() {}
+TestEventFilter::~TestEventFilter() {}
+
+bool TestEventFilter::FilterTraceEvent(const TraceEvent& trace_event) const {
+ if (g_hits_counter)
+ g_hits_counter->filter_trace_event_hit_count++;
+ return filter_return_value_;
+}
+
+void TestEventFilter::EndEvent(const char* category_name,
+ const char* name) const {
+ if (g_hits_counter)
+ g_hits_counter->end_event_hit_count++;
+}
+
+TestEventFilter::HitsCounter::HitsCounter() {
+ Reset();
+ DCHECK(!g_hits_counter);
+ g_hits_counter = this;
+}
+
+TestEventFilter::HitsCounter::~HitsCounter() {
+ DCHECK(g_hits_counter);
+ g_hits_counter = nullptr;
+}
+
+void TestEventFilter::HitsCounter::Reset() {
+ filter_trace_event_hit_count = 0;
+ end_event_hit_count = 0;
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/chromium/base/trace_event/trace_event_filter_test_utils.h b/chromium/base/trace_event/trace_event_filter_test_utils.h
new file mode 100644
index 00000000000..419068b221a
--- /dev/null
+++ b/chromium/base/trace_event/trace_event_filter_test_utils.h
@@ -0,0 +1,53 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_EVENT_FILTER_TEST_UTILS_H_
+#define BASE_TRACE_EVENT_TRACE_EVENT_FILTER_TEST_UTILS_H_
+
+#include <memory>
+#include <string>
+
+#include "base/macros.h"
+#include "base/trace_event/trace_event_filter.h"
+
+namespace base {
+namespace trace_event {
+
+class TestEventFilter : public TraceEventFilter {
+ public:
+ struct HitsCounter {
+ HitsCounter();
+ ~HitsCounter();
+ void Reset();
+ size_t filter_trace_event_hit_count;
+ size_t end_event_hit_count;
+ };
+
+ static const char kName[];
+
+ // Factory method for TraceLog::SetFilterFactoryForTesting().
+ static std::unique_ptr<TraceEventFilter> Factory(
+ const std::string& predicate_name);
+
+ TestEventFilter();
+ ~TestEventFilter() override;
+
+ // TraceEventFilter implementation.
+ bool FilterTraceEvent(const TraceEvent& trace_event) const override;
+ void EndEvent(const char* category_name, const char* name) const override;
+
+ static void set_filter_return_value(bool value) {
+ filter_return_value_ = value;
+ }
+
+ private:
+ static bool filter_return_value_;
+
+ DISALLOW_COPY_AND_ASSIGN(TestEventFilter);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_TRACE_EVENT_FILTER_TEST_UTILS_H_
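
Putting the pieces together, the test fixtures later in this patch wire these helpers up roughly as follows (a condensed sketch, not a literal excerpt; the wrapper function and its config_json parameter are hypothetical):

#include "base/trace_event/trace_config.h"
#include "base/trace_event/trace_event_filter_test_utils.h"
#include "base/trace_event/trace_log.h"

void EnableTestFiltering(const char* config_json) {
  using base::trace_event::TestEventFilter;
  using base::trace_event::TraceConfig;
  using base::trace_event::TraceLog;

  // Counts FilterTraceEvent()/EndEvent() hits while it stays in scope.
  TestEventFilter::HitsCounter hits;
  TestEventFilter::set_filter_return_value(true);
  TraceLog::GetInstance()->SetFilterFactoryForTesting(TestEventFilter::Factory);
  TraceLog::GetInstance()->SetEnabled(
      TraceConfig(config_json),
      TraceLog::RECORDING_MODE | TraceLog::FILTERING_MODE);
  // ... emit TRACE_EVENT macros, disable tracing, then inspect
  // hits.filter_trace_event_hit_count and hits.end_event_hit_count ...
}
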
diff --git a/chromium/base/trace_event/trace_event_impl.cc b/chromium/base/trace_event/trace_event_impl.cc
index f9792d0d6df..cb23eb474c5 100644
--- a/chromium/base/trace_event/trace_event_impl.cc
+++ b/chromium/base/trace_event/trace_event_impl.cc
@@ -458,30 +458,32 @@ TraceID::AsConvertableToTraceFormat() const {
if (scope_ != kGlobalScope)
value->SetString("scope", scope_);
- switch (id_flags_) {
- case TRACE_EVENT_FLAG_HAS_ID:
- value->SetString(
- "id",
- base::StringPrintf("0x%" PRIx64, static_cast<uint64_t>(raw_id_)));
- break;
- case TRACE_EVENT_FLAG_HAS_GLOBAL_ID:
- value->BeginDictionary("id2");
- value->SetString(
- "global",
- base::StringPrintf("0x%" PRIx64, static_cast<uint64_t>(raw_id_)));
- value->EndDictionary();
- break;
- case TRACE_EVENT_FLAG_HAS_LOCAL_ID:
- value->BeginDictionary("id2");
- value->SetString(
- "local",
- base::StringPrintf("0x%" PRIx64, static_cast<uint64_t>(raw_id_)));
- value->EndDictionary();
- break;
- default:
- NOTREACHED() << "Unrecognized ID flag";
+
+ const char* id_field_name = "id";
+ if (id_flags_ == TRACE_EVENT_FLAG_HAS_GLOBAL_ID) {
+ id_field_name = "global";
+ value->BeginDictionary("id2");
+ } else if (id_flags_ == TRACE_EVENT_FLAG_HAS_LOCAL_ID) {
+ id_field_name = "local";
+ value->BeginDictionary("id2");
+ } else if (id_flags_ != TRACE_EVENT_FLAG_HAS_ID) {
+ NOTREACHED() << "Unrecognized ID flag";
}
+ if (has_prefix_) {
+ value->SetString(id_field_name,
+ base::StringPrintf("0x%" PRIx64 "/0x%" PRIx64,
+ static_cast<uint64_t>(prefix_),
+ static_cast<uint64_t>(raw_id_)));
+ } else {
+ value->SetString(
+ id_field_name,
+ base::StringPrintf("0x%" PRIx64, static_cast<uint64_t>(raw_id_)));
+ }
+
+ if (id_flags_ != TRACE_EVENT_FLAG_HAS_ID)
+ value->EndDictionary();
+
return std::move(value);
}
diff --git a/chromium/base/trace_event/trace_event_memory_overhead.cc b/chromium/base/trace_event/trace_event_memory_overhead.cc
index 23579cbb22d..48a0d29f446 100644
--- a/chromium/base/trace_event/trace_event_memory_overhead.cc
+++ b/chromium/base/trace_event/trace_event_memory_overhead.cc
@@ -69,27 +69,27 @@ void TraceEventMemoryOverhead::AddRefCountedString(
void TraceEventMemoryOverhead::AddValue(const Value& value) {
switch (value.GetType()) {
- case Value::TYPE_NULL:
- case Value::TYPE_BOOLEAN:
- case Value::TYPE_INTEGER:
- case Value::TYPE_DOUBLE:
+ case Value::Type::NONE:
+ case Value::Type::BOOLEAN:
+ case Value::Type::INTEGER:
+ case Value::Type::DOUBLE:
Add("FundamentalValue", sizeof(Value));
break;
- case Value::TYPE_STRING: {
+ case Value::Type::STRING: {
const StringValue* string_value = nullptr;
value.GetAsString(&string_value);
Add("StringValue", sizeof(StringValue));
AddString(string_value->GetString());
} break;
- case Value::TYPE_BINARY: {
+ case Value::Type::BINARY: {
const BinaryValue* binary_value = nullptr;
value.GetAsBinary(&binary_value);
Add("BinaryValue", sizeof(BinaryValue) + binary_value->GetSize());
} break;
- case Value::TYPE_DICTIONARY: {
+ case Value::Type::DICTIONARY: {
const DictionaryValue* dictionary_value = nullptr;
value.GetAsDictionary(&dictionary_value);
Add("DictionaryValue", sizeof(DictionaryValue));
@@ -100,7 +100,7 @@ void TraceEventMemoryOverhead::AddValue(const Value& value) {
}
} break;
- case Value::TYPE_LIST: {
+ case Value::Type::LIST: {
const ListValue* list_value = nullptr;
value.GetAsList(&list_value);
Add("ListValue", sizeof(ListValue));
diff --git a/chromium/base/trace_event/trace_event_unittest.cc b/chromium/base/trace_event/trace_event_unittest.cc
index 81d043ac394..29be3feec83 100644
--- a/chromium/base/trace_event/trace_event_unittest.cc
+++ b/chromium/base/trace_event/trace_event_unittest.cc
@@ -30,8 +30,12 @@
#include "base/threading/platform_thread.h"
#include "base/threading/thread.h"
#include "base/time/time.h"
+#include "base/trace_event/event_name_filter.h"
+#include "base/trace_event/heap_profiler_event_filter.h"
#include "base/trace_event/trace_buffer.h"
#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_event_filter.h"
+#include "base/trace_event/trace_event_filter_test_utils.h"
#include "base/trace_event/trace_event_synthetic_delay.h"
#include "base/values.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -260,7 +264,7 @@ DictionaryValue* TraceEventTestFixture::FindMatchingTraceEntry(
for (size_t i = 0; i < trace_parsed_count; i++) {
Value* value = NULL;
trace_parsed_.Get(i, &value);
- if (!value || value->GetType() != Value::TYPE_DICTIONARY)
+ if (!value || value->GetType() != Value::Type::DICTIONARY)
continue;
DictionaryValue* dict = static_cast<DictionaryValue*>(value);
@@ -278,7 +282,7 @@ void TraceEventTestFixture::DropTracedMetadataRecords() {
for (size_t i = 0; i < old_trace_parsed_size; i++) {
Value* value = nullptr;
old_trace_parsed->Get(i, &value);
- if (!value || value->GetType() != Value::TYPE_DICTIONARY) {
+ if (!value || value->GetType() != Value::Type::DICTIONARY) {
trace_parsed_.Append(value->CreateDeepCopy());
continue;
}
@@ -367,7 +371,7 @@ const DictionaryValue* FindTraceEntry(
match_after_this_item = NULL;
continue;
}
- if (!value || value->GetType() != Value::TYPE_DICTIONARY)
+ if (!value || value->GetType() != Value::Type::DICTIONARY)
continue;
const DictionaryValue* dict = static_cast<const DictionaryValue*>(value);
@@ -385,7 +389,7 @@ std::vector<const DictionaryValue*> FindTraceEntries(
for (size_t i = 0; i < trace_parsed_count; i++) {
const Value* value = NULL;
trace_parsed.Get(i, &value);
- if (!value || value->GetType() != Value::TYPE_DICTIONARY)
+ if (!value || value->GetType() != Value::Type::DICTIONARY)
continue;
const DictionaryValue* dict = static_cast<const DictionaryValue*>(value);
@@ -524,6 +528,8 @@ void TraceWithAllMacroVariants(WaitableEvent* task_complete_event) {
TRACE_ID_LOCAL(0x2000));
TRACE_LINK_IDS("all", "TRACE_LINK_IDS to a global ID", 0x1000,
TRACE_ID_GLOBAL(0x2000));
+ TRACE_LINK_IDS("all", "TRACE_LINK_IDS to a composite ID", 0x1000,
+ TRACE_ID_WITH_SCOPE("scope 1", 0x2000, 0x3000));
TRACE_EVENT_ASYNC_BEGIN0("all", "async default process scope", 0x1000);
TRACE_EVENT_ASYNC_BEGIN0("all", "async local id", TRACE_ID_LOCAL(0x2000));
@@ -1044,6 +1050,25 @@ void ValidateAllTraceMacrosCreatedData(const ListValue& trace_parsed) {
EXPECT_EQ("0x2000", id2);
}
+ EXPECT_FIND_("TRACE_LINK_IDS to a composite ID");
+ {
+ std::string ph;
+ EXPECT_TRUE((item && item->GetString("ph", &ph)));
+ EXPECT_EQ("=", ph);
+
+ EXPECT_FALSE(item->HasKey("scope"));
+ std::string id1;
+ EXPECT_TRUE(item->GetString("id", &id1));
+ EXPECT_EQ("0x1000", id1);
+
+ std::string scope;
+ EXPECT_TRUE(item->GetString("args.linked_id.scope", &scope));
+ EXPECT_EQ("scope 1", scope);
+ std::string id2;
+ EXPECT_TRUE(item->GetString("args.linked_id.id", &id2));
+    EXPECT_EQ("0x2000/0x3000", id2);
+ }
+
EXPECT_FIND_("async default process scope");
{
std::string ph;
@@ -1114,7 +1139,7 @@ void ValidateInstantEventPresentOnEveryThread(const ListValue& trace_parsed,
for (size_t i = 0; i < trace_parsed_count; i++) {
const Value* value = NULL;
trace_parsed.Get(i, &value);
- if (!value || value->GetType() != Value::TYPE_DICTIONARY)
+ if (!value || value->GetType() != Value::Type::DICTIONARY)
continue;
const DictionaryValue* dict = static_cast<const DictionaryValue*>(value);
std::string name;
@@ -2320,7 +2345,7 @@ TEST_F(TraceEventTestFixture, PrimitiveArgs) {
dict->GetDictionary("args", &args_dict);
ASSERT_TRUE(args_dict);
EXPECT_TRUE(args_dict->Get("float_one", &value));
- EXPECT_TRUE(value->IsType(Value::TYPE_DOUBLE));
+ EXPECT_TRUE(value->IsType(Value::Type::DOUBLE));
EXPECT_TRUE(value->GetAsDouble(&double_value));
EXPECT_EQ(1, double_value);
@@ -2330,7 +2355,7 @@ TEST_F(TraceEventTestFixture, PrimitiveArgs) {
dict->GetDictionary("args", &args_dict);
ASSERT_TRUE(args_dict);
EXPECT_TRUE(args_dict->Get("float_half", &value));
- EXPECT_TRUE(value->IsType(Value::TYPE_DOUBLE));
+ EXPECT_TRUE(value->IsType(Value::Type::DOUBLE));
EXPECT_TRUE(value->GetAsDouble(&double_value));
EXPECT_EQ(0.5, double_value);
@@ -2340,7 +2365,7 @@ TEST_F(TraceEventTestFixture, PrimitiveArgs) {
dict->GetDictionary("args", &args_dict);
ASSERT_TRUE(args_dict);
EXPECT_TRUE(args_dict->Get("float_neghalf", &value));
- EXPECT_TRUE(value->IsType(Value::TYPE_DOUBLE));
+ EXPECT_TRUE(value->IsType(Value::Type::DOUBLE));
EXPECT_TRUE(value->GetAsDouble(&double_value));
EXPECT_EQ(-0.5, double_value);
@@ -2956,45 +2981,6 @@ TEST_F(TraceEventTestFixture, SyntheticDelayConfigurationToString) {
EXPECT_EQ(filter, config.ToCategoryFilterString());
}
-class TestEventFilter : public TraceLog::TraceEventFilter {
- public:
- bool FilterTraceEvent(const TraceEvent& trace_event) const override {
- filter_trace_event_hit_count_++;
- return filter_return_value_;
- }
-
- void EndEvent(const char* category_group, const char* name) override {
- end_event_hit_count_++;
- }
-
- static void set_filter_return_value(bool value) {
- filter_return_value_ = value;
- }
-
- static size_t filter_trace_event_hit_count() {
- return filter_trace_event_hit_count_;
- }
- static size_t end_event_hit_count() { return end_event_hit_count_; }
-
- static void clear_counts() {
- filter_trace_event_hit_count_ = 0;
- end_event_hit_count_ = 0;
- }
-
- private:
- static size_t filter_trace_event_hit_count_;
- static size_t end_event_hit_count_;
- static bool filter_return_value_;
-};
-
-size_t TestEventFilter::filter_trace_event_hit_count_ = 0;
-size_t TestEventFilter::end_event_hit_count_ = 0;
-bool TestEventFilter::filter_return_value_ = false;
-
-std::unique_ptr<TraceLog::TraceEventFilter> ConstructTestEventFilter() {
- return WrapUnique(new TestEventFilter);
-}
-
TEST_F(TraceEventTestFixture, TraceFilteringMode) {
const char config_json[] =
"{"
@@ -3007,8 +2993,9 @@ TEST_F(TraceEventTestFixture, TraceFilteringMode) {
"}";
// Run RECORDING_MODE within FILTERING_MODE:
+ TestEventFilter::HitsCounter filter_hits_counter;
TestEventFilter::set_filter_return_value(true);
- TraceLog::SetTraceEventFilterConstructorForTesting(ConstructTestEventFilter);
+ TraceLog::GetInstance()->SetFilterFactoryForTesting(TestEventFilter::Factory);
// Only filtering mode is enabled with test filters.
TraceLog::GetInstance()->SetEnabled(TraceConfig(config_json),
@@ -3048,10 +3035,10 @@ TEST_F(TraceEventTestFixture, TraceFilteringMode) {
EXPECT_FALSE(FindMatchingValue("name", "name1"));
EXPECT_TRUE(FindMatchingValue("cat", "c2"));
EXPECT_TRUE(FindMatchingValue("name", "name2"));
- EXPECT_EQ(6u, TestEventFilter::filter_trace_event_hit_count());
- EXPECT_EQ(3u, TestEventFilter::end_event_hit_count());
+ EXPECT_EQ(6u, filter_hits_counter.filter_trace_event_hit_count);
+ EXPECT_EQ(3u, filter_hits_counter.end_event_hit_count);
Clear();
- TestEventFilter::clear_counts();
+ filter_hits_counter.Reset();
// Run FILTERING_MODE within RECORDING_MODE:
// Only recording mode is enabled and all events must be recorded.
@@ -3090,10 +3077,9 @@ TEST_F(TraceEventTestFixture, TraceFilteringMode) {
EXPECT_TRUE(FindMatchingValue("name", "name2"));
EXPECT_FALSE(FindMatchingValue("cat", "c1"));
EXPECT_FALSE(FindMatchingValue("name", "name1"));
- EXPECT_EQ(1u, TestEventFilter::filter_trace_event_hit_count());
- EXPECT_EQ(1u, TestEventFilter::end_event_hit_count());
+ EXPECT_EQ(1u, filter_hits_counter.filter_trace_event_hit_count);
+ EXPECT_EQ(1u, filter_hits_counter.end_event_hit_count);
Clear();
- TestEventFilter::clear_counts();
}
TEST_F(TraceEventTestFixture, EventFiltering) {
@@ -3111,8 +3097,10 @@ TEST_F(TraceEventTestFixture, EventFiltering) {
" ]"
"}";
+ TestEventFilter::HitsCounter filter_hits_counter;
TestEventFilter::set_filter_return_value(true);
- TraceLog::SetTraceEventFilterConstructorForTesting(ConstructTestEventFilter);
+ TraceLog::GetInstance()->SetFilterFactoryForTesting(TestEventFilter::Factory);
+
TraceConfig trace_config(config_json);
TraceLog::GetInstance()->SetEnabled(
trace_config, TraceLog::RECORDING_MODE | TraceLog::FILTERING_MODE);
@@ -3127,9 +3115,8 @@ TEST_F(TraceEventTestFixture, EventFiltering) {
EndTraceAndFlush();
- EXPECT_EQ(3u, TestEventFilter::filter_trace_event_hit_count());
- EXPECT_EQ(1u, TestEventFilter::end_event_hit_count());
- TestEventFilter::clear_counts();
+ EXPECT_EQ(3u, filter_hits_counter.filter_trace_event_hit_count);
+ EXPECT_EQ(1u, filter_hits_counter.end_event_hit_count);
}
TEST_F(TraceEventTestFixture, EventWhitelistFiltering) {
@@ -3150,7 +3137,7 @@ TEST_F(TraceEventTestFixture, EventWhitelistFiltering) {
" "
" ]"
"}",
- TraceLog::TraceEventFilter::kEventWhitelistPredicate);
+ EventNameFilter::kName);
TraceConfig trace_config(config_json);
TraceLog::GetInstance()->SetEnabled(
@@ -3182,7 +3169,7 @@ TEST_F(TraceEventTestFixture, HeapProfilerFiltering) {
" }"
" ]"
"}",
- TraceLog::TraceEventFilter::kHeapProfilerPredicate);
+ HeapProfilerEventFilter::kName);
TraceConfig trace_config(config_json);
TraceLog::GetInstance()->SetEnabled(
diff --git a/chromium/base/trace_event/trace_log.cc b/chromium/base/trace_event/trace_log.cc
index f76393cf230..edfd6488bb5 100644
--- a/chromium/base/trace_event/trace_log.cc
+++ b/chromium/base/trace_event/trace_log.cc
@@ -32,8 +32,10 @@
#include "base/threading/worker_pool.h"
#include "base/time/time.h"
#include "base/trace_event/category_registry.h"
+#include "base/trace_event/event_name_filter.h"
#include "base/trace_event/heap_profiler.h"
#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
+#include "base/trace_event/heap_profiler_event_filter.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/process_memory_dump.h"
@@ -84,82 +86,11 @@ const size_t kEchoToConsoleTraceEventBufferChunks = 256;
const size_t kTraceEventBufferSizeInBytes = 100 * 1024;
const int kThreadFlushTimeoutMs = 3000;
-const char kEventNameWhitelist[] = "event_name_whitelist";
-
#define MAX_TRACE_EVENT_FILTERS 32
// List of TraceEventFilter objects from the most recent tracing session.
-base::LazyInstance<std::vector<std::unique_ptr<TraceLog::TraceEventFilter>>>::
- Leaky g_category_group_filters = LAZY_INSTANCE_INITIALIZER;
-
-class EventNameFilter : public TraceLog::TraceEventFilter {
- public:
- EventNameFilter(const base::DictionaryValue* filter_args) {
- const base::ListValue* whitelist = nullptr;
- if (filter_args->GetList(kEventNameWhitelist, &whitelist)) {
- for (size_t i = 0; i < whitelist->GetSize(); ++i) {
- std::string event_name;
- if (!whitelist->GetString(i, &event_name))
- continue;
-
- whitelist_.insert(event_name);
- }
- }
- }
-
- bool FilterTraceEvent(const TraceEvent& trace_event) const override {
- return ContainsKey(whitelist_, trace_event.name());
- }
-
- private:
- std::unordered_set<std::string> whitelist_;
-};
-
-// This filter is used to record trace events as pseudo stack for the heap
-// profiler. It does not filter-out any events from the trace, ie. the behavior
-// of trace events being added to TraceLog remains same: the events are added
-// iff enabled for recording and not filtered-out by any other filter.
-class HeapProfilerFilter : public TraceLog::TraceEventFilter {
- public:
- HeapProfilerFilter() {}
-
- bool FilterTraceEvent(const TraceEvent& trace_event) const override {
- if (AllocationContextTracker::capture_mode() !=
- AllocationContextTracker::CaptureMode::PSEUDO_STACK) {
- return true;
- }
-
- // TODO(primiano): Add support for events with copied name crbug.com/581079.
- if (trace_event.flags() & TRACE_EVENT_FLAG_COPY)
- return true;
-
- const char* category_name =
- TraceLog::GetCategoryGroupName(trace_event.category_group_enabled());
- if (trace_event.phase() == TRACE_EVENT_PHASE_BEGIN ||
- trace_event.phase() == TRACE_EVENT_PHASE_COMPLETE) {
- AllocationContextTracker::GetInstanceForCurrentThread()
- ->PushPseudoStackFrame({category_name, trace_event.name()});
- } else if (trace_event.phase() == TRACE_EVENT_PHASE_END) {
- // The pop for |TRACE_EVENT_PHASE_COMPLETE| events is in |EndEvent|.
- AllocationContextTracker::GetInstanceForCurrentThread()
- ->PopPseudoStackFrame({category_name, trace_event.name()});
- }
- // Do not filter-out any events and always return true. TraceLog adds the
- // event only if it is enabled for recording.
- return true;
- }
-
- void EndEvent(const char* name, const char* category_group) override {
- if (AllocationContextTracker::capture_mode() ==
- AllocationContextTracker::CaptureMode::PSEUDO_STACK) {
- AllocationContextTracker::GetInstanceForCurrentThread()
- ->PopPseudoStackFrame({category_group, name});
- }
- }
-};
-
-TraceLog::TraceEventFilterConstructorForTesting
- g_trace_event_filter_constructor_for_testing = nullptr;
+base::LazyInstance<std::vector<std::unique_ptr<TraceEventFilter>>>::Leaky
+ g_category_group_filters = LAZY_INSTANCE_INITIALIZER;
// The name of the current thread. This is used to decide if the current
// thread name has changed. We combine all the seen thread names into the
@@ -232,17 +163,14 @@ void MakeHandle(uint32_t chunk_seq,
}
template <typename Function>
-void ForEachCategoryGroupFilter(const unsigned char* category_group_enabled,
- Function filter_fn) {
+void ForEachCategoryFilter(const unsigned char* category_group_enabled,
+ Function filter_fn) {
const TraceCategory* category =
CategoryRegistry::GetCategoryByStatePtr(category_group_enabled);
uint32_t filter_bitmap = category->enabled_filters();
- int index = 0;
- while (filter_bitmap) {
+ for (int index = 0; filter_bitmap != 0; filter_bitmap >>= 1, index++) {
if (filter_bitmap & 1 && g_category_group_filters.Get()[index])
filter_fn(g_category_group_filters.Get()[index].get());
- filter_bitmap = filter_bitmap >> 1;
- index++;
}
}
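
In the rewritten loop above, bit index i of TraceCategory::enabled_filters() selects g_category_group_filters[i]. A standalone illustration of that mapping (hypothetical, not patch code):

#include <stdint.h>
#include <stdio.h>

int main() {
  // Filters 0 and 2 enabled for this category: bitmap 0b101.
  uint32_t filter_bitmap = 0x5;
  for (int index = 0; filter_bitmap != 0; filter_bitmap >>= 1, index++) {
    if (filter_bitmap & 1)
      printf("would run g_category_group_filters[%d]\n", index);
  }
  return 0;
}
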
@@ -429,7 +357,8 @@ TraceLog::TraceLog()
trace_config_(TraceConfig()),
thread_shared_chunk_index_(0),
generation_(0),
- use_worker_thread_(false) {
+ use_worker_thread_(false),
+ filter_factory_for_testing_(nullptr) {
CategoryRegistry::Initialize();
#if defined(OS_NACL) // NaCl shouldn't expose the process id.
@@ -493,11 +422,18 @@ const unsigned char* TraceLog::GetCategoryGroupEnabled(
DCHECK(!CategoryRegistry::kCategoryAlreadyShutdown->is_enabled());
return CategoryRegistry::kCategoryAlreadyShutdown->state_ptr();
}
- TraceCategory* category = nullptr;
- bool is_new_category =
- CategoryRegistry::GetOrCreateCategoryByName(category_group, &category);
- if (is_new_category)
- tracelog->UpdateCategoryState(category);
+ TraceCategory* category = CategoryRegistry::GetCategoryByName(category_group);
+ if (!category) {
+ // Slow path: in the case of a new category we have to repeat the check
+ // holding the lock, as multiple threads might have reached this point
+ // at the same time.
+ auto category_initializer = [](TraceCategory* category) {
+ TraceLog::GetInstance()->UpdateCategoryState(category);
+ };
+ AutoLock lock(tracelog->lock_);
+ CategoryRegistry::GetOrCreateCategoryLocked(
+ category_group, category_initializer, &category);
+ }
DCHECK(category->state_ptr());
return category->state_ptr();
}
@@ -509,6 +445,7 @@ const char* TraceLog::GetCategoryGroupName(
}
void TraceLog::UpdateCategoryState(TraceCategory* category) {
+ lock_.AssertAcquired();
DCHECK(category->is_valid());
unsigned char state_flags = 0;
if (enabled_modes_ & RECORDING_MODE &&
@@ -549,6 +486,7 @@ void TraceLog::UpdateCategoryState(TraceCategory* category) {
}
void TraceLog::UpdateCategoryRegistry() {
+ lock_.AssertAcquired();
CreateFiltersForTraceConfig();
for (TraceCategory& category : CategoryRegistry::GetAllCategories()) {
UpdateCategoryState(&category);
@@ -564,7 +502,7 @@ void TraceLog::CreateFiltersForTraceConfig() {
if (g_category_group_filters.Get().size())
return;
- for (auto& event_filter : enabled_event_filters_) {
+ for (auto& filter_config : enabled_event_filters_) {
if (g_category_group_filters.Get().size() >= MAX_TRACE_EVENT_FILTERS) {
NOTREACHED()
<< "Too many trace event filters installed in the current session";
@@ -572,17 +510,17 @@ void TraceLog::CreateFiltersForTraceConfig() {
}
std::unique_ptr<TraceEventFilter> new_filter;
- if (event_filter.predicate_name() ==
- TraceEventFilter::kEventWhitelistPredicate) {
- new_filter = MakeUnique<EventNameFilter>(event_filter.filter_args());
- } else if (event_filter.predicate_name() ==
- TraceEventFilter::kHeapProfilerPredicate) {
- new_filter = MakeUnique<HeapProfilerFilter>();
- } else if (event_filter.predicate_name() == "testing_predicate") {
- CHECK(g_trace_event_filter_constructor_for_testing);
- new_filter = g_trace_event_filter_constructor_for_testing();
+ const std::string& predicate_name = filter_config.predicate_name();
+ if (predicate_name == EventNameFilter::kName) {
+ auto whitelist = MakeUnique<std::unordered_set<std::string>>();
+ CHECK(filter_config.GetArgAsSet("event_name_whitelist", &*whitelist));
+ new_filter = MakeUnique<EventNameFilter>(std::move(whitelist));
+ } else if (predicate_name == HeapProfilerEventFilter::kName) {
+ new_filter = MakeUnique<HeapProfilerEventFilter>();
} else {
- NOTREACHED();
+ if (filter_factory_for_testing_)
+ new_filter = filter_factory_for_testing_(predicate_name);
+ CHECK(new_filter) << "Unknown trace filter " << predicate_name;
}
g_category_group_filters.Get().push_back(std::move(new_filter));
}
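
For context, the predicate names checked above come straight from the trace config JSON. The sketch below enables the event-name whitelist filter; the "event_filters" and "filter_predicate" key names are an assumption about TraceConfig at this revision, while "event_name_whitelist" matches the GetArgAsSet() call above:

#include <string>

#include "base/strings/stringprintf.h"
#include "base/trace_event/event_name_filter.h"
#include "base/trace_event/trace_config.h"
#include "base/trace_event/trace_log.h"

void EnableWhitelistFiltering() {
  using base::trace_event::EventNameFilter;
  using base::trace_event::TraceConfig;
  using base::trace_event::TraceLog;

  // Only events named "only_this_event" survive filtering in "*" categories.
  std::string config_json = base::StringPrintf(
      "{"
      "  \"event_filters\": [{"
      "    \"filter_predicate\": \"%s\","
      "    \"included_categories\": [\"*\"],"
      "    \"filter_args\": {\"event_name_whitelist\": [\"only_this_event\"]}"
      "  }]"
      "}",
      EventNameFilter::kName);
  TraceLog::GetInstance()->SetEnabled(
      TraceConfig(config_json),
      TraceLog::RECORDING_MODE | TraceLog::FILTERING_MODE);
}
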
@@ -784,9 +722,8 @@ void TraceLog::SetDisabledWhileLocked(uint8_t modes_to_disable) {
if (modes_to_disable & FILTERING_MODE)
enabled_event_filters_.clear();
- if (modes_to_disable & RECORDING_MODE) {
+ if (modes_to_disable & RECORDING_MODE)
trace_config_.Clear();
- }
UpdateCategoryRegistry();
@@ -1331,7 +1268,7 @@ TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
arg_values, convertable_values, flags);
disabled_by_filters = true;
- ForEachCategoryGroupFilter(
+ ForEachCategoryFilter(
category_group_enabled, [&new_trace_event, &disabled_by_filters](
TraceEventFilter* trace_event_filter) {
if (trace_event_filter->FilterTraceEvent(*new_trace_event))
@@ -1461,10 +1398,10 @@ void TraceLog::EndFilteredEvent(const unsigned char* category_group_enabled,
const char* name,
TraceEventHandle handle) {
const char* category_name = GetCategoryGroupName(category_group_enabled);
- ForEachCategoryGroupFilter(
+ ForEachCategoryFilter(
category_group_enabled,
[name, category_name](TraceEventFilter* trace_event_filter) {
- trace_event_filter->EndEvent(name, category_name);
+ trace_event_filter->EndEvent(category_name, name);
});
}
@@ -1605,11 +1542,6 @@ void TraceLog::DeleteForTesting() {
CategoryRegistry::ResetForTesting();
}
-void TraceLog::SetTraceEventFilterConstructorForTesting(
- TraceEventFilterConstructorForTesting predicate) {
- g_trace_event_filter_constructor_for_testing = predicate;
-}
-
TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) {
return GetEventByHandleInternal(handle, NULL);
}
diff --git a/chromium/base/trace_event/trace_log.h b/chromium/base/trace_event/trace_log.h
index 68a7fbbcb17..88b6e588e40 100644
--- a/chromium/base/trace_event/trace_log.h
+++ b/chromium/base/trace_event/trace_log.h
@@ -35,6 +35,7 @@ struct TraceCategory;
class TraceBuffer;
class TraceBufferChunk;
class TraceEvent;
+class TraceEventFilter;
class TraceEventMemoryOverhead;
struct BASE_EXPORT TraceLogStatus {
@@ -278,27 +279,16 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
// Exposed for unittesting:
+ // Testing factory for TraceEventFilter.
+ typedef std::unique_ptr<TraceEventFilter> (*FilterFactoryForTesting)(
+ const std::string& /* predicate_name */);
+ void SetFilterFactoryForTesting(FilterFactoryForTesting factory) {
+ filter_factory_for_testing_ = factory;
+ }
+
// Allows deleting our singleton instance.
static void DeleteForTesting();
- class BASE_EXPORT TraceEventFilter {
- public:
- static const char* const kEventWhitelistPredicate;
- static const char* const kHeapProfilerPredicate;
-
- TraceEventFilter() {}
- virtual ~TraceEventFilter() {}
- virtual bool FilterTraceEvent(const TraceEvent& trace_event) const = 0;
- virtual void EndEvent(const char* category_group, const char* name) {}
-
- private:
- DISALLOW_COPY_AND_ASSIGN(TraceEventFilter);
- };
- typedef std::unique_ptr<TraceEventFilter> (
- *TraceEventFilterConstructorForTesting)(void);
- static void SetTraceEventFilterConstructorForTesting(
- TraceEventFilterConstructorForTesting predicate);
-
// Allow tests to inspect TraceEvents.
TraceEvent* GetEventByHandle(TraceEventHandle handle);
@@ -506,6 +496,8 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
subtle::AtomicWord generation_;
bool use_worker_thread_;
+ FilterFactoryForTesting filter_factory_for_testing_;
+
DISALLOW_COPY_AND_ASSIGN(TraceLog);
};
diff --git a/chromium/base/trace_event/trace_log_constants.cc b/chromium/base/trace_event/trace_log_constants.cc
index b72ca1b3b4a..65dca2e4d6f 100644
--- a/chromium/base/trace_event/trace_log_constants.cc
+++ b/chromium/base/trace_event/trace_log_constants.cc
@@ -22,11 +22,5 @@ const TraceLog::InternalTraceOptions
const TraceLog::InternalTraceOptions
TraceLog::kInternalEnableArgumentFilter = 1 << 5;
-// TraceEventFilter predicate names used in trace config.
-const char* const TraceLog::TraceEventFilter::kEventWhitelistPredicate =
- "event_whitelist_predicate";
-const char* const TraceLog::TraceEventFilter::kHeapProfilerPredicate =
- "heap_profiler_predicate";
-
} // namespace trace_event
} // namespace base
diff --git a/chromium/base/tracked_objects.cc b/chromium/base/tracked_objects.cc
index 675c9b89e67..158fb94cc88 100644
--- a/chromium/base/tracked_objects.cc
+++ b/chromium/base/tracked_objects.cc
@@ -4,6 +4,7 @@
#include "base/tracked_objects.h"
+#include <ctype.h>
#include <limits.h>
#include <stdlib.h>
@@ -13,8 +14,10 @@
#include "base/compiler_specific.h"
#include "base/debug/leak_annotations.h"
#include "base/logging.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/numerics/safe_math.h"
#include "base/process/process_handle.h"
-#include "base/strings/stringprintf.h"
#include "base/third_party/valgrind/memcheck.h"
#include "base/threading/worker_pool.h"
#include "base/tracking_info.h"
@@ -29,6 +32,9 @@ class TimeDelta;
namespace tracked_objects {
namespace {
+
+constexpr char kWorkerThreadSanitizedName[] = "WorkerThread-*";
+
// When ThreadData is first initialized, should we start in an ACTIVE state to
// record all of the startup-time tasks, or should we start up DEACTIVATED, so
// that we only record after parsing the command line flag --enable-tracking.
@@ -74,6 +80,22 @@ inline bool IsProfilerTimingEnabled() {
return current_timing_enabled == ENABLED_TIMING;
}
+// Sanitize a thread name by replacing its trailing digits with "*".
+// Examples:
+// 1. "BrowserBlockingWorker1/23857" => "BrowserBlockingWorker1/*"
+// 2. "Chrome_IOThread" => "Chrome_IOThread"
+std::string SanitizeThreadName(const std::string& thread_name) {
+ size_t i = thread_name.length();
+
+ while (i > 0 && isdigit(thread_name[i - 1]))
+ --i;
+
+ if (i == thread_name.length())
+ return thread_name;
+
+ return thread_name.substr(0, i) + '*';
+}
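
A spot-check of the sanitization rule, assuming the file-local helper above were exposed to a test (illustrative only):

#include <string>

#include "base/logging.h"

// Assumed to be the helper defined above, made visible for the check.
std::string SanitizeThreadName(const std::string& thread_name);

void SanitizeThreadNameSpotCheck() {
  DCHECK_EQ("WorkerThread-*", SanitizeThreadName("WorkerThread-4711"));
  DCHECK_EQ("BrowserBlockingWorker1/*",
            SanitizeThreadName("BrowserBlockingWorker1/23857"));
  DCHECK_EQ("Chrome_IOThread", SanitizeThreadName("Chrome_IOThread"));
}
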
+
} // namespace
//------------------------------------------------------------------------------
@@ -86,10 +108,15 @@ DeathData::DeathData()
queue_duration_sum_(0),
run_duration_max_(0),
queue_duration_max_(0),
+ alloc_ops_(0),
+ free_ops_(0),
+ allocated_bytes_(0),
+ freed_bytes_(0),
+ alloc_overhead_bytes_(0),
+ max_allocated_bytes_(0),
run_duration_sample_(0),
queue_duration_sample_(0),
- last_phase_snapshot_(nullptr) {
-}
+ last_phase_snapshot_(nullptr) {}
DeathData::DeathData(const DeathData& other)
: count_(other.count_),
@@ -98,6 +125,12 @@ DeathData::DeathData(const DeathData& other)
queue_duration_sum_(other.queue_duration_sum_),
run_duration_max_(other.run_duration_max_),
queue_duration_max_(other.queue_duration_max_),
+ alloc_ops_(other.alloc_ops_),
+ free_ops_(other.free_ops_),
+ allocated_bytes_(other.allocated_bytes_),
+ freed_bytes_(other.freed_bytes_),
+ alloc_overhead_bytes_(other.alloc_overhead_bytes_),
+ max_allocated_bytes_(other.max_allocated_bytes_),
run_duration_sample_(other.run_duration_sample_),
queue_duration_sample_(other.queue_duration_sample_),
last_phase_snapshot_(nullptr) {
@@ -125,9 +158,9 @@ DeathData::~DeathData() {
#define CONDITIONAL_ASSIGN(assign_it, target, source) \
((target) ^= ((target) ^ (source)) & -static_cast<int32_t>(assign_it))
-void DeathData::RecordDeath(const int32_t queue_duration,
- const int32_t run_duration,
- const uint32_t random_number) {
+void DeathData::RecordDurations(const int32_t queue_duration,
+ const int32_t run_duration,
+ const uint32_t random_number) {
// We'll just clamp at INT_MAX, but we should note this in the UI as such.
if (count_ < INT_MAX)
base::subtle::NoBarrier_Store(&count_, count_ + 1);
@@ -164,12 +197,28 @@ void DeathData::RecordDeath(const int32_t queue_duration,
}
}
+void DeathData::RecordAllocations(const uint32_t alloc_ops,
+ const uint32_t free_ops,
+ const uint32_t allocated_bytes,
+ const uint32_t freed_bytes,
+ const uint32_t alloc_overhead_bytes,
+ const uint32_t max_allocated_bytes) {
+ // Use saturating arithmetic.
+ SaturatingMemberAdd(alloc_ops, &alloc_ops_);
+ SaturatingMemberAdd(free_ops, &free_ops_);
+ SaturatingMemberAdd(allocated_bytes, &allocated_bytes_);
+ SaturatingMemberAdd(freed_bytes, &freed_bytes_);
+ SaturatingMemberAdd(alloc_overhead_bytes, &alloc_overhead_bytes_);
+
+ int32_t max = base::saturated_cast<int32_t>(max_allocated_bytes);
+ if (max > max_allocated_bytes_)
+ base::subtle::NoBarrier_Store(&max_allocated_bytes_, max);
+}
+
void DeathData::OnProfilingPhaseCompleted(int profiling_phase) {
// Snapshotting and storing current state.
- last_phase_snapshot_ = new DeathDataPhaseSnapshot(
- profiling_phase, count(), run_duration_sum(), run_duration_max(),
- run_duration_sample(), queue_duration_sum(), queue_duration_max(),
- queue_duration_sample(), last_phase_snapshot_);
+ last_phase_snapshot_ =
+ new DeathDataPhaseSnapshot(profiling_phase, *this, last_phase_snapshot_);
// Not touching fields for which a delta can be computed by comparing with a
// snapshot from the previous phase. Resetting other fields. Sample values
@@ -201,6 +250,17 @@ void DeathData::OnProfilingPhaseCompleted(int profiling_phase) {
base::subtle::NoBarrier_Store(&queue_duration_max_, 0);
}
+void DeathData::SaturatingMemberAdd(const uint32_t addend,
+ base::subtle::Atomic32* sum) {
+ // Bail quick if no work or already saturated.
+  // Bail out early if there is no work or the sum is already saturated.
+ return;
+
+ base::CheckedNumeric<int32_t> new_sum = *sum;
+ new_sum += addend;
+ base::subtle::NoBarrier_Store(sum, new_sum.ValueOrDefault(INT_MAX));
+}
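
The saturation relies on base::CheckedNumeric collapsing to INT_MAX instead of wrapping on overflow; a standalone sketch of the same pattern without the atomics (hypothetical, not patch code):

#include <limits.h>
#include <stdint.h>

#include "base/numerics/safe_math.h"

int32_t SaturatingAdd(int32_t sum, uint32_t addend) {
  // Mirrors DeathData::SaturatingMemberAdd(): if the checked addition
  // overflows int32_t, the result pins at INT_MAX rather than wrapping.
  base::CheckedNumeric<int32_t> new_sum = sum;
  new_sum += addend;
  return new_sum.ValueOrDefault(INT_MAX);
}
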
+
//------------------------------------------------------------------------------
DeathDataSnapshot::DeathDataSnapshot()
: count(-1),
@@ -209,8 +269,13 @@ DeathDataSnapshot::DeathDataSnapshot()
run_duration_sample(-1),
queue_duration_sum(-1),
queue_duration_max(-1),
- queue_duration_sample(-1) {
-}
+ queue_duration_sample(-1),
+ alloc_ops(-1),
+ free_ops(-1),
+ allocated_bytes(-1),
+ freed_bytes(-1),
+ alloc_overhead_bytes(-1),
+ max_allocated_bytes(-1) {}
DeathDataSnapshot::DeathDataSnapshot(int count,
int32_t run_duration_sum,
@@ -218,25 +283,58 @@ DeathDataSnapshot::DeathDataSnapshot(int count,
int32_t run_duration_sample,
int32_t queue_duration_sum,
int32_t queue_duration_max,
- int32_t queue_duration_sample)
+ int32_t queue_duration_sample,
+ int32_t alloc_ops,
+ int32_t free_ops,
+ int32_t allocated_bytes,
+ int32_t freed_bytes,
+ int32_t alloc_overhead_bytes,
+ int32_t max_allocated_bytes)
: count(count),
run_duration_sum(run_duration_sum),
run_duration_max(run_duration_max),
run_duration_sample(run_duration_sample),
queue_duration_sum(queue_duration_sum),
queue_duration_max(queue_duration_max),
- queue_duration_sample(queue_duration_sample) {}
+ queue_duration_sample(queue_duration_sample),
+ alloc_ops(alloc_ops),
+ free_ops(free_ops),
+ allocated_bytes(allocated_bytes),
+ freed_bytes(freed_bytes),
+ alloc_overhead_bytes(alloc_overhead_bytes),
+ max_allocated_bytes(max_allocated_bytes) {}
+
+DeathDataSnapshot::DeathDataSnapshot(const DeathData& death_data)
+ : count(death_data.count()),
+ run_duration_sum(death_data.run_duration_sum()),
+ run_duration_max(death_data.run_duration_max()),
+ run_duration_sample(death_data.run_duration_sample()),
+ queue_duration_sum(death_data.queue_duration_sum()),
+ queue_duration_max(death_data.queue_duration_max()),
+ queue_duration_sample(death_data.queue_duration_sample()),
+ alloc_ops(death_data.alloc_ops()),
+ free_ops(death_data.free_ops()),
+ allocated_bytes(death_data.allocated_bytes()),
+ freed_bytes(death_data.freed_bytes()),
+ alloc_overhead_bytes(death_data.alloc_overhead_bytes()),
+ max_allocated_bytes(death_data.max_allocated_bytes()) {}
+
+DeathDataSnapshot::DeathDataSnapshot(const DeathDataSnapshot& death_data) =
+ default;
DeathDataSnapshot::~DeathDataSnapshot() {
}
DeathDataSnapshot DeathDataSnapshot::Delta(
const DeathDataSnapshot& older) const {
- return DeathDataSnapshot(count - older.count,
- run_duration_sum - older.run_duration_sum,
- run_duration_max, run_duration_sample,
- queue_duration_sum - older.queue_duration_sum,
- queue_duration_max, queue_duration_sample);
+ return DeathDataSnapshot(
+ count - older.count, run_duration_sum - older.run_duration_sum,
+ run_duration_max, run_duration_sample,
+ queue_duration_sum - older.queue_duration_sum, queue_duration_max,
+ queue_duration_sample, alloc_ops - older.alloc_ops,
+ free_ops - older.free_ops, allocated_bytes - older.allocated_bytes,
+ freed_bytes - older.freed_bytes,
+ alloc_overhead_bytes - older.alloc_overhead_bytes, max_allocated_bytes);
}
//------------------------------------------------------------------------------
@@ -252,8 +350,7 @@ BirthOnThreadSnapshot::BirthOnThreadSnapshot() {
BirthOnThreadSnapshot::BirthOnThreadSnapshot(const BirthOnThread& birth)
: location(birth.location()),
- thread_name(birth.birth_thread()->thread_name()) {
-}
+ sanitized_thread_name(birth.birth_thread()->sanitized_thread_name()) {}
BirthOnThreadSnapshot::~BirthOnThreadSnapshot() {
}
@@ -285,9 +382,6 @@ ThreadData::NowFunction* ThreadData::now_function_for_testing_ = NULL;
base::ThreadLocalStorage::StaticSlot ThreadData::tls_index_ = TLS_INITIALIZER;
// static
-int ThreadData::worker_thread_data_creation_count_ = 0;
-
-// static
int ThreadData::cleanup_count_ = 0;
// static
@@ -297,7 +391,7 @@ int ThreadData::incarnation_counter_ = 0;
ThreadData* ThreadData::all_thread_data_list_head_ = NULL;
// static
-ThreadData* ThreadData::first_retired_worker_ = NULL;
+ThreadData* ThreadData::first_retired_thread_data_ = NULL;
// static
base::LazyInstance<base::Lock>::Leaky
@@ -306,25 +400,14 @@ base::LazyInstance<base::Lock>::Leaky
// static
base::subtle::Atomic32 ThreadData::status_ = ThreadData::UNINITIALIZED;
-ThreadData::ThreadData(const std::string& suggested_name)
- : next_(NULL),
- next_retired_worker_(NULL),
- worker_thread_number_(0),
- incarnation_count_for_pool_(-1),
- current_stopwatch_(NULL) {
- DCHECK_GE(suggested_name.size(), 0u);
- thread_name_ = suggested_name;
- PushToHeadOfList(); // Which sets real incarnation_count_for_pool_.
-}
-
-ThreadData::ThreadData(int thread_number)
+ThreadData::ThreadData(const std::string& sanitized_thread_name)
: next_(NULL),
- next_retired_worker_(NULL),
- worker_thread_number_(thread_number),
+ next_retired_thread_data_(NULL),
+ sanitized_thread_name_(sanitized_thread_name),
incarnation_count_for_pool_(-1),
current_stopwatch_(NULL) {
- CHECK_GT(thread_number, 0);
- base::StringAppendF(&thread_name_, "WorkerThread-%d", thread_number);
+ DCHECK(sanitized_thread_name_.empty() ||
+ !isdigit(sanitized_thread_name_.back()));
PushToHeadOfList(); // Which sets real incarnation_count_for_pool_.
}
@@ -355,15 +438,17 @@ ThreadData* ThreadData::first() {
ThreadData* ThreadData::next() const { return next_; }
// static
-void ThreadData::InitializeThreadContext(const std::string& suggested_name) {
+void ThreadData::InitializeThreadContext(const std::string& thread_name) {
if (base::WorkerPool::RunsTasksOnCurrentThread())
return;
+ DCHECK_NE(thread_name, kWorkerThreadSanitizedName);
EnsureTlsInitialization();
ThreadData* current_thread_data =
reinterpret_cast<ThreadData*>(tls_index_.Get());
if (current_thread_data)
return; // Browser tests instigate this.
- current_thread_data = new ThreadData(suggested_name);
+ current_thread_data =
+ GetRetiredOrCreateThreadData(SanitizeThreadName(thread_name));
tls_index_.Set(current_thread_data);
}
@@ -376,26 +461,8 @@ ThreadData* ThreadData::Get() {
return registered;
// We must be a worker thread, since we didn't pre-register.
- ThreadData* worker_thread_data = NULL;
- int worker_thread_number = 0;
- {
- base::AutoLock lock(*list_lock_.Pointer());
- if (first_retired_worker_) {
- worker_thread_data = first_retired_worker_;
- first_retired_worker_ = first_retired_worker_->next_retired_worker_;
- worker_thread_data->next_retired_worker_ = NULL;
- } else {
- worker_thread_number = ++worker_thread_data_creation_count_;
- }
- }
-
- // If we can't find a previously used instance, then we have to create one.
- if (!worker_thread_data) {
- DCHECK_GT(worker_thread_number, 0);
- worker_thread_data = new ThreadData(worker_thread_number);
- }
- DCHECK_GT(worker_thread_data->worker_thread_number_, 0);
-
+ ThreadData* worker_thread_data =
+ GetRetiredOrCreateThreadData(kWorkerThreadSanitizedName);
tls_index_.Set(worker_thread_data);
return worker_thread_data;
}
@@ -409,21 +476,23 @@ void ThreadData::OnThreadTermination(void* thread_data) {
}
void ThreadData::OnThreadTerminationCleanup() {
+ // We must NOT do any allocations during this callback. There is a chance that
+ // the allocator is no longer active on this thread.
+
// The list_lock_ was created when we registered the callback, so it won't be
// allocated here despite the lazy reference.
base::AutoLock lock(*list_lock_.Pointer());
if (incarnation_counter_ != incarnation_count_for_pool_)
return; // ThreadData was constructed in an earlier unit test.
++cleanup_count_;
- // Only worker threads need to be retired and reused.
- if (!worker_thread_number_) {
- return;
- }
- // We must NOT do any allocations during this callback.
- // Using the simple linked lists avoids all allocations.
- DCHECK_EQ(this->next_retired_worker_, reinterpret_cast<ThreadData*>(NULL));
- this->next_retired_worker_ = first_retired_worker_;
- first_retired_worker_ = this;
+
+  // Add this ThreadData to a retired list so that it can be reused by a thread
+  // with the same sanitized name in the future.
+ // |next_retired_thread_data_| is expected to be nullptr for a ThreadData
+ // associated with an active thread.
+ DCHECK(!next_retired_thread_data_);
+ next_retired_thread_data_ = first_retired_thread_data_;
+ first_retired_thread_data_ = this;
}
// static
@@ -455,7 +524,8 @@ void ThreadData::Snapshot(int current_profiling_phase,
if (birth_count.second > 0) {
current_phase_tasks->push_back(
TaskSnapshot(BirthOnThreadSnapshot(*birth_count.first),
- DeathDataSnapshot(birth_count.second, 0, 0, 0, 0, 0, 0),
+ DeathDataSnapshot(birth_count.second, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0),
"Still_Alive"));
}
}
@@ -514,7 +584,21 @@ void ThreadData::TallyADeath(const Births& births,
base::AutoLock lock(map_lock_); // Lock as the map may get relocated now.
death_data = &death_map_[&births];
} // Release lock ASAP.
- death_data->RecordDeath(queue_duration, run_duration, random_number_);
+ death_data->RecordDurations(queue_duration, run_duration, random_number_);
+
+#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
+ if (stopwatch.heap_tracking_enabled()) {
+ base::debug::ThreadHeapUsage heap_usage = stopwatch.heap_usage().usage();
+ // Saturate the 64 bit counts on conversion to 32 bit storage.
+ death_data->RecordAllocations(
+ base::saturated_cast<int32_t>(heap_usage.alloc_ops),
+ base::saturated_cast<int32_t>(heap_usage.free_ops),
+ base::saturated_cast<int32_t>(heap_usage.alloc_bytes),
+ base::saturated_cast<int32_t>(heap_usage.free_bytes),
+ base::saturated_cast<int32_t>(heap_usage.alloc_overhead_bytes),
+ base::saturated_cast<int32_t>(heap_usage.max_allocated_bytes));
+ }
+#endif
}
// static
@@ -635,7 +719,7 @@ void ThreadData::SnapshotExecutedTasks(
if (death_data.count > 0) {
(*phased_snapshots)[phase->profiling_phase].tasks.push_back(
TaskSnapshot(BirthOnThreadSnapshot(*death.first), death_data,
- thread_name()));
+ sanitized_thread_name()));
}
}
}
@@ -653,13 +737,7 @@ void ThreadData::SnapshotMaps(int profiling_phase,
for (const auto& death : death_map_) {
deaths->push_back(std::make_pair(
death.first,
- DeathDataPhaseSnapshot(profiling_phase, death.second.count(),
- death.second.run_duration_sum(),
- death.second.run_duration_max(),
- death.second.run_duration_sample(),
- death.second.queue_duration_sum(),
- death.second.queue_duration_max(),
- death.second.queue_duration_sample(),
+ DeathDataPhaseSnapshot(profiling_phase, death.second,
death.second.last_phase_snapshot())));
}
}
@@ -705,6 +783,14 @@ void ThreadData::EnsureTlsInitialization() {
// we get the lock earlier in this method.
base::subtle::Release_Store(&status_, kInitialStartupState);
DCHECK(base::subtle::NoBarrier_Load(&status_) != UNINITIALIZED);
+
+#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
+ // Make sure heap tracking is enabled ASAP if the default state is active.
+ if (kInitialStartupState == PROFILING_ACTIVE &&
+ !base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled()) {
+ base::debug::ThreadHeapUsageTracker::EnableHeapTracking();
+ }
+#endif // BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
}
// static
@@ -714,8 +800,14 @@ void ThreadData::InitializeAndSetTrackingStatus(Status status) {
EnsureTlsInitialization(); // No-op if already initialized.
- if (status > DEACTIVATED)
+ if (status > DEACTIVATED) {
status = PROFILING_ACTIVE;
+
+#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
+ if (!base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled())
+ base::debug::ThreadHeapUsageTracker::EnableHeapTracking();
+#endif // BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
+ }
base::subtle::Release_Store(&status_, status);
}
@@ -746,8 +838,6 @@ TrackedTime ThreadData::Now() {
// static
void ThreadData::EnsureCleanupWasCalled(int major_threads_shutdown_count) {
base::AutoLock lock(*list_lock_.Pointer());
- if (worker_thread_data_creation_count_ == 0)
- return; // We haven't really run much, and couldn't have leaked.
// TODO(jar): until this is working on XP, don't run the real test.
#if 0
@@ -772,16 +862,14 @@ void ThreadData::ShutdownSingleThreadedCleanup(bool leak) {
all_thread_data_list_head_ = NULL;
++incarnation_counter_;
// To be clean, break apart the retired worker list (though we leak them).
- while (first_retired_worker_) {
- ThreadData* worker = first_retired_worker_;
- CHECK_GT(worker->worker_thread_number_, 0);
- first_retired_worker_ = worker->next_retired_worker_;
- worker->next_retired_worker_ = NULL;
+ while (first_retired_thread_data_) {
+ ThreadData* thread_data = first_retired_thread_data_;
+ first_retired_thread_data_ = thread_data->next_retired_thread_data_;
+ thread_data->next_retired_thread_data_ = nullptr;
}
}
// Put most global static back in pristine shape.
- worker_thread_data_creation_count_ = 0;
cleanup_count_ = 0;
tls_index_.Set(NULL);
// Almost UNINITIALIZED.
@@ -813,6 +901,39 @@ void ThreadData::ShutdownSingleThreadedCleanup(bool leak) {
}
}
+// static
+ThreadData* ThreadData::GetRetiredOrCreateThreadData(
+ const std::string& sanitized_thread_name) {
+ SCOPED_UMA_HISTOGRAM_TIMER("TrackedObjects.GetRetiredOrCreateThreadData");
+
+ {
+ base::AutoLock lock(*list_lock_.Pointer());
+ ThreadData** pcursor = &first_retired_thread_data_;
+ ThreadData* cursor = first_retired_thread_data_;
+
+ // Assuming that there aren't more than a few tens of retired ThreadData
+ // instances, this lookup should be quick compared to the thread creation
+ // time. Retired ThreadData instances cannot be stored in a map because
+ // insertions are done from OnThreadTerminationCleanup() where allocations
+ // are not allowed.
+ //
+ // Note: Test processes may have more than a few tens of retired ThreadData
+ // instances.
+ while (cursor) {
+ if (cursor->sanitized_thread_name() == sanitized_thread_name) {
+ DCHECK_EQ(*pcursor, cursor);
+ *pcursor = cursor->next_retired_thread_data_;
+ cursor->next_retired_thread_data_ = nullptr;
+ return cursor;
+ }
+ pcursor = &cursor->next_retired_thread_data_;
+ cursor = cursor->next_retired_thread_data_;
+ }
+ }
+
+ return new ThreadData(sanitized_thread_name);
+}
+
//------------------------------------------------------------------------------
TaskStopwatch::TaskStopwatch()
: wallclock_duration_ms_(0),
@@ -823,6 +944,10 @@ TaskStopwatch::TaskStopwatch()
state_ = CREATED;
child_ = NULL;
#endif
+#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
+ heap_tracking_enabled_ =
+ base::debug::ThreadHeapUsageTracker::IsHeapTrackingEnabled();
+#endif
}
TaskStopwatch::~TaskStopwatch() {
@@ -839,6 +964,10 @@ void TaskStopwatch::Start() {
#endif
start_time_ = ThreadData::Now();
+#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
+ if (heap_tracking_enabled_)
+ heap_usage_.Start();
+#endif
current_thread_data_ = ThreadData::Get();
if (!current_thread_data_)
@@ -862,6 +991,10 @@ void TaskStopwatch::Stop() {
state_ = STOPPED;
DCHECK(child_ == NULL);
#endif
+#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
+ if (heap_tracking_enabled_)
+ heap_usage_.Stop(true);
+#endif
if (!start_time_.is_null() && !end_time.is_null()) {
wallclock_duration_ms_ = (end_time - start_time_).InMilliseconds();
@@ -913,23 +1046,9 @@ ThreadData* TaskStopwatch::GetThreadData() const {
DeathDataPhaseSnapshot::DeathDataPhaseSnapshot(
int profiling_phase,
- int count,
- int32_t run_duration_sum,
- int32_t run_duration_max,
- int32_t run_duration_sample,
- int32_t queue_duration_sum,
- int32_t queue_duration_max,
- int32_t queue_duration_sample,
+ const DeathData& death,
const DeathDataPhaseSnapshot* prev)
- : profiling_phase(profiling_phase),
- death_data(count,
- run_duration_sum,
- run_duration_max,
- run_duration_sample,
- queue_duration_sum,
- queue_duration_max,
- queue_duration_sample),
- prev(prev) {}
+ : profiling_phase(profiling_phase), death_data(death), prev(prev) {}
//------------------------------------------------------------------------------
// TaskSnapshot
@@ -939,11 +1058,10 @@ TaskSnapshot::TaskSnapshot() {
TaskSnapshot::TaskSnapshot(const BirthOnThreadSnapshot& birth,
const DeathDataSnapshot& death_data,
- const std::string& death_thread_name)
+ const std::string& death_sanitized_thread_name)
: birth(birth),
death_data(death_data),
- death_thread_name(death_thread_name) {
-}
+ death_sanitized_thread_name(death_sanitized_thread_name) {}
TaskSnapshot::~TaskSnapshot() {
}
diff --git a/chromium/base/tracked_objects.h b/chromium/base/tracked_objects.h
index 7ef0317c39b..36caec3c6e4 100644
--- a/chromium/base/tracked_objects.h
+++ b/chromium/base/tracked_objects.h
@@ -14,9 +14,12 @@
#include <utility>
#include <vector>
+#include "base/allocator/features.h"
#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/containers/hash_tables.h"
+#include "base/debug/debugging_flags.h"
+#include "base/debug/thread_heap_usage_tracker.h"
#include "base/gtest_prod_util.h"
#include "base/lazy_instance.h"
#include "base/location.h"
@@ -59,71 +62,76 @@ struct TrackingInfo;
// with great efficiency (i.e., copying of strings is never needed, and
// comparisons for equality can be based on pointer comparisons).
//
-// Next, a Births instance is created for use ONLY on the thread where this
-// instance was created. That Births instance records (in a base class
-// BirthOnThread) references to the static data provided in a Location instance,
-// as well as a pointer specifying the thread on which the birth takes place.
-// Hence there is at most one Births instance for each Location on each thread.
-// The derived Births class contains slots for recording statistics about all
-// instances born at the same location. Statistics currently include only the
-// count of instances constructed.
+// Next, a Births instance is constructed or found. A Births instance records
+// (in a base class BirthOnThread) references to the static data provided in a
+// Location instance, as well as a pointer to the ThreadData bound to the thread
+// on which the birth takes place (see discussion on ThreadData below). There is
+// at most one Births instance for each Location / ThreadData pair. The derived
+// Births class contains slots for recording statistics about all instances born
+// at the same location. Statistics currently include only the count of
+// instances constructed.
//
// Since the base class BirthOnThread contains only constant data, it can be
-// freely accessed by any thread at any time (i.e., only the statistic needs to
-// be handled carefully, and stats are updated exclusively on the birth thread).
+// freely accessed by any thread at any time. The statistics must be handled
+// more carefully; they are updated exclusively by the single thread to which
+// the ThreadData is bound at a given time.
//
// For Tasks, having now either constructed or found the Births instance
// described above, a pointer to the Births instance is then recorded into the
-// PendingTask structure in MessageLoop. This fact alone is very useful in
-// debugging, when there is a question of where an instance came from. In
-// addition, the birth time is also recorded and used to later evaluate the
-// lifetime duration of the whole Task. As a result of the above embedding, we
-// can find out a Task's location of birth, and thread of birth, without using
-// any locks, as all that data is constant across the life of the process.
+// PendingTask structure. This fact alone is very useful in debugging, when
+// there is a question of where an instance came from. In addition, the birth
+// time is also recorded and used to later evaluate the lifetime duration of the
+// whole Task. As a result of the above embedding, we can find out a Task's
+// location of birth, and name of birth thread, without using any locks, as all
+// that data is constant across the life of the process.
//
// The above work *could* also be done for any other object as well by calling
// TallyABirthIfActive() and TallyRunOnNamedThreadIfTracking() as appropriate.
//
-// The amount of memory used in the above data structures depends on how many
-// threads there are, and how many Locations of construction there are.
-// Fortunately, we don't use memory that is the product of those two counts, but
-// rather we only need one Births instance for each thread that constructs an
-// instance at a Location. In many cases, instances are only created on one
-// thread, so the memory utilization is actually fairly restrained.
+// The upper bound for the amount of memory used in the above data structures is
+// the product of the number of ThreadData instances and the number of
+// Locations. Fortunately, Locations are often created on a single thread, so
+// the memory utilization is actually fairly restrained.
//
// Lastly, when an instance is deleted, the final tallies of statistics are
// carefully accumulated. That tallying writes into slots (members) in a
-// collection of DeathData instances. For each birth place Location that is
-// destroyed on a thread, there is a DeathData instance to record the additional
-// death count, as well as accumulate the run-time and queue-time durations for
-// the instance as it is destroyed (dies). By maintaining a single place to
-// aggregate this running sum *only* for the given thread, we avoid the need to
-// lock such DeathData instances. (i.e., these accumulated stats in a DeathData
-// instance are exclusively updated by the singular owning thread).
+// collection of DeathData instances. For each Births / death ThreadData pair,
+// there is a DeathData instance to record the additional death count, as well
+// as to accumulate the run-time and queue-time durations for the instance as it
+// is destroyed (dies). Since a ThreadData is bound to at most one thread at a
+// time, there is no need to lock such DeathData instances. (i.e., these
+// accumulated stats in a DeathData instance are exclusively updated by the
+// singular owning thread).
//
-// With the above life cycle description complete, the major remaining detail
-// is explaining how each thread maintains a list of DeathData instances, and
-// of Births instances, and is able to avoid additional (redundant/unnecessary)
-// allocations.
+// With the above life cycle description complete, the major remaining detail is
+// explaining how existing Births and DeathData instances are found to avoid
+// redundant allocations.
//
-// Each thread maintains a list of data items specific to that thread in a
-// ThreadData instance (for that specific thread only). The two critical items
-// are lists of DeathData and Births instances. These lists are maintained in
-// STL maps, which are indexed by Location. As noted earlier, we can compare
-// locations very efficiently as we consider the underlying data (file,
-// function, line) to be atoms, and hence pointer comparison is used rather than
-// (slow) string comparisons.
+// A ThreadData instance maintains maps of Births and DeathData instances. The
+// Births map is indexed by Location and the DeathData map is indexed by
+// Births*. As noted earlier, we can compare Locations very efficiently as we
+// consider the underlying data (file, function, line) to be atoms, and hence
+// pointer comparison is used rather than (slow) string comparisons.
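
To make the map layout described in the paragraph above concrete, here is a hedged sketch of the two per-thread containers; the member names and exact map types are illustrative, not the actual ThreadData declarations.

#include <map>

class Location;    // file/function/line atoms, compared by pointer
class Births;      // birth tally for one Location on this ThreadData
class DeathData;   // death/run/queue (and heap) tallies for one Births*

struct ThreadDataMapsSketch {
  // Births are looked up by the Location of construction; pointer keys keep
  // the lookup cheap because the Location data is treated as interned atoms.
  std::map<const Location*, Births*> birth_map;
  // DeathData is looked up by the Births* carried in the PendingTask.
  std::map<const Births*, DeathData*> death_map;
};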
//
-// To provide a mechanism for iterating over all "known threads," which means
-// threads that have recorded a birth or a death, we create a singly linked list
-// of ThreadData instances. Each such instance maintains a pointer to the next
-// one. A static member of ThreadData provides a pointer to the first item on
-// this global list, and access via that all_thread_data_list_head_ item
-// requires the use of the list_lock_.
-// When new ThreadData instances is added to the global list, it is pre-pended,
-// which ensures that any prior acquisition of the list is valid (i.e., the
-// holder can iterate over it without fear of it changing, or the necessity of
-// using an additional lock. Iterations are actually pretty rare (used
+// The first time that a thread calls ThreadData::InitializeThreadContext() or
+// ThreadData::Get(), a ThreadData instance is bound to it and stored in TLS. If
+// a ThreadData bound to a terminated thread with the same sanitized name (i.e.
+// name without trailing digits) as the current thread is available, it is
+// reused. Otherwise, a new ThreadData instance is instantiated. Since a
+// ThreadData is bound to at most one thread at a time, there is no need to
+// acquire a lock to access its maps. Over time, a ThreadData may be bound to
+// different threads that share the same sanitized name.
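
A hedged sketch of the get-or-create flow described in the paragraph above. GetBoundThreadData(), GetTLS(), SetTLS() and LookupRetiredOrCreate() below are hypothetical stand-ins for the TLS slot and retired-list lookup this patch adds; they are not real ThreadData APIs.

#include <string>

struct ThreadDataStub {};                       // stand-in for ThreadData

ThreadDataStub* GetTLS();                       // read this thread's TLS slot
void SetTLS(ThreadDataStub* data);              // bind |data| to this thread
ThreadDataStub* LookupRetiredOrCreate(const std::string& sanitized_name);

// Sketch only -- not the real ThreadData::Get().
ThreadDataStub* GetBoundThreadData(const std::string& sanitized_name) {
  ThreadDataStub* data = GetTLS();
  if (!data) {
    // Reuse a retired instance with the same sanitized name, else make one.
    data = LookupRetiredOrCreate(sanitized_name);
    SetTLS(data);
  }
  return data;
}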
+//
+// We maintain a list of all ThreadData instances for the current process. Each
+// ThreadData instance has a pointer to the next one. A static member of
+// ThreadData provides a pointer to the first item on this global list, and
+// access via that all_thread_data_list_head_ item requires the use of the
+// list_lock_.
+//
+// When new ThreadData instances are added to the global list, they are pre-
+// pended, which ensures that any prior acquisition of the list is valid (i.e.,
+// the holder can iterate over it without fear of it changing, or the necessity
+// of using an additional lock). Iterations are actually pretty rare (used
// primarily for cleanup, or snapshotting data for display), so this lock has
// very little global performance impact.
//
@@ -170,12 +178,13 @@ struct TrackingInfo;
// memory reference).
//
// TODO(jar): We can implement a Snapshot system that *tries* to grab the
-// snapshots on the source threads *when* they have MessageLoops available
-// (worker threads don't have message loops generally, and hence gathering from
-// them will continue to be asynchronous). We had an implementation of this in
-// the past, but the difficulty is dealing with message loops being terminated.
-// We can *try* to spam the available threads via some task runner to
-// achieve this feat, and it *might* be valuable when we are collecting data
+// snapshots on the source threads *when* they have SingleThreadTaskRunners
+// available (worker threads don't have SingleThreadTaskRunners, and hence
+// gathering from them will continue to be asynchronous). We had an
+// implementation of this in the past, but the difficulty is dealing with
+// threads being terminated. We can *try* to post a task to threads that have a
+// SingleThreadTaskRunner and check if that succeeds (will fail if the thread
+// has been terminated). This *might* be valuable when we are collecting data
// for upload via UMA (where correctness of data may be more significant than
// for a single screen of about:profiler).
//
@@ -226,7 +235,7 @@ struct BASE_EXPORT BirthOnThreadSnapshot {
~BirthOnThreadSnapshot();
LocationSnapshot location;
- std::string thread_name;
+ std::string sanitized_thread_name;
};
//------------------------------------------------------------------------------
@@ -248,6 +257,8 @@ class BASE_EXPORT Births: public BirthOnThread {
DISALLOW_COPY_AND_ASSIGN(Births);
};
+class DeathData;
+
//------------------------------------------------------------------------------
// A "snapshotted" representation of the DeathData class.
@@ -265,7 +276,15 @@ struct BASE_EXPORT DeathDataSnapshot {
int32_t run_duration_sample,
int32_t queue_duration_sum,
int32_t queue_duration_max,
- int32_t queue_duration_sample);
+ int32_t queue_duration_sample,
+ int32_t alloc_ops,
+ int32_t free_ops,
+ int32_t allocated_bytes,
+ int32_t freed_bytes,
+ int32_t alloc_overhead_bytes,
+ int32_t max_allocated_bytes);
+ DeathDataSnapshot(const DeathData& death_data);
+ DeathDataSnapshot(const DeathDataSnapshot& other);
~DeathDataSnapshot();
// Calculates and returns the delta between this snapshot and an earlier
@@ -279,6 +298,13 @@ struct BASE_EXPORT DeathDataSnapshot {
int32_t queue_duration_sum;
int32_t queue_duration_max;
int32_t queue_duration_sample;
+
+ int32_t alloc_ops;
+ int32_t free_ops;
+ int32_t allocated_bytes;
+ int32_t freed_bytes;
+ int32_t alloc_overhead_bytes;
+ int32_t max_allocated_bytes;
};
//------------------------------------------------------------------------------
@@ -287,13 +313,7 @@ struct BASE_EXPORT DeathDataSnapshot {
struct DeathDataPhaseSnapshot {
DeathDataPhaseSnapshot(int profiling_phase,
- int count,
- int32_t run_duration_sum,
- int32_t run_duration_max,
- int32_t run_duration_sample,
- int32_t queue_duration_sum,
- int32_t queue_duration_max,
- int32_t queue_duration_sample,
+ const DeathData& death_data,
const DeathDataPhaseSnapshot* prev);
  // Profiling phase at whose completion this snapshot was taken.
@@ -326,9 +346,26 @@ class BASE_EXPORT DeathData {
// Update stats for a task destruction (death) that had a Run() time of
// |duration|, and has had a queueing delay of |queue_duration|.
- void RecordDeath(const int32_t queue_duration,
- const int32_t run_duration,
- const uint32_t random_number);
+ void RecordDurations(const int32_t queue_duration,
+ const int32_t run_duration,
+ const uint32_t random_number);
+
+ // Update stats for a task destruction that performed |alloc_ops|
+ // allocations, |free_ops| frees, allocated |allocated_bytes| bytes, freed
+ // |freed_bytes|, where an estimated |alloc_overhead_bytes| went to heap
+ // overhead, and where at most |max_allocated_bytes| were outstanding at any
+ // one time.
+ // Note that |alloc_overhead_bytes|/|alloc_ops| yields the average estimated
+ // heap overhead of allocations in the task, and |allocated_bytes|/|alloc_ops|
+ // yields the average size of allocation.
+ // Note also that |allocated_bytes|-|freed_bytes| yields the net heap memory
+ // usage of the task, which can be negative.
+ void RecordAllocations(const uint32_t alloc_ops,
+ const uint32_t free_ops,
+ const uint32_t allocated_bytes,
+ const uint32_t freed_bytes,
+ const uint32_t alloc_overhead_bytes,
+ const uint32_t max_allocated_bytes);
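
A small worked example of the derived metrics mentioned in the comment above, using made-up numbers (none of these figures come from the patch):

#include <cstdint>
#include <cstdio>

int main() {
  // Illustrative task: 4 allocations totalling 1024 bytes with ~64 bytes of
  // estimated heap overhead; frees totalling 768 bytes.
  const uint32_t alloc_ops = 4, allocated_bytes = 1024;
  const uint32_t alloc_overhead_bytes = 64, freed_bytes = 768;
  std::printf("average allocation size: %u bytes\n",
              static_cast<unsigned>(allocated_bytes / alloc_ops));          // 256
  std::printf("average est. overhead:   %u bytes\n",
              static_cast<unsigned>(alloc_overhead_bytes / alloc_ops));     // 16
  std::printf("net heap usage:          %d bytes\n",
              static_cast<int32_t>(allocated_bytes) -
                  static_cast<int32_t>(freed_bytes));                       // 256
  return 0;
}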
// Metrics and past snapshots accessors, used only for serialization and in
// tests.
@@ -351,6 +388,22 @@ class BASE_EXPORT DeathData {
int32_t queue_duration_sample() const {
return base::subtle::NoBarrier_Load(&queue_duration_sample_);
}
+ int32_t alloc_ops() const {
+ return base::subtle::NoBarrier_Load(&alloc_ops_);
+ }
+ int32_t free_ops() const { return base::subtle::NoBarrier_Load(&free_ops_); }
+ int32_t allocated_bytes() const {
+ return base::subtle::NoBarrier_Load(&allocated_bytes_);
+ }
+ int32_t freed_bytes() const {
+ return base::subtle::NoBarrier_Load(&freed_bytes_);
+ }
+ int32_t alloc_overhead_bytes() const {
+ return base::subtle::NoBarrier_Load(&alloc_overhead_bytes_);
+ }
+ int32_t max_allocated_bytes() const {
+ return base::subtle::NoBarrier_Load(&max_allocated_bytes_);
+ }
const DeathDataPhaseSnapshot* last_phase_snapshot() const {
return last_phase_snapshot_;
}
@@ -361,6 +414,12 @@ class BASE_EXPORT DeathData {
void OnProfilingPhaseCompleted(int profiling_phase);
private:
+ // A saturating addition operation for member variables. This elides the
+ // use of atomic-primitive reads for members that are only written on the
+ // owning thread.
+ static void SaturatingMemberAdd(const uint32_t addend,
+ base::subtle::Atomic32* sum);
+
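
The declaration above does not show the implementation; the following is a hedged, dependency-free sketch of a saturating add that clamps the running sum at INT_MAX instead of wrapping. The real member operates on base::subtle::Atomic32, presumably with non-barrier stores as the accessors above suggest.

#include <climits>
#include <cstdint>

// Sketch only: widen to 64 bits, then clamp the running sum at INT_MAX.
void SaturatingAddSketch(uint32_t addend, int32_t* sum) {
  const int64_t widened =
      static_cast<int64_t>(*sum) + static_cast<int64_t>(addend);
  *sum = widened > INT_MAX ? INT_MAX : static_cast<int32_t>(widened);
}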
// Members are ordered from most regularly read and updated, to least
// frequently used. This might help a bit with cache lines.
// Number of runs seen (divisor for calculating averages).
@@ -383,6 +442,24 @@ class BASE_EXPORT DeathData {
// snapshot thread.
base::subtle::Atomic32 run_duration_max_;
base::subtle::Atomic32 queue_duration_max_;
+
+ // The cumulative number of allocation and free operations.
+ base::subtle::Atomic32 alloc_ops_;
+ base::subtle::Atomic32 free_ops_;
+
+ // The number of bytes allocated by the task.
+ base::subtle::Atomic32 allocated_bytes_;
+
+ // The number of bytes freed by the task.
+ base::subtle::Atomic32 freed_bytes_;
+
+ // The cumulative number of overhead bytes. Where available this yields an
+ // estimate of the heap overhead for allocations.
+ base::subtle::Atomic32 alloc_overhead_bytes_;
+
+ // The high-watermark for the number of outstanding heap allocated bytes.
+ base::subtle::Atomic32 max_allocated_bytes_;
+
// Samples, used by crowd sourcing gatherers. These are almost never read,
// and rarely updated. They can be modified only on the death thread.
base::subtle::Atomic32 run_duration_sample_;
@@ -407,14 +484,14 @@ struct BASE_EXPORT TaskSnapshot {
TaskSnapshot();
TaskSnapshot(const BirthOnThreadSnapshot& birth,
const DeathDataSnapshot& death_data,
- const std::string& death_thread_name);
+ const std::string& death_sanitized_thread_name);
~TaskSnapshot();
BirthOnThreadSnapshot birth;
// Delta between death data for a thread for a certain profiling phase and the
  // snapshot for the previous phase, if any. Otherwise, just a snapshot.
DeathDataSnapshot death_data;
- std::string death_thread_name;
+ std::string death_sanitized_thread_name;
};
//------------------------------------------------------------------------------
@@ -450,9 +527,8 @@ class BASE_EXPORT ThreadData {
// Initialize the current thread context with a new instance of ThreadData.
// This is used by all threads that have names, and should be explicitly
- // set *before* any births on the threads have taken place. It is generally
- // only used by the message loop, which has a well defined thread name.
- static void InitializeThreadContext(const std::string& suggested_name);
+ // set *before* any births on the threads have taken place.
+ static void InitializeThreadContext(const std::string& thread_name);
// Using Thread Local Store, find the current instance for collecting data.
// If an instance does not exist, construct one (and remember it for use on
@@ -510,7 +586,9 @@ class BASE_EXPORT ThreadData {
static void TallyRunInAScopedRegionIfTracking(const Births* births,
const TaskStopwatch& stopwatch);
- const std::string& thread_name() const { return thread_name_; }
+ const std::string& sanitized_thread_name() const {
+ return sanitized_thread_name_;
+ }
// Initializes all statics if needed (this initialization call should be made
// while we are single threaded).
@@ -559,12 +637,7 @@ class BASE_EXPORT ThreadData {
typedef std::vector<std::pair<const Births*, DeathDataPhaseSnapshot>>
DeathsSnapshot;
- // Worker thread construction creates a name since there is none.
- explicit ThreadData(int thread_number);
-
- // Message loop based construction should provide a name.
- explicit ThreadData(const std::string& suggested_name);
-
+ explicit ThreadData(const std::string& sanitized_thread_name);
~ThreadData();
// Push this instance to the head of all_thread_data_list_head_, linking it to
@@ -628,6 +701,12 @@ class BASE_EXPORT ThreadData {
// ThreadData instances.
static void ShutdownSingleThreadedCleanup(bool leak);
+ // Returns a ThreadData instance for a thread whose sanitized name is
+ // |sanitized_thread_name|. The returned instance may have been extracted from
+ // the list of retired ThreadData instances or newly allocated.
+ static ThreadData* GetRetiredOrCreateThreadData(
+ const std::string& sanitized_thread_name);
+
// When non-null, this specifies an external function that supplies monotone
  // increasing time function.
static NowFunction* now_function_for_testing_;
@@ -635,22 +714,16 @@ class BASE_EXPORT ThreadData {
// We use thread local store to identify which ThreadData to interact with.
static base::ThreadLocalStorage::StaticSlot tls_index_;
- // List of ThreadData instances for use with worker threads. When a worker
- // thread is done (terminated), we push it onto this list. When a new worker
- // thread is created, we first try to re-use a ThreadData instance from the
- // list, and if none are available, construct a new one.
- // This is only accessed while list_lock_ is held.
- static ThreadData* first_retired_worker_;
+ // Linked list of ThreadData instances that were associated with threads that
+ // have been terminated and that have not been associated with a new thread
+ // since then. This is only accessed while |list_lock_| is held.
+ static ThreadData* first_retired_thread_data_;
// Link to the most recently created instance (starts a null terminated list).
// The list is traversed by about:profiler when it needs to snapshot data.
// This is only accessed while list_lock_ is held.
static ThreadData* all_thread_data_list_head_;
- // The next available worker thread number. This should only be accessed when
- // the list_lock_ is held.
- static int worker_thread_data_creation_count_;
-
// The number of times TLS has called us back to cleanup a ThreadData
// instance. This is only accessed while list_lock_ is held.
static int cleanup_count_;
@@ -671,23 +744,16 @@ class BASE_EXPORT ThreadData {
// Link to next instance (null terminated list). Used to globally track all
// registered instances (corresponds to all registered threads where we keep
- // data).
+ // data). Only modified in the constructor.
ThreadData* next_;
- // Pointer to another ThreadData instance for a Worker-Thread that has been
- // retired (its thread was terminated). This value is non-NULL only for a
- // retired ThreadData associated with a Worker-Thread.
- ThreadData* next_retired_worker_;
-
- // The name of the thread that is being recorded. If this thread has no
- // message_loop, then this is a worker thread, with a sequence number postfix.
- std::string thread_name_;
+ // Pointer to another retired ThreadData instance. This value is nullptr if
+ // this is associated with an active thread.
+ ThreadData* next_retired_thread_data_;
- // Indicate if this is a worker thread, and the ThreadData contexts should be
- // stored in the unregistered_thread_data_pool_ when not in use.
- // Value is zero when it is not a worker thread. Value is a positive integer
- // corresponding to the created thread name if it is a worker thread.
- int worker_thread_number_;
+ // The name of the thread that is being recorded, with all trailing digits
+ // replaced with a single "*" character.
+ const std::string sanitized_thread_name_;
// A map used on each thread to keep track of Births on this thread.
// This map should only be accessed on the thread it was constructed on.
@@ -755,6 +821,13 @@ class BASE_EXPORT TaskStopwatch {
// this thread during that period.
int32_t RunDurationMs() const;
+#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
+ const base::debug::ThreadHeapUsageTracker& heap_usage() const {
+ return heap_usage_;
+ }
+ bool heap_tracking_enabled() const { return heap_tracking_enabled_; }
+#endif
+
// Returns tracking info for the current thread.
ThreadData* GetThreadData() const;
@@ -762,6 +835,11 @@ class BASE_EXPORT TaskStopwatch {
// Time when the stopwatch was started.
TrackedTime start_time_;
+#if BUILDFLAG(ENABLE_MEMORY_TASK_PROFILER)
+ base::debug::ThreadHeapUsageTracker heap_usage_;
+ bool heap_tracking_enabled_;
+#endif
+
// Wallclock duration of the task.
int32_t wallclock_duration_ms_;
diff --git a/chromium/base/tracked_objects_unittest.cc b/chromium/base/tracked_objects_unittest.cc
index 70d9601cd0f..f208e3c9818 100644
--- a/chromium/base/tracked_objects_unittest.cc
+++ b/chromium/base/tracked_objects_unittest.cc
@@ -11,17 +11,27 @@
#include <memory>
+#include "base/macros.h"
#include "base/process/process_handle.h"
+#include "base/strings/stringprintf.h"
+#include "base/threading/thread.h"
#include "base/time/time.h"
#include "base/tracking_info.h"
#include "testing/gtest/include/gtest/gtest.h"
const int kLineNumber = 1776;
const char kFile[] = "FixedUnitTestFileName";
-const char kWorkerThreadName[] = "WorkerThread-1";
+const char kWorkerThreadName[] = "WorkerThread-*";
const char kMainThreadName[] = "SomeMainThreadName";
const char kStillAlive[] = "Still_Alive";
+const int32_t kAllocOps = 23;
+const int32_t kFreeOps = 27;
+const int32_t kAllocatedBytes = 59934;
+const int32_t kFreedBytes = 2 * kAllocatedBytes;
+const int32_t kAllocOverheadBytes = kAllocOps * 8;
+const int32_t kMaxAllocatedBytes = kAllocatedBytes / 2;
+
namespace tracked_objects {
class TrackedObjectsTest : public testing::Test {
@@ -85,7 +95,8 @@ class TrackedObjectsTest : public testing::Test {
EXPECT_EQ(kLineNumber,
process_data_phase.tasks[0].birth.location.line_number);
- EXPECT_EQ(birth_thread, process_data_phase.tasks[0].birth.thread_name);
+ EXPECT_EQ(birth_thread,
+ process_data_phase.tasks[0].birth.sanitized_thread_name);
EXPECT_EQ(count, process_data_phase.tasks[0].death_data.count);
EXPECT_EQ(count * run_ms,
@@ -100,7 +111,8 @@ class TrackedObjectsTest : public testing::Test {
EXPECT_EQ(queue_ms,
process_data_phase.tasks[0].death_data.queue_duration_sample);
- EXPECT_EQ(death_thread, process_data_phase.tasks[0].death_thread_name);
+ EXPECT_EQ(death_thread,
+ process_data_phase.tasks[0].death_sanitized_thread_name);
EXPECT_EQ(base::GetCurrentProcId(), process_data.process_id);
}
@@ -108,6 +120,16 @@ class TrackedObjectsTest : public testing::Test {
// Sets time that will be returned by ThreadData::Now().
static void SetTestTime(unsigned int test_time) { test_time_ = test_time; }
+ int GetNumThreadData() {
+ int num_thread_data = 0;
+ ThreadData* current = ThreadData::first();
+ while (current) {
+ ++num_thread_data;
+ current = current->next();
+ }
+ return num_thread_data;
+ }
+
private:
// Returns test time in milliseconds.
static unsigned int GetTestTime() { return test_time_; }
@@ -223,7 +245,8 @@ TEST_F(TrackedObjectsTest, TinyStartupShutdown) {
process_data_phase.tasks[0].birth.location.function_name);
EXPECT_EQ(kLineNumber,
process_data_phase.tasks[0].birth.location.line_number);
- EXPECT_EQ(kWorkerThreadName, process_data_phase.tasks[0].birth.thread_name);
+ EXPECT_EQ(kWorkerThreadName,
+ process_data_phase.tasks[0].birth.sanitized_thread_name);
EXPECT_EQ(1, process_data_phase.tasks[0].death_data.count);
EXPECT_EQ(time_elapsed,
process_data_phase.tasks[0].death_data.run_duration_sum);
@@ -234,10 +257,11 @@ TEST_F(TrackedObjectsTest, TinyStartupShutdown) {
EXPECT_EQ(0, process_data_phase.tasks[0].death_data.queue_duration_sum);
EXPECT_EQ(0, process_data_phase.tasks[0].death_data.queue_duration_max);
EXPECT_EQ(0, process_data_phase.tasks[0].death_data.queue_duration_sample);
- EXPECT_EQ(kWorkerThreadName, process_data_phase.tasks[0].death_thread_name);
+ EXPECT_EQ(kWorkerThreadName,
+ process_data_phase.tasks[0].death_sanitized_thread_name);
}
-TEST_F(TrackedObjectsTest, DeathDataTestRecordDeath) {
+TEST_F(TrackedObjectsTest, DeathDataTestRecordDurations) {
ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);
std::unique_ptr<DeathData> data(new DeathData());
@@ -255,7 +279,7 @@ TEST_F(TrackedObjectsTest, DeathDataTestRecordDeath) {
int32_t queue_ms = 8;
  const int kUnrandomInt = 0;  // Fake random int that ensures we sample data.
- data->RecordDeath(queue_ms, run_ms, kUnrandomInt);
+ data->RecordDurations(queue_ms, run_ms, kUnrandomInt);
EXPECT_EQ(data->run_duration_sum(), run_ms);
EXPECT_EQ(data->run_duration_max(), run_ms);
EXPECT_EQ(data->run_duration_sample(), run_ms);
@@ -265,7 +289,7 @@ TEST_F(TrackedObjectsTest, DeathDataTestRecordDeath) {
EXPECT_EQ(data->count(), 1);
EXPECT_EQ(nullptr, data->last_phase_snapshot());
- data->RecordDeath(queue_ms, run_ms, kUnrandomInt);
+ data->RecordDurations(queue_ms, run_ms, kUnrandomInt);
EXPECT_EQ(data->run_duration_sum(), run_ms + run_ms);
EXPECT_EQ(data->run_duration_max(), run_ms);
EXPECT_EQ(data->run_duration_sample(), run_ms);
@@ -276,18 +300,77 @@ TEST_F(TrackedObjectsTest, DeathDataTestRecordDeath) {
EXPECT_EQ(nullptr, data->last_phase_snapshot());
}
+TEST_F(TrackedObjectsTest, DeathDataTestRecordAllocations) {
+ ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);
+
+ std::unique_ptr<DeathData> data(new DeathData());
+ ASSERT_NE(data, nullptr);
+
+ EXPECT_EQ(data->alloc_ops(), 0);
+ EXPECT_EQ(data->free_ops(), 0);
+ EXPECT_EQ(data->allocated_bytes(), 0);
+ EXPECT_EQ(data->freed_bytes(), 0);
+ EXPECT_EQ(data->alloc_overhead_bytes(), 0);
+ EXPECT_EQ(data->max_allocated_bytes(), 0);
+
+ EXPECT_EQ(nullptr, data->last_phase_snapshot());
+
+ data->RecordAllocations(kAllocOps, kFreeOps, kAllocatedBytes, kFreedBytes,
+ kAllocOverheadBytes, kMaxAllocatedBytes);
+ EXPECT_EQ(data->alloc_ops(), kAllocOps);
+ EXPECT_EQ(data->free_ops(), kFreeOps);
+ EXPECT_EQ(data->allocated_bytes(), kAllocatedBytes);
+ EXPECT_EQ(data->freed_bytes(), kFreedBytes);
+ EXPECT_EQ(data->alloc_overhead_bytes(), kAllocOverheadBytes);
+ EXPECT_EQ(data->max_allocated_bytes(), kMaxAllocatedBytes);
+
+ // Record another batch, with a smaller max.
+ const int32_t kSmallerMaxAllocatedBytes = kMaxAllocatedBytes / 2;
+ data->RecordAllocations(kAllocOps, kFreeOps, kAllocatedBytes, kFreedBytes,
+ kAllocOverheadBytes, kSmallerMaxAllocatedBytes);
+ EXPECT_EQ(data->alloc_ops(), 2 * kAllocOps);
+ EXPECT_EQ(data->free_ops(), 2 * kFreeOps);
+ EXPECT_EQ(data->allocated_bytes(), 2 * kAllocatedBytes);
+ EXPECT_EQ(data->freed_bytes(), 2 * kFreedBytes);
+ EXPECT_EQ(data->alloc_overhead_bytes(), 2 * kAllocOverheadBytes);
+ EXPECT_EQ(data->max_allocated_bytes(), kMaxAllocatedBytes);
+
+ // Now with a larger max.
+ const int32_t kLargerMaxAllocatedBytes = kMaxAllocatedBytes * 2;
+ data->RecordAllocations(kAllocOps, kFreeOps, kAllocatedBytes, kFreedBytes,
+ kAllocOverheadBytes, kLargerMaxAllocatedBytes);
+ EXPECT_EQ(data->alloc_ops(), 3 * kAllocOps);
+ EXPECT_EQ(data->free_ops(), 3 * kFreeOps);
+ EXPECT_EQ(data->allocated_bytes(), 3 * kAllocatedBytes);
+ EXPECT_EQ(data->freed_bytes(), 3 * kFreedBytes);
+ EXPECT_EQ(data->alloc_overhead_bytes(), 3 * kAllocOverheadBytes);
+ EXPECT_EQ(data->max_allocated_bytes(), kLargerMaxAllocatedBytes);
+
+ // Saturate everything.
+ data->RecordAllocations(INT_MAX, INT_MAX, INT_MAX, INT_MAX, INT_MAX, INT_MAX);
+ EXPECT_EQ(data->alloc_ops(), INT_MAX);
+ EXPECT_EQ(data->free_ops(), INT_MAX);
+ EXPECT_EQ(data->allocated_bytes(), INT_MAX);
+ EXPECT_EQ(data->freed_bytes(), INT_MAX);
+ EXPECT_EQ(data->alloc_overhead_bytes(), INT_MAX);
+ EXPECT_EQ(data->max_allocated_bytes(), INT_MAX);
+}
+
TEST_F(TrackedObjectsTest, DeathDataTest2Phases) {
ThreadData::InitializeAndSetTrackingStatus(ThreadData::PROFILING_ACTIVE);
std::unique_ptr<DeathData> data(new DeathData());
ASSERT_NE(data, nullptr);
- int32_t run_ms = 42;
- int32_t queue_ms = 8;
+ const int32_t run_ms = 42;
+ const int32_t queue_ms = 8;
  const int kUnrandomInt = 0;  // Fake random int that ensures we sample data.
- data->RecordDeath(queue_ms, run_ms, kUnrandomInt);
- data->RecordDeath(queue_ms, run_ms, kUnrandomInt);
+ data->RecordDurations(queue_ms, run_ms, kUnrandomInt);
+ data->RecordDurations(queue_ms, run_ms, kUnrandomInt);
+
+ data->RecordAllocations(kAllocOps, kFreeOps, kAllocatedBytes, kFreedBytes,
+ kAllocOverheadBytes, kMaxAllocatedBytes);
data->OnProfilingPhaseCompleted(123);
EXPECT_EQ(data->run_duration_sum(), run_ms + run_ms);
@@ -297,6 +380,14 @@ TEST_F(TrackedObjectsTest, DeathDataTest2Phases) {
EXPECT_EQ(data->queue_duration_max(), 0);
EXPECT_EQ(data->queue_duration_sample(), queue_ms);
EXPECT_EQ(data->count(), 2);
+
+ EXPECT_EQ(data->alloc_ops(), kAllocOps);
+ EXPECT_EQ(data->free_ops(), kFreeOps);
+ EXPECT_EQ(data->allocated_bytes(), kAllocatedBytes);
+ EXPECT_EQ(data->freed_bytes(), kFreedBytes);
+ EXPECT_EQ(data->alloc_overhead_bytes(), kAllocOverheadBytes);
+ EXPECT_EQ(data->max_allocated_bytes(), kMaxAllocatedBytes);
+
ASSERT_NE(nullptr, data->last_phase_snapshot());
EXPECT_EQ(123, data->last_phase_snapshot()->profiling_phase);
EXPECT_EQ(2, data->last_phase_snapshot()->death_data.count);
@@ -311,12 +402,26 @@ TEST_F(TrackedObjectsTest, DeathDataTest2Phases) {
data->last_phase_snapshot()->death_data.queue_duration_max);
EXPECT_EQ(queue_ms,
data->last_phase_snapshot()->death_data.queue_duration_sample);
+
+ EXPECT_EQ(kAllocOps, data->last_phase_snapshot()->death_data.alloc_ops);
+ EXPECT_EQ(kFreeOps, data->last_phase_snapshot()->death_data.free_ops);
+ EXPECT_EQ(kAllocatedBytes,
+ data->last_phase_snapshot()->death_data.allocated_bytes);
+ EXPECT_EQ(kFreedBytes, data->last_phase_snapshot()->death_data.freed_bytes);
+ EXPECT_EQ(kAllocOverheadBytes,
+ data->last_phase_snapshot()->death_data.alloc_overhead_bytes);
+ EXPECT_EQ(kMaxAllocatedBytes,
+ data->last_phase_snapshot()->death_data.max_allocated_bytes);
+
EXPECT_EQ(nullptr, data->last_phase_snapshot()->prev);
- int32_t run_ms1 = 21;
- int32_t queue_ms1 = 4;
+ const int32_t run_ms1 = 21;
+ const int32_t queue_ms1 = 4;
+
+ data->RecordDurations(queue_ms1, run_ms1, kUnrandomInt);
+ data->RecordAllocations(kAllocOps, kFreeOps, kAllocatedBytes, kFreedBytes,
+ kAllocOverheadBytes, kMaxAllocatedBytes);
- data->RecordDeath(queue_ms1, run_ms1, kUnrandomInt);
EXPECT_EQ(data->run_duration_sum(), run_ms + run_ms + run_ms1);
EXPECT_EQ(data->run_duration_max(), run_ms1);
EXPECT_EQ(data->run_duration_sample(), run_ms1);
@@ -324,6 +429,14 @@ TEST_F(TrackedObjectsTest, DeathDataTest2Phases) {
EXPECT_EQ(data->queue_duration_max(), queue_ms1);
EXPECT_EQ(data->queue_duration_sample(), queue_ms1);
EXPECT_EQ(data->count(), 3);
+
+ EXPECT_EQ(data->alloc_ops(), 2 * kAllocOps);
+ EXPECT_EQ(data->free_ops(), 2 * kFreeOps);
+ EXPECT_EQ(data->allocated_bytes(), 2 * kAllocatedBytes);
+ EXPECT_EQ(data->freed_bytes(), 2 * kFreedBytes);
+ EXPECT_EQ(data->alloc_overhead_bytes(), 2 * kAllocOverheadBytes);
+ EXPECT_EQ(data->max_allocated_bytes(), kMaxAllocatedBytes);
+
ASSERT_NE(nullptr, data->last_phase_snapshot());
EXPECT_EQ(123, data->last_phase_snapshot()->profiling_phase);
EXPECT_EQ(2, data->last_phase_snapshot()->death_data.count);
@@ -338,6 +451,17 @@ TEST_F(TrackedObjectsTest, DeathDataTest2Phases) {
data->last_phase_snapshot()->death_data.queue_duration_max);
EXPECT_EQ(queue_ms,
data->last_phase_snapshot()->death_data.queue_duration_sample);
+
+ EXPECT_EQ(kAllocOps, data->last_phase_snapshot()->death_data.alloc_ops);
+ EXPECT_EQ(kFreeOps, data->last_phase_snapshot()->death_data.free_ops);
+ EXPECT_EQ(kAllocatedBytes,
+ data->last_phase_snapshot()->death_data.allocated_bytes);
+ EXPECT_EQ(kFreedBytes, data->last_phase_snapshot()->death_data.freed_bytes);
+ EXPECT_EQ(kAllocOverheadBytes,
+ data->last_phase_snapshot()->death_data.alloc_overhead_bytes);
+ EXPECT_EQ(kMaxAllocatedBytes,
+ data->last_phase_snapshot()->death_data.max_allocated_bytes);
+
EXPECT_EQ(nullptr, data->last_phase_snapshot()->prev);
}
@@ -353,6 +477,13 @@ TEST_F(TrackedObjectsTest, Delta) {
snapshot.queue_duration_max = 101;
snapshot.queue_duration_sample = 26;
+ snapshot.alloc_ops = 95;
+ snapshot.free_ops = 90;
+ snapshot.allocated_bytes = 10240;
+ snapshot.freed_bytes = 4096;
+ snapshot.alloc_overhead_bytes = 950;
+ snapshot.max_allocated_bytes = 10240;
+
DeathDataSnapshot older_snapshot;
older_snapshot.count = 2;
older_snapshot.run_duration_sum = 95;
@@ -362,6 +493,13 @@ TEST_F(TrackedObjectsTest, Delta) {
older_snapshot.queue_duration_max = 99;
older_snapshot.queue_duration_sample = 21;
+ older_snapshot.alloc_ops = 45;
+ older_snapshot.free_ops = 40;
+ older_snapshot.allocated_bytes = 4096;
+ older_snapshot.freed_bytes = 2048;
+ older_snapshot.alloc_overhead_bytes = 450;
+ older_snapshot.max_allocated_bytes = 10200;
+
const DeathDataSnapshot& delta = snapshot.Delta(older_snapshot);
EXPECT_EQ(8, delta.count);
EXPECT_EQ(5, delta.run_duration_sum);
@@ -370,6 +508,13 @@ TEST_F(TrackedObjectsTest, Delta) {
EXPECT_EQ(10, delta.queue_duration_sum);
EXPECT_EQ(101, delta.queue_duration_max);
EXPECT_EQ(26, delta.queue_duration_sample);
+
+ EXPECT_EQ(50, delta.alloc_ops);
+ EXPECT_EQ(50, delta.free_ops);
+ EXPECT_EQ(6144, delta.allocated_bytes);
+ EXPECT_EQ(2048, delta.freed_bytes);
+ EXPECT_EQ(500, delta.alloc_overhead_bytes);
+ EXPECT_EQ(10240, delta.max_allocated_bytes);
}
TEST_F(TrackedObjectsTest, DeactivatedBirthOnlyToSnapshotWorkerThread) {
@@ -531,7 +676,8 @@ TEST_F(TrackedObjectsTest, TwoPhases) {
EXPECT_EQ(kLineNumber,
process_data_phase0.tasks[0].birth.location.line_number);
- EXPECT_EQ(kMainThreadName, process_data_phase0.tasks[0].birth.thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase0.tasks[0].birth.sanitized_thread_name);
EXPECT_EQ(1, process_data_phase0.tasks[0].death_data.count);
EXPECT_EQ(2, process_data_phase0.tasks[0].death_data.run_duration_sum);
@@ -541,7 +687,8 @@ TEST_F(TrackedObjectsTest, TwoPhases) {
EXPECT_EQ(4, process_data_phase0.tasks[0].death_data.queue_duration_max);
EXPECT_EQ(4, process_data_phase0.tasks[0].death_data.queue_duration_sample);
- EXPECT_EQ(kMainThreadName, process_data_phase0.tasks[0].death_thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase0.tasks[0].death_sanitized_thread_name);
auto it1 = process_data.phased_snapshots.find(1);
ASSERT_TRUE(it1 != process_data.phased_snapshots.end());
@@ -555,7 +702,8 @@ TEST_F(TrackedObjectsTest, TwoPhases) {
EXPECT_EQ(kLineNumber,
process_data_phase1.tasks[0].birth.location.line_number);
- EXPECT_EQ(kMainThreadName, process_data_phase1.tasks[0].birth.thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase1.tasks[0].birth.sanitized_thread_name);
EXPECT_EQ(1, process_data_phase1.tasks[0].death_data.count);
EXPECT_EQ(10, process_data_phase1.tasks[0].death_data.run_duration_sum);
@@ -565,7 +713,8 @@ TEST_F(TrackedObjectsTest, TwoPhases) {
EXPECT_EQ(2, process_data_phase1.tasks[0].death_data.queue_duration_max);
EXPECT_EQ(2, process_data_phase1.tasks[0].death_data.queue_duration_sample);
- EXPECT_EQ(kMainThreadName, process_data_phase1.tasks[0].death_thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase1.tasks[0].death_sanitized_thread_name);
EXPECT_EQ(base::GetCurrentProcId(), process_data.process_id);
}
@@ -648,7 +797,8 @@ TEST_F(TrackedObjectsTest, ThreePhases) {
EXPECT_EQ(kLineNumber,
process_data_phase0.tasks[0].birth.location.line_number);
- EXPECT_EQ(kMainThreadName, process_data_phase0.tasks[0].birth.thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase0.tasks[0].birth.sanitized_thread_name);
EXPECT_EQ(1, process_data_phase0.tasks[0].death_data.count);
EXPECT_EQ(6, process_data_phase0.tasks[0].death_data.run_duration_sum);
@@ -658,7 +808,8 @@ TEST_F(TrackedObjectsTest, ThreePhases) {
EXPECT_EQ(7, process_data_phase0.tasks[0].death_data.queue_duration_max);
EXPECT_EQ(7, process_data_phase0.tasks[0].death_data.queue_duration_sample);
- EXPECT_EQ(kMainThreadName, process_data_phase0.tasks[0].death_thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase0.tasks[0].death_sanitized_thread_name);
auto it1 = process_data.phased_snapshots.find(1);
ASSERT_TRUE(it1 != process_data.phased_snapshots.end());
@@ -672,7 +823,8 @@ TEST_F(TrackedObjectsTest, ThreePhases) {
EXPECT_EQ(kLineNumber,
process_data_phase1.tasks[0].birth.location.line_number);
- EXPECT_EQ(kMainThreadName, process_data_phase1.tasks[0].birth.thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase1.tasks[0].birth.sanitized_thread_name);
EXPECT_EQ(1, process_data_phase1.tasks[0].death_data.count);
EXPECT_EQ(4, process_data_phase1.tasks[0].death_data.run_duration_sum);
@@ -682,7 +834,8 @@ TEST_F(TrackedObjectsTest, ThreePhases) {
EXPECT_EQ(5, process_data_phase1.tasks[0].death_data.queue_duration_max);
EXPECT_EQ(5, process_data_phase1.tasks[0].death_data.queue_duration_sample);
- EXPECT_EQ(kMainThreadName, process_data_phase1.tasks[0].death_thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase1.tasks[0].death_sanitized_thread_name);
auto it2 = process_data.phased_snapshots.find(2);
ASSERT_TRUE(it2 != process_data.phased_snapshots.end());
@@ -696,7 +849,8 @@ TEST_F(TrackedObjectsTest, ThreePhases) {
EXPECT_EQ(kLineNumber,
process_data_phase2.tasks[0].birth.location.line_number);
- EXPECT_EQ(kMainThreadName, process_data_phase2.tasks[0].birth.thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase2.tasks[0].birth.sanitized_thread_name);
EXPECT_EQ(1, process_data_phase2.tasks[0].death_data.count);
EXPECT_EQ(2, process_data_phase2.tasks[0].death_data.run_duration_sum);
@@ -706,7 +860,8 @@ TEST_F(TrackedObjectsTest, ThreePhases) {
EXPECT_EQ(3, process_data_phase2.tasks[0].death_data.queue_duration_max);
EXPECT_EQ(3, process_data_phase2.tasks[0].death_data.queue_duration_sample);
- EXPECT_EQ(kMainThreadName, process_data_phase2.tasks[0].death_thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase2.tasks[0].death_sanitized_thread_name);
EXPECT_EQ(base::GetCurrentProcId(), process_data.process_id);
}
@@ -753,7 +908,8 @@ TEST_F(TrackedObjectsTest, TwoPhasesSecondEmpty) {
EXPECT_EQ(kLineNumber,
process_data_phase0.tasks[0].birth.location.line_number);
- EXPECT_EQ(kMainThreadName, process_data_phase0.tasks[0].birth.thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase0.tasks[0].birth.sanitized_thread_name);
EXPECT_EQ(1, process_data_phase0.tasks[0].death_data.count);
EXPECT_EQ(2, process_data_phase0.tasks[0].death_data.run_duration_sum);
@@ -763,7 +919,8 @@ TEST_F(TrackedObjectsTest, TwoPhasesSecondEmpty) {
EXPECT_EQ(4, process_data_phase0.tasks[0].death_data.queue_duration_max);
EXPECT_EQ(4, process_data_phase0.tasks[0].death_data.queue_duration_sample);
- EXPECT_EQ(kMainThreadName, process_data_phase0.tasks[0].death_thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase0.tasks[0].death_sanitized_thread_name);
auto it1 = process_data.phased_snapshots.find(1);
ASSERT_TRUE(it1 != process_data.phased_snapshots.end());
@@ -816,7 +973,8 @@ TEST_F(TrackedObjectsTest, TwoPhasesFirstEmpty) {
EXPECT_EQ(kLineNumber,
process_data_phase1.tasks[0].birth.location.line_number);
- EXPECT_EQ(kMainThreadName, process_data_phase1.tasks[0].birth.thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase1.tasks[0].birth.sanitized_thread_name);
EXPECT_EQ(1, process_data_phase1.tasks[0].death_data.count);
EXPECT_EQ(2, process_data_phase1.tasks[0].death_data.run_duration_sum);
@@ -826,7 +984,8 @@ TEST_F(TrackedObjectsTest, TwoPhasesFirstEmpty) {
EXPECT_EQ(4, process_data_phase1.tasks[0].death_data.queue_duration_max);
EXPECT_EQ(4, process_data_phase1.tasks[0].death_data.queue_duration_sample);
- EXPECT_EQ(kMainThreadName, process_data_phase1.tasks[0].death_thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase1.tasks[0].death_sanitized_thread_name);
EXPECT_EQ(base::GetCurrentProcId(), process_data.process_id);
}
@@ -993,7 +1152,8 @@ TEST_F(TrackedObjectsTest, DifferentLives) {
process_data_phase.tasks[0].birth.location.function_name);
EXPECT_EQ(kLineNumber,
process_data_phase.tasks[0].birth.location.line_number);
- EXPECT_EQ(kMainThreadName, process_data_phase.tasks[0].birth.thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase.tasks[0].birth.sanitized_thread_name);
EXPECT_EQ(1, process_data_phase.tasks[0].death_data.count);
EXPECT_EQ(2, process_data_phase.tasks[0].death_data.run_duration_sum);
EXPECT_EQ(2, process_data_phase.tasks[0].death_data.run_duration_max);
@@ -1001,13 +1161,15 @@ TEST_F(TrackedObjectsTest, DifferentLives) {
EXPECT_EQ(4, process_data_phase.tasks[0].death_data.queue_duration_sum);
EXPECT_EQ(4, process_data_phase.tasks[0].death_data.queue_duration_max);
EXPECT_EQ(4, process_data_phase.tasks[0].death_data.queue_duration_sample);
- EXPECT_EQ(kMainThreadName, process_data_phase.tasks[0].death_thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase.tasks[0].death_sanitized_thread_name);
EXPECT_EQ(kFile, process_data_phase.tasks[1].birth.location.file_name);
EXPECT_EQ(kFunction,
process_data_phase.tasks[1].birth.location.function_name);
EXPECT_EQ(kSecondFakeLineNumber,
process_data_phase.tasks[1].birth.location.line_number);
- EXPECT_EQ(kMainThreadName, process_data_phase.tasks[1].birth.thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase.tasks[1].birth.sanitized_thread_name);
EXPECT_EQ(1, process_data_phase.tasks[1].death_data.count);
EXPECT_EQ(0, process_data_phase.tasks[1].death_data.run_duration_sum);
EXPECT_EQ(0, process_data_phase.tasks[1].death_data.run_duration_max);
@@ -1015,7 +1177,8 @@ TEST_F(TrackedObjectsTest, DifferentLives) {
EXPECT_EQ(0, process_data_phase.tasks[1].death_data.queue_duration_sum);
EXPECT_EQ(0, process_data_phase.tasks[1].death_data.queue_duration_max);
EXPECT_EQ(0, process_data_phase.tasks[1].death_data.queue_duration_sample);
- EXPECT_EQ(kStillAlive, process_data_phase.tasks[1].death_thread_name);
+ EXPECT_EQ(kStillAlive,
+ process_data_phase.tasks[1].death_sanitized_thread_name);
EXPECT_EQ(base::GetCurrentProcId(), process_data.process_id);
}
@@ -1158,7 +1321,8 @@ TEST_F(TrackedObjectsTest, TaskWithNestedExclusionWithNestedTask) {
process_data_phase.tasks[t0].birth.location.function_name);
EXPECT_EQ(kLineNumber,
process_data_phase.tasks[t0].birth.location.line_number);
- EXPECT_EQ(kMainThreadName, process_data_phase.tasks[t0].birth.thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase.tasks[t0].birth.sanitized_thread_name);
EXPECT_EQ(1, process_data_phase.tasks[t0].death_data.count);
EXPECT_EQ(6, process_data_phase.tasks[t0].death_data.run_duration_sum);
EXPECT_EQ(6, process_data_phase.tasks[t0].death_data.run_duration_max);
@@ -1166,13 +1330,15 @@ TEST_F(TrackedObjectsTest, TaskWithNestedExclusionWithNestedTask) {
EXPECT_EQ(4, process_data_phase.tasks[t0].death_data.queue_duration_sum);
EXPECT_EQ(4, process_data_phase.tasks[t0].death_data.queue_duration_max);
EXPECT_EQ(4, process_data_phase.tasks[t0].death_data.queue_duration_sample);
- EXPECT_EQ(kMainThreadName, process_data_phase.tasks[t0].death_thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase.tasks[t0].death_sanitized_thread_name);
EXPECT_EQ(kFile, process_data_phase.tasks[t1].birth.location.file_name);
EXPECT_EQ(kFunction,
process_data_phase.tasks[t1].birth.location.function_name);
EXPECT_EQ(kSecondFakeLineNumber,
process_data_phase.tasks[t1].birth.location.line_number);
- EXPECT_EQ(kMainThreadName, process_data_phase.tasks[t1].birth.thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase.tasks[t1].birth.sanitized_thread_name);
EXPECT_EQ(1, process_data_phase.tasks[t1].death_data.count);
EXPECT_EQ(2, process_data_phase.tasks[t1].death_data.run_duration_sum);
EXPECT_EQ(2, process_data_phase.tasks[t1].death_data.run_duration_max);
@@ -1180,8 +1346,30 @@ TEST_F(TrackedObjectsTest, TaskWithNestedExclusionWithNestedTask) {
EXPECT_EQ(1, process_data_phase.tasks[t1].death_data.queue_duration_sum);
EXPECT_EQ(1, process_data_phase.tasks[t1].death_data.queue_duration_max);
EXPECT_EQ(1, process_data_phase.tasks[t1].death_data.queue_duration_sample);
- EXPECT_EQ(kMainThreadName, process_data_phase.tasks[t1].death_thread_name);
+ EXPECT_EQ(kMainThreadName,
+ process_data_phase.tasks[t1].death_sanitized_thread_name);
EXPECT_EQ(base::GetCurrentProcId(), process_data.process_id);
}
+// Repetitively create and stop named threads. Verify that the number of
+// instantiated ThreadData instances is equal to the number of different
+// sanitized thread names used in the test.
+TEST_F(TrackedObjectsTest, ReuseRetiredThreadData) {
+ const char* const kThreadNames[] = {"Foo%d", "Bar%d", "123Dummy%d",
+ "456Dummy%d", "%d"};
+ constexpr int kNumIterations = 10;
+ EXPECT_EQ(0, GetNumThreadData());
+
+ for (int i = 0; i < kNumIterations; ++i) {
+ for (const char* thread_name : kThreadNames) {
+ base::Thread thread(base::StringPrintf(thread_name, i));
+ EXPECT_TRUE(thread.Start());
+ }
+ }
+
+ // Expect one ThreadData instance for each element in |kThreadNames| and one
+ // ThreadData instance for the main thread.
+ EXPECT_EQ(static_cast<int>(arraysize(kThreadNames) + 1), GetNumThreadData());
+}
+
} // namespace tracked_objects
diff --git a/chromium/base/tuple.h b/chromium/base/tuple.h
index 9f62339f0d5..34fd789976f 100644
--- a/chromium/base/tuple.h
+++ b/chromium/base/tuple.h
@@ -28,7 +28,6 @@
#include <stddef.h>
#include <tuple>
-#include "base/bind_helpers.h"
#include "build/build_config.h"
namespace base {
@@ -43,56 +42,6 @@ struct IndexSequence {};
template <size_t... Ns>
struct MakeIndexSequenceImpl;
-#if defined(_PREFAST_) && defined(OS_WIN)
-
-// Work around VC++ 2013 /analyze internal compiler error:
-// https://connect.microsoft.com/VisualStudio/feedback/details/1053626
-
-template <> struct MakeIndexSequenceImpl<0> {
- using Type = IndexSequence<>;
-};
-template <> struct MakeIndexSequenceImpl<1> {
- using Type = IndexSequence<0>;
-};
-template <> struct MakeIndexSequenceImpl<2> {
- using Type = IndexSequence<0,1>;
-};
-template <> struct MakeIndexSequenceImpl<3> {
- using Type = IndexSequence<0,1,2>;
-};
-template <> struct MakeIndexSequenceImpl<4> {
- using Type = IndexSequence<0,1,2,3>;
-};
-template <> struct MakeIndexSequenceImpl<5> {
- using Type = IndexSequence<0,1,2,3,4>;
-};
-template <> struct MakeIndexSequenceImpl<6> {
- using Type = IndexSequence<0,1,2,3,4,5>;
-};
-template <> struct MakeIndexSequenceImpl<7> {
- using Type = IndexSequence<0,1,2,3,4,5,6>;
-};
-template <> struct MakeIndexSequenceImpl<8> {
- using Type = IndexSequence<0,1,2,3,4,5,6,7>;
-};
-template <> struct MakeIndexSequenceImpl<9> {
- using Type = IndexSequence<0,1,2,3,4,5,6,7,8>;
-};
-template <> struct MakeIndexSequenceImpl<10> {
- using Type = IndexSequence<0,1,2,3,4,5,6,7,8,9>;
-};
-template <> struct MakeIndexSequenceImpl<11> {
- using Type = IndexSequence<0,1,2,3,4,5,6,7,8,9,10>;
-};
-template <> struct MakeIndexSequenceImpl<12> {
- using Type = IndexSequence<0,1,2,3,4,5,6,7,8,9,10,11>;
-};
-template <> struct MakeIndexSequenceImpl<13> {
- using Type = IndexSequence<0,1,2,3,4,5,6,7,8,9,10,11,12>;
-};
-
-#else // defined(OS_WIN) && defined(_PREFAST_)
-
template <size_t... Ns>
struct MakeIndexSequenceImpl<0, Ns...> {
using Type = IndexSequence<Ns...>;
@@ -102,8 +51,6 @@ template <size_t N, size_t... Ns>
struct MakeIndexSequenceImpl<N, Ns...>
: MakeIndexSequenceImpl<N - 1, N - 1, Ns...> {};
-#endif // defined(OS_WIN) && defined(_PREFAST_)
-
// std::get() in <=libstdc++-4.6 returns an lvalue-reference for
// rvalue-reference of a tuple, where an rvalue-reference is expected.
template <size_t I, typename... Ts>
diff --git a/chromium/base/values.cc b/chromium/base/values.cc
index f00a03f8d30..ca3692decca 100644
--- a/chromium/base/values.cc
+++ b/chromium/base/values.cc
@@ -23,7 +23,8 @@ namespace {
const char* const kTypeNames[] = {"null", "boolean", "integer", "double",
"string", "binary", "dictionary", "list"};
-static_assert(arraysize(kTypeNames) == Value::TYPE_LIST + 1,
+static_assert(arraysize(kTypeNames) ==
+ static_cast<size_t>(Value::Type::LIST) + 1,
"kTypeNames Has Wrong Size");
std::unique_ptr<Value> CopyWithoutEmptyChildren(const Value& node);
@@ -60,10 +61,10 @@ std::unique_ptr<DictionaryValue> CopyDictionaryWithoutEmptyChildren(
std::unique_ptr<Value> CopyWithoutEmptyChildren(const Value& node) {
switch (node.GetType()) {
- case Value::TYPE_LIST:
+ case Value::Type::LIST:
return CopyListWithoutEmptyChildren(static_cast<const ListValue&>(node));
- case Value::TYPE_DICTIONARY:
+ case Value::Type::DICTIONARY:
return CopyDictionaryWithoutEmptyChildren(
static_cast<const DictionaryValue&>(node));
@@ -79,14 +80,14 @@ Value::~Value() {
// static
std::unique_ptr<Value> Value::CreateNullValue() {
- return WrapUnique(new Value(TYPE_NULL));
+ return WrapUnique(new Value(Type::NONE));
}
// static
const char* Value::GetTypeName(Value::Type type) {
- DCHECK_GE(type, 0);
+ DCHECK_GE(static_cast<int>(type), 0);
DCHECK_LT(static_cast<size_t>(type), arraysize(kTypeNames));
- return kTypeNames[type];
+ return kTypeNames[static_cast<size_t>(type)];
}
bool Value::GetAsBinary(const BinaryValue** out_value) const {
@@ -117,6 +118,10 @@ bool Value::GetAsString(const StringValue** out_value) const {
return false;
}
+bool Value::GetAsString(StringPiece* out_value) const {
+ return false;
+}
+
bool Value::GetAsList(ListValue** out_value) {
return false;
}
@@ -136,7 +141,7 @@ bool Value::GetAsDictionary(const DictionaryValue** out_value) const {
Value* Value::DeepCopy() const {
// This method should only be getting called for null Values--all subclasses
  // need to provide their own implementation.
- DCHECK(IsType(TYPE_NULL));
+ DCHECK(IsType(Type::NONE));
return CreateNullValue().release();
}
@@ -147,8 +152,8 @@ std::unique_ptr<Value> Value::CreateDeepCopy() const {
bool Value::Equals(const Value* other) const {
// This method should only be getting called for null Values--all subclasses
  // need to provide their own implementation.
- DCHECK(IsType(TYPE_NULL));
- return other->IsType(TYPE_NULL);
+ DCHECK(IsType(Type::NONE));
+ return other->IsType(Type::NONE);
}
// static
@@ -170,15 +175,13 @@ Value& Value::operator=(const Value& that) {
///////////////////// FundamentalValue ////////////////////
FundamentalValue::FundamentalValue(bool in_value)
- : Value(TYPE_BOOLEAN), boolean_value_(in_value) {
-}
+ : Value(Type::BOOLEAN), boolean_value_(in_value) {}
FundamentalValue::FundamentalValue(int in_value)
- : Value(TYPE_INTEGER), integer_value_(in_value) {
-}
+ : Value(Type::INTEGER), integer_value_(in_value) {}
FundamentalValue::FundamentalValue(double in_value)
- : Value(TYPE_DOUBLE), double_value_(in_value) {
+ : Value(Type::DOUBLE), double_value_(in_value) {
if (!std::isfinite(double_value_)) {
NOTREACHED() << "Non-finite (i.e. NaN or positive/negative infinity) "
<< "values cannot be represented in JSON";
@@ -190,34 +193,34 @@ FundamentalValue::~FundamentalValue() {
}
bool FundamentalValue::GetAsBoolean(bool* out_value) const {
- if (out_value && IsType(TYPE_BOOLEAN))
+ if (out_value && IsType(Type::BOOLEAN))
*out_value = boolean_value_;
- return (IsType(TYPE_BOOLEAN));
+ return (IsType(Type::BOOLEAN));
}
bool FundamentalValue::GetAsInteger(int* out_value) const {
- if (out_value && IsType(TYPE_INTEGER))
+ if (out_value && IsType(Type::INTEGER))
*out_value = integer_value_;
- return (IsType(TYPE_INTEGER));
+ return (IsType(Type::INTEGER));
}
bool FundamentalValue::GetAsDouble(double* out_value) const {
- if (out_value && IsType(TYPE_DOUBLE))
+ if (out_value && IsType(Type::DOUBLE))
*out_value = double_value_;
- else if (out_value && IsType(TYPE_INTEGER))
+ else if (out_value && IsType(Type::INTEGER))
*out_value = integer_value_;
- return (IsType(TYPE_DOUBLE) || IsType(TYPE_INTEGER));
+ return (IsType(Type::DOUBLE) || IsType(Type::INTEGER));
}
FundamentalValue* FundamentalValue::DeepCopy() const {
switch (GetType()) {
- case TYPE_BOOLEAN:
+ case Type::BOOLEAN:
return new FundamentalValue(boolean_value_);
- case TYPE_INTEGER:
+ case Type::INTEGER:
return new FundamentalValue(integer_value_);
- case TYPE_DOUBLE:
+ case Type::DOUBLE:
return new FundamentalValue(double_value_);
default:
@@ -231,15 +234,15 @@ bool FundamentalValue::Equals(const Value* other) const {
return false;
switch (GetType()) {
- case TYPE_BOOLEAN: {
+ case Type::BOOLEAN: {
bool lhs, rhs;
return GetAsBoolean(&lhs) && other->GetAsBoolean(&rhs) && lhs == rhs;
}
- case TYPE_INTEGER: {
+ case Type::INTEGER: {
int lhs, rhs;
return GetAsInteger(&lhs) && other->GetAsInteger(&rhs) && lhs == rhs;
}
- case TYPE_DOUBLE: {
+ case Type::DOUBLE: {
double lhs, rhs;
return GetAsDouble(&lhs) && other->GetAsDouble(&rhs) && lhs == rhs;
}
@@ -252,14 +255,12 @@ bool FundamentalValue::Equals(const Value* other) const {
///////////////////// StringValue ////////////////////
StringValue::StringValue(StringPiece in_value)
- : Value(TYPE_STRING), value_(in_value.as_string()) {
+ : Value(Type::STRING), value_(in_value.as_string()) {
DCHECK(IsStringUTF8(in_value));
}
StringValue::StringValue(const string16& in_value)
- : Value(TYPE_STRING),
- value_(UTF16ToUTF8(in_value)) {
-}
+ : Value(Type::STRING), value_(UTF16ToUTF8(in_value)) {}
StringValue::~StringValue() {
}
@@ -290,6 +291,12 @@ bool StringValue::GetAsString(const StringValue** out_value) const {
return true;
}
+bool StringValue::GetAsString(StringPiece* out_value) const {
+ if (out_value)
+ *out_value = value_;
+ return true;
+}
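
A hedged usage sketch of the new overload defined above: callers can read the string without forcing a copy. The StringPiece aliases the value's internal storage, so it must not outlive the value it points into.

#include "base/strings/string_piece.h"
#include "base/values.h"

void StringPieceOverloadExample() {
  base::StringValue value("hello");
  base::StringPiece piece;
  if (value.GetAsString(&piece)) {
    // |piece| now refers to |value|'s internal storage; use it before
    // |value| is destroyed.
  }
}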
+
StringValue* StringValue::DeepCopy() const {
return new StringValue(value_);
}
@@ -303,13 +310,10 @@ bool StringValue::Equals(const Value* other) const {
///////////////////// BinaryValue ////////////////////
-BinaryValue::BinaryValue()
- : Value(TYPE_BINARY),
- size_(0) {
-}
+BinaryValue::BinaryValue() : Value(Type::BINARY), size_(0) {}
BinaryValue::BinaryValue(std::unique_ptr<char[]> buffer, size_t size)
- : Value(TYPE_BINARY), buffer_(std::move(buffer)), size_(size) {}
+ : Value(Type::BINARY), buffer_(std::move(buffer)), size_(size) {}
BinaryValue::~BinaryValue() {
}
@@ -355,9 +359,7 @@ std::unique_ptr<DictionaryValue> DictionaryValue::From(
return nullptr;
}
-DictionaryValue::DictionaryValue()
- : Value(TYPE_DICTIONARY) {
-}
+DictionaryValue::DictionaryValue() : Value(Type::DICTIONARY) {}
DictionaryValue::~DictionaryValue() {
Clear();
@@ -561,7 +563,7 @@ bool DictionaryValue::GetBinary(StringPiece path,
const BinaryValue** out_value) const {
const Value* value;
bool result = Get(path, &value);
- if (!result || !value->IsType(TYPE_BINARY))
+ if (!result || !value->IsType(Type::BINARY))
return false;
if (out_value)
@@ -580,7 +582,7 @@ bool DictionaryValue::GetDictionary(StringPiece path,
const DictionaryValue** out_value) const {
const Value* value;
bool result = Get(path, &value);
- if (!result || !value->IsType(TYPE_DICTIONARY))
+ if (!result || !value->IsType(Type::DICTIONARY))
return false;
if (out_value)
@@ -600,7 +602,7 @@ bool DictionaryValue::GetList(StringPiece path,
const ListValue** out_value) const {
const Value* value;
bool result = Get(path, &value);
- if (!result || !value->IsType(TYPE_LIST))
+ if (!result || !value->IsType(Type::LIST))
return false;
if (out_value)
@@ -685,7 +687,7 @@ bool DictionaryValue::GetDictionaryWithoutPathExpansion(
const DictionaryValue** out_value) const {
const Value* value;
bool result = GetWithoutPathExpansion(key, &value);
- if (!result || !value->IsType(TYPE_DICTIONARY))
+ if (!result || !value->IsType(Type::DICTIONARY))
return false;
if (out_value)
@@ -709,7 +711,7 @@ bool DictionaryValue::GetListWithoutPathExpansion(
const ListValue** out_value) const {
const Value* value;
bool result = GetWithoutPathExpansion(key, &value);
- if (!result || !value->IsType(TYPE_LIST))
+ if (!result || !value->IsType(Type::LIST))
return false;
if (out_value)
@@ -790,7 +792,7 @@ void DictionaryValue::MergeDictionary(const DictionaryValue* dictionary) {
for (DictionaryValue::Iterator it(*dictionary); !it.IsAtEnd(); it.Advance()) {
const Value* merge_value = &it.value();
// Check whether we have to merge dictionaries.
- if (merge_value->IsType(Value::TYPE_DICTIONARY)) {
+ if (merge_value->IsType(Value::Type::DICTIONARY)) {
DictionaryValue* sub_dict;
if (GetDictionaryWithoutPathExpansion(it.key(), &sub_dict)) {
sub_dict->MergeDictionary(
@@ -865,8 +867,7 @@ std::unique_ptr<ListValue> ListValue::From(std::unique_ptr<Value> value) {
return nullptr;
}
-ListValue::ListValue() : Value(TYPE_LIST) {
-}
+ListValue::ListValue() : Value(Type::LIST) {}
ListValue::~ListValue() {
Clear();
@@ -956,7 +957,7 @@ bool ListValue::GetString(size_t index, string16* out_value) const {
bool ListValue::GetBinary(size_t index, const BinaryValue** out_value) const {
const Value* value;
bool result = Get(index, &value);
- if (!result || !value->IsType(TYPE_BINARY))
+ if (!result || !value->IsType(Type::BINARY))
return false;
if (out_value)
@@ -975,7 +976,7 @@ bool ListValue::GetDictionary(size_t index,
const DictionaryValue** out_value) const {
const Value* value;
bool result = Get(index, &value);
- if (!result || !value->IsType(TYPE_DICTIONARY))
+ if (!result || !value->IsType(Type::DICTIONARY))
return false;
if (out_value)
@@ -993,7 +994,7 @@ bool ListValue::GetDictionary(size_t index, DictionaryValue** out_value) {
bool ListValue::GetList(size_t index, const ListValue** out_value) const {
const Value* value;
bool result = Get(index, &value);
- if (!result || !value->IsType(TYPE_LIST))
+ if (!result || !value->IsType(Type::LIST))
return false;
if (out_value)
@@ -1173,4 +1174,11 @@ std::ostream& operator<<(std::ostream& out, const Value& value) {
return out << json;
}
+std::ostream& operator<<(std::ostream& out, const Value::Type& type) {
+ if (static_cast<int>(type) < 0 ||
+ static_cast<size_t>(type) >= arraysize(kTypeNames))
+ return out << "Invalid Type (index = " << static_cast<int>(type) << ")";
+ return out << Value::GetTypeName(type);
+}
+
} // namespace base
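A minimal sketch (not part of the patch itself) of how the GetAsString(StringPiece*) overload added above can be used; the function and setup below are illustrative only:

// Sketch only: exercises the Value::GetAsString(StringPiece*) overload
// introduced in this change. |piece| aliases the StringValue's internal
// storage rather than copying it, so it must not outlive |value|.
#include "base/strings/string_piece.h"
#include "base/values.h"

void StringPieceExample() {
  base::StringValue value("hello");
  base::StringPiece piece;
  if (value.GetAsString(&piece)) {
    // |piece| now refers to "hello" without an extra std::string copy.
  }
}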
diff --git a/chromium/base/values.h b/chromium/base/values.h
index 258a9603a61..8becd3046ac 100644
--- a/chromium/base/values.h
+++ b/chromium/base/values.h
@@ -49,15 +49,15 @@ class Value;
// See the file-level comment above for more information.
class BASE_EXPORT Value {
public:
- enum Type {
- TYPE_NULL = 0,
- TYPE_BOOLEAN,
- TYPE_INTEGER,
- TYPE_DOUBLE,
- TYPE_STRING,
- TYPE_BINARY,
- TYPE_DICTIONARY,
- TYPE_LIST
+ enum class Type {
+ NONE = 0,
+ BOOLEAN,
+ INTEGER,
+ DOUBLE,
+ STRING,
+ BINARY,
+ DICTIONARY,
+ LIST
// Note: Do not add more types. See the file-level comment above for why.
};
@@ -88,6 +88,7 @@ class BASE_EXPORT Value {
virtual bool GetAsString(std::string* out_value) const;
virtual bool GetAsString(string16* out_value) const;
virtual bool GetAsString(const StringValue** out_value) const;
+ virtual bool GetAsString(StringPiece* out_value) const;
virtual bool GetAsBinary(const BinaryValue** out_value) const;
// ListValue::From is the equivalent for std::unique_ptr conversions.
virtual bool GetAsList(ListValue** out_value);
@@ -134,7 +135,7 @@ class BASE_EXPORT FundamentalValue : public Value {
// Overridden from Value:
bool GetAsBoolean(bool* out_value) const override;
bool GetAsInteger(int* out_value) const override;
- // Values of both type TYPE_INTEGER and TYPE_DOUBLE can be obtained as
+ // Values of both type Type::INTEGER and Type::DOUBLE can be obtained as
// doubles.
bool GetAsDouble(double* out_value) const override;
FundamentalValue* DeepCopy() const override;
@@ -166,6 +167,7 @@ class BASE_EXPORT StringValue : public Value {
bool GetAsString(std::string* out_value) const override;
bool GetAsString(string16* out_value) const override;
bool GetAsString(const StringValue** out_value) const override;
+ bool GetAsString(StringPiece* out_value) const override;
StringValue* DeepCopy() const override;
bool Equals(const Value* other) const override;
@@ -287,7 +289,7 @@ class BASE_EXPORT DictionaryValue : public Value {
// |out_value| is optional and will only be set if non-NULL.
bool GetBoolean(StringPiece path, bool* out_value) const;
bool GetInteger(StringPiece path, int* out_value) const;
- // Values of both type TYPE_INTEGER and TYPE_DOUBLE can be obtained as
+ // Values of both type Type::INTEGER and Type::DOUBLE can be obtained as
// doubles.
bool GetDouble(StringPiece path, double* out_value) const;
bool GetString(StringPiece path, std::string* out_value) const;
@@ -327,7 +329,7 @@ class BASE_EXPORT DictionaryValue : public Value {
// |out_value|. If |out_value| is NULL, the removed value will be deleted.
// This method returns true if |path| is a valid path; otherwise it will
// return false and the DictionaryValue object will be unchanged.
- virtual bool Remove(StringPiece path, std::unique_ptr<Value>* out_value);
+ bool Remove(StringPiece path, std::unique_ptr<Value>* out_value);
// Like Remove(), but without special treatment of '.'. This allows e.g. URLs
// to be used as paths.
@@ -336,7 +338,7 @@ class BASE_EXPORT DictionaryValue : public Value {
// Removes a path, clearing out all dictionaries on |path| that remain empty
// after removing the value at |path|.
- virtual bool RemovePath(StringPiece path, std::unique_ptr<Value>* out_value);
+ bool RemovePath(StringPiece path, std::unique_ptr<Value>* out_value);
// Makes a copy of |this| but doesn't include empty dictionaries and lists in
// the copy. This never returns NULL, even if |this| itself is empty.
@@ -427,7 +429,7 @@ class BASE_EXPORT ListValue : public Value {
// |out_value| is optional and will only be set if non-NULL.
bool GetBoolean(size_t index, bool* out_value) const;
bool GetInteger(size_t index, int* out_value) const;
- // Values of both type TYPE_INTEGER and TYPE_DOUBLE can be obtained as
+ // Values of both type Type::INTEGER and Type::DOUBLE can be obtained as
// doubles.
bool GetDouble(size_t index, double* out_value) const;
bool GetString(size_t index, std::string* out_value) const;
@@ -562,6 +564,10 @@ BASE_EXPORT inline std::ostream& operator<<(std::ostream& out,
return out << static_cast<const Value&>(value);
}
+// Stream operator so that enum class Types can be used in log statements.
+BASE_EXPORT std::ostream& operator<<(std::ostream& out,
+ const Value::Type& type);
+
} // namespace base
#endif // BASE_VALUES_H_
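A minimal sketch (not from the patch) of why the Value::Type stream operator declared above matters: with the move from a plain enum to an enum class there is no longer an implicit conversion to int, so log statements and test failure messages need an explicit operator<< to print a readable type name. The function name below is illustrative:

// Sketch only: streams the human-readable type name (e.g. "string") via the
// operator<< declared in values.h; without it, streaming an enum class Type
// would not compile.
#include "base/logging.h"
#include "base/values.h"

void LogValueType(const base::Value& value) {
  LOG(INFO) << "value type: " << value.GetType();
}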
diff --git a/chromium/base/values_unittest.cc b/chromium/base/values_unittest.cc
index 61e754e3d23..873b7b7a970 100644
--- a/chromium/base/values_unittest.cc
+++ b/chromium/base/values_unittest.cc
@@ -145,10 +145,10 @@ TEST(ValuesTest, StringValue) {
// Test overloaded StringValue constructor.
std::unique_ptr<Value> narrow_value(new StringValue("narrow"));
ASSERT_TRUE(narrow_value.get());
- ASSERT_TRUE(narrow_value->IsType(Value::TYPE_STRING));
+ ASSERT_TRUE(narrow_value->IsType(Value::Type::STRING));
std::unique_ptr<Value> utf16_value(new StringValue(ASCIIToUTF16("utf16")));
ASSERT_TRUE(utf16_value.get());
- ASSERT_TRUE(utf16_value->IsType(Value::TYPE_STRING));
+ ASSERT_TRUE(utf16_value->IsType(Value::Type::STRING));
// Test overloaded GetAsString.
std::string narrow = "http://google.com";
@@ -179,7 +179,7 @@ TEST(ValuesTest, StringValue) {
// properly deleted by modifying the value of external flag on destruction.
class DeletionTestValue : public Value {
public:
- explicit DeletionTestValue(bool* deletion_flag) : Value(TYPE_NULL) {
+ explicit DeletionTestValue(bool* deletion_flag) : Value(Type::NONE) {
Init(deletion_flag); // Separate function so that we can use ASSERT_*
}
@@ -343,7 +343,7 @@ TEST(ValuesTest, DictionaryWithoutPathExpansion) {
EXPECT_FALSE(dict.Get("this.isnt.expanded", &value3));
Value* value4;
ASSERT_TRUE(dict.GetWithoutPathExpansion("this.isnt.expanded", &value4));
- EXPECT_EQ(Value::TYPE_NULL, value4->GetType());
+ EXPECT_EQ(Value::Type::NONE, value4->GetType());
}
// Tests the deprecated version of SetWithoutPathExpansion.
@@ -367,7 +367,7 @@ TEST(ValuesTest, DictionaryWithoutPathExpansionDeprecated) {
EXPECT_FALSE(dict.Get("this.isnt.expanded", &value3));
Value* value4;
ASSERT_TRUE(dict.GetWithoutPathExpansion("this.isnt.expanded", &value4));
- EXPECT_EQ(Value::TYPE_NULL, value4->GetType());
+ EXPECT_EQ(Value::Type::NONE, value4->GetType());
}
TEST(ValuesTest, DictionaryRemovePath) {
@@ -378,7 +378,7 @@ TEST(ValuesTest, DictionaryRemovePath) {
std::unique_ptr<Value> removed_item;
EXPECT_TRUE(dict.RemovePath("a.long.way.down", &removed_item));
ASSERT_TRUE(removed_item);
- EXPECT_TRUE(removed_item->IsType(base::Value::TYPE_INTEGER));
+ EXPECT_TRUE(removed_item->IsType(base::Value::Type::INTEGER));
EXPECT_FALSE(dict.HasKey("a.long.way.down"));
EXPECT_FALSE(dict.HasKey("a.long.way"));
EXPECT_TRUE(dict.Get("a.long.key.path", NULL));
@@ -391,7 +391,7 @@ TEST(ValuesTest, DictionaryRemovePath) {
removed_item.reset();
EXPECT_TRUE(dict.RemovePath("a.long.key.path", &removed_item));
ASSERT_TRUE(removed_item);
- EXPECT_TRUE(removed_item->IsType(base::Value::TYPE_BOOLEAN));
+ EXPECT_TRUE(removed_item->IsType(base::Value::Type::BOOLEAN));
EXPECT_TRUE(dict.empty());
}
@@ -450,13 +450,13 @@ TEST(ValuesTest, DeepCopy) {
ASSERT_TRUE(copy_dict->Get("null", &copy_null));
ASSERT_TRUE(copy_null);
ASSERT_NE(copy_null, original_null);
- ASSERT_TRUE(copy_null->IsType(Value::TYPE_NULL));
+ ASSERT_TRUE(copy_null->IsType(Value::Type::NONE));
Value* copy_bool = NULL;
ASSERT_TRUE(copy_dict->Get("bool", &copy_bool));
ASSERT_TRUE(copy_bool);
ASSERT_NE(copy_bool, original_bool);
- ASSERT_TRUE(copy_bool->IsType(Value::TYPE_BOOLEAN));
+ ASSERT_TRUE(copy_bool->IsType(Value::Type::BOOLEAN));
bool copy_bool_value = false;
ASSERT_TRUE(copy_bool->GetAsBoolean(&copy_bool_value));
ASSERT_TRUE(copy_bool_value);
@@ -465,7 +465,7 @@ TEST(ValuesTest, DeepCopy) {
ASSERT_TRUE(copy_dict->Get("int", &copy_int));
ASSERT_TRUE(copy_int);
ASSERT_NE(copy_int, original_int);
- ASSERT_TRUE(copy_int->IsType(Value::TYPE_INTEGER));
+ ASSERT_TRUE(copy_int->IsType(Value::Type::INTEGER));
int copy_int_value = 0;
ASSERT_TRUE(copy_int->GetAsInteger(&copy_int_value));
ASSERT_EQ(42, copy_int_value);
@@ -474,7 +474,7 @@ TEST(ValuesTest, DeepCopy) {
ASSERT_TRUE(copy_dict->Get("double", &copy_double));
ASSERT_TRUE(copy_double);
ASSERT_NE(copy_double, original_double);
- ASSERT_TRUE(copy_double->IsType(Value::TYPE_DOUBLE));
+ ASSERT_TRUE(copy_double->IsType(Value::Type::DOUBLE));
double copy_double_value = 0;
ASSERT_TRUE(copy_double->GetAsDouble(&copy_double_value));
ASSERT_EQ(3.14, copy_double_value);
@@ -483,7 +483,7 @@ TEST(ValuesTest, DeepCopy) {
ASSERT_TRUE(copy_dict->Get("string", &copy_string));
ASSERT_TRUE(copy_string);
ASSERT_NE(copy_string, original_string);
- ASSERT_TRUE(copy_string->IsType(Value::TYPE_STRING));
+ ASSERT_TRUE(copy_string->IsType(Value::Type::STRING));
std::string copy_string_value;
string16 copy_string16_value;
ASSERT_TRUE(copy_string->GetAsString(&copy_string_value));
@@ -495,7 +495,7 @@ TEST(ValuesTest, DeepCopy) {
ASSERT_TRUE(copy_dict->Get("string16", &copy_string16));
ASSERT_TRUE(copy_string16);
ASSERT_NE(copy_string16, original_string16);
- ASSERT_TRUE(copy_string16->IsType(Value::TYPE_STRING));
+ ASSERT_TRUE(copy_string16->IsType(Value::Type::STRING));
ASSERT_TRUE(copy_string16->GetAsString(&copy_string_value));
ASSERT_TRUE(copy_string16->GetAsString(&copy_string16_value));
ASSERT_EQ(std::string("hello16"), copy_string_value);
@@ -505,7 +505,7 @@ TEST(ValuesTest, DeepCopy) {
ASSERT_TRUE(copy_dict->Get("binary", &copy_binary));
ASSERT_TRUE(copy_binary);
ASSERT_NE(copy_binary, original_binary);
- ASSERT_TRUE(copy_binary->IsType(Value::TYPE_BINARY));
+ ASSERT_TRUE(copy_binary->IsType(Value::Type::BINARY));
ASSERT_NE(original_binary->GetBuffer(),
static_cast<BinaryValue*>(copy_binary)->GetBuffer());
ASSERT_EQ(original_binary->GetSize(),
@@ -518,7 +518,7 @@ TEST(ValuesTest, DeepCopy) {
ASSERT_TRUE(copy_dict->Get("list", &copy_value));
ASSERT_TRUE(copy_value);
ASSERT_NE(copy_value, original_list);
- ASSERT_TRUE(copy_value->IsType(Value::TYPE_LIST));
+ ASSERT_TRUE(copy_value->IsType(Value::Type::LIST));
ListValue* copy_list = NULL;
ASSERT_TRUE(copy_value->GetAsList(&copy_list));
ASSERT_TRUE(copy_list);
@@ -544,7 +544,7 @@ TEST(ValuesTest, DeepCopy) {
ASSERT_TRUE(copy_dict->Get("dictionary", &copy_value));
ASSERT_TRUE(copy_value);
ASSERT_NE(copy_value, original_nested_dictionary);
- ASSERT_TRUE(copy_value->IsType(Value::TYPE_DICTIONARY));
+ ASSERT_TRUE(copy_value->IsType(Value::Type::DICTIONARY));
DictionaryValue* copy_nested_dictionary = NULL;
ASSERT_TRUE(copy_value->GetAsDictionary(&copy_nested_dictionary));
ASSERT_TRUE(copy_nested_dictionary);
diff --git a/chromium/base/win/BUILD.gn b/chromium/base/win/BUILD.gn
index ff2a754ae2f..74c1b8a0a9c 100644
--- a/chromium/base/win/BUILD.gn
+++ b/chromium/base/win/BUILD.gn
@@ -3,6 +3,7 @@
# found in the LICENSE file.
import("//build/buildflag_header.gni")
+import("//build/config/sanitizers/sanitizers.gni")
import("//build/win/message_compiler.gni")
declare_args() {
@@ -42,7 +43,9 @@ shared_library("eventlog_provider") {
"eventlog_provider.cc",
]
- ldflags = [ "/NOENTRY" ]
+ if (!is_asan) {
+ ldflags = [ "/NOENTRY" ]
+ }
deps = [
"//base/win:eventlog_messages",
diff --git a/chromium/base/win/object_watcher.cc b/chromium/base/win/object_watcher.cc
index 9a7eea2b054..6abc3995d9c 100644
--- a/chromium/base/win/object_watcher.cc
+++ b/chromium/base/win/object_watcher.cc
@@ -6,7 +6,7 @@
#include "base/bind.h"
#include "base/logging.h"
-#include "base/threading/thread_task_runner_handle.h"
+#include "base/threading/sequenced_task_runner_handle.h"
namespace base {
namespace win {
@@ -32,8 +32,8 @@ bool ObjectWatcher::StopWatching() {
if (!wait_object_)
return false;
- // Make sure ObjectWatcher is used in a single-threaded fashion.
- DCHECK(task_runner_->BelongsToCurrentThread());
+ // Make sure ObjectWatcher is used in a sequenced fashion.
+ DCHECK(task_runner_->RunsTasksOnCurrentThread());
// Blocking call to cancel the wait. Any callbacks already in progress will
// finish before we return from this call.
@@ -70,14 +70,14 @@ bool ObjectWatcher::StartWatchingInternal(HANDLE object, Delegate* delegate,
bool execute_only_once) {
DCHECK(delegate);
DCHECK(!wait_object_) << "Already watching an object";
- DCHECK(ThreadTaskRunnerHandle::IsSet());
+ DCHECK(SequencedTaskRunnerHandle::IsSet());
- task_runner_ = ThreadTaskRunnerHandle::Get();
+ task_runner_ = SequencedTaskRunnerHandle::Get();
run_once_ = execute_only_once;
// Since our job is to just notice when an object is signaled and report the
- // result back to this thread, we can just run on a Windows wait thread.
+ // result back to this sequence, we can just run on a Windows wait thread.
DWORD wait_flags = WT_EXECUTEINWAITTHREAD;
if (run_once_)
wait_flags |= WT_EXECUTEONLYONCE;
diff --git a/chromium/base/win/object_watcher.h b/chromium/base/win/object_watcher.h
index a2821c114f1..67a1e26c76c 100644
--- a/chromium/base/win/object_watcher.h
+++ b/chromium/base/win/object_watcher.h
@@ -12,7 +12,7 @@
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
-#include "base/single_thread_task_runner.h"
+#include "base/sequenced_task_runner.h"
namespace base {
namespace win {
@@ -20,7 +20,7 @@ namespace win {
// A class that provides a means to asynchronously wait for a Windows object to
// become signaled. It is an abstraction around RegisterWaitForSingleObject
// that provides a notification callback, OnObjectSignaled, that runs back on
-// the origin thread (i.e., the thread that called StartWatching).
+// the origin sequence (i.e., the sequence that called StartWatching).
//
// This class acts like a smart pointer such that when it goes out-of-scope,
// UnregisterWaitEx is automatically called, and any in-flight notification is
@@ -48,14 +48,13 @@ namespace win {
// still called after (but not necessarily immediately after) watch is started.
//
// NOTE: Except for the constructor, all public methods of this class must be
-// called on the same thread. A ThreadTaskRunnerHandle must be set on that
-// thread.
+// called in sequence, in a scope where SequencedTaskRunnerHandle::IsSet().
class BASE_EXPORT ObjectWatcher {
public:
class BASE_EXPORT Delegate {
public:
virtual ~Delegate() {}
- // Called from the thread that started the watch when a signaled object is
+ // Called from the sequence that started the watch when a signaled object is
// detected. To continue watching the object, StartWatching must be called
// again.
virtual void OnObjectSignaled(HANDLE object) = 0;
@@ -64,16 +63,16 @@ class BASE_EXPORT ObjectWatcher {
ObjectWatcher();
~ObjectWatcher();
- // When the object is signaled, the given delegate is notified on the thread
+ // When the object is signaled, the given delegate is notified on the sequence
// where StartWatchingOnce is called. The ObjectWatcher is not responsible for
// deleting the delegate.
// Returns whether watching was successfully initiated.
bool StartWatchingOnce(HANDLE object, Delegate* delegate);
- // Notifies the delegate, on the thread where this method is called, each time
- // the object is set. By definition, the handle must be an auto-reset object.
- // The caller must ensure that it (or any Windows system code) doesn't reset
- // the event or else the delegate won't be called.
+ // Notifies the delegate, on the sequence where this method is called, each
+ // time the object is set. By definition, the handle must be an auto-reset
+ // object. The caller must ensure that it (or any Windows system code) doesn't
+ // reset the event or else the delegate won't be called.
// Returns whether watching was successfully initiated.
bool StartWatchingMultipleTimes(HANDLE object, Delegate* delegate);
@@ -112,8 +111,8 @@ class BASE_EXPORT ObjectWatcher {
// The wait handle returned by RegisterWaitForSingleObject.
HANDLE wait_object_ = nullptr;
- // The task runner of the thread on which the watch was started.
- scoped_refptr<SingleThreadTaskRunner> task_runner_;
+ // The task runner of the sequence on which the watch was started.
+ scoped_refptr<SequencedTaskRunner> task_runner_;
bool run_once_ = true;
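The ThreadTaskRunnerHandle to SequencedTaskRunnerHandle migration above means ObjectWatcher now only requires a sequenced context, not a dedicated thread with a message loop. A minimal sketch of a delegate, assuming it is created and started from a task running where SequencedTaskRunnerHandle::IsSet() (class and method names are illustrative, not from the patch):

// Sketch only: StartWatchingOnce is called in a sequenced scope;
// OnObjectSignaled is then invoked back on that same sequence once the
// watched handle becomes signaled.
#include "base/logging.h"
#include "base/win/object_watcher.h"

class SignalLogger : public base::win::ObjectWatcher::Delegate {
 public:
  bool Watch(HANDLE event) { return watcher_.StartWatchingOnce(event, this); }

  // base::win::ObjectWatcher::Delegate:
  void OnObjectSignaled(HANDLE object) override {
    LOG(INFO) << "object signaled";
  }

 private:
  base::win::ObjectWatcher watcher_;
};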
diff --git a/chromium/base/win/scoped_bstr.h b/chromium/base/win/scoped_bstr.h
index 413fb28681e..2109c207a14 100644
--- a/chromium/base/win/scoped_bstr.h
+++ b/chromium/base/win/scoped_bstr.h
@@ -18,7 +18,7 @@ namespace base {
namespace win {
// Manages a BSTR string pointer.
-// The class interface is based on scoped_ptr.
+// The class interface is based on unique_ptr.
class BASE_EXPORT ScopedBstr {
public:
ScopedBstr() : bstr_(NULL) {
diff --git a/chromium/base/win/scoped_comptr.h b/chromium/base/win/scoped_comptr.h
index 5ce60e2b688..9442672054a 100644
--- a/chromium/base/win/scoped_comptr.h
+++ b/chromium/base/win/scoped_comptr.h
@@ -51,7 +51,7 @@ class ScopedComPtr : public scoped_refptr<Interface> {
// Explicit Release() of the held object. Useful for reuse of the
// ScopedComPtr instance.
// Note that this function equates to IUnknown::Release and should not
- // be confused with e.g. scoped_ptr::release().
+ // be confused with e.g. unique_ptr::release().
void Release() {
if (this->ptr_ != NULL) {
this->ptr_->Release();