summaryrefslogtreecommitdiff
path: root/chromium/base
diff options
context:
space:
mode:
Diffstat (limited to 'chromium/base')
-rw-r--r--chromium/base/BUILD.gn65
-rw-r--r--chromium/base/allocator/OWNERS3
-rw-r--r--chromium/base/allocator/README.md4
-rw-r--r--chromium/base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc3
-rw-r--r--chromium/base/allocator/allocator_shim_override_ucrt_symbols_win.h2
-rw-r--r--chromium/base/allocator/allocator_shim_unittest.cc3
-rw-r--r--chromium/base/allocator/partition_allocator/OWNERS5
-rw-r--r--chromium/base/allocator/partition_allocator/address_space_randomization.cc2
-rw-r--r--chromium/base/allocator/partition_allocator/page_allocator.cc142
-rw-r--r--chromium/base/allocator/partition_allocator/page_allocator.h9
-rw-r--r--chromium/base/allocator/partition_allocator/partition_alloc.cc299
-rw-r--r--chromium/base/allocator/partition_allocator/partition_alloc.h142
-rw-r--r--chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc177
-rw-r--r--chromium/base/allocator/partition_allocator/spin_lock.cc3
-rw-r--r--chromium/base/allocator/partition_allocator/spin_lock.h4
-rw-r--r--chromium/base/android/jni_generator/BUILD.gn2
-rw-r--r--chromium/base/android/linker/BUILD.gn18
-rw-r--r--chromium/base/atomic_sequence_num.h2
-rw-r--r--chromium/base/atomicops_internals_x86_msvc.h26
-rw-r--r--chromium/base/auto_reset.h8
-rw-r--r--chromium/base/base_paths_win.cc1
-rw-r--r--chromium/base/base_switches.cc8
-rw-r--r--chromium/base/base_switches.h4
-rw-r--r--chromium/base/bind.h4
-rw-r--r--chromium/base/bind_internal.h66
-rw-r--r--chromium/base/bind_unittest.cc43
-rw-r--r--chromium/base/bind_unittest.nc11
-rw-r--r--chromium/base/callback_internal.h2
-rw-r--r--chromium/base/callback_list.h2
-rw-r--r--chromium/base/command_line.h4
-rw-r--r--chromium/base/containers/README.md29
-rw-r--r--chromium/base/containers/circular_deque.h12
-rw-r--r--chromium/base/containers/circular_deque_unittest.cc24
-rw-r--r--chromium/base/containers/mru_cache.h6
-rw-r--r--chromium/base/containers/unique_ptr_comparator.h49
-rw-r--r--chromium/base/containers/unique_ptr_comparator_unittest.cc66
-rw-r--r--chromium/base/containers/vector_buffer.h2
-rw-r--r--chromium/base/debug/alias.h14
-rw-r--r--chromium/base/debug/alias_unittest.cc28
-rw-r--r--chromium/base/debug/crash_logging.cc200
-rw-r--r--chromium/base/debug/crash_logging.h133
-rw-r--r--chromium/base/debug/crash_logging_unittest.cc180
-rw-r--r--chromium/base/debug/leak_tracker.h2
-rw-r--r--chromium/base/debug/proc_maps_linux_unittest.cc25
-rw-r--r--chromium/base/debug/stack_trace_fuchsia.cc7
-rw-r--r--chromium/base/debug/stack_trace_posix.cc7
-rw-r--r--chromium/base/files/dir_reader_posix.h4
-rw-r--r--chromium/base/files/file.cc11
-rw-r--r--chromium/base/files/file.h9
-rw-r--r--chromium/base/files/file_path_watcher_win.cc2
-rw-r--r--chromium/base/files/file_posix.cc21
-rw-r--r--chromium/base/files/file_unittest.cc21
-rw-r--r--chromium/base/files/file_util.h8
-rw-r--r--chromium/base/files/file_util_posix.cc209
-rw-r--r--chromium/base/files/file_util_unittest.cc287
-rw-r--r--chromium/base/files/file_util_win.cc230
-rw-r--r--chromium/base/files/file_win.cc25
-rw-r--r--chromium/base/files/important_file_writer.cc28
-rw-r--r--chromium/base/files/important_file_writer.h2
-rw-r--r--chromium/base/files/memory_mapped_file_win.cc2
-rw-r--r--chromium/base/files/platform_file.h2
-rw-r--r--chromium/base/fuchsia/OWNERS1
-rw-r--r--chromium/base/fuchsia/fuchsia_logging.h9
-rw-r--r--chromium/base/fuchsia/scoped_zx_handle.h4
-rw-r--r--chromium/base/guid.cc10
-rw-r--r--chromium/base/i18n/encoding_detection.cc11
-rw-r--r--chromium/base/i18n/icu_string_conversions.h2
-rw-r--r--chromium/base/i18n/message_formatter.h2
-rw-r--r--chromium/base/i18n/time_formatting_unittest.cc28
-rw-r--r--chromium/base/i18n/timezone.cc618
-rw-r--r--chromium/base/i18n/timezone.h9
-rw-r--r--chromium/base/i18n/timezone_unittest.cc12
-rw-r--r--chromium/base/json/json_parser.cc113
-rw-r--r--chromium/base/json/json_parser.h33
-rw-r--r--chromium/base/json/json_parser_unittest.cc45
-rw-r--r--chromium/base/json/json_reader.cc19
-rw-r--r--chromium/base/json/json_reader.h2
-rw-r--r--chromium/base/json/json_reader_unittest.cc58
-rw-r--r--chromium/base/json/json_value_converter.h31
-rw-r--r--chromium/base/json/json_value_converter_unittest.cc2
-rw-r--r--chromium/base/json/json_value_serializer_unittest.cc4
-rw-r--r--chromium/base/json/string_escape.cc12
-rw-r--r--chromium/base/json/string_escape.h10
-rw-r--r--chromium/base/lazy_instance.h82
-rw-r--r--chromium/base/lazy_instance_helpers.cc (renamed from chromium/base/lazy_instance.cc)19
-rw-r--r--chromium/base/lazy_instance_helpers.h77
-rw-r--r--chromium/base/lazy_instance_unittest.cc149
-rw-r--r--chromium/base/logging.cc6
-rw-r--r--chromium/base/logging.h2
-rw-r--r--chromium/base/logging_unittest.cc18
-rw-r--r--chromium/base/mac/scoped_dispatch_object.h2
-rw-r--r--chromium/base/mac/scoped_nsobject.h12
-rw-r--r--chromium/base/mac/scoped_typeref.h2
-rw-r--r--chromium/base/macros.h10
-rw-r--r--chromium/base/memory/OWNERS2
-rw-r--r--chromium/base/memory/discardable_memory_allocator.h2
-rw-r--r--chromium/base/memory/discardable_shared_memory.cc1
-rw-r--r--chromium/base/memory/memory_coordinator_client.h2
-rw-r--r--chromium/base/memory/memory_coordinator_proxy.h2
-rw-r--r--chromium/base/memory/protected_memory.h5
-rw-r--r--chromium/base/memory/ref_counted.h5
-rw-r--r--chromium/base/memory/ref_counted_memory.cc2
-rw-r--r--chromium/base/memory/ref_counted_memory.h12
-rw-r--r--chromium/base/memory/ref_counted_memory_unittest.cc38
-rw-r--r--chromium/base/memory/ref_counted_unittest.cc49
-rw-r--r--chromium/base/memory/scoped_refptr.h72
-rw-r--r--chromium/base/memory/shared_memory.h2
-rw-r--r--chromium/base/memory/shared_memory_fuchsia.cc2
-rw-r--r--chromium/base/memory/shared_memory_handle.h2
-rw-r--r--chromium/base/memory/shared_memory_handle_win.cc2
-rw-r--r--chromium/base/memory/shared_memory_mac.cc2
-rw-r--r--chromium/base/memory/shared_memory_nacl.cc2
-rw-r--r--chromium/base/memory/shared_memory_posix.cc2
-rw-r--r--chromium/base/memory/shared_memory_unittest.cc67
-rw-r--r--chromium/base/memory/shared_memory_win.cc24
-rw-r--r--chromium/base/memory/singleton.cc34
-rw-r--r--chromium/base/memory/singleton.h76
-rw-r--r--chromium/base/memory/singleton_unittest.cc2
-rw-r--r--chromium/base/memory/weak_ptr.h8
-rw-r--r--chromium/base/message_loop/incoming_task_queue.cc13
-rw-r--r--chromium/base/message_loop/incoming_task_queue.h14
-rw-r--r--chromium/base/message_loop/message_loop.cc24
-rw-r--r--chromium/base/message_loop/message_loop.h9
-rw-r--r--chromium/base/message_loop/message_loop_io_posix_unittest.cc1
-rw-r--r--chromium/base/message_loop/message_pump.h2
-rw-r--r--chromium/base/message_loop/message_pump_default.cc2
-rw-r--r--chromium/base/message_loop/message_pump_libevent.h2
-rw-r--r--chromium/base/message_loop/message_pump_libevent_unittest.cc7
-rw-r--r--chromium/base/message_loop/message_pump_mac.mm7
-rw-r--r--chromium/base/metrics/field_trial.cc24
-rw-r--r--chromium/base/metrics/field_trial.h26
-rw-r--r--chromium/base/metrics/field_trial_unittest.cc71
-rw-r--r--chromium/base/metrics/histogram.cc10
-rw-r--r--chromium/base/metrics/histogram_flattener.h4
-rw-r--r--chromium/base/metrics/histogram_functions.cc9
-rw-r--r--chromium/base/metrics/histogram_functions.h29
-rw-r--r--chromium/base/metrics/histogram_functions_unittest.cc30
-rw-r--r--chromium/base/metrics/histogram_macros.h26
-rw-r--r--chromium/base/metrics/histogram_macros_internal.h12
-rw-r--r--chromium/base/metrics/histogram_samples.cc5
-rw-r--r--chromium/base/metrics/histogram_snapshot_manager.h16
-rw-r--r--chromium/base/metrics/persistent_histogram_allocator.cc3
-rw-r--r--chromium/base/metrics/persistent_memory_allocator.cc11
-rw-r--r--chromium/base/metrics/record_histogram_checker.h2
-rw-r--r--chromium/base/metrics/single_sample_metrics.h8
-rw-r--r--chromium/base/metrics/sparse_histogram_unittest.cc33
-rw-r--r--chromium/base/metrics/statistics_recorder.cc544
-rw-r--r--chromium/base/metrics/statistics_recorder.h310
-rw-r--r--chromium/base/metrics/statistics_recorder_unittest.cc225
-rw-r--r--chromium/base/nix/xdg_util.cc4
-rw-r--r--chromium/base/nix/xdg_util.h1
-rw-r--r--chromium/base/nix/xdg_util_unittest.cc10
-rw-r--r--chromium/base/no_destructor.h72
-rw-r--r--chromium/base/no_destructor_unittest.cc67
-rw-r--r--chromium/base/numerics/checked_math.h2
-rw-r--r--chromium/base/observer_list.h16
-rw-r--r--chromium/base/observer_list_unittest.cc16
-rw-r--r--chromium/base/optional.h525
-rw-r--r--chromium/base/optional_unittest.cc205
-rw-r--r--chromium/base/os_compat_android.cc1
-rw-r--r--chromium/base/os_compat_android.h11
-rw-r--r--chromium/base/pickle.h4
-rw-r--r--chromium/base/pickle_unittest.cc8
-rw-r--r--chromium/base/posix/file_descriptor_shuffle.h2
-rw-r--r--chromium/base/power_monitor/power_observer.h2
-rw-r--r--chromium/base/process/launch.h32
-rw-r--r--chromium/base/process/launch_fuchsia.cc18
-rw-r--r--chromium/base/process/launch_posix.cc31
-rw-r--r--chromium/base/process/memory_linux.cc1
-rw-r--r--chromium/base/process/memory_win.cc3
-rw-r--r--chromium/base/process/process_handle.h2
-rw-r--r--chromium/base/process/process_info_unittest.cc5
-rw-r--r--chromium/base/process/process_info_win.cc30
-rw-r--r--chromium/base/process/process_iterator.h2
-rw-r--r--chromium/base/process/process_metrics.h11
-rw-r--r--chromium/base/process/process_metrics_linux.cc2
-rw-r--r--chromium/base/process/process_metrics_unittest.cc19
-rw-r--r--chromium/base/process/process_metrics_win.cc4
-rw-r--r--chromium/base/process/process_posix.cc8
-rw-r--r--chromium/base/process/process_util_unittest.cc276
-rw-r--r--chromium/base/process/process_win.cc11
-rw-r--r--chromium/base/run_loop.cc53
-rw-r--r--chromium/base/run_loop.h92
-rw-r--r--chromium/base/run_loop_unittest.cc221
-rw-r--r--chromium/base/security_unittest.cc13
-rw-r--r--chromium/base/sequenced_task_runner.h2
-rw-r--r--chromium/base/single_thread_task_runner.h2
-rw-r--r--chromium/base/strings/pattern.cc4
-rw-r--r--chromium/base/strings/pattern.h6
-rw-r--r--chromium/base/strings/strcat.cc81
-rw-r--r--chromium/base/strings/strcat.h93
-rw-r--r--chromium/base/strings/strcat_unittest.cc67
-rw-r--r--chromium/base/strings/string_number_conversions.cc116
-rw-r--r--chromium/base/strings/string_number_conversions.h69
-rw-r--r--chromium/base/strings/string_util.cc36
-rw-r--r--chromium/base/strings/string_util.h29
-rw-r--r--chromium/base/strings/sys_string_conversions.h7
-rw-r--r--chromium/base/strings/sys_string_conversions_mac.mm4
-rw-r--r--chromium/base/strings/sys_string_conversions_posix.cc6
-rw-r--r--chromium/base/strings/sys_string_conversions_win.cc6
-rw-r--r--chromium/base/sync_socket.h2
-rw-r--r--chromium/base/synchronization/condition_variable.h6
-rw-r--r--chromium/base/synchronization/condition_variable_win.cc12
-rw-r--r--chromium/base/synchronization/lock_impl.h6
-rw-r--r--chromium/base/synchronization/lock_impl_posix.cc10
-rw-r--r--chromium/base/synchronization/lock_impl_win.cc7
-rw-r--r--chromium/base/synchronization/waitable_event.h2
-rw-r--r--chromium/base/synchronization/waitable_event_watcher.h7
-rw-r--r--chromium/base/synchronization/waitable_event_watcher_mac.cc9
-rw-r--r--chromium/base/synchronization/waitable_event_watcher_posix.cc14
-rw-r--r--chromium/base/synchronization/waitable_event_watcher_unittest.cc67
-rw-r--r--chromium/base/synchronization/waitable_event_watcher_win.cc8
-rw-r--r--chromium/base/sys_info_internal.h2
-rw-r--r--chromium/base/syslog_logging.cc1
-rw-r--r--chromium/base/system_monitor/system_monitor.h2
-rw-r--r--chromium/base/task_scheduler/delayed_task_manager.cc14
-rw-r--r--chromium/base/task_scheduler/delayed_task_manager.h10
-rw-r--r--chromium/base/task_scheduler/delayed_task_manager_unittest.cc137
-rw-r--r--chromium/base/task_scheduler/lazy_task_runner.h2
-rw-r--r--chromium/base/task_scheduler/priority_queue_unittest.cc23
-rw-r--r--chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager.cc47
-rw-r--r--chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager.h7
-rw-r--r--chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager_unittest.cc69
-rw-r--r--chromium/base/task_scheduler/scheduler_worker_pool.cc38
-rw-r--r--chromium/base/task_scheduler/scheduler_worker_pool.h8
-rw-r--r--chromium/base/task_scheduler/scheduler_worker_pool_impl.cc25
-rw-r--r--chromium/base/task_scheduler/scheduler_worker_pool_impl.h22
-rw-r--r--chromium/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc43
-rw-r--r--chromium/base/task_scheduler/scheduler_worker_pool_unittest.cc4
-rw-r--r--chromium/base/task_scheduler/scheduler_worker_stack_unittest.cc2
-rw-r--r--chromium/base/task_scheduler/scheduler_worker_unittest.cc37
-rw-r--r--chromium/base/task_scheduler/sequence.cc27
-rw-r--r--chromium/base/task_scheduler/sequence.h14
-rw-r--r--chromium/base/task_scheduler/sequence_unittest.cc261
-rw-r--r--chromium/base/task_scheduler/switches.cc12
-rw-r--r--chromium/base/task_scheduler/switches.h15
-rw-r--r--chromium/base/task_scheduler/task.cc2
-rw-r--r--chromium/base/task_scheduler/task.h10
-rw-r--r--chromium/base/task_scheduler/task_scheduler.h18
-rw-r--r--chromium/base/task_scheduler/task_scheduler_impl.cc24
-rw-r--r--chromium/base/task_scheduler/task_scheduler_impl.h15
-rw-r--r--chromium/base/task_scheduler/task_tracker.cc101
-rw-r--r--chromium/base/task_scheduler/task_tracker.h22
-rw-r--r--chromium/base/task_scheduler/task_tracker_posix.cc4
-rw-r--r--chromium/base/task_scheduler/task_tracker_posix.h6
-rw-r--r--chromium/base/task_scheduler/task_tracker_posix_unittest.cc24
-rw-r--r--chromium/base/task_scheduler/task_tracker_unittest.cc337
-rw-r--r--chromium/base/task_scheduler/test_utils.cc2
-rw-r--r--chromium/base/task_scheduler/test_utils.h4
-rw-r--r--chromium/base/test/BUILD.gn8
-rw-r--r--chromium/base/third_party/dmg_fp/dtoa_wrapper.cc1
-rw-r--r--chromium/base/third_party/icu/README.chromium5
-rw-r--r--chromium/base/threading/platform_thread.h16
-rw-r--r--chromium/base/threading/platform_thread_win.cc2
-rw-r--r--chromium/base/threading/sequenced_worker_pool.cc2
-rw-r--r--chromium/base/threading/sequenced_worker_pool.h4
-rw-r--r--chromium/base/threading/thread_collision_warner.h6
-rw-r--r--chromium/base/threading/thread_local_storage.h2
-rw-r--r--chromium/base/threading/thread_restrictions.h10
-rw-r--r--chromium/base/threading/thread_task_runner_handle.cc7
-rw-r--r--chromium/base/threading/thread_task_runner_handle.h15
-rw-r--r--chromium/base/time/time.cc9
-rw-r--r--chromium/base/time/time.h43
-rw-r--r--chromium/base/time/time_unittest.cc22
-rw-r--r--chromium/base/time/time_win_unittest.cc19
-rw-r--r--chromium/base/tools_sanity_unittest.cc46
-rw-r--r--chromium/base/trace_event/memory_dump_manager.cc277
-rw-r--r--chromium/base/trace_event/memory_dump_manager.h31
-rw-r--r--chromium/base/trace_event/memory_dump_manager_test_utils.h7
-rw-r--r--chromium/base/trace_event/memory_dump_manager_unittest.cc68
-rw-r--r--chromium/base/trace_event/memory_dump_provider.h4
-rw-r--r--chromium/base/trace_event/memory_dump_request_args.h9
-rw-r--r--chromium/base/trace_event/memory_infra_background_whitelist.cc12
-rw-r--r--chromium/base/trace_event/process_memory_dump.cc2
-rw-r--r--chromium/base/trace_event/process_memory_dump_unittest.cc1
-rw-r--r--chromium/base/trace_event/trace_buffer.h2
-rw-r--r--chromium/base/trace_event/trace_config.h2
-rw-r--r--chromium/base/trace_event/trace_config_unittest.cc8
-rw-r--r--chromium/base/trace_event/trace_event_etw_export_win.cc2
-rw-r--r--chromium/base/trace_event/trace_event_impl.h4
-rw-r--r--chromium/base/trace_event/trace_event_unittest.cc6
-rw-r--r--chromium/base/trace_event/trace_log.cc45
-rw-r--r--chromium/base/trace_event/trace_log.h1
-rw-r--r--chromium/base/tuple_unittest.cc2
-rw-r--r--chromium/base/unguessable_token.cc4
-rw-r--r--chromium/base/unguessable_token.h15
-rw-r--r--chromium/base/unguessable_token_unittest.cc5
-rw-r--r--chromium/base/values.cc18
-rw-r--r--chromium/base/values.h1
-rw-r--r--chromium/base/values_unittest.cc26
-rw-r--r--chromium/base/version_unittest.cc31
-rw-r--r--chromium/base/win/OWNERS1
-rw-r--r--chromium/base/win/current_module.h2
-rw-r--r--chromium/base/win/object_watcher.cc2
-rw-r--r--chromium/base/win/object_watcher.h2
-rw-r--r--chromium/base/win/registry.h2
-rw-r--r--chromium/base/win/scoped_handle.cc2
-rw-r--r--chromium/base/win/scoped_handle.h2
-rw-r--r--chromium/base/win/shortcut_unittest.cc6
-rw-r--r--chromium/base/win/win_includes_unittest.cc32
-rw-r--r--chromium/base/win/win_util.cc39
-rw-r--r--chromium/base/win/win_util.h31
-rw-r--r--chromium/base/win/windows_full.h13
-rw-r--r--chromium/base/win/windows_types.h250
304 files changed, 6093 insertions, 4959 deletions
diff --git a/chromium/base/BUILD.gn b/chromium/base/BUILD.gn
index 28ebd2cd454..744ec1539fb 100644
--- a/chromium/base/BUILD.gn
+++ b/chromium/base/BUILD.gn
@@ -38,9 +38,6 @@ declare_args() {
# details and the expected format.
override_build_date = "N/A"
- # Partition alloc is included by default except iOS.
- use_partition_alloc = !is_ios
-
# Indicates if the Location object contains the source code information
# (file, function, line). False means only the program counter (and currently
# file name) is saved.
@@ -53,6 +50,11 @@ declare_args() {
# Set to true to disable COM init check hooks.
com_init_check_hook_disabled = false
+
+ # Set to true to enable mutex priority inheritance. See the comments in
+ # LockImpl::PriorityInheritanceAvailable() in lock_impl_posix.cc for the
+ # platform requirements to safely enable priority inheritance.
+ enable_mutex_priority_inheritance = false
}
if (is_android) {
@@ -154,7 +156,7 @@ jumbo_component("base") {
"android/build_info.h",
"android/callback_android.cc",
"android/callback_android.h",
- "android/child_process_service_impl.cc",
+ "android/child_process_service.cc",
"android/command_line_android.cc",
"android/content_uri_utils.cc",
"android/content_uri_utils.h",
@@ -265,6 +267,7 @@ jumbo_component("base") {
"containers/span.h",
"containers/stack.h",
"containers/stack_container.h",
+ "containers/unique_ptr_comparator.h",
"containers/vector_buffer.h",
"cpu.cc",
"cpu.h",
@@ -408,8 +411,9 @@ jumbo_component("base") {
"json/json_writer.h",
"json/string_escape.cc",
"json/string_escape.h",
- "lazy_instance.cc",
"lazy_instance.h",
+ "lazy_instance_helpers.cc",
+ "lazy_instance_helpers.h",
"linux_util.cc",
"linux_util.h",
"location.cc",
@@ -522,7 +526,6 @@ jumbo_component("base") {
"memory/shared_memory_helper.h",
"memory/shared_memory_tracker.cc",
"memory/shared_memory_tracker.h",
- "memory/singleton.cc",
"memory/singleton.h",
"memory/weak_ptr.cc",
"memory/weak_ptr.h",
@@ -605,6 +608,7 @@ jumbo_component("base") {
"nix/mime_util_xdg.h",
"nix/xdg_util.cc",
"nix/xdg_util.h",
+ "no_destructor.h",
"observer_list.h",
"observer_list_threadsafe.cc",
"observer_list_threadsafe.h",
@@ -733,6 +737,8 @@ jumbo_component("base") {
"strings/pattern.h",
"strings/safe_sprintf.cc",
"strings/safe_sprintf.h",
+ "strings/strcat.cc",
+ "strings/strcat.h",
"strings/string16.cc",
"strings/string16.h",
"strings/string_number_conversions.cc",
@@ -871,7 +877,6 @@ jumbo_component("base") {
"third_party/nspr/prtime.cc",
"third_party/nspr/prtime.h",
"third_party/superfasthash/superfasthash.c",
- "third_party/valgrind/memcheck.h",
"threading/platform_thread.h",
"threading/platform_thread_android.cc",
"threading/platform_thread_internal_posix.cc",
@@ -1142,10 +1147,13 @@ jumbo_component("base") {
]
public_deps = [
+ ":anchor_functions_flags",
":base_static",
":build_date",
":cfi_flags",
":debugging_flags",
+ ":protected_memory_flags",
+ ":synchronization_flags",
"//base/numerics:base_numerics",
]
@@ -1747,6 +1755,34 @@ buildflag_header("debugging_flags") {
]
}
+# Build flags for ProtectedMemory, temporary workaround for crbug.com/792777
+# TODO(vtsyrklevich): Remove once support for gold on Android/CrOs is dropped
+buildflag_header("protected_memory_flags") {
+ header = "protected_memory_flags.h"
+ header_dir = "base/memory"
+
+ # buildflag entries added to this header must also must be manually added to
+ # tools/gn/bootstrap/bootstrap.py
+ flags = [ "USE_LLD=$use_lld" ]
+}
+
+buildflag_header("synchronization_flags") {
+ header = "synchronization_flags.h"
+ header_dir = "base/synchronization"
+
+ flags =
+ [ "ENABLE_MUTEX_PRIORITY_INHERITANCE=$enable_mutex_priority_inheritance" ]
+}
+
+buildflag_header("anchor_functions_flags") {
+ header = "anchor_functions_flags.h"
+ header_dir = "base/android/library_loader"
+
+ # buildflag entries added to this header must also must be manually added to
+ # tools/gn/bootstrap/bootstrap.py
+ flags = [ "USE_LLD=$use_lld" ]
+}
+
# This is the subset of files from base that should not be used with a dynamic
# library. Note that this library cannot depend on base because base depends on
# base_static.
@@ -1754,8 +1790,6 @@ static_library("base_static") {
sources = [
"base_switches.cc",
"base_switches.h",
- "task_scheduler/switches.cc",
- "task_scheduler/switches.h",
]
if (is_win) {
@@ -2044,10 +2078,12 @@ test("base_unittests") {
"containers/small_map_unittest.cc",
"containers/span_unittest.cc",
"containers/stack_container_unittest.cc",
+ "containers/unique_ptr_comparator_unittest.cc",
"containers/vector_buffer_unittest.cc",
"cpu_unittest.cc",
"debug/activity_analyzer_unittest.cc",
"debug/activity_tracker_unittest.cc",
+ "debug/alias_unittest.cc",
"debug/crash_logging_unittest.cc",
"debug/debugger_unittest.cc",
"debug/leak_tracker_unittest.cc",
@@ -2156,6 +2192,7 @@ test("base_unittests") {
"metrics/sparse_histogram_unittest.cc",
"metrics/statistics_recorder_unittest.cc",
"native_library_unittest.cc",
+ "no_destructor_unittest.cc",
"observer_list_unittest.cc",
"optional_unittest.cc",
"os_compat_android_unittest.cc",
@@ -2190,6 +2227,7 @@ test("base_unittests") {
"strings/nullable_string16_unittest.cc",
"strings/pattern_unittest.cc",
"strings/safe_sprintf_unittest.cc",
+ "strings/strcat_unittest.cc",
"strings/string16_unittest.cc",
"strings/string_number_conversions_unittest.cc",
"strings/string_piece_unittest.cc",
@@ -2320,6 +2358,7 @@ test("base_unittests") {
"win/shortcut_unittest.cc",
"win/startup_information_unittest.cc",
"win/wait_chain_unittest.cc",
+ "win/win_includes_unittest.cc",
"win/win_util_unittest.cc",
"win/windows_version_unittest.cc",
"win/winrt_storage_util_unittest.cc",
@@ -2569,7 +2608,7 @@ if (is_android) {
"android/java/src/org/chromium/base/metrics/RecordHistogram.java",
"android/java/src/org/chromium/base/metrics/RecordUserAction.java",
"android/java/src/org/chromium/base/metrics/StatisticsRecorderAndroid.java",
- "android/java/src/org/chromium/base/process_launcher/ChildProcessServiceImpl.java",
+ "android/java/src/org/chromium/base/process_launcher/ChildProcessService.java",
]
public_deps = [
@@ -2660,7 +2699,6 @@ if (is_android) {
"android/java/src/org/chromium/base/library_loader/LibraryLoader.java",
"android/java/src/org/chromium/base/library_loader/Linker.java",
"android/java/src/org/chromium/base/library_loader/LoaderErrors.java",
- "android/java/src/org/chromium/base/library_loader/ModernLinker.java",
"android/java/src/org/chromium/base/library_loader/NativeLibraryPreloader.java",
"android/java/src/org/chromium/base/library_loader/ProcessInitException.java",
"android/java/src/org/chromium/base/metrics/CachedMetrics.java",
@@ -2673,7 +2711,6 @@ if (is_android) {
"android/java/src/org/chromium/base/process_launcher/ChildProcessConstants.java",
"android/java/src/org/chromium/base/process_launcher/ChildProcessLauncher.java",
"android/java/src/org/chromium/base/process_launcher/ChildProcessService.java",
- "android/java/src/org/chromium/base/process_launcher/ChildProcessServiceImpl.java",
"android/java/src/org/chromium/base/process_launcher/ChildProcessServiceDelegate.java",
"android/java/src/org/chromium/base/process_launcher/FileDescriptorInfo.java",
]
@@ -2850,6 +2887,7 @@ if (is_android) {
"android/library_loader/library_loader_hooks.h",
"memory/memory_pressure_listener.h",
"metrics/histogram_base.h",
+ "trace_event/trace_config.h",
]
}
@@ -2863,6 +2901,9 @@ if (is_android) {
if (is_java_debug || dcheck_always_on) {
defines += [ "_DCHECK_IS_ON" ]
}
+ if (use_cfi_diag || is_ubsan || is_ubsan_security || is_ubsan_vptr) {
+ defines += [ "_IS_UBSAN" ]
+ }
}
java_cpp_template("base_native_libraries_gen") {
diff --git a/chromium/base/allocator/OWNERS b/chromium/base/allocator/OWNERS
index 6a22df647f0..de658d09c92 100644
--- a/chromium/base/allocator/OWNERS
+++ b/chromium/base/allocator/OWNERS
@@ -1,7 +1,4 @@
primiano@chromium.org
wfh@chromium.org
-# For changes to tcmalloc it is advisable to ask jar@chromium.org
-# before proceeding.
-
# COMPONENT: Internals
diff --git a/chromium/base/allocator/README.md b/chromium/base/allocator/README.md
index a0bc24aaf62..d69c09c870c 100644
--- a/chromium/base/allocator/README.md
+++ b/chromium/base/allocator/README.md
@@ -41,8 +41,8 @@ config and can vary (typically *dlmalloc* or *jemalloc* on most Nexus devices).
**Mac/iOS**
`use_allocator: none`, we always use the system's allocator implementation.
-In addition, when building for `asan` / `msan` / `syzyasan` `valgrind`, the
-both the allocator and the shim layer are disabled.
+In addition, when building for `asan` / `msan` / `syzyasan`, both the allocator
+and the shim layer are disabled.
Layering and build deps
-----------------------
diff --git a/chromium/base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc b/chromium/base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc
index e33754a443f..c351a7c9259 100644
--- a/chromium/base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc
+++ b/chromium/base/allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc
@@ -9,6 +9,9 @@
#if defined(OS_ANDROID) && __ANDROID_API__ < 17
#include <dlfcn.h>
+// This is defined in malloc.h on other platforms. We just need the definition
+// for the decltype(malloc_usable_size)* call to work.
+size_t malloc_usable_size(const void*);
#endif
// This translation unit defines a default dispatch for the allocator shim which
diff --git a/chromium/base/allocator/allocator_shim_override_ucrt_symbols_win.h b/chromium/base/allocator/allocator_shim_override_ucrt_symbols_win.h
index 9fb7d067f47..ed02656332d 100644
--- a/chromium/base/allocator/allocator_shim_override_ucrt_symbols_win.h
+++ b/chromium/base/allocator/allocator_shim_override_ucrt_symbols_win.h
@@ -12,6 +12,8 @@
#include <malloc.h>
+#include <windows.h>
+
extern "C" {
void* (*malloc_unchecked)(size_t) = &base::allocator::UncheckedAlloc;
diff --git a/chromium/base/allocator/allocator_shim_unittest.cc b/chromium/base/allocator/allocator_shim_unittest.cc
index 6f5f7f6e4d7..2bafed559a5 100644
--- a/chromium/base/allocator/allocator_shim_unittest.cc
+++ b/chromium/base/allocator/allocator_shim_unittest.cc
@@ -12,6 +12,7 @@
#include <vector>
#include "base/allocator/features.h"
+#include "base/allocator/partition_allocator/partition_alloc.h"
#include "base/atomicops.h"
#include "base/process/process_metrics.h"
#include "base/synchronization/waitable_event.h"
@@ -54,7 +55,7 @@ using testing::_;
class AllocatorShimTest : public testing::Test {
public:
- static const size_t kMaxSizeTracked = 8192;
+ static const size_t kMaxSizeTracked = 2 * base::kSystemPageSize;
AllocatorShimTest() : testing::Test() {}
static size_t Hash(const void* ptr) {
diff --git a/chromium/base/allocator/partition_allocator/OWNERS b/chromium/base/allocator/partition_allocator/OWNERS
index 374d1aed926..b0a2a850f7b 100644
--- a/chromium/base/allocator/partition_allocator/OWNERS
+++ b/chromium/base/allocator/partition_allocator/OWNERS
@@ -1,5 +1,8 @@
+ajwong@chromium.org
haraken@chromium.org
palmer@chromium.org
+tsepez@chromium.org
# TEAM: platform-architecture-dev@chromium.org
-# COMPONENT: Blink>MemoryAllocator
+# Also: security-dev@chromium.org
+# COMPONENT: Blink>MemoryAllocator>Partition
diff --git a/chromium/base/allocator/partition_allocator/address_space_randomization.cc b/chromium/base/allocator/partition_allocator/address_space_randomization.cc
index 114ad9557f0..dc90824b54c 100644
--- a/chromium/base/allocator/partition_allocator/address_space_randomization.cc
+++ b/chromium/base/allocator/partition_allocator/address_space_randomization.cc
@@ -11,6 +11,8 @@
#include "build/build_config.h"
#if defined(OS_WIN)
+#include <windows.h> // Must be in front of other Windows header files.
+
#include <VersionHelpers.h>
#endif
diff --git a/chromium/base/allocator/partition_allocator/page_allocator.cc b/chromium/base/allocator/partition_allocator/page_allocator.cc
index 87fead7b87e..61cd43b1837 100644
--- a/chromium/base/allocator/partition_allocator/page_allocator.cc
+++ b/chromium/base/allocator/partition_allocator/page_allocator.cc
@@ -46,8 +46,6 @@ int GetAccessFlags(PageAccessibilityConfiguration page_accessibility) {
return PROT_READ | PROT_WRITE;
case PageReadExecute:
return PROT_READ | PROT_EXEC;
- case PageReadWriteExecute:
- return PROT_READ | PROT_WRITE | PROT_EXEC;
default:
NOTREACHED();
// Fall through.
@@ -74,8 +72,6 @@ int GetAccessFlags(PageAccessibilityConfiguration page_accessibility) {
return PAGE_READWRITE;
case PageReadExecute:
return PAGE_EXECUTE_READ;
- case PageReadWriteExecute:
- return PAGE_EXECUTE_READWRITE;
default:
NOTREACHED();
// Fall through.
@@ -107,22 +103,12 @@ static void* SystemAllocPages(void* hint,
DCHECK(commit || page_accessibility == PageInaccessible);
void* ret;
- // Retry failed allocations once after calling ReleaseReservation().
- bool have_retried = false;
#if defined(OS_WIN)
DWORD access_flag = GetAccessFlags(page_accessibility);
const DWORD type_flags = commit ? (MEM_RESERVE | MEM_COMMIT) : MEM_RESERVE;
- while (true) {
- ret = VirtualAlloc(hint, length, type_flags, access_flag);
- if (ret)
- break;
- if (have_retried) {
- s_allocPageErrorCode = GetLastError();
- break;
- }
- ReleaseReservation();
- have_retried = true;
- }
+ ret = VirtualAlloc(hint, length, type_flags, access_flag);
+ if (ret == nullptr)
+ s_allocPageErrorCode = GetLastError();
#else
#if defined(OS_MACOSX)
@@ -133,22 +119,33 @@ static void* SystemAllocPages(void* hint,
int fd = -1;
#endif
int access_flag = GetAccessFlags(page_accessibility);
- while (true) {
- ret = mmap(hint, length, access_flag, MAP_ANONYMOUS | MAP_PRIVATE, fd, 0);
- if (ret != MAP_FAILED)
- break;
- if (have_retried) {
- s_allocPageErrorCode = errno;
- ret = nullptr;
- break;
- }
- ReleaseReservation();
- have_retried = true;
+ ret = mmap(hint, length, access_flag, MAP_ANONYMOUS | MAP_PRIVATE, fd, 0);
+ if (ret == MAP_FAILED) {
+ s_allocPageErrorCode = errno;
+ ret = nullptr;
}
#endif
return ret;
}
+static void* AllocPagesIncludingReserved(
+ void* address,
+ size_t length,
+ PageAccessibilityConfiguration page_accessibility,
+ bool commit) {
+ void* ret = SystemAllocPages(address, length, page_accessibility, commit);
+ if (ret == nullptr) {
+ const bool cant_alloc_length = kHintIsAdvisory || address == nullptr;
+ if (cant_alloc_length) {
+ // The system cannot allocate |length| bytes. Release any reserved address
+ // space and try once more.
+ ReleaseReservation();
+ ret = SystemAllocPages(address, length, page_accessibility, commit);
+ }
+ }
+ return ret;
+}
+
// Trims base to given length and alignment. Windows returns null on failure and
// frees base.
static void* TrimMapping(void* base,
@@ -166,7 +163,9 @@ static void* TrimMapping(void* base,
DCHECK(post_slack < base_length);
void* ret = base;
-#if defined(OS_POSIX) // On POSIX we can resize the allocation run.
+#if defined(OS_POSIX)
+ // On POSIX we can resize the allocation run. Release unneeded memory before
+ // and after the aligned range.
(void)page_accessibility;
if (pre_slack) {
int res = munmap(base, pre_slack);
@@ -177,8 +176,10 @@ static void* TrimMapping(void* base,
int res = munmap(reinterpret_cast<char*>(ret) + trim_length, post_slack);
CHECK(!res);
}
-#else // On Windows we can't resize the allocation run.
+#else
if (pre_slack || post_slack) {
+ // On Windows we can't resize the allocation run. Free it and retry at the
+ // aligned address within the freed range.
ret = reinterpret_cast<char*>(base) + pre_slack;
FreePages(base, base_length);
ret = SystemAllocPages(ret, trim_length, page_accessibility, commit);
@@ -207,34 +208,43 @@ void* AllocPages(void* address,
DCHECK(!(reinterpret_cast<uintptr_t>(address) & align_offset_mask));
// If the client passed null as the address, choose a good one.
- if (!address) {
+ if (address == nullptr) {
address = GetRandomPageBase();
address = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(address) &
align_base_mask);
}
// First try to force an exact-size, aligned allocation from our random base.
- for (int count = 0; count < 3; ++count) {
- void* ret = SystemAllocPages(address, length, page_accessibility, commit);
- if (kHintIsAdvisory || ret) {
+#if defined(ARCH_CPU_32_BITS)
+ // On 32 bit systems, first try one random aligned address, and then try an
+ // aligned address derived from the value of |ret|.
+ constexpr int kExactSizeTries = 2;
+#else
+ // On 64 bit systems, try 3 random aligned addresses.
+ constexpr int kExactSizeTries = 3;
+#endif
+ for (int i = 0; i < kExactSizeTries; ++i) {
+ void* ret = AllocPagesIncludingReserved(address, length, page_accessibility,
+ commit);
+ if (ret != nullptr) {
// If the alignment is to our liking, we're done.
if (!(reinterpret_cast<uintptr_t>(ret) & align_offset_mask))
return ret;
+ // Free the memory and try again.
FreePages(ret, length);
-#if defined(ARCH_CPU_32_BITS)
- address = reinterpret_cast<void*>(
- (reinterpret_cast<uintptr_t>(ret) + align) & align_base_mask);
-#endif
- } else if (!address) { // We know we're OOM when an unhinted allocation
- // fails.
- return nullptr;
} else {
-#if defined(ARCH_CPU_32_BITS)
- address = reinterpret_cast<char*>(address) + align;
-#endif
+ // |ret| is null; if this try was unhinted, we're OOM.
+ if (kHintIsAdvisory || address == nullptr)
+ return nullptr;
}
-#if !defined(ARCH_CPU_32_BITS)
+#if defined(ARCH_CPU_32_BITS)
+ // For small address spaces, try the first aligned address >= |ret|. Note
+ // |ret| may be null, in which case |address| becomes null.
+ address = reinterpret_cast<void*>(
+ (reinterpret_cast<uintptr_t>(ret) + align_offset_mask) &
+ align_base_mask);
+#else // defined(ARCH_CPU_64_BITS)
// Keep trying random addresses on systems that have a large address space.
address = GetRandomPageBase();
address = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(address) &
@@ -242,20 +252,21 @@ void* AllocPages(void* address,
#endif
}
- // Map a larger allocation so we can force alignment, but continue randomizing
- // only on 64-bit POSIX.
+ // Make a larger allocation so we can force alignment.
size_t try_length = length + (align - kPageAllocationGranularity);
CHECK(try_length >= length);
void* ret;
do {
- // Don't continue to burn cycles on mandatory hints (Windows).
+ // Continue randomizing only on POSIX.
address = kHintIsAdvisory ? GetRandomPageBase() : nullptr;
- ret = SystemAllocPages(address, try_length, page_accessibility, commit);
+ ret = AllocPagesIncludingReserved(address, try_length, page_accessibility,
+ commit);
// The retries are for Windows, where a race can steal our mapping on
// resize.
- } while (ret && (ret = TrimMapping(ret, try_length, length, align,
- page_accessibility, commit)) == nullptr);
+ } while (ret != nullptr &&
+ (ret = TrimMapping(ret, try_length, length, align,
+ page_accessibility, commit)) == nullptr);
return ret;
}
@@ -367,29 +378,24 @@ void DiscardSystemPages(void* address, size_t length) {
}
bool ReserveAddressSpace(size_t size) {
- // Don't take |s_reserveLock| while allocating, since a failure would invoke
- // ReleaseReservation and deadlock.
- void* mem = AllocPages(nullptr, size, kPageAllocationGranularity,
- PageInaccessible, false);
- // We guarantee this alignment when reserving address space.
- DCHECK(!(reinterpret_cast<uintptr_t>(mem) &
- kPageAllocationGranularityOffsetMask));
- if (mem != nullptr) {
- {
- subtle::SpinLock::Guard guard(s_reserveLock.Get());
- if (s_reservation_address == nullptr) {
- s_reservation_address = mem;
- s_reservation_size = size;
- return true;
- }
+ // To avoid deadlock, call only SystemAllocPages.
+ subtle::SpinLock::Guard guard(s_reserveLock.Get());
+ if (s_reservation_address == nullptr) {
+ void* mem = SystemAllocPages(nullptr, size, PageInaccessible, false);
+ if (mem != nullptr) {
+ // We guarantee this alignment when reserving address space.
+ DCHECK(!(reinterpret_cast<uintptr_t>(mem) &
+ kPageAllocationGranularityOffsetMask));
+ s_reservation_address = mem;
+ s_reservation_size = size;
+ return true;
}
- // There was already a reservation.
- FreePages(mem, size);
}
return false;
}
void ReleaseReservation() {
+ // To avoid deadlock, call only FreePages.
subtle::SpinLock::Guard guard(s_reserveLock.Get());
if (s_reservation_address != nullptr) {
FreePages(s_reservation_address, s_reservation_size);
diff --git a/chromium/base/allocator/partition_allocator/page_allocator.h b/chromium/base/allocator/partition_allocator/page_allocator.h
index 297d817cd9f..a39b4e3013e 100644
--- a/chromium/base/allocator/partition_allocator/page_allocator.h
+++ b/chromium/base/allocator/partition_allocator/page_allocator.h
@@ -17,6 +17,8 @@ namespace base {
#if defined(OS_WIN)
static const size_t kPageAllocationGranularityShift = 16; // 64KB
+#elif defined(_MIPS_ARCH_LOONGSON)
+static const size_t kPageAllocationGranularityShift = 14; // 16KB
#else
static const size_t kPageAllocationGranularityShift = 12; // 4KB
#endif
@@ -27,9 +29,11 @@ static const size_t kPageAllocationGranularityOffsetMask =
static const size_t kPageAllocationGranularityBaseMask =
~kPageAllocationGranularityOffsetMask;
-// All Blink-supported systems have 4096 sized system pages and can handle
-// permissions and commit / decommit at this granularity.
+#if defined(_MIPS_ARCH_LOONGSON)
+static const size_t kSystemPageSize = 16384;
+#else
static const size_t kSystemPageSize = 4096;
+#endif
static const size_t kSystemPageOffsetMask = kSystemPageSize - 1;
static_assert((kSystemPageSize & (kSystemPageSize - 1)) == 0,
"kSystemPageSize must be power of 2");
@@ -39,7 +43,6 @@ enum PageAccessibilityConfiguration {
PageInaccessible,
PageReadWrite,
PageReadExecute,
- PageReadWriteExecute,
};
// Allocate one or more pages.
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc.cc b/chromium/base/allocator/partition_allocator/partition_alloc.cc
index 8afd6c07e50..0ca9522bcb0 100644
--- a/chromium/base/allocator/partition_allocator/partition_alloc.cc
+++ b/chromium/base/allocator/partition_allocator/partition_alloc.cc
@@ -56,10 +56,6 @@ PartitionBucket g_sentinel_bucket;
} // namespace
-PartitionPage* GetSentinelPageForTesting() {
- return &g_sentinel_page;
-}
-
PartitionRootBase::PartitionRootBase() = default;
PartitionRootBase::~PartitionRootBase() = default;
PartitionRoot::PartitionRoot() = default;
@@ -78,19 +74,15 @@ PartitionAllocHooks::AllocationHook* PartitionAllocHooks::allocation_hook_ =
nullptr;
PartitionAllocHooks::FreeHook* PartitionAllocHooks::free_hook_ = nullptr;
-// Find the best number of System Pages to allocate for |size| to minimize
-// wasted space. Uses a heuristic that looks at number of bytes wasted after
-// the last slot and attempts to account for the PTE usage of each System Page.
-//
// TODO(ajwong): This seems to interact badly with
-// PartitionBucketPartitionPages() which rounds the value from this up to a
+// get_pages_per_slot_span() which rounds the value from this up to a
// multiple of kNumSystemPagesPerPartitionPage (aka 4) anyways.
// http://crbug.com/776537
//
// TODO(ajwong): The waste calculation seems wrong. The PTE usage should cover
// both used and unsed pages.
// http://crbug.com/776537
-static uint8_t PartitionBucketNumSystemPages(size_t size) {
+uint8_t PartitionBucket::get_system_pages_per_slot_span() {
// This works out reasonably for the current bucket sizes of the generic
// allocator, and the current values of partition page size and constants.
// Specifically, we have enough room to always pack the slots perfectly into
@@ -103,23 +95,23 @@ static uint8_t PartitionBucketNumSystemPages(size_t size) {
// to using fewer system pages.
double best_waste_ratio = 1.0f;
uint16_t best_pages = 0;
- if (size > kMaxSystemPagesPerSlotSpan * kSystemPageSize) {
+ if (this->slot_size > kMaxSystemPagesPerSlotSpan * kSystemPageSize) {
// TODO(ajwong): Why is there a DCHECK here for this?
// http://crbug.com/776537
- DCHECK(!(size % kSystemPageSize));
- best_pages = static_cast<uint16_t>(size / kSystemPageSize);
+ DCHECK(!(this->slot_size % kSystemPageSize));
+ best_pages = static_cast<uint16_t>(this->slot_size / kSystemPageSize);
// TODO(ajwong): Should this be checking against
// kMaxSystemPagesPerSlotSpan or numeric_limits<uint8_t>::max?
// http://crbug.com/776537
CHECK(best_pages < (1 << 8));
return static_cast<uint8_t>(best_pages);
}
- DCHECK(size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize);
+ DCHECK(this->slot_size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize);
for (uint16_t i = kNumSystemPagesPerPartitionPage - 1;
i <= kMaxSystemPagesPerSlotSpan; ++i) {
size_t page_size = kSystemPageSize * i;
- size_t num_slots = page_size / size;
- size_t waste = page_size - (num_slots * size);
+ size_t num_slots = page_size / this->slot_size;
+ size_t waste = page_size - (num_slots * this->slot_size);
// Leaving a page unfaulted is not free; the page will occupy an empty page
// table entry. Make a simple attempt to account for that.
//
@@ -162,14 +154,13 @@ static void PartitionAllocBaseInit(PartitionRootBase* root) {
root->inverted_self = ~reinterpret_cast<uintptr_t>(root);
}
-static void PartitionBucketInitBase(PartitionBucket* bucket,
- PartitionRootBase* root) {
- bucket->active_pages_head = &g_sentinel_page;
- bucket->empty_pages_head = nullptr;
- bucket->decommitted_pages_head = nullptr;
- bucket->num_full_pages = 0;
- bucket->num_system_pages_per_slot_span =
- PartitionBucketNumSystemPages(bucket->slot_size);
+void PartitionBucket::Init(uint32_t new_slot_size) {
+ slot_size = new_slot_size;
+ active_pages_head = &g_sentinel_page;
+ empty_pages_head = nullptr;
+ decommitted_pages_head = nullptr;
+ num_full_pages = 0;
+ num_system_pages_per_slot_span = get_system_pages_per_slot_span();
}
void PartitionAllocGlobalInit(void (*oom_handling_function)()) {
@@ -186,10 +177,9 @@ void PartitionRoot::Init(size_t num_buckets, size_t max_allocation) {
for (i = 0; i < this->num_buckets; ++i) {
PartitionBucket* bucket = &this->buckets()[i];
if (!i)
- bucket->slot_size = kAllocationGranularity;
+ bucket->Init(kAllocationGranularity);
else
- bucket->slot_size = i << kBucketShift;
- PartitionBucketInitBase(bucket, this);
+ bucket->Init(i << kBucketShift);
}
}
@@ -238,8 +228,7 @@ void PartitionRootGeneric::Init() {
PartitionBucket* bucket = &this->buckets[0];
for (i = 0; i < kGenericNumBucketedOrders; ++i) {
for (j = 0; j < kGenericNumBucketsPerOrder; ++j) {
- bucket->slot_size = current_size;
- PartitionBucketInitBase(bucket, this);
+ bucket->Init(current_size);
// Disable psuedo buckets so that touching them faults.
if (current_size % kGenericSmallestBucket)
bucket->active_pages_head = nullptr;
@@ -260,7 +249,7 @@ void PartitionRootGeneric::Init() {
// Use the bucket of the finest granularity for malloc(0) etc.
*bucketPtr++ = &this->buckets[0];
} else if (order > kGenericMaxBucketedOrder) {
- *bucketPtr++ = &g_sentinel_bucket;
+ *bucketPtr++ = PartitionBucket::get_sentinel_bucket();
} else {
PartitionBucket* validBucket = bucket;
// Skip over invalid buckets.
@@ -276,7 +265,7 @@ void PartitionRootGeneric::Init() {
((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder));
// And there's one last bucket lookup that will be hit for e.g. malloc(-1),
// which tries to overflow to a non-existant order.
- *bucketPtr = &g_sentinel_bucket;
+ *bucketPtr = PartitionBucket::get_sentinel_bucket();
}
#if !defined(ARCH_CPU_64_BITS)
@@ -305,47 +294,41 @@ static NOINLINE void PartitionExcessiveAllocationSize() {
OOM_CRASH();
}
-static NOINLINE void PartitionBucketFull() {
+NOINLINE void PartitionBucket::OnFull() {
OOM_CRASH();
}
-// PartitionPageStateIs*
-// Note that it's only valid to call these functions on pages found on one of
-// the page lists. Specifically, you can't call these functions on full pages
-// that were detached from the active list.
-static bool ALWAYS_INLINE
-PartitionPageStateIsActive(const PartitionPage* page) {
- DCHECK(page != &g_sentinel_page);
- DCHECK(!page->page_offset);
- return (page->num_allocated_slots > 0 &&
- (page->freelist_head || page->num_unprovisioned_slots));
+ALWAYS_INLINE bool PartitionPage::is_active() const {
+ DCHECK(this != &g_sentinel_page);
+ DCHECK(!page_offset);
+ return (num_allocated_slots > 0 &&
+ (freelist_head || num_unprovisioned_slots));
}
-static bool ALWAYS_INLINE PartitionPageStateIsFull(const PartitionPage* page) {
- DCHECK(page != &g_sentinel_page);
- DCHECK(!page->page_offset);
- bool ret = (page->num_allocated_slots == page->bucket->get_slots_per_span());
+ALWAYS_INLINE bool PartitionPage::is_full() const {
+ DCHECK(this != &g_sentinel_page);
+ DCHECK(!page_offset);
+ bool ret = (num_allocated_slots == bucket->get_slots_per_span());
if (ret) {
- DCHECK(!page->freelist_head);
- DCHECK(!page->num_unprovisioned_slots);
+ DCHECK(!freelist_head);
+ DCHECK(!num_unprovisioned_slots);
}
return ret;
}
-static bool ALWAYS_INLINE PartitionPageStateIsEmpty(const PartitionPage* page) {
- DCHECK(page != &g_sentinel_page);
- DCHECK(!page->page_offset);
- return (!page->num_allocated_slots && page->freelist_head);
+ALWAYS_INLINE bool PartitionPage::is_empty() const {
+ DCHECK(this != &g_sentinel_page);
+ DCHECK(!page_offset);
+ return (!num_allocated_slots && freelist_head);
}
-static bool ALWAYS_INLINE
-PartitionPageStateIsDecommitted(const PartitionPage* page) {
- DCHECK(page != &g_sentinel_page);
- DCHECK(!page->page_offset);
- bool ret = (!page->num_allocated_slots && !page->freelist_head);
+ALWAYS_INLINE bool PartitionPage::is_decommitted() const {
+ DCHECK(this != &g_sentinel_page);
+ DCHECK(!page_offset);
+ bool ret = (!num_allocated_slots && !freelist_head);
if (ret) {
- DCHECK(!page->num_unprovisioned_slots);
- DCHECK(page->empty_cache_index == -1);
+ DCHECK(!num_unprovisioned_slots);
+ DCHECK(empty_cache_index == -1);
}
return ret;
}
@@ -380,7 +363,7 @@ static ALWAYS_INLINE void PartitionRecommitSystemPages(PartitionRootBase* root,
PartitionIncreaseCommittedPages(root, length);
}
-static ALWAYS_INLINE void* PartitionAllocPartitionPages(
+ALWAYS_INLINE void* PartitionBucket::AllocNewSlotSpan(
PartitionRootBase* root,
int flags,
uint16_t num_partition_pages) {
@@ -498,37 +481,28 @@ static ALWAYS_INLINE void* PartitionAllocPartitionPages(
return ret;
}
-// Returns a natural number of PartitionPages (calculated by
-// PartitionBucketNumSystemPages()) to allocate from the current SuperPage
-// when the bucket runs out of slots.
-static ALWAYS_INLINE uint16_t
-PartitionBucketPartitionPages(const PartitionBucket* bucket) {
+ALWAYS_INLINE uint16_t PartitionBucket::get_pages_per_slot_span() {
// Rounds up to nearest multiple of kNumSystemPagesPerPartitionPage.
- return (bucket->num_system_pages_per_slot_span +
+ return (num_system_pages_per_slot_span +
(kNumSystemPagesPerPartitionPage - 1)) /
kNumSystemPagesPerPartitionPage;
}
-static ALWAYS_INLINE void PartitionPageReset(PartitionPage* page) {
- DCHECK(PartitionPageStateIsDecommitted(page));
+ALWAYS_INLINE void PartitionPage::Reset() {
+ DCHECK(this->is_decommitted());
- page->num_unprovisioned_slots = page->bucket->get_slots_per_span();
- DCHECK(page->num_unprovisioned_slots);
+ num_unprovisioned_slots = bucket->get_slots_per_span();
+ DCHECK(num_unprovisioned_slots);
- page->next_page = nullptr;
+ next_page = nullptr;
}
-// Each bucket allocates a slot span when it runs out of slots.
-// A slot span's size is equal to PartitionBucketPartitionPages(bucket)
-// number of PartitionPages. This function initializes all pages within the
-// span.
-static ALWAYS_INLINE void PartitionPageSetup(PartitionPage* page,
- PartitionBucket* bucket) {
+ALWAYS_INLINE void PartitionBucket::InitializeSlotSpan(PartitionPage* page) {
// The bucket never changes. We set it up once.
- page->bucket = bucket;
+ page->bucket = this;
page->empty_cache_index = -1;
- PartitionPageReset(page);
+ page->Reset();
// If this page has just a single slot, do not set up page offsets for any
// page metadata other than the first one. This ensures that attempts to
@@ -536,7 +510,7 @@ static ALWAYS_INLINE void PartitionPageSetup(PartitionPage* page,
if (page->num_unprovisioned_slots == 1)
return;
- uint16_t num_partition_pages = PartitionBucketPartitionPages(bucket);
+ uint16_t num_partition_pages = get_pages_per_slot_span();
char* page_char_ptr = reinterpret_cast<char*>(page);
for (uint16_t i = 1; i < num_partition_pages; ++i) {
page_char_ptr += kPageMetadataSize;
@@ -546,21 +520,19 @@ static ALWAYS_INLINE void PartitionPageSetup(PartitionPage* page,
}
}
-static ALWAYS_INLINE char* PartitionPageAllocAndFillFreelist(
- PartitionPage* page) {
- DCHECK(page != &g_sentinel_page);
+ALWAYS_INLINE char* PartitionBucket::AllocAndFillFreelist(PartitionPage* page) {
+ DCHECK(page != PartitionPage::get_sentinel_page());
uint16_t num_slots = page->num_unprovisioned_slots;
DCHECK(num_slots);
- PartitionBucket* bucket = page->bucket;
// We should only get here when _every_ slot is either used or unprovisioned.
// (The third state is "on the freelist". If we have a non-empty freelist, we
// should not get here.)
- DCHECK(num_slots + page->num_allocated_slots == bucket->get_slots_per_span());
+ DCHECK(num_slots + page->num_allocated_slots == this->get_slots_per_span());
// Similarly, make explicitly sure that the freelist is empty.
DCHECK(!page->freelist_head);
DCHECK(page->num_allocated_slots >= 0);
- size_t size = bucket->slot_size;
+ size_t size = this->slot_size;
char* base = reinterpret_cast<char*>(PartitionPage::ToPointer(page));
char* return_object = base + (size * page->num_allocated_slots);
char* firstFreelistPointer = return_object + size;
@@ -605,80 +577,72 @@ static ALWAYS_INLINE char* PartitionPageAllocAndFillFreelist(
freelist_pointer += size;
PartitionFreelistEntry* next_entry =
reinterpret_cast<PartitionFreelistEntry*>(freelist_pointer);
- entry->next = PartitionFreelistMask(next_entry);
+ entry->next = PartitionFreelistEntry::Transform(next_entry);
entry = next_entry;
}
- entry->next = PartitionFreelistMask(nullptr);
+ entry->next = PartitionFreelistEntry::Transform(nullptr);
} else {
page->freelist_head = nullptr;
}
return return_object;
}
-// This helper function scans a bucket's active page list for a suitable new
-// active page.
-// When it finds a suitable new active page (one that has free slots and is not
-// empty), it is set as the new active page. If there is no suitable new
-// active page, the current active page is set to &g_sentinel_page.
-// As potential pages are scanned, they are tidied up according to their state.
-// Empty pages are swept on to the empty page list, decommitted pages on to the
-// decommitted page list and full pages are unlinked from any list.
-static bool PartitionSetNewActivePage(PartitionBucket* bucket) {
- PartitionPage* page = bucket->active_pages_head;
- if (page == &g_sentinel_page)
+bool PartitionBucket::SetNewActivePage() {
+ PartitionPage* page = this->active_pages_head;
+ if (page == PartitionPage::get_sentinel_page())
return false;
PartitionPage* next_page;
for (; page; page = next_page) {
next_page = page->next_page;
- DCHECK(page->bucket == bucket);
- DCHECK(page != bucket->empty_pages_head);
- DCHECK(page != bucket->decommitted_pages_head);
+ DCHECK(page->bucket == this);
+ DCHECK(page != this->empty_pages_head);
+ DCHECK(page != this->decommitted_pages_head);
- // Deal with empty and decommitted pages.
- if (LIKELY(PartitionPageStateIsActive(page))) {
+ if (LIKELY(page->is_active())) {
// This page is usable because it has freelist entries, or has
// unprovisioned slots we can create freelist entries from.
- bucket->active_pages_head = page;
+ this->active_pages_head = page;
return true;
}
- if (LIKELY(PartitionPageStateIsEmpty(page))) {
- page->next_page = bucket->empty_pages_head;
- bucket->empty_pages_head = page;
- } else if (LIKELY(PartitionPageStateIsDecommitted(page))) {
- page->next_page = bucket->decommitted_pages_head;
- bucket->decommitted_pages_head = page;
+
+ // Deal with empty and decommitted pages.
+ if (LIKELY(page->is_empty())) {
+ page->next_page = this->empty_pages_head;
+ this->empty_pages_head = page;
+ } else if (LIKELY(page->is_decommitted())) {
+ page->next_page = this->decommitted_pages_head;
+ this->decommitted_pages_head = page;
} else {
- DCHECK(PartitionPageStateIsFull(page));
+ DCHECK(page->is_full());
// If we get here, we found a full page. Skip over it too, and also
// tag it as full (via a negative value). We need it tagged so that
// free'ing can tell, and move it back into the active page list.
page->num_allocated_slots = -page->num_allocated_slots;
- ++bucket->num_full_pages;
+ ++this->num_full_pages;
// num_full_pages is a uint16_t for efficient packing so guard against
// overflow to be safe.
- if (UNLIKELY(!bucket->num_full_pages))
- PartitionBucketFull();
+ if (UNLIKELY(!this->num_full_pages))
+ OnFull();
// Not necessary but might help stop accidents.
page->next_page = nullptr;
}
}
- bucket->active_pages_head = &g_sentinel_page;
+ this->active_pages_head = PartitionPage::get_sentinel_page();
return false;
}
-static ALWAYS_INLINE PartitionDirectMapExtent* partitionPageToDirectMapExtent(
+ALWAYS_INLINE PartitionDirectMapExtent* PartitionDirectMapExtent::FromPage(
PartitionPage* page) {
DCHECK(page->bucket->is_direct_mapped());
return reinterpret_cast<PartitionDirectMapExtent*>(
reinterpret_cast<char*>(page) + 3 * kPageMetadataSize);
}
-static ALWAYS_INLINE void PartitionPageSetRawSize(PartitionPage* page,
- size_t size) {
- size_t* raw_size_ptr = page->get_raw_size_ptr();
+ALWAYS_INLINE void PartitionPage::set_raw_size(size_t size) {
+ size_t* raw_size_ptr = get_raw_size_ptr();
if (UNLIKELY(raw_size_ptr != nullptr))
*raw_size_ptr = size;
}
@@ -744,7 +708,7 @@ static ALWAYS_INLINE PartitionPage* PartitionDirectMap(PartitionRootBase* root,
page->freelist_head = reinterpret_cast<PartitionFreelistEntry*>(slot);
PartitionFreelistEntry* next_entry =
reinterpret_cast<PartitionFreelistEntry*>(slot);
- next_entry->next = PartitionFreelistMask(nullptr);
+ next_entry->next = PartitionFreelistEntry::Transform(nullptr);
DCHECK(!bucket->active_pages_head);
DCHECK(!bucket->empty_pages_head);
@@ -753,7 +717,8 @@ static ALWAYS_INLINE PartitionPage* PartitionDirectMap(PartitionRootBase* root,
DCHECK(!bucket->num_full_pages);
bucket->slot_size = size;
- PartitionDirectMapExtent* map_extent = partitionPageToDirectMapExtent(page);
+ PartitionDirectMapExtent* map_extent =
+ PartitionDirectMapExtent::FromPage(page);
map_extent->map_size = map_size - kPartitionPageSize - kSystemPageSize;
map_extent->bucket = bucket;
@@ -768,8 +733,9 @@ static ALWAYS_INLINE PartitionPage* PartitionDirectMap(PartitionRootBase* root,
}
static ALWAYS_INLINE void PartitionDirectUnmap(PartitionPage* page) {
- PartitionRootBase* root = PartitionPageToRoot(page);
- const PartitionDirectMapExtent* extent = partitionPageToDirectMapExtent(page);
+ PartitionRootBase* root = PartitionRootBase::FromPage(page);
+ const PartitionDirectMapExtent* extent =
+ PartitionDirectMapExtent::FromPage(page);
size_t unmap_size = extent->map_size;
// Maintain the doubly-linked list of all direct mappings.
@@ -817,13 +783,13 @@ void* PartitionBucket::SlowPathAlloc(PartitionRootBase* root,
// branches.
//
// Note: The ordering of the conditionals matter! In particular,
- // PartitionSetNewActivePage() has a side-effect even when returning
+ // SetNewActivePage() has a side-effect even when returning
// false where it sweeps the active page list and may move things into
// the empty or decommitted lists which affects the subsequent conditional.
bool returnNull = flags & PartitionAllocReturnNull;
if (UNLIKELY(this->is_direct_mapped())) {
DCHECK(size > kGenericMaxBucketed);
- DCHECK(this == &g_sentinel_bucket);
+ DCHECK(this == get_sentinel_bucket());
DCHECK(this->active_pages_head == &g_sentinel_page);
if (size > kGenericMaxDirectMapped) {
if (returnNull)
@@ -831,10 +797,10 @@ void* PartitionBucket::SlowPathAlloc(PartitionRootBase* root,
PartitionExcessiveAllocationSize();
}
new_page = PartitionDirectMap(root, flags, size);
- } else if (LIKELY(PartitionSetNewActivePage(this))) {
+ } else if (LIKELY(this->SetNewActivePage())) {
// First, did we find an active page in the active pages list?
new_page = this->active_pages_head;
- DCHECK(PartitionPageStateIsActive(new_page));
+ DCHECK(new_page->is_active());
} else if (LIKELY(this->empty_pages_head != nullptr) ||
LIKELY(this->decommitted_pages_head != nullptr)) {
// Second, look in our lists of empty and decommitted pages.
@@ -842,15 +808,14 @@ void* PartitionBucket::SlowPathAlloc(PartitionRootBase* root,
// empty page might have been decommitted.
while (LIKELY((new_page = this->empty_pages_head) != nullptr)) {
DCHECK(new_page->bucket == this);
- DCHECK(PartitionPageStateIsEmpty(new_page) ||
- PartitionPageStateIsDecommitted(new_page));
+ DCHECK(new_page->is_empty() || new_page->is_decommitted());
this->empty_pages_head = new_page->next_page;
// Accept the empty page unless it got decommitted.
if (new_page->freelist_head) {
new_page->next_page = nullptr;
break;
}
- DCHECK(PartitionPageStateIsDecommitted(new_page));
+ DCHECK(new_page->is_decommitted());
new_page->next_page = this->decommitted_pages_head;
this->decommitted_pages_head = new_page;
}
@@ -858,22 +823,21 @@ void* PartitionBucket::SlowPathAlloc(PartitionRootBase* root,
LIKELY(this->decommitted_pages_head != nullptr)) {
new_page = this->decommitted_pages_head;
DCHECK(new_page->bucket == this);
- DCHECK(PartitionPageStateIsDecommitted(new_page));
+ DCHECK(new_page->is_decommitted());
this->decommitted_pages_head = new_page->next_page;
void* addr = PartitionPage::ToPointer(new_page);
PartitionRecommitSystemPages(root, addr,
new_page->bucket->get_bytes_per_span());
- PartitionPageReset(new_page);
+ new_page->Reset();
}
DCHECK(new_page);
} else {
// Third. If we get here, we need a brand new page.
- uint16_t num_partition_pages = PartitionBucketPartitionPages(this);
- void* rawPages =
- PartitionAllocPartitionPages(root, flags, num_partition_pages);
+ uint16_t num_partition_pages = this->get_pages_per_slot_span();
+ void* rawPages = AllocNewSlotSpan(root, flags, num_partition_pages);
if (LIKELY(rawPages != nullptr)) {
new_page = PartitionPage::FromPointerNoAlignmentCheck(rawPages);
- PartitionPageSetup(new_page, this);
+ InitializeSlotSpan(new_page);
}
}
@@ -889,27 +853,32 @@ void* PartitionBucket::SlowPathAlloc(PartitionRootBase* root,
// It seems like in many of the conditional branches above, |this| ==
// |new_page->bucket|. Maybe pull this into another function?
PartitionBucket* bucket = new_page->bucket;
- DCHECK(bucket != &g_sentinel_bucket);
+ DCHECK(bucket != get_sentinel_bucket());
bucket->active_pages_head = new_page;
- PartitionPageSetRawSize(new_page, size);
+ new_page->set_raw_size(size);
// If we found an active page with free slots, or an empty page, we have a
// usable freelist head.
if (LIKELY(new_page->freelist_head != nullptr)) {
PartitionFreelistEntry* entry = new_page->freelist_head;
- PartitionFreelistEntry* new_head = PartitionFreelistMask(entry->next);
+ PartitionFreelistEntry* new_head =
+ PartitionFreelistEntry::Transform(entry->next);
new_page->freelist_head = new_head;
new_page->num_allocated_slots++;
return entry;
}
// Otherwise, we need to build the freelist.
DCHECK(new_page->num_unprovisioned_slots);
- return PartitionPageAllocAndFillFreelist(new_page);
+ return AllocAndFillFreelist(new_page);
+}
+
+PartitionBucket* PartitionBucket::get_sentinel_bucket() {
+ return &g_sentinel_bucket;
}
static ALWAYS_INLINE void PartitionDecommitPage(PartitionRootBase* root,
PartitionPage* page) {
- DCHECK(PartitionPageStateIsEmpty(page));
+ DCHECK(page->is_empty());
DCHECK(!page->bucket->is_direct_mapped());
void* addr = PartitionPage::ToPointer(page);
PartitionDecommitSystemPages(root, addr, page->bucket->get_bytes_per_span());
@@ -922,7 +891,7 @@ static ALWAYS_INLINE void PartitionDecommitPage(PartitionRootBase* root,
// 32 bytes in size.
page->freelist_head = nullptr;
page->num_unprovisioned_slots = 0;
- DCHECK(PartitionPageStateIsDecommitted(page));
+ DCHECK(page->is_decommitted());
}
static void PartitionDecommitPageIfPossible(PartitionRootBase* root,
@@ -931,13 +900,13 @@ static void PartitionDecommitPageIfPossible(PartitionRootBase* root,
DCHECK(static_cast<unsigned>(page->empty_cache_index) < kMaxFreeableSpans);
DCHECK(page == root->global_empty_page_ring[page->empty_cache_index]);
page->empty_cache_index = -1;
- if (PartitionPageStateIsEmpty(page))
+ if (page->is_empty())
PartitionDecommitPage(root, page);
}
static ALWAYS_INLINE void PartitionRegisterEmptyPage(PartitionPage* page) {
- DCHECK(PartitionPageStateIsEmpty(page));
- PartitionRootBase* root = PartitionPageToRoot(page);
+ DCHECK(page->is_empty());
+ PartitionRootBase* root = PartitionRootBase::FromPage(page);
// If the page is already registered as empty, give it another life.
if (page->empty_cache_index != -1) {
@@ -975,6 +944,10 @@ static void PartitionDecommitEmptyPages(PartitionRootBase* root) {
}
}
+PartitionPage* PartitionPage::get_sentinel_page() {
+ return &g_sentinel_page;
+}
+
void PartitionPage::FreeSlowPath() {
DCHECK(this != &g_sentinel_page);
if (LIKELY(this->num_allocated_slots == 0)) {
@@ -986,10 +959,10 @@ void PartitionPage::FreeSlowPath() {
// If it's the current active page, change it. We bounce the page to
// the empty list as a force towards defragmentation.
if (LIKELY(this == bucket->active_pages_head))
- PartitionSetNewActivePage(bucket);
+ bucket->SetNewActivePage();
DCHECK(bucket->active_pages_head != this);
- PartitionPageSetRawSize(this, 0);
+ set_raw_size(0);
DCHECK(!get_raw_size());
PartitionRegisterEmptyPage(this);
@@ -1040,7 +1013,7 @@ bool PartitionReallocDirectMappedInPlace(PartitionRootGeneric* root,
char* char_ptr = static_cast<char*>(PartitionPage::ToPointer(page));
if (new_size < current_size) {
- size_t map_size = partitionPageToDirectMapExtent(page)->map_size;
+ size_t map_size = PartitionDirectMapExtent::FromPage(page)->map_size;
// Don't reallocate in-place if new size is less than 80 % of the full
// map size, to avoid holding on to too much unused address space.
@@ -1052,7 +1025,7 @@ bool PartitionReallocDirectMappedInPlace(PartitionRootGeneric* root,
PartitionDecommitSystemPages(root, char_ptr + new_size, decommitSize);
CHECK(SetSystemPagesAccess(char_ptr + new_size, decommitSize,
PageInaccessible));
- } else if (new_size <= partitionPageToDirectMapExtent(page)->map_size) {
+ } else if (new_size <= PartitionDirectMapExtent::FromPage(page)->map_size) {
// Grow within the actually allocated memory. Just need to make the
// pages accessible again.
size_t recommit_size = new_size - current_size;
@@ -1074,7 +1047,7 @@ bool PartitionReallocDirectMappedInPlace(PartitionRootGeneric* root,
PartitionCookieWriteValue(char_ptr + raw_size - kCookieSize);
#endif
- PartitionPageSetRawSize(page, raw_size);
+ page->set_raw_size(raw_size);
DCHECK(page->get_raw_size() == raw_size);
page->bucket->slot_size = new_size;
@@ -1122,7 +1095,7 @@ void* PartitionRootGeneric::Realloc(void* ptr,
// Trying to allocate a block of size new_size would give us a block of
// the same size as the one we've already got, so re-use the allocation
// after updating statistics (and cookies, if present).
- PartitionPageSetRawSize(page, PartitionCookieSizeAdjustAdd(new_size));
+ page->set_raw_size(PartitionCookieSizeAdjustAdd(new_size));
#if DCHECK_IS_ON()
// Write a new trailing cookie when it is possible to keep track of
// |new_size| via the raw size pointer.
@@ -1184,14 +1157,14 @@ static size_t PartitionPurgePage(PartitionPage* page, bool discard) {
size_t slotIndex = (reinterpret_cast<char*>(entry) - ptr) / slot_size;
DCHECK(slotIndex < num_slots);
slot_usage[slotIndex] = 0;
- entry = PartitionFreelistMask(entry->next);
+ entry = PartitionFreelistEntry::Transform(entry->next);
#if !defined(OS_WIN)
// If we have a slot where the masked freelist entry is 0, we can
// actually discard that freelist entry because touching a discarded
// page is guaranteed to return original content or 0.
// (Note that this optimization won't fire on big endian machines
// because the masking function is negation.)
- if (!PartitionFreelistMask(entry))
+ if (!PartitionFreelistEntry::Transform(entry))
last_slot = slotIndex;
#endif
}
@@ -1232,7 +1205,7 @@ static size_t PartitionPurgePage(PartitionPage* page, bool discard) {
continue;
auto* entry = reinterpret_cast<PartitionFreelistEntry*>(
ptr + (slot_size * slotIndex));
- *entry_ptr = PartitionFreelistMask(entry);
+ *entry_ptr = PartitionFreelistEntry::Transform(entry);
entry_ptr = reinterpret_cast<PartitionFreelistEntry**>(entry);
num_new_entries++;
#if !defined(OS_WIN)
@@ -1242,7 +1215,8 @@ static size_t PartitionPurgePage(PartitionPage* page, bool discard) {
// Terminate the freelist chain.
*entry_ptr = nullptr;
// The freelist head is stored unmasked.
- page->freelist_head = PartitionFreelistMask(page->freelist_head);
+ page->freelist_head =
+ PartitionFreelistEntry::Transform(page->freelist_head);
DCHECK(num_new_entries == num_slots - page->num_allocated_slots);
// Discard the memory.
DiscardSystemPages(begin_ptr, unprovisioned_bytes);
@@ -1317,7 +1291,7 @@ static void PartitionDumpPageStats(PartitionBucketMemoryStats* stats_out,
PartitionPage* page) {
uint16_t bucket_num_slots = page->bucket->get_slots_per_span();
- if (PartitionPageStateIsDecommitted(page)) {
+ if (page->is_decommitted()) {
++stats_out->num_decommitted_pages;
return;
}
@@ -1336,13 +1310,13 @@ static void PartitionDumpPageStats(PartitionBucketMemoryStats* stats_out,
RoundUpToSystemPage((bucket_num_slots - page->num_unprovisioned_slots) *
stats_out->bucket_slot_size);
stats_out->resident_bytes += page_bytes_resident;
- if (PartitionPageStateIsEmpty(page)) {
+ if (page->is_empty()) {
stats_out->decommittable_bytes += page_bytes_resident;
++stats_out->num_empty_pages;
- } else if (PartitionPageStateIsFull(page)) {
+ } else if (page->is_full()) {
++stats_out->num_full_pages;
} else {
- DCHECK(PartitionPageStateIsActive(page));
+ DCHECK(page->is_active());
++stats_out->num_active_pages;
}
}
@@ -1373,13 +1347,12 @@ static void PartitionDumpBucketStats(PartitionBucketMemoryStats* stats_out,
for (PartitionPage* page = bucket->empty_pages_head; page;
page = page->next_page) {
- DCHECK(PartitionPageStateIsEmpty(page) ||
- PartitionPageStateIsDecommitted(page));
+ DCHECK(page->is_empty() || page->is_decommitted());
PartitionDumpPageStats(stats_out, page);
}
for (PartitionPage* page = bucket->decommitted_pages_head; page;
page = page->next_page) {
- DCHECK(PartitionPageStateIsDecommitted(page));
+ DCHECK(page->is_decommitted());
PartitionDumpPageStats(stats_out, page);
}
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc.h b/chromium/base/allocator/partition_allocator/partition_alloc.h
index 68201dffb17..6e5143b094a 100644
--- a/chromium/base/allocator/partition_allocator/partition_alloc.h
+++ b/chromium/base/allocator/partition_allocator/partition_alloc.h
@@ -97,7 +97,11 @@ static const size_t kBucketShift = (kAllocationGranularity == 8) ? 3 : 2;
// system page of the span. For our current max slot span size of 64k and other
// constant values, we pack _all_ PartitionRootGeneric::Alloc() sizes perfectly
// up against the end of a system page.
+#if defined(_MIPS_ARCH_LOONGSON)
+static const size_t kPartitionPageShift = 16; // 64KB
+#else
static const size_t kPartitionPageShift = 14; // 16KB
+#endif
static const size_t kPartitionPageSize = 1 << kPartitionPageShift;
static const size_t kPartitionPageOffsetMask = kPartitionPageSize - 1;
static const size_t kPartitionPageBaseMask = ~kPartitionPageOffsetMask;
@@ -230,8 +234,31 @@ class PartitionStatsDumper;
struct PartitionBucket;
struct PartitionRootBase;
+// TODO(ajwong): Introduce an EncodedFreelistEntry type and then replace
+// Transform() with Encode()/Decode() such that the API provides some static
+// type safety.
+//
+// https://crbug.com/787153
struct PartitionFreelistEntry {
PartitionFreelistEntry* next;
+
+ static ALWAYS_INLINE PartitionFreelistEntry* Transform(
+ PartitionFreelistEntry* ptr) {
+// We use bswap on little endian as a fast mask for two reasons:
+// 1) If an object is freed and its vtable used where the attacker doesn't
+// get the chance to run allocations between the free and use, the vtable
+// dereference is likely to fault.
+// 2) If the attacker has a linear buffer overflow and elects to try and
+// corrupt a freelist pointer, partial pointer overwrite attacks are
+// thwarted.
+// For big endian, similar guarantees are arrived at with a negation.
+#if defined(ARCH_CPU_BIG_ENDIAN)
+ uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr);
+#else
+ uintptr_t masked = ByteSwapUintPtrT(reinterpret_cast<uintptr_t>(ptr));
+#endif
+ return reinterpret_cast<PartitionFreelistEntry*>(masked);
+ }
};
// Some notes on page states. A page can be in one of four major states:
@@ -295,6 +322,26 @@ struct PartitionPage {
}
ALWAYS_INLINE size_t get_raw_size() const;
+ ALWAYS_INLINE void set_raw_size(size_t size);
+
+ ALWAYS_INLINE void Reset();
+
+ // TODO(ajwong): Can this be made private? https://crbug.com/787153
+ BASE_EXPORT static PartitionPage* get_sentinel_page();
+
+ // Page State accessors.
+ // Note that it's only valid to call these functions on pages found on one of
+ // the page lists. Specifically, you can't call these functions on full pages
+ // that were detached from the active list.
+ //
+ // This restriction provides the flexibility for some of the status fields to
+ // be repurposed when a page is taken off a list. See the negation of
+ // |num_allocated_slots| when a full page is removed from the active list
+ // for an example of such repurposing.
+ ALWAYS_INLINE bool is_active() const;
+ ALWAYS_INLINE bool is_full() const;
+ ALWAYS_INLINE bool is_empty() const;
+ ALWAYS_INLINE bool is_decommitted() const;
};
static_assert(sizeof(PartitionPage) <= kPageMetadataSize,
"PartitionPage must be able to fit in a metadata slot");
@@ -310,6 +357,7 @@ struct PartitionBucket {
unsigned num_full_pages : 24;
// Public API.
+ void Init(uint32_t new_slot_size);
// Note the matching Free() functions are in PartitionPage.
BASE_EXPORT void* Alloc(PartitionRootBase* root, int flags, size_t size);
@@ -328,6 +376,60 @@ struct PartitionBucket {
// TODO(ajwong): Chagne to CheckedMul. https://crbug.com/787153
return static_cast<uint16_t>(get_bytes_per_span() / slot_size);
}
+
+ // TODO(ajwong): Can this be made private? https://crbug.com/787153
+ static PartitionBucket* get_sentinel_bucket();
+
+ // This helper function scans a bucket's active page list for a suitable new
+ // active page. When it finds a suitable new active page (one that has
+ // free slots and is not empty), it is set as the new active page. If there
+ // is no suitable new active page, the current active page is set to
+ // PartitionPage::get_sentinel_page(). As potential pages are scanned, they
+ // are tidied up according to their state. Empty pages are swept on to the
+ // empty page list, decommitted pages on to the decommitted page list and full
+ // pages are unlinked from any list.
+ //
+ // This is where the guts of the bucket maintenance is done!
+ bool SetNewActivePage();
+
+ private:
+ static void OutOfMemory(const PartitionRootBase* root);
+ static void OutOfMemoryWithLotsOfUncommitedPages();
+
+ static NOINLINE void OnFull();
+
+ // Returns a natural number of PartitionPages (calculated by
+ // get_system_pages_per_slot_span()) to allocate from the current
+ // SuperPage when the bucket runs out of slots.
+ ALWAYS_INLINE uint16_t get_pages_per_slot_span();
+
+ // Returns the number of system pages in a slot span.
+ //
+ // The calculation attempts to find the best number of System Pages to
+ // allocate for the given slot_size to minimize wasted space. It uses a
+ // heuristic that looks at number of bytes wasted after the last slot and
+ // attempts to account for the PTE usage of each System Page.
+ uint8_t get_system_pages_per_slot_span();
+
+ // Allocates a new slot span with size |num_partition_pages| from the
+ // current extent. Metadata within this slot span will be uninitialized.
+ // Returns nullptr on error.
+ ALWAYS_INLINE void* AllocNewSlotSpan(PartitionRootBase* root,
+ int flags,
+ uint16_t num_partition_pages);
+
+ // Each bucket allocates a slot span when it runs out of slots.
+ // A slot span's size is equal to get_pages_per_slot_span() number of
+ // PartitionPages. This function initializes all PartitionPage within the
+ // span to point to the first PartitionPage which holds all the metadata
+ // for the span and registers this bucket as the owner of the span. It does
+ // NOT put the slots into the bucket's freelist.
+ ALWAYS_INLINE void InitializeSlotSpan(PartitionPage* page);
+
+ // Allocates one slot from the given |page| and then adds the remainder to
+ // the current bucket. If the |page| was freshly allocated, it must have been
+ // passed through InitializeSlotSpan() first.
+ ALWAYS_INLINE char* AllocAndFillFreelist(PartitionPage* page);
};
// An "extent" is a span of consecutive superpages. We link to the partition's
@@ -348,6 +450,8 @@ struct PartitionDirectMapExtent {
PartitionDirectMapExtent* prev_extent;
PartitionBucket* bucket;
size_t map_size; // Mapped size, not including guard pages and meta-data.
+
+ ALWAYS_INLINE static PartitionDirectMapExtent* FromPage(PartitionPage* page);
};
struct BASE_EXPORT PartitionRootBase {
@@ -374,8 +478,10 @@ struct BASE_EXPORT PartitionRootBase {
// Pubic API
- // gOomHandlingFunction is invoked when ParitionAlloc hits OutOfMemory.
+ // gOomHandlingFunction is invoked when PartitionAlloc hits OutOfMemory.
static void (*gOomHandlingFunction)();
+
+ ALWAYS_INLINE static PartitionRootBase* FromPage(PartitionPage* page);
};
enum PartitionPurgeFlags {
@@ -555,24 +661,6 @@ class BASE_EXPORT PartitionAllocHooks {
static FreeHook* free_hook_;
};
-ALWAYS_INLINE PartitionFreelistEntry* PartitionFreelistMask(
- PartitionFreelistEntry* ptr) {
-// We use bswap on little endian as a fast mask for two reasons:
-// 1) If an object is freed and its vtable used where the attacker doesn't
-// get the chance to run allocations between the free and use, the vtable
-// dereference is likely to fault.
-// 2) If the attacker has a linear buffer overflow and elects to try and
-// corrupt a freelist pointer, partial pointer overwrite attacks are
-// thwarted.
-// For big endian, similar guarantees are arrived at with a negation.
-#if defined(ARCH_CPU_BIG_ENDIAN)
- uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr);
-#else
- uintptr_t masked = ByteSwapUintPtrT(reinterpret_cast<uintptr_t>(ptr));
-#endif
- return reinterpret_cast<PartitionFreelistEntry*>(masked);
-}
-
ALWAYS_INLINE size_t PartitionCookieSizeAdjustAdd(size_t size) {
#if DCHECK_IS_ON()
// Add space for cookies, checking for integer overflow. TODO(palmer):
@@ -702,7 +790,8 @@ ALWAYS_INLINE size_t PartitionPage::get_raw_size() const {
return 0;
}
-ALWAYS_INLINE PartitionRootBase* PartitionPageToRoot(PartitionPage* page) {
+ALWAYS_INLINE PartitionRootBase* PartitionRootBase::FromPage(
+ PartitionPage* page) {
PartitionSuperPageExtentEntry* extent_entry =
reinterpret_cast<PartitionSuperPageExtentEntry*>(
reinterpret_cast<uintptr_t>(page) & kSystemPageBaseMask);
@@ -710,7 +799,7 @@ ALWAYS_INLINE PartitionRootBase* PartitionPageToRoot(PartitionPage* page) {
}
ALWAYS_INLINE bool PartitionPage::IsPointerValid(PartitionPage* page) {
- PartitionRootBase* root = PartitionPageToRoot(page);
+ PartitionRootBase* root = PartitionRootBase::FromPage(page);
return root->inverted_self == ~reinterpret_cast<uintptr_t>(root);
}
@@ -728,8 +817,8 @@ ALWAYS_INLINE void* PartitionBucket::Alloc(PartitionRootBase* root,
// All large allocations must go through the slow path to correctly
// update the size metadata.
DCHECK(page->get_raw_size() == 0);
- PartitionFreelistEntry* new_head =
- PartitionFreelistMask(static_cast<PartitionFreelistEntry*>(ret)->next);
+ PartitionFreelistEntry* new_head = PartitionFreelistEntry::Transform(
+ static_cast<PartitionFreelistEntry*>(ret)->next);
page->freelist_head = new_head;
page->num_allocated_slots++;
} else {
@@ -802,9 +891,10 @@ ALWAYS_INLINE void PartitionPage::Free(void* ptr) {
PartitionPage::FromPointer(freelist_head)));
CHECK(ptr != freelist_head); // Catches an immediate double free.
// Look for double free one level deeper in debug.
- DCHECK(!freelist_head || ptr != PartitionFreelistMask(freelist_head->next));
+ DCHECK(!freelist_head ||
+ ptr != PartitionFreelistEntry::Transform(freelist_head->next));
PartitionFreelistEntry* entry = static_cast<PartitionFreelistEntry*>(ptr);
- entry->next = PartitionFreelistMask(freelist_head);
+ entry->next = PartitionFreelistEntry::Transform(freelist_head);
freelist_head = entry;
--this->num_allocated_slots;
if (UNLIKELY(this->num_allocated_slots <= 0)) {
@@ -973,8 +1063,6 @@ class BASE_EXPORT PartitionAllocatorGeneric {
PartitionRootGeneric partition_root_;
};
-BASE_EXPORT PartitionPage* GetSentinelPageForTesting();
-
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc b/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc
index 6ea89b7c875..b84db9b7308 100644
--- a/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc
+++ b/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc
@@ -33,7 +33,7 @@ std::unique_ptr<T[]> WrapArrayUnique(T* ptr) {
return std::unique_ptr<T[]>(ptr);
}
-const size_t kTestMaxAllocation = 4096;
+constexpr size_t kTestMaxAllocation = base::kSystemPageSize;
bool IsLargeMemoryDevice() {
// Treat any device with 2GiB or more of physical memory as a "large memory
@@ -139,7 +139,8 @@ class PartitionAllocTest : public testing::Test {
bucket->active_pages_head->num_allocated_slots));
EXPECT_EQ(nullptr, bucket->active_pages_head->freelist_head);
EXPECT_TRUE(bucket->active_pages_head);
- EXPECT_TRUE(bucket->active_pages_head != GetSentinelPageForTesting());
+ EXPECT_TRUE(bucket->active_pages_head !=
+ PartitionPage::get_sentinel_page());
return bucket->active_pages_head;
}
@@ -380,7 +381,7 @@ TEST(PageAllocatorTest, MAYBE_ReserveAddressSpace) {
// Check that the most basic of allocate / free pairs work.
TEST_F(PartitionAllocTest, Basic) {
PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex];
- PartitionPage* seedPage = GetSentinelPageForTesting();
+ PartitionPage* seedPage = PartitionPage::get_sentinel_page();
EXPECT_FALSE(bucket->empty_pages_head);
EXPECT_FALSE(bucket->decommitted_pages_head);
@@ -445,7 +446,7 @@ TEST_F(PartitionAllocTest, MultiPages) {
PartitionPage* page = GetFullPage(kTestAllocSize);
FreeFullPage(page);
EXPECT_TRUE(bucket->empty_pages_head);
- EXPECT_EQ(GetSentinelPageForTesting(), bucket->active_pages_head);
+ EXPECT_EQ(PartitionPage::get_sentinel_page(), bucket->active_pages_head);
EXPECT_EQ(nullptr, page->next_page);
EXPECT_EQ(0, page->num_allocated_slots);
@@ -464,7 +465,7 @@ TEST_F(PartitionAllocTest, MultiPages) {
FreeFullPage(page);
EXPECT_EQ(0, page->num_allocated_slots);
EXPECT_TRUE(bucket->empty_pages_head);
- EXPECT_EQ(GetSentinelPageForTesting(), bucket->active_pages_head);
+ EXPECT_EQ(PartitionPage::get_sentinel_page(), bucket->active_pages_head);
// Allocate a new page, it should pull from the freelist.
page = GetFullPage(kTestAllocSize);
@@ -560,7 +561,7 @@ TEST_F(PartitionAllocTest, FreePageListPageTransitions) {
EXPECT_EQ(pages[numToFillFreeListPage - 1], bucket->active_pages_head);
for (i = 0; i < numToFillFreeListPage; ++i)
FreeFullPage(pages[i]);
- EXPECT_EQ(GetSentinelPageForTesting(), bucket->active_pages_head);
+ EXPECT_EQ(PartitionPage::get_sentinel_page(), bucket->active_pages_head);
EXPECT_TRUE(bucket->empty_pages_head);
// Allocate / free in a different bucket size so we get control of a
@@ -578,7 +579,7 @@ TEST_F(PartitionAllocTest, FreePageListPageTransitions) {
for (i = 0; i < numToFillFreeListPage; ++i)
FreeFullPage(pages[i]);
- EXPECT_EQ(GetSentinelPageForTesting(), bucket->active_pages_head);
+ EXPECT_EQ(PartitionPage::get_sentinel_page(), bucket->active_pages_head);
EXPECT_TRUE(bucket->empty_pages_head);
}
@@ -802,61 +803,65 @@ TEST_F(PartitionAllocTest, GenericAllocSizes) {
// Test that we can fetch the real allocated size after an allocation.
TEST_F(PartitionAllocTest, GenericAllocGetSize) {
void* ptr;
- size_t requestedSize, actualSize, predictedSize;
+ size_t requested_size, actual_size, predicted_size;
EXPECT_TRUE(PartitionAllocSupportsGetSize());
// Allocate something small.
- requestedSize = 511 - kExtraAllocSize;
- predictedSize = generic_allocator.root()->ActualSize(requestedSize);
- ptr = generic_allocator.root()->Alloc(requestedSize, type_name);
+ requested_size = 511 - kExtraAllocSize;
+ predicted_size = generic_allocator.root()->ActualSize(requested_size);
+ ptr = generic_allocator.root()->Alloc(requested_size, type_name);
EXPECT_TRUE(ptr);
- actualSize = PartitionAllocGetSize(ptr);
- EXPECT_EQ(predictedSize, actualSize);
- EXPECT_LT(requestedSize, actualSize);
+ actual_size = PartitionAllocGetSize(ptr);
+ EXPECT_EQ(predicted_size, actual_size);
+ EXPECT_LT(requested_size, actual_size);
generic_allocator.root()->Free(ptr);
// Allocate a size that should be a perfect match for a bucket, because it
// is an exact power of 2.
- requestedSize = (256 * 1024) - kExtraAllocSize;
- predictedSize = generic_allocator.root()->ActualSize(requestedSize);
- ptr = generic_allocator.root()->Alloc(requestedSize, type_name);
+ requested_size = (256 * 1024) - kExtraAllocSize;
+ predicted_size = generic_allocator.root()->ActualSize(requested_size);
+ ptr = generic_allocator.root()->Alloc(requested_size, type_name);
EXPECT_TRUE(ptr);
- actualSize = PartitionAllocGetSize(ptr);
- EXPECT_EQ(predictedSize, actualSize);
- EXPECT_EQ(requestedSize, actualSize);
+ actual_size = PartitionAllocGetSize(ptr);
+ EXPECT_EQ(predicted_size, actual_size);
+ EXPECT_EQ(requested_size, actual_size);
generic_allocator.root()->Free(ptr);
// Allocate a size that is a system page smaller than a bucket. GetSize()
// should return a larger size than we asked for now.
- requestedSize = (256 * 1024) - kSystemPageSize - kExtraAllocSize;
- predictedSize = generic_allocator.root()->ActualSize(requestedSize);
- ptr = generic_allocator.root()->Alloc(requestedSize, type_name);
+ size_t num = 64;
+ while (num * kSystemPageSize >= 1024 * 1024) {
+ num /= 2;
+ }
+ requested_size = num * kSystemPageSize - kSystemPageSize - kExtraAllocSize;
+ predicted_size = generic_allocator.root()->ActualSize(requested_size);
+ ptr = generic_allocator.root()->Alloc(requested_size, type_name);
EXPECT_TRUE(ptr);
- actualSize = PartitionAllocGetSize(ptr);
- EXPECT_EQ(predictedSize, actualSize);
- EXPECT_EQ(requestedSize + kSystemPageSize, actualSize);
+ actual_size = PartitionAllocGetSize(ptr);
+ EXPECT_EQ(predicted_size, actual_size);
+ EXPECT_EQ(requested_size + kSystemPageSize, actual_size);
// Check that we can write at the end of the reported size too.
char* charPtr = reinterpret_cast<char*>(ptr);
- *(charPtr + (actualSize - 1)) = 'A';
+ *(charPtr + (actual_size - 1)) = 'A';
generic_allocator.root()->Free(ptr);
// Allocate something very large, and uneven.
if (IsLargeMemoryDevice()) {
- requestedSize = 512 * 1024 * 1024 - 1;
- predictedSize = generic_allocator.root()->ActualSize(requestedSize);
- ptr = generic_allocator.root()->Alloc(requestedSize, type_name);
+ requested_size = 512 * 1024 * 1024 - 1;
+ predicted_size = generic_allocator.root()->ActualSize(requested_size);
+ ptr = generic_allocator.root()->Alloc(requested_size, type_name);
EXPECT_TRUE(ptr);
- actualSize = PartitionAllocGetSize(ptr);
- EXPECT_EQ(predictedSize, actualSize);
- EXPECT_LT(requestedSize, actualSize);
+ actual_size = PartitionAllocGetSize(ptr);
+ EXPECT_EQ(predicted_size, actual_size);
+ EXPECT_LT(requested_size, actual_size);
generic_allocator.root()->Free(ptr);
}
// Too large allocation.
- requestedSize = kGenericMaxDirectMapped + 1;
- predictedSize = generic_allocator.root()->ActualSize(requestedSize);
- EXPECT_EQ(requestedSize, predictedSize);
+ requested_size = kGenericMaxDirectMapped + 1;
+ predicted_size = generic_allocator.root()->ActualSize(requested_size);
+ EXPECT_EQ(requested_size, predicted_size);
}
// Test the realloc() contract.
@@ -903,18 +908,18 @@ TEST_F(PartitionAllocTest, Realloc) {
// Test that shrinking a direct mapped allocation happens in-place.
size = kGenericMaxBucketed + 16 * kSystemPageSize;
ptr = generic_allocator.root()->Alloc(size, type_name);
- size_t actualSize = PartitionAllocGetSize(ptr);
+ size_t actual_size = PartitionAllocGetSize(ptr);
ptr2 = generic_allocator.root()->Realloc(
ptr, kGenericMaxBucketed + 8 * kSystemPageSize, type_name);
EXPECT_EQ(ptr, ptr2);
- EXPECT_EQ(actualSize - 8 * kSystemPageSize, PartitionAllocGetSize(ptr2));
+ EXPECT_EQ(actual_size - 8 * kSystemPageSize, PartitionAllocGetSize(ptr2));
// Test that a previously in-place shrunk direct mapped allocation can be
// expanded up again within its original size.
ptr = generic_allocator.root()->Realloc(ptr2, size - kSystemPageSize,
type_name);
EXPECT_EQ(ptr2, ptr);
- EXPECT_EQ(actualSize - kSystemPageSize, PartitionAllocGetSize(ptr));
+ EXPECT_EQ(actual_size - kSystemPageSize, PartitionAllocGetSize(ptr));
// Test that a direct mapped allocation is performed not in-place when the
// new size is small enough.
@@ -1301,14 +1306,14 @@ TEST_F(PartitionAllocTest, LostFreePagesBug) {
EXPECT_TRUE(bucket->empty_pages_head);
EXPECT_TRUE(bucket->empty_pages_head->next_page);
- EXPECT_EQ(GetSentinelPageForTesting(), bucket->active_pages_head);
+ EXPECT_EQ(PartitionPage::get_sentinel_page(), bucket->active_pages_head);
// At this moment, we have two decommitted pages, on the empty list.
ptr = generic_allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr);
generic_allocator.root()->Free(ptr);
- EXPECT_EQ(GetSentinelPageForTesting(), bucket->active_pages_head);
+ EXPECT_EQ(PartitionPage::get_sentinel_page(), bucket->active_pages_head);
EXPECT_TRUE(bucket->empty_pages_head);
EXPECT_TRUE(bucket->decommitted_pages_head);
@@ -1636,7 +1641,8 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
// This test checks large-but-not-quite-direct allocations.
{
- void* ptr = generic_allocator.root()->Alloc(65536 + 1, type_name);
+ constexpr size_t requested_size = 16 * kSystemPageSize;
+ void* ptr = generic_allocator.root()->Alloc(requested_size + 1, type_name);
{
MockPartitionStatsDumper dumper;
@@ -1644,14 +1650,15 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
false /* detailed dump */, &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
- size_t slot_size = 65536 + (65536 / kGenericNumBucketsPerOrder);
+ size_t slot_size =
+ requested_size + (requested_size / kGenericNumBucketsPerOrder);
const PartitionBucketMemoryStats* stats =
dumper.GetBucketStats(slot_size);
EXPECT_TRUE(stats);
EXPECT_TRUE(stats->is_valid);
EXPECT_FALSE(stats->is_direct_map);
EXPECT_EQ(slot_size, stats->bucket_slot_size);
- EXPECT_EQ(65536u + 1 + kExtraAllocSize, stats->active_bytes);
+ EXPECT_EQ(requested_size + 1 + kExtraAllocSize, stats->active_bytes);
EXPECT_EQ(slot_size, stats->resident_bytes);
EXPECT_EQ(0u, stats->decommittable_bytes);
EXPECT_EQ(kSystemPageSize, stats->discardable_bytes);
@@ -1669,7 +1676,8 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
false /* detailed dump */, &dumper);
EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
- size_t slot_size = 65536 + (65536 / kGenericNumBucketsPerOrder);
+ size_t slot_size =
+ requested_size + (requested_size / kGenericNumBucketsPerOrder);
const PartitionBucketMemoryStats* stats =
dumper.GetBucketStats(slot_size);
EXPECT_TRUE(stats);
@@ -1685,8 +1693,8 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
EXPECT_EQ(0u, stats->num_decommitted_pages);
}
- void* ptr2 =
- generic_allocator.root()->Alloc(65536 + kSystemPageSize + 1, type_name);
+ void* ptr2 = generic_allocator.root()->Alloc(
+ requested_size + kSystemPageSize + 1, type_name);
EXPECT_EQ(ptr, ptr2);
{
@@ -1695,14 +1703,15 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
false /* detailed dump */, &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
- size_t slot_size = 65536 + (65536 / kGenericNumBucketsPerOrder);
+ size_t slot_size =
+ requested_size + (requested_size / kGenericNumBucketsPerOrder);
const PartitionBucketMemoryStats* stats =
dumper.GetBucketStats(slot_size);
EXPECT_TRUE(stats);
EXPECT_TRUE(stats->is_valid);
EXPECT_FALSE(stats->is_direct_map);
EXPECT_EQ(slot_size, stats->bucket_slot_size);
- EXPECT_EQ(65536u + kSystemPageSize + 1 + kExtraAllocSize,
+ EXPECT_EQ(requested_size + kSystemPageSize + 1 + kExtraAllocSize,
stats->active_bytes);
EXPECT_EQ(slot_size, stats->resident_bytes);
EXPECT_EQ(0u, stats->decommittable_bytes);
@@ -1880,16 +1889,17 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
generic_allocator.root()->Free(ptr2);
}
{
- char* ptr1 = reinterpret_cast<char*>(
- generic_allocator.root()->Alloc(9216 - kExtraAllocSize, type_name));
- void* ptr2 =
- generic_allocator.root()->Alloc(9216 - kExtraAllocSize, type_name);
- void* ptr3 =
- generic_allocator.root()->Alloc(9216 - kExtraAllocSize, type_name);
- void* ptr4 =
- generic_allocator.root()->Alloc(9216 - kExtraAllocSize, type_name);
- memset(ptr1, 'A', 9216 - kExtraAllocSize);
- memset(ptr2, 'A', 9216 - kExtraAllocSize);
+ constexpr size_t requested_size = 2.25 * kSystemPageSize;
+ char* ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
+ requested_size - kExtraAllocSize, type_name));
+ void* ptr2 = generic_allocator.root()->Alloc(
+ requested_size - kExtraAllocSize, type_name);
+ void* ptr3 = generic_allocator.root()->Alloc(
+ requested_size - kExtraAllocSize, type_name);
+ void* ptr4 = generic_allocator.root()->Alloc(
+ requested_size - kExtraAllocSize, type_name);
+ memset(ptr1, 'A', requested_size - kExtraAllocSize);
+ memset(ptr2, 'A', requested_size - kExtraAllocSize);
generic_allocator.root()->Free(ptr2);
generic_allocator.root()->Free(ptr1);
{
@@ -1898,12 +1908,13 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
false /* detailed dump */, &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
- const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(9216);
+ const PartitionBucketMemoryStats* stats =
+ dumper.GetBucketStats(requested_size);
EXPECT_TRUE(stats);
EXPECT_TRUE(stats->is_valid);
EXPECT_EQ(0u, stats->decommittable_bytes);
EXPECT_EQ(2 * kSystemPageSize, stats->discardable_bytes);
- EXPECT_EQ(9216u * 2, stats->active_bytes);
+ EXPECT_EQ(requested_size * 2, stats->active_bytes);
EXPECT_EQ(9 * kSystemPageSize, stats->resident_bytes);
}
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
@@ -1922,6 +1933,49 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
generic_allocator.root()->Free(ptr3);
generic_allocator.root()->Free(ptr4);
}
+
+// When kSystemPageSize = 16384 (as on _MIPS_ARCH_LOONGSON), 64 *
+// kSystemPageSize (see the #else branch below) caused this test to OOM.
+// Therefore, for systems with 16 KiB pages, use 32 * kSystemPageSize.
+//
+// TODO(palmer): Refactor this to branch on page size instead of architecture,
+// for clarity of purpose and for applicability to more architectures.
+#if defined(_MIPS_ARCH_LOONGSON)
+ {
+ char* ptr1 = reinterpret_cast<char*>(PartitionAllocGeneric(
+ generic_allocator.root(), (32 * kSystemPageSize) - kExtraAllocSize,
+ type_name));
+ memset(ptr1, 'A', (32 * kSystemPageSize) - kExtraAllocSize);
+ PartitionFreeGeneric(generic_allocator.root(), ptr1);
+ ptr1 = reinterpret_cast<char*>(PartitionAllocGeneric(
+ generic_allocator.root(), (31 * kSystemPageSize) - kExtraAllocSize,
+ type_name));
+ {
+ MockPartitionStatsDumper dumper;
+ PartitionDumpStatsGeneric(generic_allocator.root(),
+ "mock_generic_allocator",
+ false /* detailed dump */, &dumper);
+ EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
+
+ const PartitionBucketMemoryStats* stats =
+ dumper.GetBucketStats(32 * kSystemPageSize);
+ EXPECT_TRUE(stats);
+ EXPECT_TRUE(stats->is_valid);
+ EXPECT_EQ(0u, stats->decommittable_bytes);
+ EXPECT_EQ(kSystemPageSize, stats->discardable_bytes);
+ EXPECT_EQ(31 * kSystemPageSize, stats->active_bytes);
+ EXPECT_EQ(32 * kSystemPageSize, stats->resident_bytes);
+ }
+ CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 30), true);
+ CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 31), true);
+ PartitionPurgeMemoryGeneric(generic_allocator.root(),
+ PartitionPurgeDiscardUnusedSystemPages);
+ CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 30), true);
+ CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 31), false);
+
+ PartitionFreeGeneric(generic_allocator.root(), ptr1);
+ }
+#else
{
char* ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
(64 * kSystemPageSize) - kExtraAllocSize, type_name));
@@ -1957,6 +2011,7 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
generic_allocator.root()->Free(ptr1);
}
+#endif
// This sub-test tests truncation of the provisioned slots in a trickier
// case where the freelist is rewritten.
generic_allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages);
diff --git a/chromium/base/allocator/partition_allocator/spin_lock.cc b/chromium/base/allocator/partition_allocator/spin_lock.cc
index f127610c18f..c30d6cd43ad 100644
--- a/chromium/base/allocator/partition_allocator/spin_lock.cc
+++ b/chromium/base/allocator/partition_allocator/spin_lock.cc
@@ -62,9 +62,6 @@
namespace base {
namespace subtle {
-SpinLock::SpinLock() = default;
-SpinLock::~SpinLock() = default;
-
void SpinLock::LockSlow() {
// The value of |kYieldProcessorTries| is cargo culted from TCMalloc, Windows
// critical section defaults, and various other recommendations.
diff --git a/chromium/base/allocator/partition_allocator/spin_lock.h b/chromium/base/allocator/partition_allocator/spin_lock.h
index d0afc477620..e698b565b07 100644
--- a/chromium/base/allocator/partition_allocator/spin_lock.h
+++ b/chromium/base/allocator/partition_allocator/spin_lock.h
@@ -22,8 +22,8 @@ namespace subtle {
class BASE_EXPORT SpinLock {
public:
- SpinLock();
- ~SpinLock();
+ constexpr SpinLock() = default;
+ ~SpinLock() = default;
using Guard = std::lock_guard<SpinLock>;
ALWAYS_INLINE void lock() {
diff --git a/chromium/base/android/jni_generator/BUILD.gn b/chromium/base/android/jni_generator/BUILD.gn
index 68e5519a7d1..4dda6cd32d5 100644
--- a/chromium/base/android/jni_generator/BUILD.gn
+++ b/chromium/base/android/jni_generator/BUILD.gn
@@ -64,7 +64,7 @@ shared_library("jni_sample_lib") {
android_apk("sample_jni_apk") {
apk_name = "SampleJni"
- android_manifest = "//build/android/AndroidManifest.xml"
+ android_manifest = "AndroidManifest.xml"
deps = [
":jni_sample_java",
"//base:base_java",
diff --git a/chromium/base/android/linker/BUILD.gn b/chromium/base/android/linker/BUILD.gn
index fcebc7e27e3..f66404569c1 100644
--- a/chromium/base/android/linker/BUILD.gn
+++ b/chromium/base/android/linker/BUILD.gn
@@ -6,17 +6,6 @@ import("//build/config/android/config.gni")
assert(is_android)
-declare_args() {
- # Set this variable to true to enable GDB support in release builds.
- #
- # The default is to disable it to reduce the likelyhood of runtime crashes
- # on devices that use machine translation (i.e. that run ARM binaries on
- # x86 CPUs with a translation layer like Intel's Houdini). For full details
- # see https://crbug.com/796938.
- #
- chromium_android_linker_enable_release_debugging = false
-}
-
shared_library("chromium_android_linker") {
sources = [
"android_dlext.h",
@@ -24,15 +13,8 @@ shared_library("chromium_android_linker") {
"legacy_linker_jni.h",
"linker_jni.cc",
"linker_jni.h",
- "modern_linker_jni.cc",
- "modern_linker_jni.h",
]
- # Disable GDB support for release builds, unless explicitly wanted.
- if (!is_debug && !chromium_android_linker_enable_release_debugging) {
- defines = [ "LEGACY_LINKER_DISABLE_DEBUGGER_SUPPORT" ]
- }
-
# The NDK contains the crazy_linker here:
# '<(android_ndk_root)/crazy_linker.gyp:crazy_linker'
# However, we use our own fork. See bug 384700.
diff --git a/chromium/base/atomic_sequence_num.h b/chromium/base/atomic_sequence_num.h
index 7d9632a123a..717e37a60b1 100644
--- a/chromium/base/atomic_sequence_num.h
+++ b/chromium/base/atomic_sequence_num.h
@@ -16,7 +16,7 @@ namespace base {
// global variable or static member.
class AtomicSequenceNumber {
public:
- constexpr AtomicSequenceNumber() {}
+ constexpr AtomicSequenceNumber() = default;
// Returns an increasing sequence number starts from 0 for each call.
// This function can be called from any thread without data race.
diff --git a/chromium/base/atomicops_internals_x86_msvc.h b/chromium/base/atomicops_internals_x86_msvc.h
index 9f05b7e78d0..ee9043e9686 100644
--- a/chromium/base/atomicops_internals_x86_msvc.h
+++ b/chromium/base/atomicops_internals_x86_msvc.h
@@ -7,7 +7,7 @@
#ifndef BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
#define BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
-#include <windows.h>
+#include "base/win/windows_types.h"
#include <intrin.h>
@@ -61,8 +61,10 @@ inline void MemoryBarrier() {
// See #undef and note at the top of this file.
__faststorefence();
#else
- // We use MemoryBarrier from WinNT.h
- ::MemoryBarrier();
+ // We use the implementation of MemoryBarrier from WinNT.h
+ LONG barrier;
+
+ _InterlockedOr(&barrier, 0);
#endif
}
@@ -115,25 +117,25 @@ static_assert(sizeof(Atomic64) == sizeof(PVOID), "atomic word is atomic");
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
Atomic64 old_value,
Atomic64 new_value) {
- PVOID result = InterlockedCompareExchangePointer(
- reinterpret_cast<volatile PVOID*>(ptr),
- reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
+ PVOID result = _InterlockedCompareExchangePointer(
+ reinterpret_cast<volatile PVOID*>(ptr),
+ reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
return reinterpret_cast<Atomic64>(result);
}
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
Atomic64 new_value) {
- PVOID result = InterlockedExchangePointer(
- reinterpret_cast<volatile PVOID*>(ptr),
- reinterpret_cast<PVOID>(new_value));
+ PVOID result =
+ _InterlockedExchangePointer(reinterpret_cast<volatile PVOID*>(ptr),
+ reinterpret_cast<PVOID>(new_value));
return reinterpret_cast<Atomic64>(result);
}
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
Atomic64 increment) {
- return InterlockedExchangeAdd64(
- reinterpret_cast<volatile LONGLONG*>(ptr),
- static_cast<LONGLONG>(increment)) + increment;
+ return _InterlockedExchangeAdd64(reinterpret_cast<volatile LONGLONG*>(ptr),
+ static_cast<LONGLONG>(increment)) +
+ increment;
}
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
diff --git a/chromium/base/auto_reset.h b/chromium/base/auto_reset.h
index 9116537bfb3..8515fe9cd72 100644
--- a/chromium/base/auto_reset.h
+++ b/chromium/base/auto_reset.h
@@ -5,6 +5,8 @@
#ifndef BASE_AUTO_RESET_H_
#define BASE_AUTO_RESET_H_
+#include <utility>
+
#include "base/macros.h"
// base::AutoReset<> is useful for setting a variable to a new value only within
@@ -23,11 +25,11 @@ class AutoReset {
public:
AutoReset(T* scoped_variable, T new_value)
: scoped_variable_(scoped_variable),
- original_value_(*scoped_variable) {
- *scoped_variable_ = new_value;
+ original_value_(std::move(*scoped_variable)) {
+ *scoped_variable_ = std::move(new_value);
}
- ~AutoReset() { *scoped_variable_ = original_value_; }
+ ~AutoReset() { *scoped_variable_ = std::move(original_value_); }
private:
T* scoped_variable_;
diff --git a/chromium/base/base_paths_win.cc b/chromium/base/base_paths_win.cc
index 62e1972e5f2..4029a49f96c 100644
--- a/chromium/base/base_paths_win.cc
+++ b/chromium/base/base_paths_win.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include <windows.h>
+#include <KnownFolders.h>
#include <shlobj.h>
#include "base/base_paths.h"
diff --git a/chromium/base/base_switches.cc b/chromium/base/base_switches.cc
index 9554233b41c..870d55c606f 100644
--- a/chromium/base/base_switches.cc
+++ b/chromium/base/base_switches.cc
@@ -112,6 +112,14 @@ const char kProfilingFile[] = "profiling-file";
const char kDisableUsbKeyboardDetect[] = "disable-usb-keyboard-detect";
#endif
+#if defined(OS_LINUX) && !defined(OS_CHROMEOS)
+// The /dev/shm partition is too small in certain VM environments, causing
+// Chrome to fail or crash (see http://crbug.com/715363). Use this flag to
+// work-around this issue (a temporary directory will always be used to create
+// anonymous shared memory files).
+const char kDisableDevShmUsage[] = "disable-dev-shm-usage";
+#endif
+
#if defined(OS_POSIX)
// Used for turning on Breakpad crash reporting in a debug environment where
// crash reporting is typically compiled but disabled.
diff --git a/chromium/base/base_switches.h b/chromium/base/base_switches.h
index 56be3060f68..55c8ed23b85 100644
--- a/chromium/base/base_switches.h
+++ b/chromium/base/base_switches.h
@@ -37,6 +37,10 @@ extern const char kWaitForDebugger[];
extern const char kDisableUsbKeyboardDetect[];
#endif
+#if defined(OS_LINUX) && !defined(OS_CHROMEOS)
+extern const char kDisableDevShmUsage[];
+#endif
+
#if defined(OS_POSIX)
extern const char kEnableCrashReporterForTesting[];
#endif
diff --git a/chromium/base/bind.h b/chromium/base/bind.h
index 944b7d8ad72..b8e3d2a4f89 100644
--- a/chromium/base/bind.h
+++ b/chromium/base/bind.h
@@ -239,8 +239,8 @@ BindRepeating(Functor&& functor, Args&&... args) {
template <typename Functor, typename... Args>
inline Callback<MakeUnboundRunType<Functor, Args...>>
Bind(Functor&& functor, Args&&... args) {
- return BindRepeating(std::forward<Functor>(functor),
- std::forward<Args>(args)...);
+ return base::BindRepeating(std::forward<Functor>(functor),
+ std::forward<Args>(args)...);
}
// Special cases for binding to a base::Callback without extra bound arguments.
diff --git a/chromium/base/bind_internal.h b/chromium/base/bind_internal.h
index 172ce5315d7..9d5720fa489 100644
--- a/chromium/base/bind_internal.h
+++ b/chromium/base/bind_internal.h
@@ -50,7 +50,12 @@ template <typename Callable,
struct ExtractCallableRunTypeImpl;
template <typename Callable, typename R, typename... Args>
-struct ExtractCallableRunTypeImpl<Callable, R(Callable::*)(Args...) const> {
+struct ExtractCallableRunTypeImpl<Callable, R (Callable::*)(Args...)> {
+ using Type = R(Args...);
+};
+
+template <typename Callable, typename R, typename... Args>
+struct ExtractCallableRunTypeImpl<Callable, R (Callable::*)(Args...) const> {
using Type = R(Args...);
};
@@ -64,27 +69,23 @@ template <typename Callable>
using ExtractCallableRunType =
typename ExtractCallableRunTypeImpl<Callable>::Type;
-// IsConvertibleToRunType<Functor> is std::true_type if |Functor| has operator()
-// and convertible to the corresponding function pointer. Otherwise, it's
-// std::false_type.
+// IsCallableObject<Functor> is std::true_type if |Functor| has operator().
+// Otherwise, it's std::false_type.
// Example:
-// IsConvertibleToRunType<void(*)()>::value is false.
+// IsCallableObject<void(*)()>::value is false.
//
// struct Foo {};
-// IsConvertibleToRunType<void(Foo::*)()>::value is false.
-//
-// auto f = []() {};
-// IsConvertibleToRunType<decltype(f)>::value is true.
+// IsCallableObject<void(Foo::*)()>::value is false.
//
// int i = 0;
-// auto g = [i]() {};
-// IsConvertibleToRunType<decltype(g)>::value is false.
+// auto f = [i]() {};
+// IsCallableObject<decltype(f)>::value is false.
template <typename Functor, typename SFINAE = void>
-struct IsConvertibleToRunType : std::false_type {};
+struct IsCallableObject : std::false_type {};
template <typename Callable>
-struct IsConvertibleToRunType<Callable, void_t<decltype(&Callable::operator())>>
- : std::is_convertible<Callable, ExtractCallableRunType<Callable>*> {};
+struct IsCallableObject<Callable, void_t<decltype(&Callable::operator())>>
+ : std::true_type {};
// HasRefCountedTypeAsRawPtr selects true_type when any of the |Args| is a raw
// pointer to a RefCounted type.
@@ -119,21 +120,37 @@ struct ForceVoidReturn<R(Args...)> {
template <typename Functor, typename SFINAE>
struct FunctorTraits;
-// For a callable type that is convertible to the corresponding function type.
+// For empty callable types.
// This specialization is intended to allow binding captureless lambdas by
-// base::Bind(), based on the fact that captureless lambdas can be convertible
-// to the function type while capturing lambdas can't.
+// base::Bind(), based on the fact that captureless lambdas are empty while
+// capturing lambdas are not. This also allows any functors as far as it's an
+// empty class.
+// Example:
+//
+// // Captureless lambdas are allowed.
+// []() {return 42;};
+//
+// // Capturing lambdas are *not* allowed.
+// int x;
+// [x]() {return x;};
+//
+// // Any empty class with operator() is allowed.
+// struct Foo {
+// void operator()() const {}
+// // No non-static member variable and no virtual functions.
+// };
template <typename Functor>
struct FunctorTraits<Functor,
- std::enable_if_t<IsConvertibleToRunType<Functor>::value>> {
+ std::enable_if_t<IsCallableObject<Functor>::value &&
+ std::is_empty<Functor>::value>> {
using RunType = ExtractCallableRunType<Functor>;
static constexpr bool is_method = false;
static constexpr bool is_nullable = false;
- template <typename... RunArgs>
- static ExtractReturnType<RunType>
- Invoke(const Functor& functor, RunArgs&&... args) {
- return functor(std::forward<RunArgs>(args)...);
+ template <typename RunFunctor, typename... RunArgs>
+ static ExtractReturnType<RunType> Invoke(RunFunctor&& functor,
+ RunArgs&&... args) {
+ return std::forward<RunFunctor>(functor)(std::forward<RunArgs>(args)...);
}
};
@@ -437,8 +454,7 @@ struct BindState final : BindStateBase {
: BindState(IsCancellable{},
invoke_func,
std::forward<ForwardFunctor>(functor),
- std::forward<ForwardBoundArgs>(bound_args)...) {
- }
+ std::forward<ForwardBoundArgs>(bound_args)...) {}
Functor functor_;
std::tuple<BoundArgs...> bound_args_;
@@ -468,7 +484,7 @@ struct BindState final : BindStateBase {
DCHECK(!IsNull(functor_));
}
- ~BindState() {}
+ ~BindState() = default;
static void Destroy(const BindStateBase* self) {
delete static_cast<const BindState*>(self);
diff --git a/chromium/base/bind_unittest.cc b/chromium/base/bind_unittest.cc
index 7deba473e97..046f03e44da 100644
--- a/chromium/base/bind_unittest.cc
+++ b/chromium/base/bind_unittest.cc
@@ -13,6 +13,7 @@
#include "base/memory/ptr_util.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
+#include "base/test/bind_test_util.h"
#include "base/test/gtest_util.h"
#include "build/build_config.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -1228,17 +1229,17 @@ TEST_F(BindTest, ArgumentCopiesAndMoves) {
}
TEST_F(BindTest, CapturelessLambda) {
- EXPECT_FALSE(internal::IsConvertibleToRunType<void>::value);
- EXPECT_FALSE(internal::IsConvertibleToRunType<int>::value);
- EXPECT_FALSE(internal::IsConvertibleToRunType<void(*)()>::value);
- EXPECT_FALSE(internal::IsConvertibleToRunType<void(NoRef::*)()>::value);
+ EXPECT_FALSE(internal::IsCallableObject<void>::value);
+ EXPECT_FALSE(internal::IsCallableObject<int>::value);
+ EXPECT_FALSE(internal::IsCallableObject<void (*)()>::value);
+ EXPECT_FALSE(internal::IsCallableObject<void (NoRef::*)()>::value);
auto f = []() {};
- EXPECT_TRUE(internal::IsConvertibleToRunType<decltype(f)>::value);
+ EXPECT_TRUE(internal::IsCallableObject<decltype(f)>::value);
int i = 0;
auto g = [i]() { (void)i; };
- EXPECT_FALSE(internal::IsConvertibleToRunType<decltype(g)>::value);
+ EXPECT_TRUE(internal::IsCallableObject<decltype(g)>::value);
auto h = [](int, double) { return 'k'; };
EXPECT_TRUE((std::is_same<
@@ -1257,6 +1258,36 @@ TEST_F(BindTest, CapturelessLambda) {
EXPECT_EQ(42, x);
}
+TEST_F(BindTest, EmptyFunctor) {
+ struct NonEmptyFunctor {
+ int operator()() const { return x; }
+ int x = 42;
+ };
+
+ struct EmptyFunctor {
+ int operator()() { return 42; }
+ };
+
+ struct EmptyFunctorConst {
+ int operator()() const { return 42; }
+ };
+
+ EXPECT_TRUE(internal::IsCallableObject<NonEmptyFunctor>::value);
+ EXPECT_TRUE(internal::IsCallableObject<EmptyFunctor>::value);
+ EXPECT_TRUE(internal::IsCallableObject<EmptyFunctorConst>::value);
+ EXPECT_EQ(42, BindOnce(EmptyFunctor()).Run());
+ EXPECT_EQ(42, BindOnce(EmptyFunctorConst()).Run());
+ EXPECT_EQ(42, BindRepeating(EmptyFunctorConst()).Run());
+}
+
+TEST_F(BindTest, CapturingLambdaForTesting) {
+ int x = 6;
+ EXPECT_EQ(42, BindLambdaForTesting([=](int y) { return x * y; }).Run(7));
+
+ auto f = [x](std::unique_ptr<int> y) { return x * *y; };
+ EXPECT_EQ(42, BindLambdaForTesting(f).Run(std::make_unique<int>(7)));
+}
+
TEST_F(BindTest, Cancellation) {
EXPECT_CALL(no_ref_, VoidMethodWithIntArg(_)).Times(2);
diff --git a/chromium/base/bind_unittest.nc b/chromium/base/bind_unittest.nc
index 8e26c1c8d97..e4ac60925eb 100644
--- a/chromium/base/bind_unittest.nc
+++ b/chromium/base/bind_unittest.nc
@@ -11,6 +11,7 @@
#include "base/callback.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
+#include "base/test/bind_test_util.h"
namespace base {
@@ -73,6 +74,11 @@ void VoidPolymorphic1(T t) {
void TakesMoveOnly(std::unique_ptr<int>) {
}
+struct NonEmptyFunctor {
+ int x;
+ void operator()() const {}
+};
+
// TODO(hans): Remove .* and update the static_assert expectations once we roll
// past Clang r313315. https://crbug.com/765692.
@@ -304,6 +310,11 @@ void WontCompile() {
Bind(&TakesMoveOnly, std::move(x));
}
+#elif defined(NCTEST_BIND_NON_EMPTY_FUNCTOR) // [r"fatal error: implicit instantiation of undefined template 'base::internal::FunctorTraits<base::NonEmptyFunctor, void>'"]
+
+void WontCompile() {
+ Bind(NonEmptyFunctor());
+}
#endif
diff --git a/chromium/base/callback_internal.h b/chromium/base/callback_internal.h
index 616abddd3e5..ab2182e01f6 100644
--- a/chromium/base/callback_internal.h
+++ b/chromium/base/callback_internal.h
@@ -161,7 +161,7 @@ class BASE_EXPORT CallbackBaseCopyable : public CallbackBase {
protected:
explicit CallbackBaseCopyable(BindStateBase* bind_state)
: CallbackBase(bind_state) {}
- ~CallbackBaseCopyable() {}
+ ~CallbackBaseCopyable() = default;
};
} // namespace internal
diff --git a/chromium/base/callback_list.h b/chromium/base/callback_list.h
index 7ab79dd8e1f..092aa2af160 100644
--- a/chromium/base/callback_list.h
+++ b/chromium/base/callback_list.h
@@ -208,7 +208,7 @@ class CallbackList<void(Args...)>
public:
typedef Callback<void(Args...)> CallbackType;
- CallbackList() {}
+ CallbackList() = default;
template <typename... RunArgs>
void Notify(RunArgs&&... args) {
diff --git a/chromium/base/command_line.h b/chromium/base/command_line.h
index 31b22d6b9a9..aef9badf824 100644
--- a/chromium/base/command_line.h
+++ b/chromium/base/command_line.h
@@ -204,7 +204,7 @@ class BASE_EXPORT CommandLine {
void AppendArguments(const CommandLine& other, bool include_program);
// Insert a command before the current command.
- // Common for debuggers, like "valgrind" or "gdb --args".
+ // Common for debuggers, like "gdb --args".
void PrependWrapper(const StringType& wrapper);
#if defined(OS_WIN)
@@ -215,7 +215,7 @@ class BASE_EXPORT CommandLine {
private:
// Disallow default constructor; a program name must be explicitly specified.
- CommandLine();
+ CommandLine() = delete;
// Allow the copy constructor. A common pattern is to copy of the current
// process's command line and then add some flags to it. For example:
// CommandLine cl(*CommandLine::ForCurrentProcess());
diff --git a/chromium/base/containers/README.md b/chromium/base/containers/README.md
index 59dd93540dc..092a264c47e 100644
--- a/chromium/base/containers/README.md
+++ b/chromium/base/containers/README.md
@@ -46,7 +46,7 @@ Google naming. Be sure to use the base namespace.
* **base::small\_map** has better runtime memory usage without the poor
mutation performance of large containers that base::flat\_map has. But this
advantage is partially offset by additional code size. Prefer in cases
- where you make many objects so that the code/heap tradeoff is good.
+ where you make many objects so that the code/heap tradeoff is good.
* Use **std::map** and **std::set** if you can't decide. Even if they're not
great, they're unlikely to be bad or surprising.
@@ -136,31 +136,10 @@ http://en.cppreference.com/w/cpp/utility/functional/less_void
Example, smart pointer set:
```cpp
-// Define a custom comparator.
-struct UniquePtrComparator {
- // Mark your comparison as transparent.
- using is_transparent = int;
-
- template <typename T>
- bool operator()(const std::unique_ptr<T>& lhs,
- const std::unique_ptr<T>& rhs) const {
- return lhs < rhs;
- }
-
- template <typename T>
- bool operator()(const T* lhs, const std::unique_ptr<T>& rhs) const {
- return lhs < rhs.get();
- }
-
- template <typename T>
- bool operator()(const std::unique_ptr<T>& lhs, const T* rhs) const {
- return lhs.get() < rhs;
- }
-};
-
-// Declare a typedef.
+// Declare a type alias using base::UniquePtrComparator.
template <typename T>
-using UniquePtrSet = base::flat_set<std::unique_ptr<T>, UniquePtrComparator>;
+using UniquePtrSet = base::flat_set<std::unique_ptr<T>,
+ base::UniquePtrComparator>;
// ...
// Collect data.
diff --git a/chromium/base/containers/circular_deque.h b/chromium/base/containers/circular_deque.h
index 9883316d6ab..bf42a958448 100644
--- a/chromium/base/containers/circular_deque.h
+++ b/chromium/base/containers/circular_deque.h
@@ -106,8 +106,8 @@
// void push_front(T&&);
// void push_back(const T&);
// void push_back(T&&);
-// void emplace_front(Args&&...);
-// void emplace_back(Args&&...);
+// T& emplace_front(Args&&...);
+// T& emplace_back(Args&&...);
// void pop_front();
// void pop_back();
//
@@ -417,7 +417,7 @@ class circular_deque {
// ---------------------------------------------------------------------------
// Constructor
- circular_deque() = default;
+ constexpr circular_deque() = default;
// Constructs with |count| copies of |value| or default constructed version.
circular_deque(size_type count) { resize(count); }
@@ -835,7 +835,7 @@ class circular_deque {
void push_back(T&& value) { emplace_back(std::move(value)); }
template <class... Args>
- void emplace_front(Args&&... args) {
+ reference emplace_front(Args&&... args) {
ExpandCapacityIfNecessary(1);
if (begin_ == 0)
begin_ = buffer_.capacity() - 1;
@@ -843,10 +843,11 @@ class circular_deque {
begin_--;
IncrementGeneration();
new (&buffer_[begin_]) T(std::forward<Args>(args)...);
+ return front();
}
template <class... Args>
- void emplace_back(Args&&... args) {
+ reference emplace_back(Args&&... args) {
ExpandCapacityIfNecessary(1);
new (&buffer_[end_]) T(std::forward<Args>(args)...);
if (end_ == buffer_.capacity() - 1)
@@ -854,6 +855,7 @@ class circular_deque {
else
end_++;
IncrementGeneration();
+ return back();
}
void pop_front() {
diff --git a/chromium/base/containers/circular_deque_unittest.cc b/chromium/base/containers/circular_deque_unittest.cc
index 413b27c22d7..df960c3fc08 100644
--- a/chromium/base/containers/circular_deque_unittest.cc
+++ b/chromium/base/containers/circular_deque_unittest.cc
@@ -839,6 +839,30 @@ TEST(CircularDeque, EmplaceMoveOnly) {
EXPECT_EQ(4, q[4].data());
}
+TEST(CircularDeque, EmplaceFrontBackReturnsReference) {
+ circular_deque<int> q;
+ q.reserve(2);
+
+ int& front = q.emplace_front(1);
+ int& back = q.emplace_back(2);
+ ASSERT_EQ(2u, q.size());
+ EXPECT_EQ(1, q[0]);
+ EXPECT_EQ(2, q[1]);
+
+ EXPECT_EQ(&front, &q.front());
+ EXPECT_EQ(&back, &q.back());
+
+ front = 3;
+ back = 4;
+
+ ASSERT_EQ(2u, q.size());
+ EXPECT_EQ(3, q[0]);
+ EXPECT_EQ(4, q[1]);
+
+ EXPECT_EQ(&front, &q.front());
+ EXPECT_EQ(&back, &q.back());
+}
+
/*
This test should assert in a debug build. It tries to dereference an iterator
after mutating the container. Uncomment to double-check that this works.
diff --git a/chromium/base/containers/mru_cache.h b/chromium/base/containers/mru_cache.h
index d20ef132b52..2a858e1d302 100644
--- a/chromium/base/containers/mru_cache.h
+++ b/chromium/base/containers/mru_cache.h
@@ -74,7 +74,7 @@ class MRUCacheBase {
// can pass NO_AUTO_EVICT to not restrict the cache size.
explicit MRUCacheBase(size_type max_size) : max_size_(max_size) {}
- virtual ~MRUCacheBase() {}
+ virtual ~MRUCacheBase() = default;
size_type max_size() const { return max_size_; }
@@ -218,7 +218,7 @@ class MRUCache : public MRUCacheBase<KeyType, PayloadType, CompareType> {
// See MRUCacheBase, noting the possibility of using NO_AUTO_EVICT.
explicit MRUCache(typename ParentType::size_type max_size)
: ParentType(max_size) {}
- virtual ~MRUCache() {}
+ virtual ~MRUCache() = default;
private:
DISALLOW_COPY_AND_ASSIGN(MRUCache);
@@ -245,7 +245,7 @@ class HashingMRUCache
// See MRUCacheBase, noting the possibility of using NO_AUTO_EVICT.
explicit HashingMRUCache(typename ParentType::size_type max_size)
: ParentType(max_size) {}
- virtual ~HashingMRUCache() {}
+ virtual ~HashingMRUCache() = default;
private:
DISALLOW_COPY_AND_ASSIGN(HashingMRUCache);
diff --git a/chromium/base/containers/unique_ptr_comparator.h b/chromium/base/containers/unique_ptr_comparator.h
new file mode 100644
index 00000000000..ceb1aa846db
--- /dev/null
+++ b/chromium/base/containers/unique_ptr_comparator.h
@@ -0,0 +1,49 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_CONTAINERS_UNIQUE_PTR_COMPARATOR_H_
+#define BASE_CONTAINERS_UNIQUE_PTR_COMPARATOR_H_
+
+#include <memory>
+
+namespace base {
+
+// This transparent comparator allows to lookup by raw pointer in
+// a container of unique pointers. This functionality is based on C++14
+// extensions to std::set/std::map interface, and can also be used
+// with base::flat_set/base::flat_map.
+//
+// Example usage:
+// Foo* foo = ...
+// std::set<std::unique_ptr<Foo>, base::UniquePtrComparator> set;
+// set.insert(std::unique_ptr<Foo>(foo));
+// ...
+// auto it = set.find(foo);
+// EXPECT_EQ(foo, it->get());
+//
+// You can find more information about transparent comparisons here:
+// http://en.cppreference.com/w/cpp/utility/functional/less_void
+struct UniquePtrComparator {
+ using is_transparent = int;
+
+ template <typename T>
+ bool operator()(const std::unique_ptr<T>& lhs,
+ const std::unique_ptr<T>& rhs) const {
+ return lhs < rhs;
+ }
+
+ template <typename T>
+ bool operator()(const T* lhs, const std::unique_ptr<T>& rhs) const {
+ return lhs < rhs.get();
+ }
+
+ template <typename T>
+ bool operator()(const std::unique_ptr<T>& lhs, const T* rhs) const {
+ return lhs.get() < rhs;
+ }
+};
+
+} // namespace base
+
+#endif // BASE_CONTAINERS_UNIQUE_PTR_COMPARATOR_H_
diff --git a/chromium/base/containers/unique_ptr_comparator_unittest.cc b/chromium/base/containers/unique_ptr_comparator_unittest.cc
new file mode 100644
index 00000000000..e019419119b
--- /dev/null
+++ b/chromium/base/containers/unique_ptr_comparator_unittest.cc
@@ -0,0 +1,66 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/containers/unique_ptr_comparator.h"
+#include <memory>
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+class Foo {
+ public:
+ Foo() { instance_count++; }
+ ~Foo() { instance_count--; }
+ static int instance_count;
+};
+
+int Foo::instance_count = 0;
+
+TEST(UniquePtrComparatorTest, Basic) {
+ std::set<std::unique_ptr<Foo>, UniquePtrComparator> set;
+ Foo* foo1 = new Foo();
+ Foo* foo2 = new Foo();
+ Foo* foo3 = new Foo();
+ EXPECT_EQ(3, Foo::instance_count);
+
+ set.emplace(foo1);
+ set.emplace(foo2);
+
+ auto it1 = set.find(foo1);
+ EXPECT_TRUE(it1 != set.end());
+ EXPECT_EQ(foo1, it1->get());
+
+ {
+ auto it2 = set.find(foo2);
+ EXPECT_TRUE(it2 != set.end());
+ EXPECT_EQ(foo2, it2->get());
+ }
+
+ EXPECT_TRUE(set.find(foo3) == set.end());
+
+ set.erase(it1);
+ EXPECT_EQ(2, Foo::instance_count);
+
+ EXPECT_TRUE(set.find(foo1) == set.end());
+
+ {
+ auto it2 = set.find(foo2);
+ EXPECT_TRUE(it2 != set.end());
+ EXPECT_EQ(foo2, it2->get());
+ }
+
+ set.clear();
+ EXPECT_EQ(1, Foo::instance_count);
+
+ EXPECT_TRUE(set.find(foo1) == set.end());
+ EXPECT_TRUE(set.find(foo2) == set.end());
+ EXPECT_TRUE(set.find(foo3) == set.end());
+
+ delete foo3;
+ EXPECT_EQ(0, Foo::instance_count);
+}
+
+} // namespace
+} // namespace base
diff --git a/chromium/base/containers/vector_buffer.h b/chromium/base/containers/vector_buffer.h
index 60a5ee9e8bf..a72c1ed95e8 100644
--- a/chromium/base/containers/vector_buffer.h
+++ b/chromium/base/containers/vector_buffer.h
@@ -37,7 +37,7 @@ namespace internal {
template <typename T>
class VectorBuffer {
public:
- VectorBuffer() {}
+ constexpr VectorBuffer() = default;
#if defined(__clang__) && !defined(__native_client__)
// This constructor converts an uninitialized void* to a T* which triggers
diff --git a/chromium/base/debug/alias.h b/chromium/base/debug/alias.h
index d38f12b66da..128fdaa05d6 100644
--- a/chromium/base/debug/alias.h
+++ b/chromium/base/debug/alias.h
@@ -6,6 +6,7 @@
#define BASE_DEBUG_ALIAS_H_
#include "base/base_export.h"
+#include "base/strings/string_util.h"
namespace base {
namespace debug {
@@ -24,14 +25,19 @@ namespace debug {
// copy the object or its fields to local variables. Example usage:
// int last_error = err_;
// base::debug::Alias(&last_error);
-// char name_copy[16];
-// strncpy(name_copy, p->name, sizeof(name_copy) - 1);
-// name_copy[sizeof(name_copy) - 1] = '\0';
-// base::debug::Alias(name_copy);
+// DEBUG_ALIAS_FOR_CSTR(name_copy, p->name, 16);
// CHECK(false);
void BASE_EXPORT Alias(const void* var);
} // namespace debug
} // namespace base
+// Convenience macro that copies the null-terminated string from |c_str| into a
+// stack-allocated char array named |var_name| that holds up to |char_count|
+// characters and should be preserved in memory dumps.
+#define DEBUG_ALIAS_FOR_CSTR(var_name, c_str, char_count) \
+ char var_name[char_count]; \
+ ::base::strlcpy(var_name, (c_str), arraysize(var_name)); \
+ ::base::debug::Alias(var_name);
+
#endif // BASE_DEBUG_ALIAS_H_
diff --git a/chromium/base/debug/alias_unittest.cc b/chromium/base/debug/alias_unittest.cc
new file mode 100644
index 00000000000..66682f1f028
--- /dev/null
+++ b/chromium/base/debug/alias_unittest.cc
@@ -0,0 +1,28 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <memory>
+#include <string>
+
+#include "base/debug/alias.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+TEST(DebugAlias, Test) {
+ std::unique_ptr<std::string> input =
+ std::make_unique<std::string>("string contents");
+
+ // Verify the contents get copied + the new local variable has the right type.
+ DEBUG_ALIAS_FOR_CSTR(copy1, input->c_str(), 100 /* > input->size() */);
+ static_assert(sizeof(copy1) == 100,
+ "Verification that copy1 has expected size");
+ EXPECT_STREQ("string contents", copy1);
+
+ // Verify that the copy is properly null-terminated even when it is smaller
+ // than the input string.
+ DEBUG_ALIAS_FOR_CSTR(copy2, input->c_str(), 3 /* < input->size() */);
+ static_assert(sizeof(copy2) == 3,
+ "Verification that copy2 has expected size");
+ EXPECT_STREQ("st", copy2);
+ EXPECT_EQ('\0', copy2[2]);
+}
diff --git a/chromium/base/debug/crash_logging.cc b/chromium/base/debug/crash_logging.cc
index 4abbada0080..1dabb6b9639 100644
--- a/chromium/base/debug/crash_logging.cc
+++ b/chromium/base/debug/crash_logging.cc
@@ -4,18 +4,6 @@
#include "base/debug/crash_logging.h"
-#include <cmath>
-#include <unordered_map>
-
-#include "base/debug/stack_trace.h"
-#include "base/format_macros.h"
-#include "base/logging.h"
-#include "base/strings/string_util.h"
-#include "base/strings/stringprintf.h"
-
-// Undef the macro so the preprocessor doesn't garble the constructor.
-#undef ScopedCrashKey
-
namespace base {
namespace debug {
@@ -23,32 +11,6 @@ namespace {
CrashKeyImplementation* g_crash_key_impl = nullptr;
-// Global map of crash key names to registration entries.
-typedef std::unordered_map<base::StringPiece, CrashKey, base::StringPieceHash>
- CrashKeyMap;
-CrashKeyMap* g_crash_keys_ = nullptr;
-
-// The maximum length of a single chunk.
-size_t g_chunk_max_length_ = 0;
-
-// String used to format chunked key names.
-const char kChunkFormatString[] = "%s-%" PRIuS;
-
-// The functions that are called to actually set the key-value pairs in the
-// crash reportng system.
-SetCrashKeyValueFuncT g_set_key_func_ = nullptr;
-ClearCrashKeyValueFuncT g_clear_key_func_ = nullptr;
-
-// For a given |length|, computes the number of chunks a value of that size
-// will occupy.
-size_t NumChunksForLength(size_t length) {
- // Compute (length / g_chunk_max_length_), rounded up.
- return (length + g_chunk_max_length_ - 1) / g_chunk_max_length_;
-}
-
-// The longest max_length allowed by the system.
-const size_t kLargestValueAllowed = 2048;
-
} // namespace
CrashKeyString* AllocateCrashKeyString(const char name[],
@@ -73,163 +35,19 @@ void ClearCrashKeyString(CrashKeyString* crash_key) {
g_crash_key_impl->Clear(crash_key);
}
-void SetCrashKeyImplementation(std::unique_ptr<CrashKeyImplementation> impl) {
- delete g_crash_key_impl;
- g_crash_key_impl = impl.release();
-}
-
-void SetCrashKeyValue(const base::StringPiece& key,
- const base::StringPiece& value) {
- if (!g_set_key_func_ || !g_crash_keys_)
- return;
-
- const CrashKey* crash_key = LookupCrashKey(key);
-
- DCHECK(crash_key) << "All crash keys must be registered before use "
- << "(key = " << key << ")";
-
- // Handle the un-chunked case.
- if (!crash_key || crash_key->max_length <= g_chunk_max_length_) {
- g_set_key_func_(key, value);
- return;
- }
-
- // Unset the unused chunks.
- std::vector<std::string> chunks =
- ChunkCrashKeyValue(*crash_key, value, g_chunk_max_length_);
- for (size_t i = chunks.size();
- i < NumChunksForLength(crash_key->max_length);
- ++i) {
- g_clear_key_func_(base::StringPrintf(kChunkFormatString, key.data(), i+1));
- }
-
- // Set the chunked keys.
- for (size_t i = 0; i < chunks.size(); ++i) {
- g_set_key_func_(base::StringPrintf(kChunkFormatString, key.data(), i+1),
- chunks[i]);
- }
-}
-
-void ClearCrashKey(const base::StringPiece& key) {
- if (!g_clear_key_func_ || !g_crash_keys_)
- return;
-
- const CrashKey* crash_key = LookupCrashKey(key);
-
- // Handle the un-chunked case.
- if (!crash_key || crash_key->max_length <= g_chunk_max_length_) {
- g_clear_key_func_(key);
- return;
- }
-
- for (size_t i = 0; i < NumChunksForLength(crash_key->max_length); ++i) {
- g_clear_key_func_(base::StringPrintf(kChunkFormatString, key.data(), i+1));
- }
-}
-
-void SetCrashKeyToStackTrace(const base::StringPiece& key,
- const StackTrace& trace) {
- size_t count = 0;
- const void* const* addresses = trace.Addresses(&count);
- SetCrashKeyFromAddresses(key, addresses, count);
-}
-
-void SetCrashKeyFromAddresses(const base::StringPiece& key,
- const void* const* addresses,
- size_t count) {
- std::string value = "<null>";
- if (addresses && count) {
- const size_t kBreakpadValueMax = 255;
-
- std::vector<std::string> hex_backtrace;
- size_t length = 0;
-
- for (size_t i = 0; i < count; ++i) {
- std::string s = base::StringPrintf("%p", addresses[i]);
- length += s.length() + 1;
- if (length > kBreakpadValueMax)
- break;
- hex_backtrace.push_back(s);
- }
-
- value = base::JoinString(hex_backtrace, " ");
-
- // Warn if this exceeds the breakpad limits.
- DCHECK_LE(value.length(), kBreakpadValueMax);
- }
-
- SetCrashKeyValue(key, value);
-}
-
-ScopedCrashKey::ScopedCrashKey(const base::StringPiece& key,
- const base::StringPiece& value)
- : key_(key.as_string()) {
- SetCrashKeyValue(key, value);
-}
-
-ScopedCrashKey::~ScopedCrashKey() {
- ClearCrashKey(key_);
+ScopedCrashKeyString::ScopedCrashKeyString(CrashKeyString* crash_key,
+ base::StringPiece value)
+ : crash_key_(crash_key) {
+ SetCrashKeyString(crash_key_, value);
}
-size_t InitCrashKeys(const CrashKey* const keys, size_t count,
- size_t chunk_max_length) {
- DCHECK(!g_crash_keys_) << "Crash logging may only be initialized once";
- if (!keys) {
- delete g_crash_keys_;
- g_crash_keys_ = nullptr;
- return 0;
- }
-
- g_crash_keys_ = new CrashKeyMap;
- g_chunk_max_length_ = chunk_max_length;
-
- size_t total_keys = 0;
- for (size_t i = 0; i < count; ++i) {
- g_crash_keys_->insert(std::make_pair(keys[i].key_name, keys[i]));
- total_keys += NumChunksForLength(keys[i].max_length);
- DCHECK_LT(keys[i].max_length, kLargestValueAllowed);
- }
- DCHECK_EQ(count, g_crash_keys_->size())
- << "Duplicate crash keys were registered";
-
- return total_keys;
-}
-
-const CrashKey* LookupCrashKey(const base::StringPiece& key) {
- if (!g_crash_keys_)
- return nullptr;
- CrashKeyMap::const_iterator it = g_crash_keys_->find(key.as_string());
- if (it == g_crash_keys_->end())
- return nullptr;
- return &(it->second);
+ScopedCrashKeyString::~ScopedCrashKeyString() {
+ ClearCrashKeyString(crash_key_);
}
-void SetCrashKeyReportingFunctions(
- SetCrashKeyValueFuncT set_key_func,
- ClearCrashKeyValueFuncT clear_key_func) {
- g_set_key_func_ = set_key_func;
- g_clear_key_func_ = clear_key_func;
-}
-
-std::vector<std::string> ChunkCrashKeyValue(const CrashKey& crash_key,
- const base::StringPiece& value,
- size_t chunk_max_length) {
- std::string value_string = value.substr(0, crash_key.max_length).as_string();
- std::vector<std::string> chunks;
- for (size_t offset = 0; offset < value_string.length(); ) {
- std::string chunk = value_string.substr(offset, chunk_max_length);
- chunks.push_back(chunk);
- offset += chunk.length();
- }
- return chunks;
-}
-
-void ResetCrashLoggingForTesting() {
- delete g_crash_keys_;
- g_crash_keys_ = nullptr;
- g_chunk_max_length_ = 0;
- g_set_key_func_ = nullptr;
- g_clear_key_func_ = nullptr;
+void SetCrashKeyImplementation(std::unique_ptr<CrashKeyImplementation> impl) {
+ delete g_crash_key_impl;
+ g_crash_key_impl = impl.release();
}
} // namespace debug
diff --git a/chromium/base/debug/crash_logging.h b/chromium/base/debug/crash_logging.h
index 1377e18c497..9c6cd758daa 100644
--- a/chromium/base/debug/crash_logging.h
+++ b/chromium/base/debug/crash_logging.h
@@ -8,9 +8,6 @@
#include <stddef.h>
#include <memory>
-#include <string>
-#include <type_traits>
-#include <vector>
#include "base/base_export.h"
#include "base/macros.h"
@@ -59,6 +56,19 @@ BASE_EXPORT void SetCrashKeyString(CrashKeyString* crash_key,
// null.
BASE_EXPORT void ClearCrashKeyString(CrashKeyString* crash_key);
+// A scoper that sets the specified key to value for the lifetime of the
+// object, and clears it on destruction.
+class BASE_EXPORT ScopedCrashKeyString {
+ public:
+ ScopedCrashKeyString(CrashKeyString* crash_key, base::StringPiece value);
+ ~ScopedCrashKeyString();
+
+ private:
+ CrashKeyString* const crash_key_;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedCrashKeyString);
+};
+
////////////////////////////////////////////////////////////////////////////////
// The following declarations are used to initialize the crash key system
// in //base by providing implementations for the above functions.
@@ -68,7 +78,7 @@ BASE_EXPORT void ClearCrashKeyString(CrashKeyString* crash_key);
// set using the function below.
class CrashKeyImplementation {
public:
- virtual ~CrashKeyImplementation() {}
+ virtual ~CrashKeyImplementation() = default;
virtual CrashKeyString* Allocate(const char name[], CrashKeySize size) = 0;
virtual void Set(CrashKeyString* crash_key, base::StringPiece value) = 0;
@@ -88,121 +98,6 @@ struct CrashKeyString {
const CrashKeySize size;
};
-// The API below is deprecated.
-////////////////////////////////////////////////////////////////////////////////
-
-class StackTrace;
-
-// Sets or clears a specific key-value pair from the crash metadata. Keys and
-// values are terminated at the null byte.
-BASE_EXPORT void SetCrashKeyValue(const base::StringPiece& key,
- const base::StringPiece& value);
-BASE_EXPORT void ClearCrashKey(const base::StringPiece& key);
-
-// Records the given StackTrace into a crash key.
-BASE_EXPORT void SetCrashKeyToStackTrace(const base::StringPiece& key,
- const StackTrace& trace);
-
-// Formats |count| instruction pointers from |addresses| using %p and
-// sets the resulting string as a value for crash key |key|. A maximum of 23
-// items will be encoded, since breakpad limits values to 255 bytes.
-BASE_EXPORT void SetCrashKeyFromAddresses(const base::StringPiece& key,
- const void* const* addresses,
- size_t count);
-
-// A scoper that sets the specified key to value for the lifetime of the
-// object, and clears it on destruction.
-class BASE_EXPORT ScopedCrashKey {
- public:
- ScopedCrashKey(const base::StringPiece& key, const base::StringPiece& value);
- ~ScopedCrashKey();
-
- // Helper to force a static_assert when instantiating a ScopedCrashKey
- // temporary without a name. The usual idiom is to just #define a macro that
- // static_asserts with the message; however, that doesn't work well when the
- // type is in a namespace.
- //
- // Instead, we use a templated helper to trigger the static_assert, observing
- // two rules:
- // - The static_assert needs to be in a normally uninstantiated template;
- // otherwise, it will fail to compile =)
- // - Similarly, the static_assert must be dependent on the template argument,
- // to prevent it from being evaluated until the template is instantiated.
- //
- // To prevent this constructor from being accidentally invoked, it takes a
- // special enum as an argument.
-
- // Finally, note that this can't just be a template function that takes only
- // one parameter, because this ends up triggering the vexing parse issue.
- enum ScopedCrashKeyNeedsNameTag {
- KEY_NEEDS_NAME,
- };
-
- template <typename... Args>
- explicit ScopedCrashKey(ScopedCrashKeyNeedsNameTag, const Args&...) {
- constexpr bool always_false = sizeof...(Args) == 0 && sizeof...(Args) != 0;
- static_assert(
- always_false,
- "scoped crash key objects should not be unnamed temporaries.");
- }
-
- private:
- std::string key_;
-
- DISALLOW_COPY_AND_ASSIGN(ScopedCrashKey);
-};
-
-// Disallow an instantation of ScopedCrashKey without a name, since this results
-// in a temporary that is immediately destroyed. Doing so will trigger the
-// static_assert in the templated constructor helper in ScopedCrashKey.
-#define ScopedCrashKey(...) \
- ScopedCrashKey(base::debug::ScopedCrashKey::KEY_NEEDS_NAME, __VA_ARGS__)
-
-// Before setting values for a key, all the keys must be registered.
-struct BASE_EXPORT CrashKey {
- // The name of the crash key, used in the above functions.
- const char* key_name;
-
- // The maximum length for a value. If the value is longer than this, it will
- // be truncated. If the value is larger than the |chunk_max_length| passed to
- // InitCrashKeys() but less than this value, it will be split into multiple
- // numbered chunks.
- size_t max_length;
-};
-
-// Before the crash key logging mechanism can be used, all crash keys must be
-// registered with this function. The function returns the amount of space
-// the crash reporting implementation should allocate space for the registered
-// crash keys. |chunk_max_length| is the maximum size that a value in a single
-// chunk can be.
-BASE_EXPORT size_t InitCrashKeys(const CrashKey* const keys, size_t count,
- size_t chunk_max_length);
-
-// Returns the corresponding crash key object or NULL for a given key.
-BASE_EXPORT const CrashKey* LookupCrashKey(const base::StringPiece& key);
-
-// In the platform crash reporting implementation, these functions set and
-// clear the NUL-terminated key-value pairs.
-typedef void (*SetCrashKeyValueFuncT)(const base::StringPiece&,
- const base::StringPiece&);
-typedef void (*ClearCrashKeyValueFuncT)(const base::StringPiece&);
-
-// Sets the function pointers that are used to integrate with the platform-
-// specific crash reporting libraries.
-BASE_EXPORT void SetCrashKeyReportingFunctions(
- SetCrashKeyValueFuncT set_key_func,
- ClearCrashKeyValueFuncT clear_key_func);
-
-// Helper function that breaks up a value according to the parameters
-// specified by the crash key object.
-BASE_EXPORT std::vector<std::string> ChunkCrashKeyValue(
- const CrashKey& crash_key,
- const base::StringPiece& value,
- size_t chunk_max_length);
-
-// Resets the crash key system so it can be reinitialized. For testing only.
-BASE_EXPORT void ResetCrashLoggingForTesting();
-
} // namespace debug
} // namespace base
diff --git a/chromium/base/debug/crash_logging_unittest.cc b/chromium/base/debug/crash_logging_unittest.cc
index d877187be3d..c10d36e3684 100644
--- a/chromium/base/debug/crash_logging_unittest.cc
+++ b/chromium/base/debug/crash_logging_unittest.cc
@@ -4,187 +4,9 @@
#include "base/debug/crash_logging.h"
-#include <stddef.h>
-
-#include <map>
-#include <string>
-
-#include "base/bind.h"
-#include "base/macros.h"
#include "testing/gtest/include/gtest/gtest.h"
-namespace {
-
-std::map<std::string, std::string>* key_values_ = nullptr;
-
-} // namespace
-
-class CrashLoggingTest : public testing::Test {
- public:
- void SetUp() override {
- key_values_ = new std::map<std::string, std::string>;
- base::debug::SetCrashKeyReportingFunctions(
- &CrashLoggingTest::SetKeyValue,
- &CrashLoggingTest::ClearKeyValue);
- }
-
- void TearDown() override {
- base::debug::ResetCrashLoggingForTesting();
-
- delete key_values_;
- key_values_ = nullptr;
- }
-
- private:
- static void SetKeyValue(const base::StringPiece& key,
- const base::StringPiece& value) {
- (*key_values_)[key.as_string()] = value.as_string();
- }
-
- static void ClearKeyValue(const base::StringPiece& key) {
- key_values_->erase(key.as_string());
- }
-};
-
-TEST_F(CrashLoggingTest, SetClearSingle) {
- const char kTestKey[] = "test-key";
- base::debug::CrashKey keys[] = { { kTestKey, 255 } };
- base::debug::InitCrashKeys(keys, arraysize(keys), 255);
-
- base::debug::SetCrashKeyValue(kTestKey, "value");
- EXPECT_EQ("value", (*key_values_)[kTestKey]);
-
- base::debug::ClearCrashKey(kTestKey);
- EXPECT_TRUE(key_values_->end() == key_values_->find(kTestKey));
-}
-
-TEST_F(CrashLoggingTest, SetChunked) {
- const char kTestKey[] = "chunky";
- const char kChunk1[] = "chunky-1";
- const char kChunk2[] = "chunky-2";
- const char kChunk3[] = "chunky-3";
- base::debug::CrashKey keys[] = { { kTestKey, 15 } };
- base::debug::InitCrashKeys(keys, arraysize(keys), 5);
-
- std::map<std::string, std::string>& values = *key_values_;
-
- // Fill only the first chunk.
- base::debug::SetCrashKeyValue(kTestKey, "foo");
- EXPECT_EQ(1u, values.size());
- EXPECT_EQ("foo", values[kChunk1]);
- EXPECT_TRUE(values.end() == values.find(kChunk2));
- EXPECT_TRUE(values.end() == values.find(kChunk3));
-
- // Fill three chunks with truncation (max length is 15, this string is 20).
- base::debug::SetCrashKeyValue(kTestKey, "five four three two");
- EXPECT_EQ(3u, values.size());
- EXPECT_EQ("five ", values[kChunk1]);
- EXPECT_EQ("four ", values[kChunk2]);
- EXPECT_EQ("three", values[kChunk3]);
-
- // Clear everything.
- base::debug::ClearCrashKey(kTestKey);
- EXPECT_EQ(0u, values.size());
- EXPECT_TRUE(values.end() == values.find(kChunk1));
- EXPECT_TRUE(values.end() == values.find(kChunk2));
- EXPECT_TRUE(values.end() == values.find(kChunk3));
-
- // Refill all three chunks with truncation, then test that setting a smaller
- // value clears the third chunk.
- base::debug::SetCrashKeyValue(kTestKey, "five four three two");
- base::debug::SetCrashKeyValue(kTestKey, "allays");
- EXPECT_EQ(2u, values.size());
- EXPECT_EQ("allay", values[kChunk1]);
- EXPECT_EQ("s", values[kChunk2]);
- EXPECT_TRUE(values.end() == values.find(kChunk3));
-
- // Clear everything.
- base::debug::ClearCrashKey(kTestKey);
- EXPECT_EQ(0u, values.size());
- EXPECT_TRUE(values.end() == values.find(kChunk1));
- EXPECT_TRUE(values.end() == values.find(kChunk2));
- EXPECT_TRUE(values.end() == values.find(kChunk3));
-}
-
-TEST_F(CrashLoggingTest, ScopedCrashKey) {
- const char kTestKey[] = "test-key";
- base::debug::CrashKey keys[] = { { kTestKey, 255 } };
- base::debug::InitCrashKeys(keys, arraysize(keys), 255);
-
- EXPECT_EQ(0u, key_values_->size());
- EXPECT_TRUE(key_values_->end() == key_values_->find(kTestKey));
- {
- base::debug::ScopedCrashKey scoped_crash_key(kTestKey, "value");
- EXPECT_EQ("value", (*key_values_)[kTestKey]);
- EXPECT_EQ(1u, key_values_->size());
- }
- EXPECT_EQ(0u, key_values_->size());
- EXPECT_TRUE(key_values_->end() == key_values_->find(kTestKey));
-}
-
-TEST_F(CrashLoggingTest, InitSize) {
- base::debug::CrashKey keys[] = {
- { "chunked-3", 15 },
- { "single", 5 },
- { "chunked-6", 30 },
- };
-
- size_t num_keys = base::debug::InitCrashKeys(keys, arraysize(keys), 5);
-
- EXPECT_EQ(10u, num_keys);
-
- EXPECT_TRUE(base::debug::LookupCrashKey("chunked-3"));
- EXPECT_TRUE(base::debug::LookupCrashKey("single"));
- EXPECT_TRUE(base::debug::LookupCrashKey("chunked-6"));
- EXPECT_FALSE(base::debug::LookupCrashKey("chunked-6-4"));
-}
-
-TEST_F(CrashLoggingTest, ChunkValue) {
- using base::debug::ChunkCrashKeyValue;
-
- // Test truncation.
- base::debug::CrashKey key = { "chunky", 10 };
- std::vector<std::string> results =
- ChunkCrashKeyValue(key, "hello world", 64);
- ASSERT_EQ(1u, results.size());
- EXPECT_EQ("hello worl", results[0]);
-
- // Test short string.
- results = ChunkCrashKeyValue(key, "hi", 10);
- ASSERT_EQ(1u, results.size());
- EXPECT_EQ("hi", results[0]);
-
- // Test chunk pair.
- key.max_length = 6;
- results = ChunkCrashKeyValue(key, "foobar", 3);
- ASSERT_EQ(2u, results.size());
- EXPECT_EQ("foo", results[0]);
- EXPECT_EQ("bar", results[1]);
-
- // Test chunk pair truncation.
- results = ChunkCrashKeyValue(key, "foobared", 3);
- ASSERT_EQ(2u, results.size());
- EXPECT_EQ("foo", results[0]);
- EXPECT_EQ("bar", results[1]);
-
- // Test extra chunks.
- key.max_length = 100;
- results = ChunkCrashKeyValue(key, "hello world", 3);
- ASSERT_EQ(4u, results.size());
- EXPECT_EQ("hel", results[0]);
- EXPECT_EQ("lo ", results[1]);
- EXPECT_EQ("wor", results[2]);
- EXPECT_EQ("ld", results[3]);
-}
-
-TEST_F(CrashLoggingTest, ChunkRounding) {
- // If max_length=12 and max_chunk_length=5, there should be 3 chunks,
- // not 2.
- base::debug::CrashKey key = { "round", 12 };
- EXPECT_EQ(3u, base::debug::InitCrashKeys(&key, 1, 5));
-}
-
-TEST_F(CrashLoggingTest, UninitializedCrashKeyStringSupport) {
+TEST(CrashLoggingTest, UninitializedCrashKeyStringSupport) {
auto* crash_key = base::debug::AllocateCrashKeyString(
"test", base::debug::CrashKeySize::Size32);
EXPECT_FALSE(crash_key);
diff --git a/chromium/base/debug/leak_tracker.h b/chromium/base/debug/leak_tracker.h
index 9dd16229393..7ddd5b62d1a 100644
--- a/chromium/base/debug/leak_tracker.h
+++ b/chromium/base/debug/leak_tracker.h
@@ -56,6 +56,8 @@ namespace debug {
template<typename T>
class LeakTracker {
public:
+ // This destructor suppresses warnings about instances of this class not being
+ // used.
~LeakTracker() {}
static void CheckForLeaks() {}
static int NumLiveInstances() { return -1; }
diff --git a/chromium/base/debug/proc_maps_linux_unittest.cc b/chromium/base/debug/proc_maps_linux_unittest.cc
index 9b5bcaca0eb..7abf152b0e0 100644
--- a/chromium/base/debug/proc_maps_linux_unittest.cc
+++ b/chromium/base/debug/proc_maps_linux_unittest.cc
@@ -10,7 +10,6 @@
#include "base/macros.h"
#include "base/path_service.h"
#include "base/strings/stringprintf.h"
-#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/threading/platform_thread.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -210,23 +209,7 @@ void CheckProcMapsRegions(const std::vector<MappedMemoryRegion> &regions) {
found_exe = true;
}
- // Valgrind uses its own allocated stacks instead of the kernel-provided
- // stack without letting the kernel know via prctl(PR_SET_MM_START_STACK).
- // Depending on which kernel you're running it'll impact the output of
- // /proc/self/maps.
- //
- // Prior to version 3.4, the kernel completely ignores other stacks and
- // always prints out the vma lying within mm->start_stack as [stack] even
- // if the program was currently executing on a different stack.
- //
- // Starting in 3.4, the kernel will print out the vma containing the current
- // stack pointer as [stack:TID] as long as that vma does not lie within
- // mm->start_stack.
- //
- // Because this has gotten too complicated and brittle of a test, completely
- // ignore checking for the stack and address when running under Valgrind.
- // See http://crbug.com/431702 for more details.
- if (!RunningOnValgrind() && regions[i].path == "[stack]") {
+ if (regions[i].path == "[stack]") {
// On Android the test is run on a background thread, since [stack] is for
// the main thread, we cannot test this.
#if !defined(OS_ANDROID)
@@ -248,10 +231,8 @@ void CheckProcMapsRegions(const std::vector<MappedMemoryRegion> &regions) {
}
EXPECT_TRUE(found_exe);
- if (!RunningOnValgrind()) {
- EXPECT_TRUE(found_stack);
- EXPECT_TRUE(found_address);
- }
+ EXPECT_TRUE(found_stack);
+ EXPECT_TRUE(found_address);
}
TEST(ProcMapsTest, ReadProcMaps) {
diff --git a/chromium/base/debug/stack_trace_fuchsia.cc b/chromium/base/debug/stack_trace_fuchsia.cc
index ea428b98f77..f9969331b5e 100644
--- a/chromium/base/debug/stack_trace_fuchsia.cc
+++ b/chromium/base/debug/stack_trace_fuchsia.cc
@@ -63,7 +63,9 @@ class SymbolMap {
Entry* GetForAddress(void* address);
private:
- static const size_t kMaxMapEntries = 64;
+ // Component builds of Chrome pull about 250 shared libraries (on Linux), so
+ // 512 entries should be enough in most cases.
+ static const size_t kMaxMapEntries = 512;
void Populate();
@@ -116,7 +118,6 @@ void SymbolMap::Populate() {
}
// Retrieve the debug info struct.
- constexpr size_t map_capacity = sizeof(entries_);
uintptr_t debug_addr;
status = zx_object_get_property(process, ZX_PROP_PROCESS_DEBUG_ADDR,
&debug_addr, sizeof(debug_addr));
@@ -135,7 +136,7 @@ void SymbolMap::Populate() {
// Copy the contents of the link map linked list to |entries_|.
while (lmap != nullptr) {
- if (count_ == map_capacity) {
+ if (count_ >= arraysize(entries_)) {
break;
}
SymbolMap::Entry* next_entry = &entries_[count_];
diff --git a/chromium/base/debug/stack_trace_posix.cc b/chromium/base/debug/stack_trace_posix.cc
index 1d7df1f62f1..1d341c06106 100644
--- a/chromium/base/debug/stack_trace_posix.cc
+++ b/chromium/base/debug/stack_trace_posix.cc
@@ -242,9 +242,16 @@ void StackDumpSignalHandler(int signal, siginfo_t* info, void* void_context) {
return;
}
+// Do not take the "in signal handler" code path on Mac in a DCHECK-enabled
+// build, as this prevents seeing a useful (symbolized) stack trace on a crash
+// or DCHECK() failure. While it may not be fully safe to run the stack symbol
+// printing code, in practice it's better to provide meaningful stack traces -
+// and the risk is low given we're likely crashing already.
+#if !defined(OS_MACOSX) || !DCHECK_IS_ON()
// Record the fact that we are in the signal handler now, so that the rest
// of StackTrace can behave in an async-signal-safe manner.
in_signal_handler = 1;
+#endif
if (BeingDebugged())
BreakDebugger();
diff --git a/chromium/base/files/dir_reader_posix.h b/chromium/base/files/dir_reader_posix.h
index 6a32d9fd480..15fc744f6f9 100644
--- a/chromium/base/files/dir_reader_posix.h
+++ b/chromium/base/files/dir_reader_posix.h
@@ -17,7 +17,7 @@
// seems worse than falling back to enumerating all file descriptors so we will
// probably never implement this on the Mac.
-#if defined(OS_LINUX)
+#if defined(OS_LINUX) || defined(OS_ANDROID)
#include "base/files/dir_reader_linux.h"
#else
#include "base/files/dir_reader_fallback.h"
@@ -25,7 +25,7 @@
namespace base {
-#if defined(OS_LINUX)
+#if defined(OS_LINUX) || defined(OS_ANDROID)
typedef DirReaderLinux DirReaderPosix;
#else
typedef DirReaderFallback DirReaderPosix;
diff --git a/chromium/base/files/file.cc b/chromium/base/files/file.cc
index 50b4370d309..c1ce182fc6d 100644
--- a/chromium/base/files/file.cc
+++ b/chromium/base/files/file.cc
@@ -9,6 +9,10 @@
#include "base/timer/elapsed_timer.h"
#include "build/build_config.h"
+#if defined(OS_POSIX)
+#include <errno.h>
+#endif
+
namespace base {
File::Info::Info()
@@ -82,6 +86,13 @@ File& File::operator=(File&& other) {
#if !defined(OS_NACL)
void File::Initialize(const FilePath& path, uint32_t flags) {
if (path.ReferencesParent()) {
+#if defined(OS_WIN)
+ ::SetLastError(ERROR_ACCESS_DENIED);
+#elif defined(OS_POSIX)
+ errno = EACCES;
+#else
+#error Unsupported platform
+#endif
error_details_ = FILE_ERROR_ACCESS_DENIED;
return;
}
diff --git a/chromium/base/files/file.h b/chromium/base/files/file.h
index 8b058157611..1b2f6d4cec2 100644
--- a/chromium/base/files/file.h
+++ b/chromium/base/files/file.h
@@ -24,7 +24,8 @@
namespace base {
-#if defined(OS_BSD) || defined(OS_MACOSX) || defined(OS_NACL)
+#if defined(OS_BSD) || defined(OS_MACOSX) || defined(OS_NACL) || \
+ defined(OS_ANDROID) && __ANDROID_API__ < 21
typedef struct stat stat_wrapper_t;
#elif defined(OS_POSIX)
typedef struct stat64 stat_wrapper_t;
@@ -334,6 +335,12 @@ class BASE_EXPORT File {
static Error OSErrorToFileError(int saved_errno);
#endif
+ // Gets the last global error (errno or GetLastError()) and converts it to the
+ // closest base::File::Error equivalent via OSErrorToFileError(). The returned
+ // value is only trustworthy immediately after another base::File method
+ // fails. base::File never resets the global error to zero.
+ static Error GetLastFileError();
+
// Converts an error value to a human-readable form. Used for logging.
static std::string ErrorToString(Error error);
diff --git a/chromium/base/files/file_path_watcher_win.cc b/chromium/base/files/file_path_watcher_win.cc
index 7096c7aa63d..46147509e44 100644
--- a/chromium/base/files/file_path_watcher_win.cc
+++ b/chromium/base/files/file_path_watcher_win.cc
@@ -15,6 +15,8 @@
#include "base/time/time.h"
#include "base/win/object_watcher.h"
+#include <windows.h>
+
namespace base {
namespace {
diff --git a/chromium/base/files/file_posix.cc b/chromium/base/files/file_posix.cc
index 20d59cbc054..d538b667cac 100644
--- a/chromium/base/files/file_posix.cc
+++ b/chromium/base/files/file_posix.cc
@@ -11,7 +11,7 @@
#include <unistd.h>
#include "base/logging.h"
-#include "base/metrics/histogram_macros.h"
+#include "base/metrics/histogram_functions.h"
#include "base/posix/eintr_wrapper.h"
#include "base/strings/utf_string_conversions.h"
#include "base/threading/thread_restrictions.h"
@@ -30,7 +30,8 @@ static_assert(File::FROM_BEGIN == SEEK_SET && File::FROM_CURRENT == SEEK_CUR &&
namespace {
-#if defined(OS_BSD) || defined(OS_MACOSX) || defined(OS_NACL)
+#if defined(OS_BSD) || defined(OS_MACOSX) || defined(OS_NACL) || \
+ defined(OS_ANDROID) && __ANDROID_API__ < 21
int CallFstat(int fd, stat_wrapper_t *sb) {
AssertBlockingAllowed();
return fstat(fd, sb);
@@ -78,7 +79,7 @@ File::Error CallFcntlFlock(PlatformFile file, bool do_lock) {
lock.l_start = 0;
lock.l_len = 0; // Lock entire file.
if (HANDLE_EINTR(fcntl(file, F_SETLK, &lock)) == -1)
- return File::OSErrorToFileError(errno);
+ return File::GetLastFileError();
return File::FILE_OK;
}
#endif
@@ -383,7 +384,7 @@ File File::Duplicate() const {
PlatformFile other_fd = HANDLE_EINTR(dup(GetPlatformFile()));
if (other_fd == -1)
- return File(OSErrorToFileError(errno));
+ return File(File::GetLastFileError());
File other(other_fd);
if (async())
@@ -421,9 +422,10 @@ File::Error File::OSErrorToFileError(int saved_errno) {
return FILE_ERROR_NOT_A_DIRECTORY;
default:
#if !defined(OS_NACL) // NaCl build has no metrics code.
- UMA_HISTOGRAM_SPARSE_SLOWLY("PlatformFile.UnknownErrors.Posix",
- saved_errno);
+ UmaHistogramSparse("PlatformFile.UnknownErrors.Posix", saved_errno);
#endif
+ // This function should only be called for errors.
+ DCHECK_NE(0, saved_errno);
return FILE_ERROR_FAILED;
}
}
@@ -501,7 +503,7 @@ void File::DoInitialize(const FilePath& path, uint32_t flags) {
}
if (descriptor < 0) {
- error_details_ = File::OSErrorToFileError(errno);
+ error_details_ = File::GetLastFileError();
return;
}
@@ -537,4 +539,9 @@ void File::SetPlatformFile(PlatformFile file) {
file_.reset(file);
}
+// static
+File::Error File::GetLastFileError() {
+ return base::File::OSErrorToFileError(errno);
+}
+
} // namespace base
diff --git a/chromium/base/files/file_unittest.cc b/chromium/base/files/file_unittest.cc
index 757e1b223cf..1bc09facd03 100644
--- a/chromium/base/files/file_unittest.cc
+++ b/chromium/base/files/file_unittest.cc
@@ -39,6 +39,7 @@ TEST(FileTest, Create) {
File file(file_path, base::File::FLAG_OPEN | base::File::FLAG_READ);
EXPECT_FALSE(file.IsValid());
EXPECT_EQ(base::File::FILE_ERROR_NOT_FOUND, file.error_details());
+ EXPECT_EQ(base::File::FILE_ERROR_NOT_FOUND, base::File::GetLastFileError());
}
{
@@ -80,6 +81,7 @@ TEST(FileTest, Create) {
EXPECT_FALSE(file.IsValid());
EXPECT_FALSE(file.created());
EXPECT_EQ(base::File::FILE_ERROR_EXISTS, file.error_details());
+ EXPECT_EQ(base::File::FILE_ERROR_EXISTS, base::File::GetLastFileError());
}
{
@@ -237,6 +239,25 @@ TEST(FileTest, ReadWrite) {
EXPECT_EQ(data_to_write[i - kOffsetBeyondEndOfFile], data_read_2[i]);
}
+TEST(FileTest, GetLastFileError) {
+#if defined(OS_WIN)
+ ::SetLastError(ERROR_ACCESS_DENIED);
+#else
+ errno = EACCES;
+#endif
+ EXPECT_EQ(File::FILE_ERROR_ACCESS_DENIED, File::GetLastFileError());
+
+ base::ScopedTempDir temp_dir;
+ EXPECT_TRUE(temp_dir.CreateUniqueTempDir());
+
+ FilePath nonexistent_path(temp_dir.GetPath().AppendASCII("nonexistent"));
+ File file(nonexistent_path, File::FLAG_OPEN | File::FLAG_READ);
+ File::Error last_error = File::GetLastFileError();
+ EXPECT_FALSE(file.IsValid());
+ EXPECT_EQ(File::FILE_ERROR_NOT_FOUND, file.error_details());
+ EXPECT_EQ(File::FILE_ERROR_NOT_FOUND, last_error);
+}
+
TEST(FileTest, Append) {
base::ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
diff --git a/chromium/base/files/file_util.h b/chromium/base/files/file_util.h
index 950d24ba000..780bb22cbf5 100644
--- a/chromium/base/files/file_util.h
+++ b/chromium/base/files/file_util.h
@@ -23,7 +23,7 @@
#include "build/build_config.h"
#if defined(OS_WIN)
-#include <windows.h>
+#include "base/win/windows_types.h"
#elif defined(OS_POSIX)
#include <sys/stat.h>
#include <unistd.h>
@@ -130,6 +130,12 @@ BASE_EXPORT bool CopyDirectory(const FilePath& from_path,
const FilePath& to_path,
bool recursive);
+// Like CopyDirectory() except trying to overwrite an existing file will not
+// work and will return false.
+BASE_EXPORT bool CopyDirectoryExcl(const FilePath& from_path,
+ const FilePath& to_path,
+ bool recursive);
+
// Returns true if the given path exists on the local filesystem,
// false otherwise.
BASE_EXPORT bool PathExists(const FilePath& path);
diff --git a/chromium/base/files/file_util_posix.cc b/chromium/base/files/file_util_posix.cc
index 5adac6b4508..27cd58a9df3 100644
--- a/chromium/base/files/file_util_posix.cc
+++ b/chromium/base/files/file_util_posix.cc
@@ -21,6 +21,8 @@
#include <time.h>
#include <unistd.h>
+#include "base/base_switches.h"
+#include "base/command_line.h"
#include "base/containers/stack.h"
#include "base/environment.h"
#include "base/files/file_enumerator.h"
@@ -66,7 +68,8 @@ namespace base {
namespace {
-#if defined(OS_BSD) || defined(OS_MACOSX) || defined(OS_NACL)
+#if defined(OS_BSD) || defined(OS_MACOSX) || defined(OS_NACL) || \
+ defined(OS_ANDROID) && __ANDROID_API__ < 21
static int CallStat(const char *path, stat_wrapper_t *sb) {
AssertBlockingAllowed();
return stat(path, sb);
@@ -75,7 +78,8 @@ static int CallLstat(const char *path, stat_wrapper_t *sb) {
AssertBlockingAllowed();
return lstat(path, sb);
}
-#else // defined(OS_BSD) || defined(OS_MACOSX) || defined(OS_NACL)
+#else // defined(OS_BSD) || defined(OS_MACOSX) || defined(OS_NACL) ||
+// defined(OS_ANDROID) && __ANDROID_API__ < 21
static int CallStat(const char *path, stat_wrapper_t *sb) {
AssertBlockingAllowed();
return stat64(path, sb);
@@ -217,85 +221,11 @@ bool CopyFileContents(File* infile, File* outfile) {
NOTREACHED();
return false;
}
-#endif // !defined(OS_NACL_NONSFI)
-
-#if !defined(OS_MACOSX)
-// Appends |mode_char| to |mode| before the optional character set encoding; see
-// https://www.gnu.org/software/libc/manual/html_node/Opening-Streams.html for
-// details.
-std::string AppendModeCharacter(StringPiece mode, char mode_char) {
- std::string result(mode.as_string());
- size_t comma_pos = result.find(',');
- result.insert(comma_pos == std::string::npos ? result.length() : comma_pos, 1,
- mode_char);
- return result;
-}
-#endif
-
-} // namespace
-
-#if !defined(OS_NACL_NONSFI)
-FilePath MakeAbsoluteFilePath(const FilePath& input) {
- AssertBlockingAllowed();
- char full_path[PATH_MAX];
- if (realpath(input.value().c_str(), full_path) == nullptr)
- return FilePath();
- return FilePath(full_path);
-}
-
-// TODO(erikkay): The Windows version of this accepts paths like "foo/bar/*"
-// which works both with and without the recursive flag. I'm not sure we need
-// that functionality. If not, remove from file_util_win.cc, otherwise add it
-// here.
-bool DeleteFile(const FilePath& path, bool recursive) {
- AssertBlockingAllowed();
- const char* path_str = path.value().c_str();
- stat_wrapper_t file_info;
- if (CallLstat(path_str, &file_info) != 0) {
- // The Windows version defines this condition as success.
- return (errno == ENOENT || errno == ENOTDIR);
- }
- if (!S_ISDIR(file_info.st_mode))
- return (unlink(path_str) == 0);
- if (!recursive)
- return (rmdir(path_str) == 0);
- bool success = true;
- base::stack<std::string> directories;
- directories.push(path.value());
- FileEnumerator traversal(path, true,
- FileEnumerator::FILES | FileEnumerator::DIRECTORIES |
- FileEnumerator::SHOW_SYM_LINKS);
- for (FilePath current = traversal.Next(); !current.empty();
- current = traversal.Next()) {
- if (traversal.GetInfo().IsDirectory())
- directories.push(current.value());
- else
- success &= (unlink(current.value().c_str()) == 0);
- }
-
- while (!directories.empty()) {
- FilePath dir = FilePath(directories.top());
- directories.pop();
- success &= (rmdir(dir.value().c_str()) == 0);
- }
- return success;
-}
-
-bool ReplaceFile(const FilePath& from_path,
- const FilePath& to_path,
- File::Error* error) {
- AssertBlockingAllowed();
- if (rename(from_path.value().c_str(), to_path.value().c_str()) == 0)
- return true;
- if (error)
- *error = File::OSErrorToFileError(errno);
- return false;
-}
-
-bool CopyDirectory(const FilePath& from_path,
- const FilePath& to_path,
- bool recursive) {
+bool DoCopyDirectory(const FilePath& from_path,
+ const FilePath& to_path,
+ bool recursive,
+ bool open_exclusive) {
AssertBlockingAllowed();
// Some old callers of CopyDirectory want it to support wildcards.
// After some discussion, we decided to fix those callers.
@@ -360,12 +290,11 @@ bool CopyDirectory(const FilePath& from_path,
}
if (S_ISDIR(from_stat.st_mode)) {
- if (mkdir(target_path.value().c_str(),
- (from_stat.st_mode & 01777) | S_IRUSR | S_IXUSR | S_IWUSR) ==
- 0 ||
- errno == EEXIST) {
+ mode_t mode = (from_stat.st_mode & 01777) | S_IRUSR | S_IXUSR | S_IWUSR;
+ if (mkdir(target_path.value().c_str(), mode) == 0)
+ continue;
+ if (errno == EEXIST && !open_exclusive)
continue;
- }
DPLOG(ERROR) << "CopyDirectory() couldn't create directory: "
<< target_path.value();
@@ -397,6 +326,16 @@ bool CopyDirectory(const FilePath& from_path,
continue;
}
+ int open_flags = O_WRONLY | O_CREAT;
+ // If |open_exclusive| is set then we should always create the destination
+ // file, so O_NONBLOCK is not necessary to ensure we don't block on the
+ // open call for the target file below, and since the destination will
+ // always be a regular file it wouldn't affect the behavior of the
+ // subsequent write calls anyway.
+ if (open_exclusive)
+ open_flags |= O_EXCL;
+ else
+ open_flags |= O_TRUNC | O_NONBLOCK;
// Each platform has different default file opening modes for CopyFile which
// we want to replicate here. On OS X, we use copyfile(3) which takes the
// source file's permissions into account. On the other platforms, we just
@@ -409,9 +348,7 @@ bool CopyDirectory(const FilePath& from_path,
#else
int mode = 0600;
#endif
- base::File outfile(
- open(target_path.value().c_str(),
- O_WRONLY | O_CREAT | O_TRUNC | O_NONBLOCK, mode));
+ base::File outfile(open(target_path.value().c_str(), open_flags, mode));
if (!outfile.IsValid()) {
DPLOG(ERROR) << "CopyDirectory() couldn't create file: "
<< target_path.value();
@@ -428,6 +365,93 @@ bool CopyDirectory(const FilePath& from_path,
}
#endif // !defined(OS_NACL_NONSFI)
+#if !defined(OS_MACOSX)
+// Appends |mode_char| to |mode| before the optional character set encoding; see
+// https://www.gnu.org/software/libc/manual/html_node/Opening-Streams.html for
+// details.
+std::string AppendModeCharacter(StringPiece mode, char mode_char) {
+ std::string result(mode.as_string());
+ size_t comma_pos = result.find(',');
+ result.insert(comma_pos == std::string::npos ? result.length() : comma_pos, 1,
+ mode_char);
+ return result;
+}
+#endif
+
+} // namespace
+
+#if !defined(OS_NACL_NONSFI)
+FilePath MakeAbsoluteFilePath(const FilePath& input) {
+ AssertBlockingAllowed();
+ char full_path[PATH_MAX];
+ if (realpath(input.value().c_str(), full_path) == nullptr)
+ return FilePath();
+ return FilePath(full_path);
+}
+
+// TODO(erikkay): The Windows version of this accepts paths like "foo/bar/*"
+// which works both with and without the recursive flag. I'm not sure we need
+// that functionality. If not, remove from file_util_win.cc, otherwise add it
+// here.
+bool DeleteFile(const FilePath& path, bool recursive) {
+ AssertBlockingAllowed();
+ const char* path_str = path.value().c_str();
+ stat_wrapper_t file_info;
+ if (CallLstat(path_str, &file_info) != 0) {
+ // The Windows version defines this condition as success.
+ return (errno == ENOENT || errno == ENOTDIR);
+ }
+ if (!S_ISDIR(file_info.st_mode))
+ return (unlink(path_str) == 0);
+ if (!recursive)
+ return (rmdir(path_str) == 0);
+
+ bool success = true;
+ base::stack<std::string> directories;
+ directories.push(path.value());
+ FileEnumerator traversal(path, true,
+ FileEnumerator::FILES | FileEnumerator::DIRECTORIES |
+ FileEnumerator::SHOW_SYM_LINKS);
+ for (FilePath current = traversal.Next(); !current.empty();
+ current = traversal.Next()) {
+ if (traversal.GetInfo().IsDirectory())
+ directories.push(current.value());
+ else
+ success &= (unlink(current.value().c_str()) == 0);
+ }
+
+ while (!directories.empty()) {
+ FilePath dir = FilePath(directories.top());
+ directories.pop();
+ success &= (rmdir(dir.value().c_str()) == 0);
+ }
+ return success;
+}
+
+bool ReplaceFile(const FilePath& from_path,
+ const FilePath& to_path,
+ File::Error* error) {
+ AssertBlockingAllowed();
+ if (rename(from_path.value().c_str(), to_path.value().c_str()) == 0)
+ return true;
+ if (error)
+ *error = File::GetLastFileError();
+ return false;
+}
+
+bool CopyDirectory(const FilePath& from_path,
+ const FilePath& to_path,
+ bool recursive) {
+ return DoCopyDirectory(from_path, to_path, recursive, false);
+}
+
+bool CopyDirectoryExcl(const FilePath& from_path,
+ const FilePath& to_path,
+ bool recursive) {
+ return DoCopyDirectory(from_path, to_path, recursive, true);
+}
+#endif // !defined(OS_NACL_NONSFI)
+
bool CreateLocalNonBlockingPipe(int fds[2]) {
#if defined(OS_LINUX)
return pipe2(fds, O_CLOEXEC | O_NONBLOCK) == 0;
@@ -985,16 +1009,21 @@ int GetMaximumPathComponentLength(const FilePath& path) {
// This is implemented in file_util_android.cc for that platform.
bool GetShmemTempDir(bool executable, FilePath* path) {
#if defined(OS_LINUX) || defined(OS_AIX)
+ bool disable_dev_shm = false;
+#if !defined(OS_CHROMEOS)
+ disable_dev_shm = CommandLine::ForCurrentProcess()->HasSwitch(
+ switches::kDisableDevShmUsage);
+#endif
bool use_dev_shm = true;
if (executable) {
static const bool s_dev_shm_executable = DetermineDevShmExecutable();
use_dev_shm = s_dev_shm_executable;
}
- if (use_dev_shm) {
+ if (use_dev_shm && !disable_dev_shm) {
*path = FilePath("/dev/shm");
return true;
}
-#endif
+#endif // defined(OS_LINUX) || defined(OS_AIX)
return GetTempDir(path);
}
#endif // !defined(OS_ANDROID)
diff --git a/chromium/base/files/file_util_unittest.cc b/chromium/base/files/file_util_unittest.cc
index 118e4491927..ae93125d7e1 100644
--- a/chromium/base/files/file_util_unittest.cc
+++ b/chromium/base/files/file_util_unittest.cc
@@ -1051,6 +1051,92 @@ TEST_F(FileUtilTest, CopyDirectoryPermissionsOverExistingFile) {
EXPECT_EQ(0777, mode);
}
+TEST_F(FileUtilTest, CopyDirectoryExclDoesNotOverwrite) {
+ // Create source directory.
+ FilePath dir_name_from =
+ temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_From_Subdir"));
+ CreateDirectory(dir_name_from);
+ ASSERT_TRUE(PathExists(dir_name_from));
+
+ // Create a file under the directory.
+ FilePath file_name_from =
+ dir_name_from.Append(FILE_PATH_LITERAL("Reggy-1.txt"));
+ CreateTextFile(file_name_from, L"Mordecai");
+ ASSERT_TRUE(PathExists(file_name_from));
+
+ // Create destination directory.
+ FilePath dir_name_to =
+ temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_To_Subdir"));
+ CreateDirectory(dir_name_to);
+ ASSERT_TRUE(PathExists(dir_name_to));
+
+ // Create a file under the directory with the same name.
+ FilePath file_name_to = dir_name_to.Append(FILE_PATH_LITERAL("Reggy-1.txt"));
+ CreateTextFile(file_name_to, L"Rigby");
+ ASSERT_TRUE(PathExists(file_name_to));
+
+ // Ensure that copying failed and the file was not overwritten.
+ EXPECT_FALSE(CopyDirectoryExcl(dir_name_from, dir_name_to, false));
+ ASSERT_TRUE(PathExists(file_name_to));
+ ASSERT_EQ(L"Rigby", ReadTextFile(file_name_to));
+}
+
+TEST_F(FileUtilTest, CopyDirectoryExclDirectoryOverExistingFile) {
+ // Create source directory.
+ FilePath dir_name_from =
+ temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_From_Subdir"));
+ CreateDirectory(dir_name_from);
+ ASSERT_TRUE(PathExists(dir_name_from));
+
+ // Create a subdirectory.
+ FilePath subdir_name_from = dir_name_from.Append(FILE_PATH_LITERAL("Subsub"));
+ CreateDirectory(subdir_name_from);
+ ASSERT_TRUE(PathExists(subdir_name_from));
+
+ // Create destination directory.
+ FilePath dir_name_to =
+ temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_To_Subdir"));
+ CreateDirectory(dir_name_to);
+ ASSERT_TRUE(PathExists(dir_name_to));
+
+ // Create a regular file under the directory with the same name.
+ FilePath file_name_to = dir_name_to.Append(FILE_PATH_LITERAL("Subsub"));
+ CreateTextFile(file_name_to, L"Rigby");
+ ASSERT_TRUE(PathExists(file_name_to));
+
+ // Ensure that copying failed and the file was not overwritten.
+ EXPECT_FALSE(CopyDirectoryExcl(dir_name_from, dir_name_to, false));
+ ASSERT_TRUE(PathExists(file_name_to));
+ ASSERT_EQ(L"Rigby", ReadTextFile(file_name_to));
+}
+
+TEST_F(FileUtilTest, CopyDirectoryExclDirectoryOverExistingDirectory) {
+ // Create source directory.
+ FilePath dir_name_from =
+ temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_From_Subdir"));
+ CreateDirectory(dir_name_from);
+ ASSERT_TRUE(PathExists(dir_name_from));
+
+ // Create a subdirectory.
+ FilePath subdir_name_from = dir_name_from.Append(FILE_PATH_LITERAL("Subsub"));
+ CreateDirectory(subdir_name_from);
+ ASSERT_TRUE(PathExists(subdir_name_from));
+
+ // Create destination directory.
+ FilePath dir_name_to =
+ temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_To_Subdir"));
+ CreateDirectory(dir_name_to);
+ ASSERT_TRUE(PathExists(dir_name_to));
+
+ // Create a subdirectory under the directory with the same name.
+ FilePath subdir_name_to = dir_name_to.Append(FILE_PATH_LITERAL("Subsub"));
+ CreateDirectory(subdir_name_to);
+ ASSERT_TRUE(PathExists(subdir_name_to));
+
+ // Ensure that copying failed and the file was not overwritten.
+ EXPECT_FALSE(CopyDirectoryExcl(dir_name_from, dir_name_to, false));
+}
+
TEST_F(FileUtilTest, CopyFileExecutablePermission) {
FilePath src = temp_dir_.GetPath().Append(FPL("src.txt"));
const std::wstring file_contents(L"Gooooooooooooooooooooogle");
@@ -1364,10 +1450,14 @@ TEST_F(FileUtilTest, DeleteDirRecursiveWithOpenFile) {
// this is best-effort because it's not supported by all file systems. Both
// files will have the same flags so no need to get them individually.
int flags;
- CHECK_EQ(0, ioctl(file1.GetPlatformFile(), FS_IOC_GETFLAGS, &flags));
- flags |= FS_IMMUTABLE_FL;
- ioctl(file1.GetPlatformFile(), FS_IOC_SETFLAGS, &flags);
- ioctl(file3.GetPlatformFile(), FS_IOC_SETFLAGS, &flags);
+ bool file_attrs_supported =
+ ioctl(file1.GetPlatformFile(), FS_IOC_GETFLAGS, &flags) == 0;
+ // Some filesystems (e.g. tmpfs) don't support file attributes.
+ if (file_attrs_supported) {
+ flags |= FS_IMMUTABLE_FL;
+ ioctl(file1.GetPlatformFile(), FS_IOC_SETFLAGS, &flags);
+ ioctl(file3.GetPlatformFile(), FS_IOC_SETFLAGS, &flags);
+ }
#endif
// Delete recursively and check that at least the second file got deleted.
@@ -1377,9 +1467,11 @@ TEST_F(FileUtilTest, DeleteDirRecursiveWithOpenFile) {
#if defined(OS_LINUX)
// Make sure that the test can clean up after itself.
- flags &= ~FS_IMMUTABLE_FL;
- ioctl(file1.GetPlatformFile(), FS_IOC_SETFLAGS, &flags);
- ioctl(file3.GetPlatformFile(), FS_IOC_SETFLAGS, &flags);
+ if (file_attrs_supported) {
+ flags &= ~FS_IMMUTABLE_FL;
+ ioctl(file1.GetPlatformFile(), FS_IOC_SETFLAGS, &flags);
+ ioctl(file3.GetPlatformFile(), FS_IOC_SETFLAGS, &flags);
+ }
#endif
}
@@ -1776,6 +1868,24 @@ TEST_F(FileUtilTest, CopyFileWithCopyDirectoryRecursiveToExistingDirectory) {
EXPECT_TRUE(PathExists(file_name_to));
}
+TEST_F(FileUtilTest, CopyFileFailureWithCopyDirectoryExcl) {
+ // Create a file
+ FilePath file_name_from =
+ temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+ CreateTextFile(file_name_from, L"Gooooooooooooooooooooogle");
+ ASSERT_TRUE(PathExists(file_name_from));
+
+ // Make a destination file.
+ FilePath file_name_to = temp_dir_.GetPath().Append(
+ FILE_PATH_LITERAL("Copy_Test_File_Destination.txt"));
+ CreateTextFile(file_name_to, L"Old file content");
+ ASSERT_TRUE(PathExists(file_name_to));
+
+ // Overwriting the destination should fail.
+ EXPECT_FALSE(CopyDirectoryExcl(file_name_from, file_name_to, true));
+ EXPECT_EQ(L"Old file content", ReadTextFile(file_name_to));
+}
+
TEST_F(FileUtilTest, CopyDirectoryWithTrailingSeparators) {
// Create a directory.
FilePath dir_name_from =
@@ -1863,6 +1973,169 @@ TEST_F(FileUtilTest, CopyDirectoryWithNonRegularFiles) {
EXPECT_FALSE(PathExists(symlink_name_to));
EXPECT_FALSE(PathExists(fifo_name_to));
}
+
+TEST_F(FileUtilTest, CopyDirectoryExclFileOverSymlink) {
+ // Create a directory.
+ FilePath dir_name_from =
+ temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_From_Subdir"));
+ ASSERT_TRUE(CreateDirectory(dir_name_from));
+ ASSERT_TRUE(PathExists(dir_name_from));
+
+ // Create a file under the directory.
+ FilePath file_name_from =
+ dir_name_from.Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+ CreateTextFile(file_name_from, L"Gooooooooooooooooooooogle");
+ ASSERT_TRUE(PathExists(file_name_from));
+
+ // Create a destination directory with a symlink of the same name.
+ FilePath dir_name_to =
+ temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_To_Subdir"));
+ ASSERT_TRUE(CreateDirectory(dir_name_to));
+ ASSERT_TRUE(PathExists(dir_name_to));
+
+ FilePath symlink_target =
+ dir_name_to.Append(FILE_PATH_LITERAL("Symlink_Target.txt"));
+ CreateTextFile(symlink_target, L"asdf");
+ ASSERT_TRUE(PathExists(symlink_target));
+
+ FilePath symlink_name_to =
+ dir_name_to.Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+ ASSERT_TRUE(CreateSymbolicLink(symlink_target, symlink_name_to));
+ ASSERT_TRUE(PathExists(symlink_name_to));
+
+ // Check that copying fails.
+ EXPECT_FALSE(CopyDirectoryExcl(dir_name_from, dir_name_to, false));
+}
+
+TEST_F(FileUtilTest, CopyDirectoryExclDirectoryOverSymlink) {
+ // Create a directory.
+ FilePath dir_name_from =
+ temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_From_Subdir"));
+ ASSERT_TRUE(CreateDirectory(dir_name_from));
+ ASSERT_TRUE(PathExists(dir_name_from));
+
+ // Create a subdirectory.
+ FilePath subdir_name_from = dir_name_from.Append(FILE_PATH_LITERAL("Subsub"));
+ CreateDirectory(subdir_name_from);
+ ASSERT_TRUE(PathExists(subdir_name_from));
+
+ // Create a destination directory with a symlink of the same name.
+ FilePath dir_name_to =
+ temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_To_Subdir"));
+ ASSERT_TRUE(CreateDirectory(dir_name_to));
+ ASSERT_TRUE(PathExists(dir_name_to));
+
+ FilePath symlink_target = dir_name_to.Append(FILE_PATH_LITERAL("Subsub"));
+ CreateTextFile(symlink_target, L"asdf");
+ ASSERT_TRUE(PathExists(symlink_target));
+
+ FilePath symlink_name_to =
+ dir_name_to.Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+ ASSERT_TRUE(CreateSymbolicLink(symlink_target, symlink_name_to));
+ ASSERT_TRUE(PathExists(symlink_name_to));
+
+ // Check that copying fails.
+ EXPECT_FALSE(CopyDirectoryExcl(dir_name_from, dir_name_to, false));
+}
+
+TEST_F(FileUtilTest, CopyDirectoryExclFileOverDanglingSymlink) {
+ // Create a directory.
+ FilePath dir_name_from =
+ temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_From_Subdir"));
+ ASSERT_TRUE(CreateDirectory(dir_name_from));
+ ASSERT_TRUE(PathExists(dir_name_from));
+
+ // Create a file under the directory.
+ FilePath file_name_from =
+ dir_name_from.Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+ CreateTextFile(file_name_from, L"Gooooooooooooooooooooogle");
+ ASSERT_TRUE(PathExists(file_name_from));
+
+ // Create a destination directory with a dangling symlink of the same name.
+ FilePath dir_name_to =
+ temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_To_Subdir"));
+ ASSERT_TRUE(CreateDirectory(dir_name_to));
+ ASSERT_TRUE(PathExists(dir_name_to));
+
+ FilePath symlink_target =
+ dir_name_to.Append(FILE_PATH_LITERAL("Symlink_Target.txt"));
+ CreateTextFile(symlink_target, L"asdf");
+ ASSERT_TRUE(PathExists(symlink_target));
+
+ FilePath symlink_name_to =
+ dir_name_to.Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+ ASSERT_TRUE(CreateSymbolicLink(symlink_target, symlink_name_to));
+ ASSERT_TRUE(PathExists(symlink_name_to));
+ ASSERT_TRUE(DeleteFile(symlink_target, false));
+
+ // Check that copying fails and that no file was created for the symlink's
+ // referent.
+ EXPECT_FALSE(CopyDirectoryExcl(dir_name_from, dir_name_to, false));
+ EXPECT_FALSE(PathExists(symlink_target));
+}
+
+TEST_F(FileUtilTest, CopyDirectoryExclDirectoryOverDanglingSymlink) {
+ // Create a directory.
+ FilePath dir_name_from =
+ temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_From_Subdir"));
+ ASSERT_TRUE(CreateDirectory(dir_name_from));
+ ASSERT_TRUE(PathExists(dir_name_from));
+
+ // Create a subdirectory.
+ FilePath subdir_name_from = dir_name_from.Append(FILE_PATH_LITERAL("Subsub"));
+ CreateDirectory(subdir_name_from);
+ ASSERT_TRUE(PathExists(subdir_name_from));
+
+ // Create a destination directory with a dangling symlink of the same name.
+ FilePath dir_name_to =
+ temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_To_Subdir"));
+ ASSERT_TRUE(CreateDirectory(dir_name_to));
+ ASSERT_TRUE(PathExists(dir_name_to));
+
+ FilePath symlink_target =
+ dir_name_to.Append(FILE_PATH_LITERAL("Symlink_Target.txt"));
+ CreateTextFile(symlink_target, L"asdf");
+ ASSERT_TRUE(PathExists(symlink_target));
+
+ FilePath symlink_name_to =
+ dir_name_to.Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+ ASSERT_TRUE(CreateSymbolicLink(symlink_target, symlink_name_to));
+ ASSERT_TRUE(PathExists(symlink_name_to));
+ ASSERT_TRUE(DeleteFile(symlink_target, false));
+
+ // Check that copying fails and that no directory was created for the
+ // symlink's referent.
+ EXPECT_FALSE(CopyDirectoryExcl(dir_name_from, dir_name_to, false));
+ EXPECT_FALSE(PathExists(symlink_target));
+}
+
+TEST_F(FileUtilTest, CopyDirectoryExclFileOverFifo) {
+ // Create a directory.
+ FilePath dir_name_from =
+ temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_From_Subdir"));
+ ASSERT_TRUE(CreateDirectory(dir_name_from));
+ ASSERT_TRUE(PathExists(dir_name_from));
+
+ // Create a file under the directory.
+ FilePath file_name_from =
+ dir_name_from.Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+ CreateTextFile(file_name_from, L"Gooooooooooooooooooooogle");
+ ASSERT_TRUE(PathExists(file_name_from));
+
+ // Create a destination directory with a fifo of the same name.
+ FilePath dir_name_to =
+ temp_dir_.GetPath().Append(FILE_PATH_LITERAL("Copy_To_Subdir"));
+ ASSERT_TRUE(CreateDirectory(dir_name_to));
+ ASSERT_TRUE(PathExists(dir_name_to));
+
+ FilePath fifo_name_to =
+ dir_name_to.Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
+ ASSERT_EQ(0, mkfifo(fifo_name_to.value().c_str(), 0644));
+ ASSERT_TRUE(PathExists(fifo_name_to));
+
+ // Check that copying fails.
+ EXPECT_FALSE(CopyDirectoryExcl(dir_name_from, dir_name_to, false));
+}
#endif // !defined(OS_FUCHSIA) && defined(OS_POSIX)
TEST_F(FileUtilTest, CopyFile) {
diff --git a/chromium/base/files/file_util_win.cc b/chromium/base/files/file_util_win.cc
index 1f63211c1e9..6b268cdb9ad 100644
--- a/chromium/base/files/file_util_win.cc
+++ b/chromium/base/files/file_util_win.cc
@@ -80,93 +80,43 @@ void AppendModeCharacter(base::char16 mode_char, base::string16* mode) {
1, mode_char);
}
-} // namespace
-
-FilePath MakeAbsoluteFilePath(const FilePath& input) {
- AssertBlockingAllowed();
- wchar_t file_path[MAX_PATH];
- if (!_wfullpath(file_path, input.value().c_str(), MAX_PATH))
- return FilePath();
- return FilePath(file_path);
-}
-
-bool DeleteFile(const FilePath& path, bool recursive) {
+bool DoCopyFile(const FilePath& from_path,
+ const FilePath& to_path,
+ bool fail_if_exists) {
AssertBlockingAllowed();
-
- if (path.empty())
- return true;
-
- if (path.value().length() >= MAX_PATH)
+ if (from_path.ReferencesParent() || to_path.ReferencesParent())
return false;
- // Handle any path with wildcards.
- if (path.BaseName().value().find_first_of(L"*?") !=
- FilePath::StringType::npos) {
- return DeleteFileRecursive(path.DirName(), path.BaseName().value(),
- recursive);
- }
- DWORD attr = GetFileAttributes(path.value().c_str());
- // We're done if we can't find the path.
- if (attr == INVALID_FILE_ATTRIBUTES)
- return true;
- // We may need to clear the read-only bit.
- if ((attr & FILE_ATTRIBUTE_READONLY) &&
- !SetFileAttributes(path.value().c_str(),
- attr & ~FILE_ATTRIBUTE_READONLY)) {
+ // NOTE: I suspect we could support longer paths, but that would involve
+ // analyzing all our usage of files.
+ if (from_path.value().length() >= MAX_PATH ||
+ to_path.value().length() >= MAX_PATH) {
return false;
}
- // Directories are handled differently if they're recursive.
- if (!(attr & FILE_ATTRIBUTE_DIRECTORY))
- return !!::DeleteFile(path.value().c_str());
- // Handle a simple, single file delete.
- if (!recursive || DeleteFileRecursive(path, L"*", true))
- return !!RemoveDirectory(path.value().c_str());
-
- return false;
-}
-
-bool DeleteFileAfterReboot(const FilePath& path) {
- AssertBlockingAllowed();
- if (path.value().length() >= MAX_PATH)
+ // Unlike the posix implementation that copies the file manually and discards
+ // the ACL bits, CopyFile() copies the complete SECURITY_DESCRIPTOR and access
+ // bits, which is usually not what we want. We can't do much about the
+ // SECURITY_DESCRIPTOR but at least remove the read only bit.
+ const wchar_t* dest = to_path.value().c_str();
+ if (!::CopyFile(from_path.value().c_str(), dest, fail_if_exists)) {
+ // Copy failed.
return false;
-
- return MoveFileEx(path.value().c_str(), NULL,
- MOVEFILE_DELAY_UNTIL_REBOOT |
- MOVEFILE_REPLACE_EXISTING) != FALSE;
-}
-
-bool ReplaceFile(const FilePath& from_path,
- const FilePath& to_path,
- File::Error* error) {
- AssertBlockingAllowed();
- // Try a simple move first. It will only succeed when |to_path| doesn't
- // already exist.
- if (::MoveFile(from_path.value().c_str(), to_path.value().c_str()))
- return true;
- File::Error move_error = File::OSErrorToFileError(GetLastError());
-
- // Try the full-blown replace if the move fails, as ReplaceFile will only
- // succeed when |to_path| does exist. When writing to a network share, we may
- // not be able to change the ACLs. Ignore ACL errors then
- // (REPLACEFILE_IGNORE_MERGE_ERRORS).
- if (::ReplaceFile(to_path.value().c_str(), from_path.value().c_str(), NULL,
- REPLACEFILE_IGNORE_MERGE_ERRORS, NULL, NULL)) {
- return true;
}
- // In the case of FILE_ERROR_NOT_FOUND from ReplaceFile, it is likely that
- // |to_path| does not exist. In this case, the more relevant error comes
- // from the call to MoveFile.
- if (error) {
- File::Error replace_error = File::OSErrorToFileError(GetLastError());
- *error = replace_error == File::FILE_ERROR_NOT_FOUND ? move_error
- : replace_error;
+ DWORD attrs = GetFileAttributes(dest);
+ if (attrs == INVALID_FILE_ATTRIBUTES) {
+ return false;
}
- return false;
+ if (attrs & FILE_ATTRIBUTE_READONLY) {
+ SetFileAttributes(dest, attrs & ~FILE_ATTRIBUTE_READONLY);
+ }
+ return true;
}
-bool CopyDirectory(const FilePath& from_path, const FilePath& to_path,
- bool recursive) {
+bool DoCopyDirectory(const FilePath& from_path,
+ const FilePath& to_path,
+ bool recursive,
+ bool fail_if_exists) {
// NOTE(maruel): Previous version of this function used to call
// SHFileOperation(). This used to copy the file attributes and extended
// attributes, OLE structured storage, NTFS file system alternate data
@@ -239,7 +189,7 @@ bool CopyDirectory(const FilePath& from_path, const FilePath& to_path,
<< target_path.value().c_str();
success = false;
}
- } else if (!CopyFile(current, target_path)) {
+ } else if (!DoCopyFile(current, target_path, fail_if_exists)) {
DLOG(ERROR) << "CopyDirectory() couldn't create file: "
<< target_path.value().c_str();
success = false;
@@ -253,6 +203,103 @@ bool CopyDirectory(const FilePath& from_path, const FilePath& to_path,
return success;
}
+} // namespace
+
+FilePath MakeAbsoluteFilePath(const FilePath& input) {
+ AssertBlockingAllowed();
+ wchar_t file_path[MAX_PATH];
+ if (!_wfullpath(file_path, input.value().c_str(), MAX_PATH))
+ return FilePath();
+ return FilePath(file_path);
+}
+
+bool DeleteFile(const FilePath& path, bool recursive) {
+ AssertBlockingAllowed();
+
+ if (path.empty())
+ return true;
+
+ if (path.value().length() >= MAX_PATH)
+ return false;
+
+ // Handle any path with wildcards.
+ if (path.BaseName().value().find_first_of(L"*?") !=
+ FilePath::StringType::npos) {
+ return DeleteFileRecursive(path.DirName(), path.BaseName().value(),
+ recursive);
+ }
+ DWORD attr = GetFileAttributes(path.value().c_str());
+ // We're done if we can't find the path.
+ if (attr == INVALID_FILE_ATTRIBUTES)
+ return true;
+ // We may need to clear the read-only bit.
+ if ((attr & FILE_ATTRIBUTE_READONLY) &&
+ !SetFileAttributes(path.value().c_str(),
+ attr & ~FILE_ATTRIBUTE_READONLY)) {
+ return false;
+ }
+ // Directories are handled differently if they're recursive.
+ if (!(attr & FILE_ATTRIBUTE_DIRECTORY))
+ return !!::DeleteFile(path.value().c_str());
+ // Handle a simple, single file delete.
+ if (!recursive || DeleteFileRecursive(path, L"*", true))
+ return !!RemoveDirectory(path.value().c_str());
+
+ return false;
+}
+
+bool DeleteFileAfterReboot(const FilePath& path) {
+ AssertBlockingAllowed();
+
+ if (path.value().length() >= MAX_PATH)
+ return false;
+
+ return MoveFileEx(path.value().c_str(), NULL,
+ MOVEFILE_DELAY_UNTIL_REBOOT |
+ MOVEFILE_REPLACE_EXISTING) != FALSE;
+}
+
+bool ReplaceFile(const FilePath& from_path,
+ const FilePath& to_path,
+ File::Error* error) {
+ AssertBlockingAllowed();
+ // Try a simple move first. It will only succeed when |to_path| doesn't
+ // already exist.
+ if (::MoveFile(from_path.value().c_str(), to_path.value().c_str()))
+ return true;
+ File::Error move_error = File::OSErrorToFileError(GetLastError());
+
+ // Try the full-blown replace if the move fails, as ReplaceFile will only
+ // succeed when |to_path| does exist. When writing to a network share, we may
+ // not be able to change the ACLs. Ignore ACL errors then
+ // (REPLACEFILE_IGNORE_MERGE_ERRORS).
+ if (::ReplaceFile(to_path.value().c_str(), from_path.value().c_str(), NULL,
+ REPLACEFILE_IGNORE_MERGE_ERRORS, NULL, NULL)) {
+ return true;
+ }
+ // In the case of FILE_ERROR_NOT_FOUND from ReplaceFile, it is likely that
+ // |to_path| does not exist. In this case, the more relevant error comes
+ // from the call to MoveFile.
+ if (error) {
+ File::Error replace_error = File::OSErrorToFileError(GetLastError());
+ *error = replace_error == File::FILE_ERROR_NOT_FOUND ? move_error
+ : replace_error;
+ }
+ return false;
+}
+
+bool CopyDirectory(const FilePath& from_path,
+ const FilePath& to_path,
+ bool recursive) {
+ return DoCopyDirectory(from_path, to_path, recursive, false);
+}
+
+bool CopyDirectoryExcl(const FilePath& from_path,
+ const FilePath& to_path,
+ bool recursive) {
+ return DoCopyDirectory(from_path, to_path, recursive, true);
+}
+
bool PathExists(const FilePath& path) {
AssertBlockingAllowed();
return (GetFileAttributes(path.value().c_str()) != INVALID_FILE_ATTRIBUTES);
@@ -774,34 +821,7 @@ int GetMaximumPathComponentLength(const FilePath& path) {
}
bool CopyFile(const FilePath& from_path, const FilePath& to_path) {
- AssertBlockingAllowed();
- if (from_path.ReferencesParent() || to_path.ReferencesParent())
- return false;
-
- // NOTE: I suspect we could support longer paths, but that would involve
- // analyzing all our usage of files.
- if (from_path.value().length() >= MAX_PATH ||
- to_path.value().length() >= MAX_PATH) {
- return false;
- }
-
- // Unlike the posix implementation that copies the file manually and discards
- // the ACL bits, CopyFile() copies the complete SECURITY_DESCRIPTOR and access
- // bits, which is usually not what we want. We can't do much about the
- // SECURITY_DESCRIPTOR but at least remove the read only bit.
- const wchar_t* dest = to_path.value().c_str();
- if (!::CopyFile(from_path.value().c_str(), dest, false)) {
- // Copy failed.
- return false;
- }
- DWORD attrs = GetFileAttributes(dest);
- if (attrs == INVALID_FILE_ATTRIBUTES) {
- return false;
- }
- if (attrs & FILE_ATTRIBUTE_READONLY) {
- SetFileAttributes(dest, attrs & ~FILE_ATTRIBUTE_READONLY);
- }
- return true;
+ return DoCopyFile(from_path, to_path, false);
}
bool SetNonBlocking(int fd) {
diff --git a/chromium/base/files/file_win.cc b/chromium/base/files/file_win.cc
index 6e7c38362d9..d7bffc3b512 100644
--- a/chromium/base/files/file_win.cc
+++ b/chromium/base/files/file_win.cc
@@ -8,9 +8,11 @@
#include <stdint.h>
#include "base/logging.h"
-#include "base/metrics/histogram_macros.h"
+#include "base/metrics/histogram_functions.h"
#include "base/threading/thread_restrictions.h"
+#include <windows.h>
+
namespace base {
// Make sure our Whence mappings match the system headers.
@@ -234,7 +236,7 @@ File::Error File::Lock() {
BOOL result = LockFile(file_.Get(), 0, 0, MAXDWORD, MAXDWORD);
if (!result)
- return OSErrorToFileError(GetLastError());
+ return GetLastFileError();
return FILE_OK;
}
@@ -245,7 +247,7 @@ File::Error File::Unlock() {
BOOL result = UnlockFile(file_.Get(), 0, 0, MAXDWORD, MAXDWORD);
if (!result)
- return OSErrorToFileError(GetLastError());
+ return GetLastFileError();
return FILE_OK;
}
@@ -264,7 +266,7 @@ File File::Duplicate() const {
0, // dwDesiredAccess ignored due to SAME_ACCESS
FALSE, // !bInheritHandle
DUPLICATE_SAME_ACCESS)) {
- return File(OSErrorToFileError(GetLastError()));
+ return File(GetLastFileError());
}
File other(other_handle);
@@ -284,6 +286,7 @@ File::Error File::OSErrorToFileError(DWORD last_error) {
switch (last_error) {
case ERROR_SHARING_VIOLATION:
return FILE_ERROR_IN_USE;
+ case ERROR_ALREADY_EXISTS:
case ERROR_FILE_EXISTS:
return FILE_ERROR_EXISTS;
case ERROR_FILE_NOT_FOUND:
@@ -310,8 +313,9 @@ File::Error File::OSErrorToFileError(DWORD last_error) {
case ERROR_DISK_CORRUPT:
return FILE_ERROR_IO;
default:
- UMA_HISTOGRAM_SPARSE_SLOWLY("PlatformFile.UnknownErrors.Windows",
- last_error);
+ UmaHistogramSparse("PlatformFile.UnknownErrors.Windows", last_error);
+ // This function should only be called for errors.
+ DCHECK_NE(static_cast<DWORD>(ERROR_SUCCESS), last_error);
return FILE_ERROR_FAILED;
}
}
@@ -348,6 +352,8 @@ void File::DoInitialize(const FilePath& path, uint32_t flags) {
}
if (!disposition) {
+ ::SetLastError(ERROR_INVALID_PARAMETER);
+ error_details_ = FILE_ERROR_FAILED;
NOTREACHED();
return;
}
@@ -400,7 +406,7 @@ void File::DoInitialize(const FilePath& path, uint32_t flags) {
else if (flags & (FLAG_CREATE_ALWAYS | FLAG_CREATE))
created_ = true;
} else {
- error_details_ = OSErrorToFileError(GetLastError());
+ error_details_ = GetLastFileError();
}
}
@@ -415,4 +421,9 @@ void File::SetPlatformFile(PlatformFile file) {
file_.Set(file);
}
+// static
+File::Error File::GetLastFileError() {
+ return File::OSErrorToFileError(GetLastError());
+}
+
} // namespace base
diff --git a/chromium/base/files/important_file_writer.cc b/chromium/base/files/important_file_writer.cc
index 688b6d2d4ac..64f189d8541 100644
--- a/chromium/base/files/important_file_writer.cc
+++ b/chromium/base/files/important_file_writer.cc
@@ -99,22 +99,12 @@ void WriteScopedStringToFileAtomically(
after_write_callback.Run(result);
}
-base::File::Error GetLastFileError() {
-#if defined(OS_WIN)
- return base::File::OSErrorToFileError(::GetLastError());
-#elif defined(OS_POSIX)
- return base::File::OSErrorToFileError(errno);
-#else
- return base::File::FILE_OK;
-#endif
-}
-
void DeleteTmpFile(const FilePath& tmp_file_path,
StringPiece histogram_suffix) {
if (!DeleteFile(tmp_file_path, false)) {
- UmaHistogramExactLinearWithSuffix("ImportantFile.FileDeleteError",
- histogram_suffix, -GetLastFileError(),
- -base::File::FILE_ERROR_MAX);
+ UmaHistogramExactLinearWithSuffix(
+ "ImportantFile.FileDeleteError", histogram_suffix,
+ -base::File::GetLastFileError(), -base::File::FILE_ERROR_MAX);
}
}
@@ -144,9 +134,9 @@ bool ImportantFileWriter::WriteFileAtomically(const FilePath& path,
// is securely created.
FilePath tmp_file_path;
if (!CreateTemporaryFileInDir(path.DirName(), &tmp_file_path)) {
- UmaHistogramExactLinearWithSuffix("ImportantFile.FileCreateError",
- histogram_suffix, -GetLastFileError(),
- -base::File::FILE_ERROR_MAX);
+ UmaHistogramExactLinearWithSuffix(
+ "ImportantFile.FileCreateError", histogram_suffix,
+ -base::File::GetLastFileError(), -base::File::FILE_ERROR_MAX);
LogFailure(path, histogram_suffix, FAILED_CREATING,
"could not create temporary file");
return false;
@@ -167,9 +157,9 @@ bool ImportantFileWriter::WriteFileAtomically(const FilePath& path,
const int data_length = checked_cast<int32_t>(data.length());
int bytes_written = tmp_file.Write(0, data.data(), data_length);
if (bytes_written < data_length) {
- UmaHistogramExactLinearWithSuffix("ImportantFile.FileWriteError",
- histogram_suffix, -GetLastFileError(),
- -base::File::FILE_ERROR_MAX);
+ UmaHistogramExactLinearWithSuffix(
+ "ImportantFile.FileWriteError", histogram_suffix,
+ -base::File::GetLastFileError(), -base::File::FILE_ERROR_MAX);
}
bool flush_success = tmp_file.Flush();
tmp_file.Close();
diff --git a/chromium/base/files/important_file_writer.h b/chromium/base/files/important_file_writer.h
index 3d04eecbc6f..08a7ee34bee 100644
--- a/chromium/base/files/important_file_writer.h
+++ b/chromium/base/files/important_file_writer.h
@@ -47,7 +47,7 @@ class BASE_EXPORT ImportantFileWriter {
virtual bool SerializeData(std::string* data) = 0;
protected:
- virtual ~DataSerializer() {}
+ virtual ~DataSerializer() = default;
};
// Save |data| to |path| in an atomic manner. Blocks and writes data on the
diff --git a/chromium/base/files/memory_mapped_file_win.cc b/chromium/base/files/memory_mapped_file_win.cc
index 087ca9ffe2a..26869f6acdd 100644
--- a/chromium/base/files/memory_mapped_file_win.cc
+++ b/chromium/base/files/memory_mapped_file_win.cc
@@ -13,6 +13,8 @@
#include "base/strings/string16.h"
#include "base/threading/thread_restrictions.h"
+#include <windows.h>
+
namespace base {
MemoryMappedFile::MemoryMappedFile() : data_(NULL), length_(0) {
diff --git a/chromium/base/files/platform_file.h b/chromium/base/files/platform_file.h
index 6b4a0c2199d..4b8b539bf0c 100644
--- a/chromium/base/files/platform_file.h
+++ b/chromium/base/files/platform_file.h
@@ -9,8 +9,8 @@
#include "build/build_config.h"
#if defined(OS_WIN)
-#include <windows.h>
#include "base/win/scoped_handle.h"
+#include "base/win/windows_types.h"
#endif
// This file defines platform-independent types for dealing with
diff --git a/chromium/base/fuchsia/OWNERS b/chromium/base/fuchsia/OWNERS
new file mode 100644
index 00000000000..e7034eabb1e
--- /dev/null
+++ b/chromium/base/fuchsia/OWNERS
@@ -0,0 +1 @@
+file://build/fuchsia/OWNERS
diff --git a/chromium/base/fuchsia/fuchsia_logging.h b/chromium/base/fuchsia/fuchsia_logging.h
index 728b0bb6486..ba55f8db86e 100644
--- a/chromium/base/fuchsia/fuchsia_logging.h
+++ b/chromium/base/fuchsia/fuchsia_logging.h
@@ -49,12 +49,17 @@ class BASE_EXPORT ZxLogMessage : public logging::LogMessage {
#define ZX_DLOG(severity, zx_err) \
LAZY_STREAM(ZX_LOG_STREAM(severity, zx_err), DLOG_IS_ON(severity))
+
+#if DCHECK_IS_ON()
#define ZX_DLOG_IF(severity, condition, zx_err) \
LAZY_STREAM(ZX_LOG_STREAM(severity, zx_err), \
DLOG_IS_ON(severity) && (condition))
+#else // DCHECK_IS_ON()
+#define ZX_DLOG_IF(severity, condition, zx_err) EAT_STREAM_PARAMETERS
+#endif // DCHECK_IS_ON()
-#define ZX_DCHECK(condition, zx_err) \
- LAZY_STREAM(ZX_LOG_STREAM(FATAL, zx_err), DCHECK_IS_ON() && !(condition)) \
+#define ZX_DCHECK(condition, zx_err) \
+ LAZY_STREAM(ZX_LOG_STREAM(DCHECK, zx_err), DCHECK_IS_ON() && !(condition)) \
<< "Check failed: " #condition << ". "
#endif // BASE_FUCHSIA_FUCHSIA_LOGGING_H_
diff --git a/chromium/base/fuchsia/scoped_zx_handle.h b/chromium/base/fuchsia/scoped_zx_handle.h
index dfde686c5e9..fd42bc780fd 100644
--- a/chromium/base/fuchsia/scoped_zx_handle.h
+++ b/chromium/base/fuchsia/scoped_zx_handle.h
@@ -8,7 +8,7 @@
#include <zircon/status.h>
#include <zircon/syscalls.h>
-#include "base/logging.h"
+#include "base/fuchsia/fuchsia_logging.h"
#include "base/scoped_generic.h"
namespace base {
@@ -19,7 +19,7 @@ struct ScopedZxHandleTraits {
static zx_handle_t InvalidValue() { return ZX_HANDLE_INVALID; }
static void Free(zx_handle_t object) {
zx_status_t status = zx_handle_close(object);
- CHECK_EQ(ZX_OK, status) << zx_status_get_string(status);
+ ZX_CHECK(status == ZX_OK, status) << "zx_handle_close";
}
};
diff --git a/chromium/base/guid.cc b/chromium/base/guid.cc
index 45dd209c2b8..2a23658378a 100644
--- a/chromium/base/guid.cc
+++ b/chromium/base/guid.cc
@@ -51,13 +51,13 @@ std::string GenerateGUID() {
// where y is one of [8, 9, A, B].
// Clear the version bits and set the version to 4:
- sixteen_bytes[0] &= 0xffffffffffff0fffULL;
- sixteen_bytes[0] |= 0x0000000000004000ULL;
+ sixteen_bytes[0] &= 0xffffffff'ffff0fffULL;
+ sixteen_bytes[0] |= 0x00000000'00004000ULL;
// Set the two most significant bits (bits 6 and 7) of the
// clock_seq_hi_and_reserved to zero and one, respectively:
- sixteen_bytes[1] &= 0x3fffffffffffffffULL;
- sixteen_bytes[1] |= 0x8000000000000000ULL;
+ sixteen_bytes[1] &= 0x3fffffff'ffffffffULL;
+ sixteen_bytes[1] |= 0x80000000'00000000ULL;
return RandomDataToGUIDString(sixteen_bytes);
}
@@ -76,7 +76,7 @@ std::string RandomDataToGUIDString(const uint64_t bytes[2]) {
static_cast<unsigned int>((bytes[0] >> 16) & 0x0000ffff),
static_cast<unsigned int>(bytes[0] & 0x0000ffff),
static_cast<unsigned int>(bytes[1] >> 48),
- bytes[1] & 0x0000ffffffffffffULL);
+ bytes[1] & 0x0000ffff'ffffffffULL);
}
} // namespace base
diff --git a/chromium/base/i18n/encoding_detection.cc b/chromium/base/i18n/encoding_detection.cc
index cad67ce0ac9..fef34e4ab1f 100644
--- a/chromium/base/i18n/encoding_detection.cc
+++ b/chromium/base/i18n/encoding_detection.cc
@@ -4,8 +4,19 @@
#include "base/i18n/encoding_detection.h"
+#include "build/build_config.h"
#include "third_party/ced/src/compact_enc_det/compact_enc_det.h"
+// third_party/ced/src/util/encodings/encodings.h, which is included
+// by the include above, undefs UNICODE because that is a macro used
+// internally in ced. If we later in the same translation unit do
+// anything related to Windows or Windows headers those will then use
+// the ASCII versions which we do not want. To avoid that happening in
+// jumbo builds, we redefine UNICODE again here.
+#if defined(OS_WIN)
+#define UNICODE 1
+#endif // OS_WIN
+
namespace base {
bool DetectEncoding(const std::string& text, std::string* encoding) {
diff --git a/chromium/base/i18n/icu_string_conversions.h b/chromium/base/i18n/icu_string_conversions.h
index 9135da86a21..cbdcb99e4cd 100644
--- a/chromium/base/i18n/icu_string_conversions.h
+++ b/chromium/base/i18n/icu_string_conversions.h
@@ -31,7 +31,7 @@ class OnStringConversionError {
};
private:
- OnStringConversionError();
+ OnStringConversionError() = delete;
};
// Converts between UTF-16 strings and the encoding specified. If the
diff --git a/chromium/base/i18n/message_formatter.h b/chromium/base/i18n/message_formatter.h
index d24d42e2740..36a656d7713 100644
--- a/chromium/base/i18n/message_formatter.h
+++ b/chromium/base/i18n/message_formatter.h
@@ -118,7 +118,7 @@ class BASE_I18N_EXPORT MessageFormatter {
const internal::MessageArg& arg6 = internal::MessageArg());
private:
- MessageFormatter() {}
+ MessageFormatter() = delete;
DISALLOW_COPY_AND_ASSIGN(MessageFormatter);
};
diff --git a/chromium/base/i18n/time_formatting_unittest.cc b/chromium/base/i18n/time_formatting_unittest.cc
index 8224f6605bc..027b7c949da 100644
--- a/chromium/base/i18n/time_formatting_unittest.cc
+++ b/chromium/base/i18n/time_formatting_unittest.cc
@@ -175,6 +175,34 @@ TEST(TimeFormattingTest, TimeFormatTimeOfDayJP) {
EXPECT_EQ(clock24h, TimeFormatTimeOfDay(time));
EXPECT_EQ(k24HourClock, GetHourClockType());
// k{Keep,Drop}AmPm should not affect for 24h clock.
+ EXPECT_EQ(clock24h, TimeFormatTimeOfDayWithHourClockType(time, k24HourClock,
+ kKeepAmPm));
+ EXPECT_EQ(clock24h, TimeFormatTimeOfDayWithHourClockType(time, k24HourClock,
+ kDropAmPm));
+ // k{Keep,Drop}AmPm affects for 12h clock.
+ EXPECT_EQ(clock12h_pm, TimeFormatTimeOfDayWithHourClockType(
+ time, k12HourClock, kKeepAmPm));
+ EXPECT_EQ(clock12h, TimeFormatTimeOfDayWithHourClockType(time, k12HourClock,
+ kDropAmPm));
+}
+
+TEST(TimeFormattingTest, TimeFormatTimeOfDayDE) {
+ // Test for a locale that uses different mark than "AM" and "PM".
+ // As an instance, we use third_party/icu/source/data/locales/de.txt.
+ test::ScopedRestoreICUDefaultLocale restore_locale;
+ i18n::SetICUDefaultLocale("de");
+ ScopedRestoreDefaultTimezone la_time("America/Los_Angeles");
+
+ Time time;
+ EXPECT_TRUE(Time::FromUTCExploded(kTestDateTimeExploded, &time));
+ string16 clock24h(ASCIIToUTF16("15:42"));
+ string16 clock12h_pm(UTF8ToUTF16("3:42 nachm."));
+ string16 clock12h(ASCIIToUTF16("3:42"));
+
+ // The default is 24h clock.
+ EXPECT_EQ(clock24h, TimeFormatTimeOfDay(time));
+ EXPECT_EQ(k24HourClock, GetHourClockType());
+ // k{Keep,Drop}AmPm should not affect for 24h clock.
EXPECT_EQ(clock24h,
TimeFormatTimeOfDayWithHourClockType(time,
k24HourClock,
diff --git a/chromium/base/i18n/timezone.cc b/chromium/base/i18n/timezone.cc
index 95e7aee34c4..8624e07e7c7 100644
--- a/chromium/base/i18n/timezone.cc
+++ b/chromium/base/i18n/timezone.cc
@@ -4,615 +4,31 @@
#include "base/i18n/timezone.h"
-#include <stddef.h>
-#include <string.h>
+#include <memory>
+#include <string>
-#include <map>
-
-#include "base/macros.h"
-#include "base/memory/singleton.h"
-#include "base/strings/string16.h"
-#include "base/strings/utf_string_conversions.h"
+#include "third_party/icu/source/common/unicode/unistr.h"
#include "third_party/icu/source/i18n/unicode/timezone.h"
namespace base {
-namespace {
-
-class TimezoneMap {
- public:
- static TimezoneMap* GetInstance() {
- return Singleton<TimezoneMap>::get();
- }
-
- std::string CountryCodeForTimezone(const std::string& olson_code) {
- std::map<const char*, const char*, CompareCStrings>::iterator iter =
- map_.find(olson_code.c_str());
- if (iter != map_.end())
- return iter->second;
-
- return std::string();
- }
-
- private:
- TimezoneMap() {
- // These mappings are adapted from zone.tab, which is available at
- // <http://www.ietf.org/timezones/data/zone.tab> and is a part of public
- // domain.
- struct OlsonCodeData {
- const char* country_code;
- const char* olson_code;
- };
- static const OlsonCodeData olson_code_data[] = {
- { "AD", "Europe/Andorra" },
- { "AE", "Asia/Dubai" },
- { "AF", "Asia/Kabul" },
- { "AG", "America/Antigua" },
- { "AI", "America/Anguilla" },
- { "AL", "Europe/Tirane" },
- { "AM", "Asia/Yerevan" },
- { "AO", "Africa/Luanda" },
- { "AQ", "Antarctica/McMurdo" },
- { "AQ", "Antarctica/Rothera" },
- { "AQ", "Antarctica/Palmer" },
- { "AQ", "Antarctica/Mawson" },
- { "AQ", "Antarctica/Davis" },
- { "AQ", "Antarctica/Casey" },
- { "AQ", "Antarctica/Vostok" },
- { "AQ", "Antarctica/DumontDUrville" },
- { "AQ", "Antarctica/Syowa" },
- { "AR", "America/Argentina/Buenos_Aires" },
- { "AR", "America/Argentina/Cordoba" },
- { "AR", "America/Argentina/Salta" },
- { "AR", "America/Argentina/Jujuy" },
- { "AR", "America/Argentina/Tucuman" },
- { "AR", "America/Argentina/Catamarca" },
- { "AR", "America/Argentina/La_Rioja" },
- { "AR", "America/Argentina/San_Juan" },
- { "AR", "America/Argentina/Mendoza" },
- { "AR", "America/Argentina/San_Luis" },
- { "AR", "America/Argentina/Rio_Gallegos" },
- { "AR", "America/Argentina/Ushuaia" },
- { "AS", "Pacific/Pago_Pago" },
- { "AT", "Europe/Vienna" },
- { "AU", "Australia/Lord_Howe" },
- { "AU", "Antarctica/Macquarie" },
- { "AU", "Australia/Hobart" },
- { "AU", "Australia/Currie" },
- { "AU", "Australia/Melbourne" },
- { "AU", "Australia/Sydney" },
- { "AU", "Australia/Broken_Hill" },
- { "AU", "Australia/Brisbane" },
- { "AU", "Australia/Lindeman" },
- { "AU", "Australia/Adelaide" },
- { "AU", "Australia/Darwin" },
- { "AU", "Australia/Perth" },
- { "AU", "Australia/Eucla" },
- { "AW", "America/Aruba" },
- { "AX", "Europe/Mariehamn" },
- { "AZ", "Asia/Baku" },
- { "BA", "Europe/Sarajevo" },
- { "BB", "America/Barbados" },
- { "BD", "Asia/Dhaka" },
- { "BE", "Europe/Brussels" },
- { "BF", "Africa/Ouagadougou" },
- { "BG", "Europe/Sofia" },
- { "BH", "Asia/Bahrain" },
- { "BI", "Africa/Bujumbura" },
- { "BJ", "Africa/Porto-Novo" },
- { "BL", "America/St_Barthelemy" },
- { "BM", "Atlantic/Bermuda" },
- { "BN", "Asia/Brunei" },
- { "BO", "America/La_Paz" },
- { "BQ", "America/Kralendijk" },
- { "BR", "America/Noronha" },
- { "BR", "America/Belem" },
- { "BR", "America/Fortaleza" },
- { "BR", "America/Recife" },
- { "BR", "America/Araguaina" },
- { "BR", "America/Maceio" },
- { "BR", "America/Bahia" },
- { "BR", "America/Sao_Paulo" },
- { "BR", "America/Campo_Grande" },
- { "BR", "America/Cuiaba" },
- { "BR", "America/Santarem" },
- { "BR", "America/Porto_Velho" },
- { "BR", "America/Boa_Vista" },
- { "BR", "America/Manaus" },
- { "BR", "America/Eirunepe" },
- { "BR", "America/Rio_Branco" },
- { "BS", "America/Nassau" },
- { "BT", "Asia/Thimphu" },
- { "BW", "Africa/Gaborone" },
- { "BY", "Europe/Minsk" },
- { "BZ", "America/Belize" },
- { "CA", "America/St_Johns" },
- { "CA", "America/Halifax" },
- { "CA", "America/Glace_Bay" },
- { "CA", "America/Moncton" },
- { "CA", "America/Goose_Bay" },
- { "CA", "America/Blanc-Sablon" },
- { "CA", "America/Toronto" },
- { "CA", "America/Nipigon" },
- { "CA", "America/Thunder_Bay" },
- { "CA", "America/Iqaluit" },
- { "CA", "America/Pangnirtung" },
- { "CA", "America/Resolute" },
- { "CA", "America/Atikokan" },
- { "CA", "America/Rankin_Inlet" },
- { "CA", "America/Winnipeg" },
- { "CA", "America/Rainy_River" },
- { "CA", "America/Regina" },
- { "CA", "America/Swift_Current" },
- { "CA", "America/Edmonton" },
- { "CA", "America/Cambridge_Bay" },
- { "CA", "America/Yellowknife" },
- { "CA", "America/Inuvik" },
- { "CA", "America/Creston" },
- { "CA", "America/Dawson_Creek" },
- { "CA", "America/Vancouver" },
- { "CA", "America/Whitehorse" },
- { "CA", "America/Dawson" },
- { "CC", "Indian/Cocos" },
- { "CD", "Africa/Kinshasa" },
- { "CD", "Africa/Lubumbashi" },
- { "CF", "Africa/Bangui" },
- { "CG", "Africa/Brazzaville" },
- { "CH", "Europe/Zurich" },
- { "CI", "Africa/Abidjan" },
- { "CK", "Pacific/Rarotonga" },
- { "CL", "America/Santiago" },
- { "CL", "Pacific/Easter" },
- { "CM", "Africa/Douala" },
- { "CN", "Asia/Shanghai" },
- { "CN", "Asia/Harbin" },
- { "CN", "Asia/Chongqing" },
- { "CN", "Asia/Urumqi" },
- { "CN", "Asia/Kashgar" },
- { "CO", "America/Bogota" },
- { "CR", "America/Costa_Rica" },
- { "CU", "America/Havana" },
- { "CV", "Atlantic/Cape_Verde" },
- { "CW", "America/Curacao" },
- { "CX", "Indian/Christmas" },
- { "CY", "Asia/Nicosia" },
- { "CZ", "Europe/Prague" },
- { "DE", "Europe/Berlin" },
- { "DE", "Europe/Busingen" },
- { "DJ", "Africa/Djibouti" },
- { "DK", "Europe/Copenhagen" },
- { "DM", "America/Dominica" },
- { "DO", "America/Santo_Domingo" },
- { "DZ", "Africa/Algiers" },
- { "EC", "America/Guayaquil" },
- { "EC", "Pacific/Galapagos" },
- { "EE", "Europe/Tallinn" },
- { "EG", "Africa/Cairo" },
- { "EH", "Africa/El_Aaiun" },
- { "ER", "Africa/Asmara" },
- { "ES", "Europe/Madrid" },
- { "ES", "Africa/Ceuta" },
- { "ES", "Atlantic/Canary" },
- { "ET", "Africa/Addis_Ababa" },
- { "FI", "Europe/Helsinki" },
- { "FJ", "Pacific/Fiji" },
- { "FK", "Atlantic/Stanley" },
- { "FM", "Pacific/Chuuk" },
- { "FM", "Pacific/Pohnpei" },
- { "FM", "Pacific/Kosrae" },
- { "FO", "Atlantic/Faroe" },
- { "FR", "Europe/Paris" },
- { "GA", "Africa/Libreville" },
- { "GB", "Europe/London" },
- { "GD", "America/Grenada" },
- { "GE", "Asia/Tbilisi" },
- { "GF", "America/Cayenne" },
- { "GG", "Europe/Guernsey" },
- { "GH", "Africa/Accra" },
- { "GI", "Europe/Gibraltar" },
- { "GL", "America/Godthab" },
- { "GL", "America/Danmarkshavn" },
- { "GL", "America/Scoresbysund" },
- { "GL", "America/Thule" },
- { "GM", "Africa/Banjul" },
- { "GN", "Africa/Conakry" },
- { "GP", "America/Guadeloupe" },
- { "GQ", "Africa/Malabo" },
- { "GR", "Europe/Athens" },
- { "GS", "Atlantic/South_Georgia" },
- { "GT", "America/Guatemala" },
- { "GU", "Pacific/Guam" },
- { "GW", "Africa/Bissau" },
- { "GY", "America/Guyana" },
- { "HK", "Asia/Hong_Kong" },
- { "HN", "America/Tegucigalpa" },
- { "HR", "Europe/Zagreb" },
- { "HT", "America/Port-au-Prince" },
- { "HU", "Europe/Budapest" },
- { "ID", "Asia/Jakarta" },
- { "ID", "Asia/Pontianak" },
- { "ID", "Asia/Makassar" },
- { "ID", "Asia/Jayapura" },
- { "IE", "Europe/Dublin" },
- { "IL", "Asia/Jerusalem" },
- { "IM", "Europe/Isle_of_Man" },
- { "IN", "Asia/Kolkata" },
- { "IO", "Indian/Chagos" },
- { "IQ", "Asia/Baghdad" },
- { "IR", "Asia/Tehran" },
- { "IS", "Atlantic/Reykjavik" },
- { "IT", "Europe/Rome" },
- { "JE", "Europe/Jersey" },
- { "JM", "America/Jamaica" },
- { "JO", "Asia/Amman" },
- { "JP", "Asia/Tokyo" },
- { "KE", "Africa/Nairobi" },
- { "KG", "Asia/Bishkek" },
- { "KH", "Asia/Phnom_Penh" },
- { "KI", "Pacific/Tarawa" },
- { "KI", "Pacific/Enderbury" },
- { "KI", "Pacific/Kiritimati" },
- { "KM", "Indian/Comoro" },
- { "KN", "America/St_Kitts" },
- { "KP", "Asia/Pyongyang" },
- { "KR", "Asia/Seoul" },
- { "KW", "Asia/Kuwait" },
- { "KY", "America/Cayman" },
- { "KZ", "Asia/Almaty" },
- { "KZ", "Asia/Qyzylorda" },
- { "KZ", "Asia/Aqtobe" },
- { "KZ", "Asia/Aqtau" },
- { "KZ", "Asia/Oral" },
- { "LA", "Asia/Vientiane" },
- { "LB", "Asia/Beirut" },
- { "LC", "America/St_Lucia" },
- { "LI", "Europe/Vaduz" },
- { "LK", "Asia/Colombo" },
- { "LR", "Africa/Monrovia" },
- { "LS", "Africa/Maseru" },
- { "LT", "Europe/Vilnius" },
- { "LU", "Europe/Luxembourg" },
- { "LV", "Europe/Riga" },
- { "LY", "Africa/Tripoli" },
- { "MA", "Africa/Casablanca" },
- { "MC", "Europe/Monaco" },
- { "MD", "Europe/Chisinau" },
- { "ME", "Europe/Podgorica" },
- { "MF", "America/Marigot" },
- { "MG", "Indian/Antananarivo" },
- { "MH", "Pacific/Majuro" },
- { "MH", "Pacific/Kwajalein" },
- { "MK", "Europe/Skopje" },
- { "ML", "Africa/Bamako" },
- { "MM", "Asia/Rangoon" },
- { "MN", "Asia/Ulaanbaatar" },
- { "MN", "Asia/Hovd" },
- { "MN", "Asia/Choibalsan" },
- { "MO", "Asia/Macau" },
- { "MP", "Pacific/Saipan" },
- { "MQ", "America/Martinique" },
- { "MR", "Africa/Nouakchott" },
- { "MS", "America/Montserrat" },
- { "MT", "Europe/Malta" },
- { "MU", "Indian/Mauritius" },
- { "MV", "Indian/Maldives" },
- { "MW", "Africa/Blantyre" },
- { "MX", "America/Mexico_City" },
- { "MX", "America/Cancun" },
- { "MX", "America/Merida" },
- { "MX", "America/Monterrey" },
- { "MX", "America/Matamoros" },
- { "MX", "America/Mazatlan" },
- { "MX", "America/Chihuahua" },
- { "MX", "America/Ojinaga" },
- { "MX", "America/Hermosillo" },
- { "MX", "America/Tijuana" },
- { "MX", "America/Santa_Isabel" },
- { "MX", "America/Bahia_Banderas" },
- { "MY", "Asia/Kuala_Lumpur" },
- { "MY", "Asia/Kuching" },
- { "MZ", "Africa/Maputo" },
- { "NA", "Africa/Windhoek" },
- { "NC", "Pacific/Noumea" },
- { "NE", "Africa/Niamey" },
- { "NF", "Pacific/Norfolk" },
- { "NG", "Africa/Lagos" },
- { "NI", "America/Managua" },
- { "NL", "Europe/Amsterdam" },
- { "NO", "Europe/Oslo" },
- { "NP", "Asia/Kathmandu" },
- { "NR", "Pacific/Nauru" },
- { "NU", "Pacific/Niue" },
- { "NZ", "Pacific/Auckland" },
- { "NZ", "Pacific/Chatham" },
- { "OM", "Asia/Muscat" },
- { "PA", "America/Panama" },
- { "PE", "America/Lima" },
- { "PF", "Pacific/Tahiti" },
- { "PF", "Pacific/Marquesas" },
- { "PF", "Pacific/Gambier" },
- { "PG", "Pacific/Port_Moresby" },
- { "PH", "Asia/Manila" },
- { "PK", "Asia/Karachi" },
- { "PL", "Europe/Warsaw" },
- { "PM", "America/Miquelon" },
- { "PN", "Pacific/Pitcairn" },
- { "PR", "America/Puerto_Rico" },
- { "PS", "Asia/Gaza" },
- { "PS", "Asia/Hebron" },
- { "PT", "Europe/Lisbon" },
- { "PT", "Atlantic/Madeira" },
- { "PT", "Atlantic/Azores" },
- { "PW", "Pacific/Palau" },
- { "PY", "America/Asuncion" },
- { "QA", "Asia/Qatar" },
- { "RE", "Indian/Reunion" },
- { "RO", "Europe/Bucharest" },
- { "RS", "Europe/Belgrade" },
- { "RU", "Europe/Kaliningrad" },
- { "RU", "Europe/Moscow" },
- { "RU", "Europe/Volgograd" },
- { "RU", "Europe/Samara" },
- { "RU", "Asia/Yekaterinburg" },
- { "RU", "Asia/Omsk" },
- { "RU", "Asia/Novosibirsk" },
- { "RU", "Asia/Novokuznetsk" },
- { "RU", "Asia/Krasnoyarsk" },
- { "RU", "Asia/Irkutsk" },
- { "RU", "Asia/Yakutsk" },
- { "RU", "Asia/Khandyga" },
- { "RU", "Asia/Vladivostok" },
- { "RU", "Asia/Sakhalin" },
- { "RU", "Asia/Ust-Nera" },
- { "RU", "Asia/Magadan" },
- { "RU", "Asia/Kamchatka" },
- { "RU", "Asia/Anadyr" },
- { "RW", "Africa/Kigali" },
- { "SA", "Asia/Riyadh" },
- { "SB", "Pacific/Guadalcanal" },
- { "SC", "Indian/Mahe" },
- { "SD", "Africa/Khartoum" },
- { "SE", "Europe/Stockholm" },
- { "SG", "Asia/Singapore" },
- { "SH", "Atlantic/St_Helena" },
- { "SI", "Europe/Ljubljana" },
- { "SJ", "Arctic/Longyearbyen" },
- { "SK", "Europe/Bratislava" },
- { "SL", "Africa/Freetown" },
- { "SM", "Europe/San_Marino" },
- { "SN", "Africa/Dakar" },
- { "SO", "Africa/Mogadishu" },
- { "SR", "America/Paramaribo" },
- { "SS", "Africa/Juba" },
- { "ST", "Africa/Sao_Tome" },
- { "SV", "America/El_Salvador" },
- { "SX", "America/Lower_Princes" },
- { "SY", "Asia/Damascus" },
- { "SZ", "Africa/Mbabane" },
- { "TC", "America/Grand_Turk" },
- { "TD", "Africa/Ndjamena" },
- { "TF", "Indian/Kerguelen" },
- { "TG", "Africa/Lome" },
- { "TH", "Asia/Bangkok" },
- { "TJ", "Asia/Dushanbe" },
- { "TK", "Pacific/Fakaofo" },
- { "TL", "Asia/Dili" },
- { "TM", "Asia/Ashgabat" },
- { "TN", "Africa/Tunis" },
- { "TO", "Pacific/Tongatapu" },
- { "TR", "Europe/Istanbul" },
- { "TT", "America/Port_of_Spain" },
- { "TV", "Pacific/Funafuti" },
- { "TW", "Asia/Taipei" },
- { "TZ", "Africa/Dar_es_Salaam" },
- { "UA", "Europe/Kiev" },
- { "UA", "Europe/Uzhgorod" },
- { "UA", "Europe/Zaporozhye" },
- { "UA", "Europe/Simferopol" },
- { "UG", "Africa/Kampala" },
- { "UM", "Pacific/Johnston" },
- { "UM", "Pacific/Midway" },
- { "UM", "Pacific/Wake" },
- { "US", "America/New_York" },
- { "US", "America/Detroit" },
- { "US", "America/Kentucky/Louisville" },
- { "US", "America/Kentucky/Monticello" },
- { "US", "America/Indiana/Indianapolis" },
- { "US", "America/Indiana/Vincennes" },
- { "US", "America/Indiana/Winamac" },
- { "US", "America/Indiana/Marengo" },
- { "US", "America/Indiana/Petersburg" },
- { "US", "America/Indiana/Vevay" },
- { "US", "America/Chicago" },
- { "US", "America/Indiana/Tell_City" },
- { "US", "America/Indiana/Knox" },
- { "US", "America/Menominee" },
- { "US", "America/North_Dakota/Center" },
- { "US", "America/North_Dakota/New_Salem" },
- { "US", "America/North_Dakota/Beulah" },
- { "US", "America/Denver" },
- { "US", "America/Boise" },
- { "US", "America/Phoenix" },
- { "US", "America/Los_Angeles" },
- { "US", "America/Anchorage" },
- { "US", "America/Juneau" },
- { "US", "America/Sitka" },
- { "US", "America/Yakutat" },
- { "US", "America/Nome" },
- { "US", "America/Adak" },
- { "US", "America/Metlakatla" },
- { "US", "Pacific/Honolulu" },
- { "UY", "America/Montevideo" },
- { "UZ", "Asia/Samarkand" },
- { "UZ", "Asia/Tashkent" },
- { "VA", "Europe/Vatican" },
- { "VC", "America/St_Vincent" },
- { "VE", "America/Caracas" },
- { "VG", "America/Tortola" },
- { "VI", "America/St_Thomas" },
- { "VN", "Asia/Ho_Chi_Minh" },
- { "VU", "Pacific/Efate" },
- { "WF", "Pacific/Wallis" },
- { "WS", "Pacific/Apia" },
- { "YE", "Asia/Aden" },
- { "YT", "Indian/Mayotte" },
- { "ZA", "Africa/Johannesburg" },
- { "ZM", "Africa/Lusaka" },
- { "ZW", "Africa/Harare" },
- // The mappings below are custom additions to zone.tab.
- { "GB", "Etc/GMT" },
- { "GB", "Etc/UTC" },
- { "GB", "Etc/UCT" },
- };
-
- for (size_t i = 0; i < arraysize(olson_code_data); ++i)
- map_[olson_code_data[i].olson_code] = olson_code_data[i].country_code;
-
- // These are mapping from old codenames to new codenames. They are also
- // part of public domain, and available at
- // <http://www.ietf.org/timezones/data/backward>.
- struct LinkData {
- const char* old_code;
- const char* new_code;
- };
- static const LinkData link_data[] = {
- { "Africa/Asmera", "Africa/Asmara" },
- { "Africa/Timbuktu", "Africa/Bamako" },
- { "America/Argentina/ComodRivadavia", "America/Argentina/Catamarca" },
- { "America/Atka", "America/Adak" },
- { "America/Buenos_Aires", "America/Argentina/Buenos_Aires" },
- { "America/Catamarca", "America/Argentina/Catamarca" },
- { "America/Coral_Harbour", "America/Atikokan" },
- { "America/Cordoba", "America/Argentina/Cordoba" },
- { "America/Ensenada", "America/Tijuana" },
- { "America/Fort_Wayne", "America/Indiana/Indianapolis" },
- { "America/Indianapolis", "America/Indiana/Indianapolis" },
- { "America/Jujuy", "America/Argentina/Jujuy" },
- { "America/Knox_IN", "America/Indiana/Knox" },
- { "America/Louisville", "America/Kentucky/Louisville" },
- { "America/Mendoza", "America/Argentina/Mendoza" },
- { "America/Porto_Acre", "America/Rio_Branco" },
- { "America/Rosario", "America/Argentina/Cordoba" },
- { "America/Virgin", "America/St_Thomas" },
- { "Asia/Ashkhabad", "Asia/Ashgabat" },
- { "Asia/Chungking", "Asia/Chongqing" },
- { "Asia/Dacca", "Asia/Dhaka" },
- { "Asia/Katmandu", "Asia/Kathmandu" },
- { "Asia/Calcutta", "Asia/Kolkata" },
- { "Asia/Macao", "Asia/Macau" },
- { "Asia/Tel_Aviv", "Asia/Jerusalem" },
- { "Asia/Saigon", "Asia/Ho_Chi_Minh" },
- { "Asia/Thimbu", "Asia/Thimphu" },
- { "Asia/Ujung_Pandang", "Asia/Makassar" },
- { "Asia/Ulan_Bator", "Asia/Ulaanbaatar" },
- { "Atlantic/Faeroe", "Atlantic/Faroe" },
- { "Atlantic/Jan_Mayen", "Europe/Oslo" },
- { "Australia/ACT", "Australia/Sydney" },
- { "Australia/Canberra", "Australia/Sydney" },
- { "Australia/LHI", "Australia/Lord_Howe" },
- { "Australia/NSW", "Australia/Sydney" },
- { "Australia/North", "Australia/Darwin" },
- { "Australia/Queensland", "Australia/Brisbane" },
- { "Australia/South", "Australia/Adelaide" },
- { "Australia/Tasmania", "Australia/Hobart" },
- { "Australia/Victoria", "Australia/Melbourne" },
- { "Australia/West", "Australia/Perth" },
- { "Australia/Yancowinna", "Australia/Broken_Hill" },
- { "Brazil/Acre", "America/Rio_Branco" },
- { "Brazil/DeNoronha", "America/Noronha" },
- { "Brazil/East", "America/Sao_Paulo" },
- { "Brazil/West", "America/Manaus" },
- { "Canada/Atlantic", "America/Halifax" },
- { "Canada/Central", "America/Winnipeg" },
- { "Canada/East-Saskatchewan", "America/Regina" },
- { "Canada/Eastern", "America/Toronto" },
- { "Canada/Mountain", "America/Edmonton" },
- { "Canada/Newfoundland", "America/St_Johns" },
- { "Canada/Pacific", "America/Vancouver" },
- { "Canada/Saskatchewan", "America/Regina" },
- { "Canada/Yukon", "America/Whitehorse" },
- { "Chile/Continental", "America/Santiago" },
- { "Chile/EasterIsland", "Pacific/Easter" },
- { "Cuba", "America/Havana" },
- { "Egypt", "Africa/Cairo" },
- { "Eire", "Europe/Dublin" },
- { "Europe/Belfast", "Europe/London" },
- { "Europe/Tiraspol", "Europe/Chisinau" },
- { "GB", "Europe/London" },
- { "GB-Eire", "Europe/London" },
- { "GMT+0", "Etc/GMT" },
- { "GMT-0", "Etc/GMT" },
- { "GMT0", "Etc/GMT" },
- { "Greenwich", "Etc/GMT" },
- { "Hongkong", "Asia/Hong_Kong" },
- { "Iceland", "Atlantic/Reykjavik" },
- { "Iran", "Asia/Tehran" },
- { "Israel", "Asia/Jerusalem" },
- { "Jamaica", "America/Jamaica" },
- { "Japan", "Asia/Tokyo" },
- { "Kwajalein", "Pacific/Kwajalein" },
- { "Libya", "Africa/Tripoli" },
- { "Mexico/BajaNorte", "America/Tijuana" },
- { "Mexico/BajaSur", "America/Mazatlan" },
- { "Mexico/General", "America/Mexico_City" },
- { "NZ", "Pacific/Auckland" },
- { "NZ-CHAT", "Pacific/Chatham" },
- { "Navajo", "America/Denver" },
- { "PRC", "Asia/Shanghai" },
- { "Pacific/Samoa", "Pacific/Pago_Pago" },
- { "Pacific/Yap", "Pacific/Chuuk" },
- { "Pacific/Truk", "Pacific/Chuuk" },
- { "Pacific/Ponape", "Pacific/Pohnpei" },
- { "Poland", "Europe/Warsaw" },
- { "Portugal", "Europe/Lisbon" },
- { "ROC", "Asia/Taipei" },
- { "ROK", "Asia/Seoul" },
- { "Singapore", "Asia/Singapore" },
- { "Turkey", "Europe/Istanbul" },
- { "UCT", "Etc/UCT" },
- { "US/Alaska", "America/Anchorage" },
- { "US/Aleutian", "America/Adak" },
- { "US/Arizona", "America/Phoenix" },
- { "US/Central", "America/Chicago" },
- { "US/East-Indiana", "America/Indiana/Indianapolis" },
- { "US/Eastern", "America/New_York" },
- { "US/Hawaii", "Pacific/Honolulu" },
- { "US/Indiana-Starke", "America/Indiana/Knox" },
- { "US/Michigan", "America/Detroit" },
- { "US/Mountain", "America/Denver" },
- { "US/Pacific", "America/Los_Angeles" },
- { "US/Samoa", "Pacific/Pago_Pago" },
- { "UTC", "Etc/UTC" },
- { "Universal", "Etc/UTC" },
- { "W-SU", "Europe/Moscow" },
- { "Zulu", "Etc/UTC" },
- };
-
- for (size_t i = 0; i < arraysize(link_data); ++i)
- map_[link_data[i].old_code] = map_[link_data[i].new_code];
- }
-
- friend struct DefaultSingletonTraits<TimezoneMap>;
-
- struct CompareCStrings {
- bool operator()(const char* str1, const char* str2) const {
- return strcmp(str1, str2) < 0;
- }
- };
- std::map<const char*, const char*, CompareCStrings> map_;
-
- DISALLOW_COPY_AND_ASSIGN(TimezoneMap);
-};
-
-} // namespace
-
std::string CountryCodeForCurrentTimezone() {
std::unique_ptr<icu::TimeZone> zone(icu::TimeZone::createDefault());
icu::UnicodeString id;
- zone->getID(id);
- std::string olson_code;
- return TimezoneMap::GetInstance()->CountryCodeForTimezone(
- id.toUTF8String(olson_code));
+ // ICU returns '001' (world) for Etc/GMT. Preserve the old behavior
+ // only for Etc/GMT while returning an empty string for Etc/UTC and
+ // Etc/UCT because they're less likely to be chosen by mistake in UK in
+  // place of Europe/London (British Time).
+ if (zone->getID(id) == UNICODE_STRING_SIMPLE("Etc/GMT"))
+ return "GB";
+ char region_code[4];
+ UErrorCode status = U_ZERO_ERROR;
+ int length = zone->getRegion(id, region_code, 4, status);
+ // Return an empty string if region_code is a 3-digit numeric code such
+ // as 001 (World) for Etc/UTC, Etc/UCT.
+ return (U_SUCCESS(status) && length == 2)
+ ? std::string(region_code, static_cast<size_t>(length))
+ : std::string();
}
} // namespace base
diff --git a/chromium/base/i18n/timezone.h b/chromium/base/i18n/timezone.h
index f7fda941188..7557d44f364 100644
--- a/chromium/base/i18n/timezone.h
+++ b/chromium/base/i18n/timezone.h
@@ -11,9 +11,12 @@
namespace base {
-// Checks the system timezone and turns it into a two-character ASCII country
-// code. This may fail (for example, it will always fail on Android), in which
-// case it will return an empty string.
+// Checks the system timezone and turns it into a two-character ISO 3166 country
+// code. This may fail (for example, it used to always fail on Android), in
+// which case it will return an empty string. It'll also return an empty string
+// when the timezone is Etc/UTC or Etc/UCT, but will return "GB" for Etc/GMT
+// because people in the UK tend to select Etc/GMT by mistake instead of
+// Europe/London (British Time).
BASE_I18N_EXPORT std::string CountryCodeForCurrentTimezone();
} // namespace base
diff --git a/chromium/base/i18n/timezone_unittest.cc b/chromium/base/i18n/timezone_unittest.cc
index 2cdcc422985..57467dced1b 100644
--- a/chromium/base/i18n/timezone_unittest.cc
+++ b/chromium/base/i18n/timezone_unittest.cc
@@ -11,10 +11,16 @@ namespace {
TEST(TimezoneTest, CountryCodeForCurrentTimezone) {
std::string country_code = CountryCodeForCurrentTimezone();
- // On some systems (such as Android or some flavors of Linux), icu may come up
- // empty.
+ // On some systems (such as Android or some flavors of Linux), ICU may come up
+ // empty. With https://chromium-review.googlesource.com/c/512282/ , ICU will
+ // not fail any more. See also http://bugs.icu-project.org/trac/ticket/13208 .
+ // Even with that, ICU returns '001' (world) for region-agnostic timezones
+ // such as Etc/UTC and |CountryCodeForCurrentTimezone| returns an empty
+ // string so that the next fallback can be tried by a customer.
+ // TODO(jshin): Revise this to test for actual timezones using
+ // ScopedRestoreICUDefaultTimezone.
if (!country_code.empty())
- EXPECT_EQ(2U, country_code.size());
+ EXPECT_EQ(2U, country_code.size()) << "country_code = " << country_code;
}
} // namespace
diff --git a/chromium/base/json/json_parser.cc b/chromium/base/json/json_parser.cc
index ceb64a62c05..d6bea4a3bcd 100644
--- a/chromium/base/json/json_parser.cc
+++ b/chromium/base/json/json_parser.cc
@@ -6,10 +6,11 @@
#include <cmath>
#include <utility>
+#include <vector>
+#include "base/debug/alias.h"
#include "base/logging.h"
#include "base/macros.h"
-#include "base/memory/ptr_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_piece.h"
#include "base/strings/string_util.h"
@@ -72,7 +73,7 @@ JSONParser::JSONParser(int options)
JSONParser::~JSONParser() = default;
-std::unique_ptr<Value> JSONParser::Parse(StringPiece input) {
+Optional<Value> JSONParser::Parse(StringPiece input) {
start_pos_ = input.data();
pos_ = start_pos_;
end_pos_ = start_pos_ + input.length();
@@ -95,15 +96,15 @@ std::unique_ptr<Value> JSONParser::Parse(StringPiece input) {
}
// Parse the first and any nested tokens.
- std::unique_ptr<Value> root(ParseNextToken());
+ Optional<Value> root(ParseNextToken());
if (!root)
- return nullptr;
+ return nullopt;
// Make sure the input stream is at an end.
if (GetNextToken() != T_END_OF_INPUT) {
if (!CanConsume(1) || (NextChar() && GetNextToken() != T_END_OF_INPUT)) {
ReportError(JSONReader::JSON_UNEXPECTED_DATA_AFTER_ROOT, 1);
- return nullptr;
+ return nullopt;
}
}
@@ -302,11 +303,11 @@ bool JSONParser::EatComment() {
return false;
}
-std::unique_ptr<Value> JSONParser::ParseNextToken() {
+Optional<Value> JSONParser::ParseNextToken() {
return ParseToken(GetNextToken());
}
-std::unique_ptr<Value> JSONParser::ParseToken(Token token) {
+Optional<Value> JSONParser::ParseToken(Token token) {
switch (token) {
case T_OBJECT_BEGIN:
return ConsumeDictionary();
@@ -322,20 +323,30 @@ std::unique_ptr<Value> JSONParser::ParseToken(Token token) {
return ConsumeLiteral();
default:
ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 1);
- return nullptr;
+ return nullopt;
}
}
-std::unique_ptr<Value> JSONParser::ConsumeDictionary() {
+Optional<Value> JSONParser::ConsumeDictionary() {
+ // Attempt to alias 4KB of the buffer about to be read. Need to alias multiple
+ // sites as the crashpad heuristics only grab a few hundred bytes in
+ // front/behind heap pointers on the stack.
+ // TODO(gab): Remove this after diagnosis of https://crbug.com/791487 is
+ // complete.
+ const char* initial_pos[16];
+ for (size_t i = 0; i < arraysize(initial_pos); ++i)
+ initial_pos[i] = pos_ + i * 256;
+ debug::Alias(&initial_pos);
+
if (*pos_ != '{') {
ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 1);
- return nullptr;
+ return nullopt;
}
StackMarker depth_check(&stack_depth_);
if (depth_check.IsTooDeep()) {
ReportError(JSONReader::JSON_TOO_MUCH_NESTING, 1);
- return nullptr;
+ return nullopt;
}
std::vector<Value::DictStorage::value_type> dict_storage;
@@ -345,13 +356,13 @@ std::unique_ptr<Value> JSONParser::ConsumeDictionary() {
while (token != T_OBJECT_END) {
if (token != T_STRING) {
ReportError(JSONReader::JSON_UNQUOTED_DICTIONARY_KEY, 1);
- return nullptr;
+ return nullopt;
}
// First consume the key.
StringBuilder key;
if (!ConsumeStringRaw(&key)) {
- return nullptr;
+ return nullopt;
}
// Read the separator.
@@ -359,18 +370,19 @@ std::unique_ptr<Value> JSONParser::ConsumeDictionary() {
token = GetNextToken();
if (token != T_OBJECT_PAIR_SEPARATOR) {
ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
- return nullptr;
+ return nullopt;
}
// The next token is the value. Ownership transfers to |dict|.
NextChar();
- std::unique_ptr<Value> value = ParseNextToken();
+ Optional<Value> value = ParseNextToken();
if (!value) {
// ReportError from deeper level.
- return nullptr;
+ return nullopt;
}
- dict_storage.emplace_back(key.DestructiveAsString(), std::move(value));
+ dict_storage.emplace_back(key.DestructiveAsString(),
+ std::make_unique<Value>(std::move(*value)));
NextChar();
token = GetNextToken();
@@ -379,42 +391,41 @@ std::unique_ptr<Value> JSONParser::ConsumeDictionary() {
token = GetNextToken();
if (token == T_OBJECT_END && !(options_ & JSON_ALLOW_TRAILING_COMMAS)) {
ReportError(JSONReader::JSON_TRAILING_COMMA, 1);
- return nullptr;
+ return nullopt;
}
} else if (token != T_OBJECT_END) {
ReportError(JSONReader::JSON_SYNTAX_ERROR, 0);
- return nullptr;
+ return nullopt;
}
}
- return std::make_unique<Value>(
- Value::DictStorage(std::move(dict_storage), KEEP_LAST_OF_DUPES));
+ return Value(Value::DictStorage(std::move(dict_storage), KEEP_LAST_OF_DUPES));
}
-std::unique_ptr<Value> JSONParser::ConsumeList() {
+Optional<Value> JSONParser::ConsumeList() {
if (*pos_ != '[') {
ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 1);
- return nullptr;
+ return nullopt;
}
StackMarker depth_check(&stack_depth_);
if (depth_check.IsTooDeep()) {
ReportError(JSONReader::JSON_TOO_MUCH_NESTING, 1);
- return nullptr;
+ return nullopt;
}
- std::unique_ptr<ListValue> list(new ListValue);
+ Value::ListStorage list_storage;
NextChar();
Token token = GetNextToken();
while (token != T_ARRAY_END) {
- std::unique_ptr<Value> item = ParseToken(token);
+ Optional<Value> item = ParseToken(token);
if (!item) {
// ReportError from deeper level.
- return nullptr;
+ return nullopt;
}
- list->Append(std::move(item));
+ list_storage.push_back(std::move(*item));
NextChar();
token = GetNextToken();
@@ -423,23 +434,23 @@ std::unique_ptr<Value> JSONParser::ConsumeList() {
token = GetNextToken();
if (token == T_ARRAY_END && !(options_ & JSON_ALLOW_TRAILING_COMMAS)) {
ReportError(JSONReader::JSON_TRAILING_COMMA, 1);
- return nullptr;
+ return nullopt;
}
} else if (token != T_ARRAY_END) {
ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
- return nullptr;
+ return nullopt;
}
}
- return std::move(list);
+ return Value(std::move(list_storage));
}
-std::unique_ptr<Value> JSONParser::ConsumeString() {
+Optional<Value> JSONParser::ConsumeString() {
StringBuilder string;
if (!ConsumeStringRaw(&string))
- return nullptr;
+ return nullopt;
- return std::make_unique<Value>(string.DestructiveAsString());
+ return Value(string.DestructiveAsString());
}
bool JSONParser::ConsumeStringRaw(StringBuilder* out) {
@@ -690,7 +701,7 @@ void JSONParser::DecodeUTF8(const int32_t& point, StringBuilder* dest) {
}
}
-std::unique_ptr<Value> JSONParser::ConsumeNumber() {
+Optional<Value> JSONParser::ConsumeNumber() {
const char* num_start = pos_;
const int start_index = index_;
int end_index = start_index;
@@ -700,7 +711,7 @@ std::unique_ptr<Value> JSONParser::ConsumeNumber() {
if (!ReadInt(false)) {
ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
- return nullptr;
+ return nullopt;
}
end_index = index_;
@@ -709,7 +720,7 @@ std::unique_ptr<Value> JSONParser::ConsumeNumber() {
NextChar();
if (!ReadInt(true)) {
ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
- return nullptr;
+ return nullopt;
}
end_index = index_;
}
@@ -719,14 +730,14 @@ std::unique_ptr<Value> JSONParser::ConsumeNumber() {
NextChar();
if (!CanConsume(1)) {
ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
- return nullptr;
+ return nullopt;
}
if (*pos_ == '-' || *pos_ == '+') {
NextChar();
}
if (!ReadInt(true)) {
ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
- return nullptr;
+ return nullopt;
}
end_index = index_;
}
@@ -746,7 +757,7 @@ std::unique_ptr<Value> JSONParser::ConsumeNumber() {
break;
default:
ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
- return nullptr;
+ return nullopt;
}
pos_ = exit_pos;
@@ -756,15 +767,15 @@ std::unique_ptr<Value> JSONParser::ConsumeNumber() {
int num_int;
if (StringToInt(num_string, &num_int))
- return std::make_unique<Value>(num_int);
+ return Value(num_int);
double num_double;
if (StringToDouble(num_string.as_string(), &num_double) &&
std::isfinite(num_double)) {
- return std::make_unique<Value>(num_double);
+ return Value(num_double);
}
- return nullptr;
+ return nullopt;
}
bool JSONParser::ReadInt(bool allow_leading_zeros) {
@@ -791,7 +802,7 @@ bool JSONParser::ReadInt(bool allow_leading_zeros) {
return true;
}
-std::unique_ptr<Value> JSONParser::ConsumeLiteral() {
+Optional<Value> JSONParser::ConsumeLiteral() {
switch (*pos_) {
case 't': {
const char kTrueLiteral[] = "true";
@@ -799,10 +810,10 @@ std::unique_ptr<Value> JSONParser::ConsumeLiteral() {
if (!CanConsume(kTrueLen) ||
!StringsAreEqual(pos_, kTrueLiteral, kTrueLen)) {
ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
- return nullptr;
+ return nullopt;
}
NextNChars(kTrueLen - 1);
- return std::make_unique<Value>(true);
+ return Value(true);
}
case 'f': {
const char kFalseLiteral[] = "false";
@@ -810,10 +821,10 @@ std::unique_ptr<Value> JSONParser::ConsumeLiteral() {
if (!CanConsume(kFalseLen) ||
!StringsAreEqual(pos_, kFalseLiteral, kFalseLen)) {
ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
- return nullptr;
+ return nullopt;
}
NextNChars(kFalseLen - 1);
- return std::make_unique<Value>(false);
+ return Value(false);
}
case 'n': {
const char kNullLiteral[] = "null";
@@ -821,14 +832,14 @@ std::unique_ptr<Value> JSONParser::ConsumeLiteral() {
if (!CanConsume(kNullLen) ||
!StringsAreEqual(pos_, kNullLiteral, kNullLen)) {
ReportError(JSONReader::JSON_SYNTAX_ERROR, 1);
- return nullptr;
+ return nullopt;
}
NextNChars(kNullLen - 1);
- return std::make_unique<Value>();
+ return Value(Value::Type::NONE);
}
default:
ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 1);
- return nullptr;
+ return nullopt;
}
}
diff --git a/chromium/base/json/json_parser.h b/chromium/base/json/json_parser.h
index d27bf7d707c..135987955b0 100644
--- a/chromium/base/json/json_parser.h
+++ b/chromium/base/json/json_parser.h
@@ -30,14 +30,11 @@ class JSONParserTest;
// The implementation behind the JSONReader interface. This class is not meant
// to be used directly; it encapsulates logic that need not be exposed publicly.
//
-// This parser guarantees O(n) time through the input string. It also optimizes
-// base::Value by using StringPiece where possible when returning Value
-// objects by using "hidden roots," discussed in the implementation.
-//
-// Iteration happens on the byte level, with the functions CanConsume and
-// NextChar. The conversion from byte to JSON token happens without advancing
-// the parser in GetNextToken/ParseToken, that is tokenization operates on
-// the current parser position without advancing.
+// This parser guarantees O(n) time through the input string. Iteration happens
+// on the byte level, with the functions CanConsume and NextChar. The conversion
+// from byte to JSON token happens without advancing the parser in
+// GetNextToken/ParseToken, that is tokenization operates on the current parser
+// position without advancing.
//
// Built on top of these are a family of Consume functions that iterate
// internally. Invariant: on entry of a Consume function, the parser is wound
@@ -54,7 +51,7 @@ class BASE_EXPORT JSONParser {
// result as a Value.
// Wrap this in base::FooValue::From() to check the Value is of type Foo and
// convert to a FooValue at the same time.
- std::unique_ptr<Value> Parse(StringPiece input);
+ Optional<Value> Parse(StringPiece input);
// Returns the error code.
JSONReader::JsonParseError error_code() const;
@@ -163,22 +160,22 @@ class BASE_EXPORT JSONParser {
bool EatComment();
// Calls GetNextToken() and then ParseToken().
- std::unique_ptr<Value> ParseNextToken();
+ Optional<Value> ParseNextToken();
// Takes a token that represents the start of a Value ("a structural token"
// in RFC terms) and consumes it, returning the result as a Value.
- std::unique_ptr<Value> ParseToken(Token token);
+ Optional<Value> ParseToken(Token token);
// Assuming that the parser is currently wound to '{', this parses a JSON
- // object into a DictionaryValue.
- std::unique_ptr<Value> ConsumeDictionary();
+ // object into a Value.
+ Optional<Value> ConsumeDictionary();
// Assuming that the parser is wound to '[', this parses a JSON list into a
- // std::unique_ptr<ListValue>.
- std::unique_ptr<Value> ConsumeList();
+ // Value.
+ Optional<Value> ConsumeList();
// Calls through ConsumeStringRaw and wraps it in a value.
- std::unique_ptr<Value> ConsumeString();
+ Optional<Value> ConsumeString();
// Assuming that the parser is wound to a double quote, this parses a string,
// decoding any escape sequences and converts UTF-16 to UTF-8. Returns true on
@@ -198,14 +195,14 @@ class BASE_EXPORT JSONParser {
// Assuming that the parser is wound to the start of a valid JSON number,
// this parses and converts it to either an int or double value.
- std::unique_ptr<Value> ConsumeNumber();
+ Optional<Value> ConsumeNumber();
// Helper that reads characters that are ints. Returns true if a number was
// read and false on error.
bool ReadInt(bool allow_leading_zeros);
// Consumes the literal values of |true|, |false|, and |null|, assuming the
// parser is wound to the first character of any of those.
- std::unique_ptr<Value> ConsumeLiteral();
+ Optional<Value> ConsumeLiteral();
// Compares two string buffers of a given length.
static bool StringsAreEqual(const char* left, const char* right, size_t len);
diff --git a/chromium/base/json/json_parser_unittest.cc b/chromium/base/json/json_parser_unittest.cc
index 1f854112c82..e2f9e32e0e1 100644
--- a/chromium/base/json/json_parser_unittest.cc
+++ b/chromium/base/json/json_parser_unittest.cc
@@ -10,6 +10,7 @@
#include "base/json/json_reader.h"
#include "base/memory/ptr_util.h"
+#include "base/optional.h"
#include "base/strings/stringprintf.h"
#include "base/values.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -61,12 +62,12 @@ TEST_F(JSONParserTest, NextChar) {
TEST_F(JSONParserTest, ConsumeString) {
std::string input("\"test\",|");
std::unique_ptr<JSONParser> parser(NewTestParser(input));
- std::unique_ptr<Value> value(parser->ConsumeString());
+ Optional<Value> value(parser->ConsumeString());
EXPECT_EQ('"', *parser->pos_);
TestLastThree(parser.get());
- ASSERT_TRUE(value.get());
+ ASSERT_TRUE(value);
std::string str;
EXPECT_TRUE(value->GetAsString(&str));
EXPECT_EQ("test", str);
@@ -75,12 +76,12 @@ TEST_F(JSONParserTest, ConsumeString) {
TEST_F(JSONParserTest, ConsumeList) {
std::string input("[true, false],|");
std::unique_ptr<JSONParser> parser(NewTestParser(input));
- std::unique_ptr<Value> value(parser->ConsumeList());
+ Optional<Value> value(parser->ConsumeList());
EXPECT_EQ(']', *parser->pos_);
TestLastThree(parser.get());
- ASSERT_TRUE(value.get());
+ ASSERT_TRUE(value);
base::ListValue* list;
EXPECT_TRUE(value->GetAsList(&list));
EXPECT_EQ(2u, list->GetSize());
@@ -89,12 +90,12 @@ TEST_F(JSONParserTest, ConsumeList) {
TEST_F(JSONParserTest, ConsumeDictionary) {
std::string input("{\"abc\":\"def\"},|");
std::unique_ptr<JSONParser> parser(NewTestParser(input));
- std::unique_ptr<Value> value(parser->ConsumeDictionary());
+ Optional<Value> value(parser->ConsumeDictionary());
EXPECT_EQ('}', *parser->pos_);
TestLastThree(parser.get());
- ASSERT_TRUE(value.get());
+ ASSERT_TRUE(value);
base::DictionaryValue* dict;
EXPECT_TRUE(value->GetAsDictionary(&dict));
std::string str;
@@ -106,12 +107,12 @@ TEST_F(JSONParserTest, ConsumeLiterals) {
// Literal |true|.
std::string input("true,|");
std::unique_ptr<JSONParser> parser(NewTestParser(input));
- std::unique_ptr<Value> value(parser->ConsumeLiteral());
+ Optional<Value> value(parser->ConsumeLiteral());
EXPECT_EQ('e', *parser->pos_);
TestLastThree(parser.get());
- ASSERT_TRUE(value.get());
+ ASSERT_TRUE(value);
bool bool_value = false;
EXPECT_TRUE(value->GetAsBoolean(&bool_value));
EXPECT_TRUE(bool_value);
@@ -124,7 +125,7 @@ TEST_F(JSONParserTest, ConsumeLiterals) {
TestLastThree(parser.get());
- ASSERT_TRUE(value.get());
+ ASSERT_TRUE(value);
EXPECT_TRUE(value->GetAsBoolean(&bool_value));
EXPECT_FALSE(bool_value);
@@ -136,20 +137,20 @@ TEST_F(JSONParserTest, ConsumeLiterals) {
TestLastThree(parser.get());
- ASSERT_TRUE(value.get());
- EXPECT_TRUE(value->IsType(Value::Type::NONE));
+ ASSERT_TRUE(value);
+ EXPECT_TRUE(value->is_none());
}
TEST_F(JSONParserTest, ConsumeNumbers) {
// Integer.
std::string input("1234,|");
std::unique_ptr<JSONParser> parser(NewTestParser(input));
- std::unique_ptr<Value> value(parser->ConsumeNumber());
+ Optional<Value> value(parser->ConsumeNumber());
EXPECT_EQ('4', *parser->pos_);
TestLastThree(parser.get());
- ASSERT_TRUE(value.get());
+ ASSERT_TRUE(value);
int number_i;
EXPECT_TRUE(value->GetAsInteger(&number_i));
EXPECT_EQ(1234, number_i);
@@ -162,7 +163,7 @@ TEST_F(JSONParserTest, ConsumeNumbers) {
TestLastThree(parser.get());
- ASSERT_TRUE(value.get());
+ ASSERT_TRUE(value);
EXPECT_TRUE(value->GetAsInteger(&number_i));
EXPECT_EQ(-1234, number_i);
@@ -174,7 +175,7 @@ TEST_F(JSONParserTest, ConsumeNumbers) {
TestLastThree(parser.get());
- ASSERT_TRUE(value.get());
+ ASSERT_TRUE(value);
double number_d;
EXPECT_TRUE(value->GetAsDouble(&number_d));
EXPECT_EQ(12.34, number_d);
@@ -187,7 +188,7 @@ TEST_F(JSONParserTest, ConsumeNumbers) {
TestLastThree(parser.get());
- ASSERT_TRUE(value.get());
+ ASSERT_TRUE(value);
EXPECT_TRUE(value->GetAsDouble(&number_d));
EXPECT_EQ(42000, number_d);
@@ -199,7 +200,7 @@ TEST_F(JSONParserTest, ConsumeNumbers) {
TestLastThree(parser.get());
- ASSERT_TRUE(value.get());
+ ASSERT_TRUE(value);
EXPECT_TRUE(value->GetAsDouble(&number_d));
EXPECT_EQ(3.14159, number_d);
@@ -211,7 +212,7 @@ TEST_F(JSONParserTest, ConsumeNumbers) {
TestLastThree(parser.get());
- ASSERT_TRUE(value.get());
+ ASSERT_TRUE(value);
EXPECT_TRUE(value->GetAsDouble(&number_d));
EXPECT_EQ(420, number_d);
}
@@ -359,8 +360,8 @@ TEST_F(JSONParserTest, ReplaceInvalidCharacters) {
const std::string quoted_bogus_char = "\"" + bogus_char + "\"";
std::unique_ptr<JSONParser> parser(
NewTestParser(quoted_bogus_char, JSON_REPLACE_INVALID_CHARACTERS));
- std::unique_ptr<Value> value(parser->ConsumeString());
- ASSERT_TRUE(value.get());
+ Optional<Value> value(parser->ConsumeString());
+ ASSERT_TRUE(value);
std::string str;
EXPECT_TRUE(value->GetAsString(&str));
EXPECT_EQ(kUnicodeReplacementString, str);
@@ -370,8 +371,8 @@ TEST_F(JSONParserTest, ReplaceInvalidUTF16EscapeSequence) {
const std::string invalid = "\"\\ufffe\"";
std::unique_ptr<JSONParser> parser(
NewTestParser(invalid, JSON_REPLACE_INVALID_CHARACTERS));
- std::unique_ptr<Value> value(parser->ConsumeString());
- ASSERT_TRUE(value.get());
+ Optional<Value> value(parser->ConsumeString());
+ ASSERT_TRUE(value);
std::string str;
EXPECT_TRUE(value->GetAsString(&str));
EXPECT_EQ(kUnicodeReplacementString, str);
diff --git a/chromium/base/json/json_reader.cc b/chromium/base/json/json_reader.cc
index e18f4a55a5a..aa6a737da3f 100644
--- a/chromium/base/json/json_reader.cc
+++ b/chromium/base/json/json_reader.cc
@@ -4,8 +4,12 @@
#include "base/json/json_reader.h"
+#include <utility>
+#include <vector>
+
#include "base/json/json_parser.h"
#include "base/logging.h"
+#include "base/optional.h"
#include "base/values.h"
namespace base {
@@ -44,26 +48,28 @@ JSONReader::~JSONReader() = default;
// static
std::unique_ptr<Value> JSONReader::Read(StringPiece json) {
internal::JSONParser parser(JSON_PARSE_RFC);
- return parser.Parse(json);
+ Optional<Value> root = parser.Parse(json);
+ return root ? std::make_unique<Value>(std::move(*root)) : nullptr;
}
// static
std::unique_ptr<Value> JSONReader::Read(StringPiece json, int options) {
internal::JSONParser parser(options);
- return parser.Parse(json);
+ Optional<Value> root = parser.Parse(json);
+ return root ? std::make_unique<Value>(std::move(*root)) : nullptr;
}
// static
std::unique_ptr<Value> JSONReader::ReadAndReturnError(
- const StringPiece& json,
+ StringPiece json,
int options,
int* error_code_out,
std::string* error_msg_out,
int* error_line_out,
int* error_column_out) {
internal::JSONParser parser(options);
- std::unique_ptr<Value> root(parser.Parse(json));
+ Optional<Value> root = parser.Parse(json);
if (!root) {
if (error_code_out)
*error_code_out = parser.error_code();
@@ -75,7 +81,7 @@ std::unique_ptr<Value> JSONReader::ReadAndReturnError(
*error_column_out = parser.error_column();
}
- return root;
+ return root ? std::make_unique<Value>(std::move(*root)) : nullptr;
}
// static
@@ -106,7 +112,8 @@ std::string JSONReader::ErrorCodeToString(JsonParseError error_code) {
}
std::unique_ptr<Value> JSONReader::ReadToValue(StringPiece json) {
- return parser_->Parse(json);
+ Optional<Value> value = parser_->Parse(json);
+ return value ? std::make_unique<Value>(std::move(*value)) : nullptr;
}
JSONReader::JsonParseError JSONReader::error_code() const {
diff --git a/chromium/base/json/json_reader.h b/chromium/base/json/json_reader.h
index e1a9524331b..d3719d81b50 100644
--- a/chromium/base/json/json_reader.h
+++ b/chromium/base/json/json_reader.h
@@ -104,7 +104,7 @@ class BASE_EXPORT JSONReader {
// an error code and a formatted error message (including error location if
// appropriate). Otherwise, they will be unmodified.
static std::unique_ptr<Value> ReadAndReturnError(
- const StringPiece& json,
+ StringPiece json,
int options, // JSONParserOptions
int* error_code_out,
std::string* error_msg_out,
diff --git a/chromium/base/json/json_reader_unittest.cc b/chromium/base/json/json_reader_unittest.cc
index 5c206507a8a..ec23086e1b2 100644
--- a/chromium/base/json/json_reader_unittest.cc
+++ b/chromium/base/json/json_reader_unittest.cc
@@ -26,7 +26,7 @@ TEST(JSONReaderTest, Reading) {
// some whitespace checking
std::unique_ptr<Value> root = JSONReader().ReadToValue(" null ");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::Type::NONE));
+ EXPECT_TRUE(root->is_none());
}
{
@@ -38,23 +38,23 @@ TEST(JSONReaderTest, Reading) {
// Simple bool
std::unique_ptr<Value> root = JSONReader().ReadToValue("true ");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::Type::BOOLEAN));
+ EXPECT_TRUE(root->is_bool());
}
{
// Embedded comment
std::unique_ptr<Value> root = JSONReader().ReadToValue("/* comment */null");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::Type::NONE));
+ EXPECT_TRUE(root->is_none());
root = JSONReader().ReadToValue("40 /* comment */");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::Type::INTEGER));
+ EXPECT_TRUE(root->is_int());
root = JSONReader().ReadToValue("true // comment");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::Type::BOOLEAN));
+ EXPECT_TRUE(root->is_bool());
root = JSONReader().ReadToValue("/* comment */\"sample string\"");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::Type::STRING));
+ EXPECT_TRUE(root->is_string());
std::string value;
EXPECT_TRUE(root->GetAsString(&value));
EXPECT_EQ("sample string", value);
@@ -72,7 +72,7 @@ TEST(JSONReaderTest, Reading) {
EXPECT_EQ(3u, list->GetSize());
root = JSONReader().ReadToValue("/* comment **/42");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::Type::INTEGER));
+ EXPECT_TRUE(root->is_int());
EXPECT_TRUE(root->GetAsInteger(&int_val));
EXPECT_EQ(42, int_val);
root = JSONReader().ReadToValue(
@@ -80,7 +80,7 @@ TEST(JSONReaderTest, Reading) {
"// */ 43\n"
"44");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::Type::INTEGER));
+ EXPECT_TRUE(root->is_int());
EXPECT_TRUE(root->GetAsInteger(&int_val));
EXPECT_EQ(44, int_val);
}
@@ -89,7 +89,7 @@ TEST(JSONReaderTest, Reading) {
// Test number formats
std::unique_ptr<Value> root = JSONReader().ReadToValue("43");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::Type::INTEGER));
+ EXPECT_TRUE(root->is_int());
int int_val = 0;
EXPECT_TRUE(root->GetAsInteger(&int_val));
EXPECT_EQ(43, int_val);
@@ -107,7 +107,7 @@ TEST(JSONReaderTest, Reading) {
// clause).
std::unique_ptr<Value> root = JSONReader().ReadToValue("0");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::Type::INTEGER));
+ EXPECT_TRUE(root->is_int());
int int_val = 1;
EXPECT_TRUE(root->GetAsInteger(&int_val));
EXPECT_EQ(0, int_val);
@@ -119,13 +119,13 @@ TEST(JSONReaderTest, Reading) {
std::unique_ptr<Value> root = JSONReader().ReadToValue("2147483648");
ASSERT_TRUE(root);
double double_val;
- EXPECT_TRUE(root->IsType(Value::Type::DOUBLE));
+ EXPECT_TRUE(root->is_double());
double_val = 0.0;
EXPECT_TRUE(root->GetAsDouble(&double_val));
EXPECT_DOUBLE_EQ(2147483648.0, double_val);
root = JSONReader().ReadToValue("-2147483649");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::Type::DOUBLE));
+ EXPECT_TRUE(root->is_double());
double_val = 0.0;
EXPECT_TRUE(root->GetAsDouble(&double_val));
EXPECT_DOUBLE_EQ(-2147483649.0, double_val);
@@ -135,42 +135,42 @@ TEST(JSONReaderTest, Reading) {
// Parse a double
std::unique_ptr<Value> root = JSONReader().ReadToValue("43.1");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::Type::DOUBLE));
+ EXPECT_TRUE(root->is_double());
double double_val = 0.0;
EXPECT_TRUE(root->GetAsDouble(&double_val));
EXPECT_DOUBLE_EQ(43.1, double_val);
root = JSONReader().ReadToValue("4.3e-1");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::Type::DOUBLE));
+ EXPECT_TRUE(root->is_double());
double_val = 0.0;
EXPECT_TRUE(root->GetAsDouble(&double_val));
EXPECT_DOUBLE_EQ(.43, double_val);
root = JSONReader().ReadToValue("2.1e0");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::Type::DOUBLE));
+ EXPECT_TRUE(root->is_double());
double_val = 0.0;
EXPECT_TRUE(root->GetAsDouble(&double_val));
EXPECT_DOUBLE_EQ(2.1, double_val);
root = JSONReader().ReadToValue("2.1e+0001");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::Type::DOUBLE));
+ EXPECT_TRUE(root->is_double());
double_val = 0.0;
EXPECT_TRUE(root->GetAsDouble(&double_val));
EXPECT_DOUBLE_EQ(21.0, double_val);
root = JSONReader().ReadToValue("0.01");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::Type::DOUBLE));
+ EXPECT_TRUE(root->is_double());
double_val = 0.0;
EXPECT_TRUE(root->GetAsDouble(&double_val));
EXPECT_DOUBLE_EQ(0.01, double_val);
root = JSONReader().ReadToValue("1.00");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::Type::DOUBLE));
+ EXPECT_TRUE(root->is_double());
double_val = 0.0;
EXPECT_TRUE(root->GetAsDouble(&double_val));
EXPECT_DOUBLE_EQ(1.0, double_val);
@@ -210,7 +210,7 @@ TEST(JSONReaderTest, Reading) {
// Test string parser
std::unique_ptr<Value> root = JSONReader().ReadToValue("\"hello world\"");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::Type::STRING));
+ EXPECT_TRUE(root->is_string());
std::string str_val;
EXPECT_TRUE(root->GetAsString(&str_val));
EXPECT_EQ("hello world", str_val);
@@ -220,7 +220,7 @@ TEST(JSONReaderTest, Reading) {
// Empty string
std::unique_ptr<Value> root = JSONReader().ReadToValue("\"\"");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::Type::STRING));
+ EXPECT_TRUE(root->is_string());
std::string str_val;
EXPECT_TRUE(root->GetAsString(&str_val));
EXPECT_EQ("", str_val);
@@ -231,7 +231,7 @@ TEST(JSONReaderTest, Reading) {
std::unique_ptr<Value> root =
JSONReader().ReadToValue("\" \\\"\\\\\\/\\b\\f\\n\\r\\t\\v\"");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::Type::STRING));
+ EXPECT_TRUE(root->is_string());
std::string str_val;
EXPECT_TRUE(root->GetAsString(&str_val));
EXPECT_EQ(" \"\\/\b\f\n\r\t\v", str_val);
@@ -242,7 +242,7 @@ TEST(JSONReaderTest, Reading) {
std::unique_ptr<Value> root =
JSONReader().ReadToValue("\"\\x41\\x00\\u1234\\u0000\"");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::Type::STRING));
+ EXPECT_TRUE(root->is_string());
std::string str_val;
EXPECT_TRUE(root->GetAsString(&str_val));
EXPECT_EQ(std::wstring(L"A\0\x1234\0", 4), UTF8ToWide(str_val));
@@ -316,7 +316,7 @@ TEST(JSONReaderTest, Reading) {
EXPECT_EQ(1U, list->GetSize());
Value* tmp_value = nullptr;
ASSERT_TRUE(list->Get(0, &tmp_value));
- EXPECT_TRUE(tmp_value->IsType(Value::Type::BOOLEAN));
+ EXPECT_TRUE(tmp_value->is_bool());
bool bool_value = false;
EXPECT_TRUE(tmp_value->GetAsBoolean(&bool_value));
EXPECT_TRUE(bool_value);
@@ -345,7 +345,7 @@ TEST(JSONReaderTest, Reading) {
EXPECT_DOUBLE_EQ(9.87654321, double_val);
Value* null_val = nullptr;
ASSERT_TRUE(dict_val->Get("null", &null_val));
- EXPECT_TRUE(null_val->IsType(Value::Type::NONE));
+ EXPECT_TRUE(null_val->is_none());
std::string str_val;
EXPECT_TRUE(dict_val->GetString("S", &str_val));
EXPECT_EQ("str", str_val);
@@ -483,7 +483,7 @@ TEST(JSONReaderTest, Reading) {
std::unique_ptr<Value> root =
JSONReader().ReadToValue("\"\xe7\xbd\x91\xe9\xa1\xb5\"");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::Type::STRING));
+ EXPECT_TRUE(root->is_string());
std::string str_val;
EXPECT_TRUE(root->GetAsString(&str_val));
EXPECT_EQ(L"\x7f51\x9875", UTF8ToWide(str_val));
@@ -507,7 +507,7 @@ TEST(JSONReaderTest, Reading) {
// Test utf16 encoded strings.
std::unique_ptr<Value> root = JSONReader().ReadToValue("\"\\u20ac3,14\"");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::Type::STRING));
+ EXPECT_TRUE(root->is_string());
std::string str_val;
EXPECT_TRUE(root->GetAsString(&str_val));
EXPECT_EQ(
@@ -517,7 +517,7 @@ TEST(JSONReaderTest, Reading) {
root = JSONReader().ReadToValue("\"\\ud83d\\udca9\\ud83d\\udc6c\"");
ASSERT_TRUE(root);
- EXPECT_TRUE(root->IsType(Value::Type::STRING));
+ EXPECT_TRUE(root->is_string());
str_val.clear();
EXPECT_TRUE(root->GetAsString(&str_val));
EXPECT_EQ("\xf0\x9f\x92\xa9\xf0\x9f\x91\xac", str_val);
@@ -545,7 +545,7 @@ TEST(JSONReaderTest, Reading) {
{
// Test literal root objects.
std::unique_ptr<Value> root = JSONReader::Read("null");
- EXPECT_TRUE(root->IsType(Value::Type::NONE));
+ EXPECT_TRUE(root->is_none());
root = JSONReader::Read("true");
ASSERT_TRUE(root);
@@ -579,7 +579,7 @@ TEST(JSONReaderTest, ReadFromFile) {
JSONReader reader;
std::unique_ptr<Value> root(reader.ReadToValue(input));
ASSERT_TRUE(root) << reader.GetErrorMessage();
- EXPECT_TRUE(root->IsType(Value::Type::DICTIONARY));
+ EXPECT_TRUE(root->is_dict());
}
// Tests that the root of a JSON object can be deleted safely while its
diff --git a/chromium/base/json/json_value_converter.h b/chromium/base/json/json_value_converter.h
index 966df59ec50..ef0811501b5 100644
--- a/chromium/base/json/json_value_converter.h
+++ b/chromium/base/json/json_value_converter.h
@@ -72,7 +72,7 @@
// Sometimes JSON format uses string representations for other types such
// like enum, timestamp, or URL. You can use RegisterCustomField method
// and specify a function to convert a StringPiece to your type.
-// bool ConvertFunc(const StringPiece& s, YourEnum* result) {
+// bool ConvertFunc(StringPiece s, YourEnum* result) {
// // do something and return true if succeed...
// }
// struct Message {
@@ -96,7 +96,7 @@ template<typename StructType>
class FieldConverterBase {
public:
explicit FieldConverterBase(const std::string& path) : field_path_(path) {}
- virtual ~FieldConverterBase() {}
+ virtual ~FieldConverterBase() = default;
virtual bool ConvertField(const base::Value& value, StructType* obj)
const = 0;
const std::string& field_path() const { return field_path_; }
@@ -109,7 +109,7 @@ class FieldConverterBase {
template <typename FieldType>
class ValueConverter {
public:
- virtual ~ValueConverter() {}
+ virtual ~ValueConverter() = default;
virtual bool Convert(const base::Value& value, FieldType* field) const = 0;
};
@@ -140,7 +140,7 @@ class BasicValueConverter;
template <>
class BASE_EXPORT BasicValueConverter<int> : public ValueConverter<int> {
public:
- BasicValueConverter() {}
+ BasicValueConverter() = default;
bool Convert(const base::Value& value, int* field) const override;
@@ -152,7 +152,7 @@ template <>
class BASE_EXPORT BasicValueConverter<std::string>
: public ValueConverter<std::string> {
public:
- BasicValueConverter() {}
+ BasicValueConverter() = default;
bool Convert(const base::Value& value, std::string* field) const override;
@@ -164,7 +164,7 @@ template <>
class BASE_EXPORT BasicValueConverter<string16>
: public ValueConverter<string16> {
public:
- BasicValueConverter() {}
+ BasicValueConverter() = default;
bool Convert(const base::Value& value, string16* field) const override;
@@ -175,7 +175,7 @@ class BASE_EXPORT BasicValueConverter<string16>
template <>
class BASE_EXPORT BasicValueConverter<double> : public ValueConverter<double> {
public:
- BasicValueConverter() {}
+ BasicValueConverter() = default;
bool Convert(const base::Value& value, double* field) const override;
@@ -186,7 +186,7 @@ class BASE_EXPORT BasicValueConverter<double> : public ValueConverter<double> {
template <>
class BASE_EXPORT BasicValueConverter<bool> : public ValueConverter<bool> {
public:
- BasicValueConverter() {}
+ BasicValueConverter() = default;
bool Convert(const base::Value& value, bool* field) const override;
@@ -215,7 +215,7 @@ class ValueFieldConverter : public ValueConverter<FieldType> {
template <typename FieldType>
class CustomFieldConverter : public ValueConverter<FieldType> {
public:
- typedef bool(*ConvertFunc)(const StringPiece& value, FieldType* field);
+ typedef bool (*ConvertFunc)(StringPiece value, FieldType* field);
explicit CustomFieldConverter(ConvertFunc convert_func)
: convert_func_(convert_func) {}
@@ -235,7 +235,7 @@ class CustomFieldConverter : public ValueConverter<FieldType> {
template <typename NestedType>
class NestedValueConverter : public ValueConverter<NestedType> {
public:
- NestedValueConverter() {}
+ NestedValueConverter() = default;
bool Convert(const base::Value& value, NestedType* field) const override {
return converter_.Convert(value, field);
@@ -250,7 +250,7 @@ template <typename Element>
class RepeatedValueConverter
: public ValueConverter<std::vector<std::unique_ptr<Element>>> {
public:
- RepeatedValueConverter() {}
+ RepeatedValueConverter() = default;
bool Convert(const base::Value& value,
std::vector<std::unique_ptr<Element>>* field) const override {
@@ -286,7 +286,7 @@ template <typename NestedType>
class RepeatedMessageConverter
: public ValueConverter<std::vector<std::unique_ptr<NestedType>>> {
public:
- RepeatedMessageConverter() {}
+ RepeatedMessageConverter() = default;
bool Convert(const base::Value& value,
std::vector<std::unique_ptr<NestedType>>* field) const override {
@@ -407,10 +407,9 @@ class JSONValueConverter {
}
template <typename FieldType>
- void RegisterCustomField(
- const std::string& field_name,
- FieldType StructType::* field,
- bool (*convert_func)(const StringPiece&, FieldType*)) {
+ void RegisterCustomField(const std::string& field_name,
+ FieldType StructType::*field,
+ bool (*convert_func)(StringPiece, FieldType*)) {
fields_.push_back(
std::make_unique<internal::FieldConverter<StructType, FieldType>>(
field_name, field,
diff --git a/chromium/base/json/json_value_converter_unittest.cc b/chromium/base/json/json_value_converter_unittest.cc
index 805b05b5406..322f5f0a35b 100644
--- a/chromium/base/json/json_value_converter_unittest.cc
+++ b/chromium/base/json/json_value_converter_unittest.cc
@@ -30,7 +30,7 @@ struct SimpleMessage {
std::vector<std::unique_ptr<std::string>> string_values;
SimpleMessage() : foo(0), baz(false), bstruct(false), simple_enum(FOO) {}
- static bool ParseSimpleEnum(const StringPiece& value, SimpleEnum* field) {
+ static bool ParseSimpleEnum(StringPiece value, SimpleEnum* field) {
if (value == "foo") {
*field = FOO;
return true;
diff --git a/chromium/base/json/json_value_serializer_unittest.cc b/chromium/base/json/json_value_serializer_unittest.cc
index e835700a1a5..d25f95047d2 100644
--- a/chromium/base/json/json_value_serializer_unittest.cc
+++ b/chromium/base/json/json_value_serializer_unittest.cc
@@ -224,7 +224,7 @@ TEST(JSONValueSerializerTest, Roundtrip) {
Value* null_value = nullptr;
ASSERT_TRUE(root_dict->Get("null", &null_value));
ASSERT_TRUE(null_value);
- ASSERT_TRUE(null_value->IsType(Value::Type::NONE));
+ ASSERT_TRUE(null_value->is_none());
bool bool_value = false;
ASSERT_TRUE(root_dict->GetBoolean("bool", &bool_value));
@@ -417,7 +417,7 @@ TEST_F(JSONFileValueSerializerTest, Roundtrip) {
Value* null_value = nullptr;
ASSERT_TRUE(root_dict->Get("null", &null_value));
ASSERT_TRUE(null_value);
- ASSERT_TRUE(null_value->IsType(Value::Type::NONE));
+ ASSERT_TRUE(null_value->is_none());
bool bool_value = false;
ASSERT_TRUE(root_dict->GetBoolean("bool", &bool_value));
diff --git a/chromium/base/json/string_escape.cc b/chromium/base/json/string_escape.cc
index 9a26ff76c4f..471a9d30cfa 100644
--- a/chromium/base/json/string_escape.cc
+++ b/chromium/base/json/string_escape.cc
@@ -116,33 +116,31 @@ bool EscapeJSONStringImpl(const S& str, bool put_in_quotes, std::string* dest) {
} // namespace
-bool EscapeJSONString(const StringPiece& str,
- bool put_in_quotes,
- std::string* dest) {
+bool EscapeJSONString(StringPiece str, bool put_in_quotes, std::string* dest) {
return EscapeJSONStringImpl(str, put_in_quotes, dest);
}
-bool EscapeJSONString(const StringPiece16& str,
+bool EscapeJSONString(StringPiece16 str,
bool put_in_quotes,
std::string* dest) {
return EscapeJSONStringImpl(str, put_in_quotes, dest);
}
-std::string GetQuotedJSONString(const StringPiece& str) {
+std::string GetQuotedJSONString(StringPiece str) {
std::string dest;
bool ok = EscapeJSONStringImpl(str, true, &dest);
DCHECK(ok);
return dest;
}
-std::string GetQuotedJSONString(const StringPiece16& str) {
+std::string GetQuotedJSONString(StringPiece16 str) {
std::string dest;
bool ok = EscapeJSONStringImpl(str, true, &dest);
DCHECK(ok);
return dest;
}
-std::string EscapeBytesAsInvalidJSONString(const StringPiece& str,
+std::string EscapeBytesAsInvalidJSONString(StringPiece str,
bool put_in_quotes) {
std::string dest;
diff --git a/chromium/base/json/string_escape.h b/chromium/base/json/string_escape.h
index c6a5b33e839..f75f475afc6 100644
--- a/chromium/base/json/string_escape.h
+++ b/chromium/base/json/string_escape.h
@@ -26,21 +26,21 @@ namespace base {
//
// If |put_in_quotes| is true, then a leading and trailing double-quote mark
// will be appended to |dest| as well.
-BASE_EXPORT bool EscapeJSONString(const StringPiece& str,
+BASE_EXPORT bool EscapeJSONString(StringPiece str,
bool put_in_quotes,
std::string* dest);
// Performs a similar function to the UTF-8 StringPiece version above,
// converting UTF-16 code units to UTF-8 code units and escaping non-printing
// control characters. On return, |dest| will contain a valid UTF-8 JSON string.
-BASE_EXPORT bool EscapeJSONString(const StringPiece16& str,
+BASE_EXPORT bool EscapeJSONString(StringPiece16 str,
bool put_in_quotes,
std::string* dest);
// Helper functions that wrap the above two functions but return the value
// instead of appending. |put_in_quotes| is always true.
-BASE_EXPORT std::string GetQuotedJSONString(const StringPiece& str);
-BASE_EXPORT std::string GetQuotedJSONString(const StringPiece16& str);
+BASE_EXPORT std::string GetQuotedJSONString(StringPiece str);
+BASE_EXPORT std::string GetQuotedJSONString(StringPiece16 str);
// Given an arbitrary byte string |str|, this will escape all non-ASCII bytes
// as \uXXXX escape sequences. This function is *NOT* meant to be used with
@@ -53,7 +53,7 @@ BASE_EXPORT std::string GetQuotedJSONString(const StringPiece16& str);
//
// The output of this function takes the *appearance* of JSON but is not in
// fact valid according to RFC 4627.
-BASE_EXPORT std::string EscapeBytesAsInvalidJSONString(const StringPiece& str,
+BASE_EXPORT std::string EscapeBytesAsInvalidJSONString(StringPiece str,
bool put_in_quotes);
} // namespace base
diff --git a/chromium/base/lazy_instance.h b/chromium/base/lazy_instance.h
index 7821ebc57fa..2d150a2be77 100644
--- a/chromium/base/lazy_instance.h
+++ b/chromium/base/lazy_instance.h
@@ -1,7 +1,17 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
+//
+// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! DEPRECATED !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+// Please don't introduce new instances of LazyInstance<T>. Use a function-local
+// static of type base::NoDestructor<T> instead:
+//
+// Factory& Factory::GetInstance() {
+// static base::NoDestructor<Factory> instance;
+// return *instance;
+// }
+// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+//
// The LazyInstance<Type, Traits> class manages a single instance of Type,
// which will be lazily created on the first time it's accessed. This class is
// useful for places you would normally use a function-level static, but you
@@ -38,8 +48,8 @@
#include <new> // For placement new.
#include "base/atomicops.h"
-#include "base/base_export.h"
#include "base/debug/leak_annotations.h"
+#include "base/lazy_instance_helpers.h"
#include "base/logging.h"
#include "base/threading/thread_restrictions.h"
@@ -113,52 +123,6 @@ struct LeakyLazyInstanceTraits {
template <typename Type>
struct ErrorMustSelectLazyOrDestructorAtExitForLazyInstance {};
-// Our AtomicWord doubles as a spinlock, where a value of
-// kLazyInstanceStateCreating means the spinlock is being held for creation.
-constexpr subtle::AtomicWord kLazyInstanceStateCreating = 1;
-
-// Check if instance needs to be created. If so return true otherwise
-// if another thread has beat us, wait for instance to be created and
-// return false.
-BASE_EXPORT bool NeedsLazyInstance(subtle::AtomicWord* state);
-
-// After creating an instance, call this to register the dtor to be called
-// at program exit and to update the atomic state to hold the |new_instance|
-BASE_EXPORT void CompleteLazyInstance(subtle::AtomicWord* state,
- subtle::AtomicWord new_instance,
- void (*destructor)(void*),
- void* destructor_arg);
-
-// If |state| is uninitialized, constructs a value using |creator_func|, stores
-// it into |state| and registers |destructor| to be called with |destructor_arg|
-// as argument when the current AtExitManager goes out of scope. Then, returns
-// the value stored in |state|. It is safe to have concurrent calls to this
-// function with the same |state|.
-template <typename CreatorFunc>
-void* GetOrCreateLazyPointer(subtle::AtomicWord* state,
- const CreatorFunc& creator_func,
- void (*destructor)(void*),
- void* destructor_arg) {
- // If any bit in the created mask is true, the instance has already been
- // fully constructed.
- constexpr subtle::AtomicWord kLazyInstanceCreatedMask =
- ~internal::kLazyInstanceStateCreating;
-
- // We will hopefully have fast access when the instance is already created.
- // Since a thread sees |state| == 0 or kLazyInstanceStateCreating at most
- // once, the load is taken out of NeedsLazyInstance() as a fast-path. The load
- // has acquire memory ordering as a thread which sees |state| > creating needs
- // to acquire visibility over the associated data. Pairing Release_Store is in
- // CompleteLazyInstance().
- subtle::AtomicWord value = subtle::Acquire_Load(state);
- if (!(value & kLazyInstanceCreatedMask) && NeedsLazyInstance(state)) {
- // Create the instance in the space provided by |private_buf_|.
- value = reinterpret_cast<subtle::AtomicWord>(creator_func());
- CompleteLazyInstance(state, value, destructor, destructor_arg);
- }
- return reinterpret_cast<void*>(subtle::NoBarrier_Load(state));
-}
-
} // namespace internal
template <
@@ -186,25 +150,23 @@ class LazyInstance {
Type* Pointer() {
#if DCHECK_IS_ON()
- // Avoid making TLS lookup on release builds.
if (!Traits::kAllowedToAccessOnNonjoinableThread)
ThreadRestrictions::AssertSingletonAllowed();
#endif
+
return static_cast<Type*>(internal::GetOrCreateLazyPointer(
- &private_instance_,
- [this]() { return Traits::New(private_buf_); },
+ &private_instance_, [this]() { return Traits::New(private_buf_); },
Traits::kRegisterOnExit ? OnExit : nullptr, this));
}
- bool operator==(Type* p) {
- switch (subtle::NoBarrier_Load(&private_instance_)) {
- case 0:
- return p == NULL;
- case internal::kLazyInstanceStateCreating:
- return static_cast<void*>(p) == private_buf_;
- default:
- return p == instance();
- }
+ // Returns true if the lazy instance has been created. Unlike Get() and
+ // Pointer(), calling IsCreated() will not instantiate the object of Type.
+ bool IsCreated() {
+ // Return true (i.e. "created") if |private_instance_| is either being
+ // created right now (i.e. |private_instance_| has value of
+ // internal::kLazyInstanceStateCreating) or was already created (i.e.
+ // |private_instance_| has any other non-zero value).
+ return 0 != subtle::NoBarrier_Load(&private_instance_);
}
// MSVC gives a warning that the alignment expands the size of the
diff --git a/chromium/base/lazy_instance.cc b/chromium/base/lazy_instance_helpers.cc
index de143ca5c17..38ea6f1cef3 100644
--- a/chromium/base/lazy_instance.cc
+++ b/chromium/base/lazy_instance_helpers.cc
@@ -1,8 +1,8 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "base/lazy_instance.h"
+#include "base/lazy_instance_helpers.h"
#include "base/at_exit.h"
#include "base/atomicops.h"
@@ -11,18 +11,17 @@
namespace base {
namespace internal {
-// TODO(joth): This function could be shared with Singleton, in place of its
-// WaitForInstance() call.
bool NeedsLazyInstance(subtle::AtomicWord* state) {
// Try to create the instance, if we're the first, will go from 0 to
// kLazyInstanceStateCreating, otherwise we've already been beaten here.
// The memory access has no memory ordering as state 0 and
// kLazyInstanceStateCreating have no associated data (memory barriers are
// all about ordering of memory accesses to *associated* data).
- if (subtle::NoBarrier_CompareAndSwap(state, 0,
- kLazyInstanceStateCreating) == 0)
+ if (subtle::NoBarrier_CompareAndSwap(state, 0, kLazyInstanceStateCreating) ==
+ 0) {
// Caller must create instance
return true;
+ }
// It's either in the process of being created, or already created. Spin.
// The load has acquire memory ordering as a thread which sees
@@ -51,13 +50,13 @@ void CompleteLazyInstance(subtle::AtomicWord* state,
subtle::AtomicWord new_instance,
void (*destructor)(void*),
void* destructor_arg) {
- // Instance is created, go from CREATING to CREATED.
- // Releases visibility over private_buf_ to readers. Pairing Acquire_Load's
- // are in NeedsInstance() and Pointer().
+ // Instance is created, go from CREATING to CREATED (or reset it if
+ // |new_instance| is null). Releases visibility over |private_buf_| to
+ // readers. Pairing Acquire_Load is in NeedsLazyInstance().
subtle::Release_Store(state, new_instance);
// Make sure that the lazily instantiated object will get destroyed at exit.
- if (destructor)
+ if (new_instance && destructor)
AtExitManager::RegisterCallback(destructor, destructor_arg);
}
diff --git a/chromium/base/lazy_instance_helpers.h b/chromium/base/lazy_instance_helpers.h
new file mode 100644
index 00000000000..03266979a0a
--- /dev/null
+++ b/chromium/base/lazy_instance_helpers.h
@@ -0,0 +1,77 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_LAZY_INSTANCE_INTERNAL_H_
+#define BASE_LAZY_INSTANCE_INTERNAL_H_
+
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/logging.h"
+
+// Helper methods used by LazyInstance and a few other base APIs for thread-safe
+// lazy construction.
+
+namespace base {
+namespace internal {
+
+// Our AtomicWord doubles as a spinlock, where a value of
+// kLazyInstanceStateCreating means the spinlock is being held for creation.
+constexpr subtle::AtomicWord kLazyInstanceStateCreating = 1;
+
+// Helper for GetOrCreateLazyPointer(). Checks if instance needs to be created.
+// If so returns true otherwise if another thread has beat us, waits for
+// instance to be created and returns false.
+BASE_EXPORT bool NeedsLazyInstance(subtle::AtomicWord* state);
+
+// Helper for GetOrCreateLazyPointer(). After creating an instance, this is
+// called to register the dtor to be called at program exit and to update the
+// atomic state to hold the |new_instance|
+BASE_EXPORT void CompleteLazyInstance(subtle::AtomicWord* state,
+ subtle::AtomicWord new_instance,
+ void (*destructor)(void*),
+ void* destructor_arg);
+
+// If |state| is uninitialized (zero), constructs a value using |creator_func|,
+// stores it into |state| and registers |destructor| to be called with
+// |destructor_arg| as argument when the current AtExitManager goes out of
+// scope. Then, returns the value stored in |state|. It is safe to have
+// concurrent calls to this function with the same |state|. |creator_func| may
+// return nullptr if it doesn't want to create an instance anymore (e.g. on
+// shutdown), it is from then on required to return nullptr to all callers (ref.
+// StaticMemorySingletonTraits). Callers need to synchronize before
+// |creator_func| may return a non-null instance again (ref.
+// StaticMemorySingletonTraits::ResurectForTesting()).
+template <typename CreatorFunc>
+void* GetOrCreateLazyPointer(subtle::AtomicWord* state,
+ const CreatorFunc& creator_func,
+ void (*destructor)(void*),
+ void* destructor_arg) {
+ DCHECK(state);
+
+ // If any bit in the created mask is true, the instance has already been
+ // fully constructed.
+ constexpr subtle::AtomicWord kLazyInstanceCreatedMask =
+ ~internal::kLazyInstanceStateCreating;
+
+ // We will hopefully have fast access when the instance is already created.
+ // Since a thread sees |state| == 0 or kLazyInstanceStateCreating at most
+ // once, the load is taken out of NeedsLazyInstance() as a fast-path. The load
+ // has acquire memory ordering as a thread which sees |state| > creating needs
+ // to acquire visibility over the associated data. Pairing Release_Store is in
+ // CompleteLazyInstance().
+ if (!(subtle::Acquire_Load(state) & kLazyInstanceCreatedMask) &&
+ NeedsLazyInstance(state)) {
+ // This thread won the race and is now responsible for creating the instance
+ // and storing it back into |state|.
+ subtle::AtomicWord instance =
+ reinterpret_cast<subtle::AtomicWord>(creator_func());
+ CompleteLazyInstance(state, instance, destructor, destructor_arg);
+ }
+ return reinterpret_cast<void*>(subtle::NoBarrier_Load(state));
+}
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_LAZY_INSTANCE_INTERNAL_H_
diff --git a/chromium/base/lazy_instance_unittest.cc b/chromium/base/lazy_instance_unittest.cc
index cfa48f2308c..a5f024cf5cf 100644
--- a/chromium/base/lazy_instance_unittest.cc
+++ b/chromium/base/lazy_instance_unittest.cc
@@ -4,10 +4,19 @@
#include <stddef.h>
+#include <memory>
+#include <vector>
+
#include "base/at_exit.h"
#include "base/atomic_sequence_num.h"
+#include "base/atomicops.h"
+#include "base/barrier_closure.h"
+#include "base/bind.h"
#include "base/lazy_instance.h"
+#include "base/sys_info.h"
+#include "base/threading/platform_thread.h"
#include "base/threading/simple_thread.h"
+#include "base/time/time.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -24,6 +33,9 @@ class ConstructAndDestructLogger {
~ConstructAndDestructLogger() {
destructed_seq_.GetNext();
}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ConstructAndDestructLogger);
};
class SlowConstructor {
@@ -39,8 +51,11 @@ class SlowConstructor {
static int constructed;
private:
int some_int_;
+
+ DISALLOW_COPY_AND_ASSIGN(SlowConstructor);
};
+// static
int SlowConstructor::constructed = 0;
class SlowDelegate : public base::DelegateSimpleThread::Delegate {
@@ -56,33 +71,39 @@ class SlowDelegate : public base::DelegateSimpleThread::Delegate {
private:
base::LazyInstance<SlowConstructor>::DestructorAtExit* lazy_;
+
+ DISALLOW_COPY_AND_ASSIGN(SlowDelegate);
};
} // namespace
-static base::LazyInstance<ConstructAndDestructLogger>::DestructorAtExit
- lazy_logger = LAZY_INSTANCE_INITIALIZER;
+base::LazyInstance<ConstructAndDestructLogger>::DestructorAtExit lazy_logger =
+ LAZY_INSTANCE_INITIALIZER;
TEST(LazyInstanceTest, Basic) {
{
base::ShadowingAtExitManager shadow;
+ EXPECT_FALSE(lazy_logger.IsCreated());
EXPECT_EQ(0, constructed_seq_.GetNext());
EXPECT_EQ(0, destructed_seq_.GetNext());
lazy_logger.Get();
+ EXPECT_TRUE(lazy_logger.IsCreated());
EXPECT_EQ(2, constructed_seq_.GetNext());
EXPECT_EQ(1, destructed_seq_.GetNext());
lazy_logger.Pointer();
+ EXPECT_TRUE(lazy_logger.IsCreated());
EXPECT_EQ(3, constructed_seq_.GetNext());
EXPECT_EQ(2, destructed_seq_.GetNext());
}
+ EXPECT_FALSE(lazy_logger.IsCreated());
EXPECT_EQ(4, constructed_seq_.GetNext());
EXPECT_EQ(4, destructed_seq_.GetNext());
}
-static base::LazyInstance<SlowConstructor>::DestructorAtExit lazy_slow =
+base::LazyInstance<SlowConstructor>::DestructorAtExit lazy_slow =
LAZY_INSTANCE_INITIALIZER;
TEST(LazyInstanceTest, ConstructorThreadSafety) {
@@ -177,3 +198,125 @@ TEST(LazyInstanceTest, Alignment) {
EXPECT_ALIGNED(align32.Pointer(), 32);
EXPECT_ALIGNED(align4096.Pointer(), 4096);
}
+
+namespace {
+
+// A class whose constructor busy-loops until it is told to complete
+// construction.
+class BlockingConstructor {
+ public:
+ BlockingConstructor() {
+ EXPECT_FALSE(WasConstructorCalled());
+ base::subtle::NoBarrier_Store(&constructor_called_, 1);
+ EXPECT_TRUE(WasConstructorCalled());
+ while (!base::subtle::NoBarrier_Load(&complete_construction_))
+ base::PlatformThread::YieldCurrentThread();
+ done_construction_ = true;
+ }
+
+ ~BlockingConstructor() {
+ // Restore static state for the next test.
+ base::subtle::NoBarrier_Store(&constructor_called_, 0);
+ base::subtle::NoBarrier_Store(&complete_construction_, 0);
+ }
+
+ // Returns true if BlockingConstructor() was entered.
+ static bool WasConstructorCalled() {
+ return base::subtle::NoBarrier_Load(&constructor_called_);
+ }
+
+ // Instructs BlockingConstructor() that it may now unblock its construction.
+ static void CompleteConstructionNow() {
+ base::subtle::NoBarrier_Store(&complete_construction_, 1);
+ }
+
+ bool done_construction() { return done_construction_; }
+
+ private:
+ // Use Atomic32 instead of AtomicFlag for them to be trivially initialized.
+ static base::subtle::Atomic32 constructor_called_;
+ static base::subtle::Atomic32 complete_construction_;
+
+ bool done_construction_ = false;
+
+ DISALLOW_COPY_AND_ASSIGN(BlockingConstructor);
+};
+
+// A SimpleThread running at |thread_priority| which invokes |before_get|
+// (optional) and then invokes Get() on the LazyInstance it's assigned.
+class BlockingConstructorThread : public base::SimpleThread {
+ public:
+ BlockingConstructorThread(
+ base::ThreadPriority thread_priority,
+ base::LazyInstance<BlockingConstructor>::DestructorAtExit* lazy,
+ base::OnceClosure before_get)
+ : SimpleThread("BlockingConstructorThread", Options(thread_priority)),
+ lazy_(lazy),
+ before_get_(std::move(before_get)) {}
+
+ void Run() override {
+ if (before_get_)
+ std::move(before_get_).Run();
+ EXPECT_TRUE(lazy_->Get().done_construction());
+ }
+
+ private:
+ base::LazyInstance<BlockingConstructor>::DestructorAtExit* lazy_;
+ base::OnceClosure before_get_;
+
+ DISALLOW_COPY_AND_ASSIGN(BlockingConstructorThread);
+};
+
+// static
+base::subtle::Atomic32 BlockingConstructor::constructor_called_ = 0;
+// static
+base::subtle::Atomic32 BlockingConstructor::complete_construction_ = 0;
+
+base::LazyInstance<BlockingConstructor>::DestructorAtExit lazy_blocking =
+ LAZY_INSTANCE_INITIALIZER;
+
+} // namespace
+
+// Tests that if the thread assigned to construct the LazyInstance runs at
+// background priority : the foreground threads will yield to it enough for it
+// to eventually complete construction.
+// This is a regression test for https://crbug.com/797129.
+TEST(LazyInstanceTest, PriorityInversionAtInitializationResolves) {
+ base::TimeTicks test_begin = base::TimeTicks::Now();
+
+ // Construct BlockingConstructor from a background thread.
+ BlockingConstructorThread background_getter(
+ base::ThreadPriority::BACKGROUND, &lazy_blocking, base::OnceClosure());
+ background_getter.Start();
+
+ while (!BlockingConstructor::WasConstructorCalled())
+ base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(1));
+
+ // Spin 4 foreground thread per core contending to get the already under
+ // construction LazyInstance. When they are all running and poking at it :
+ // allow the background thread to complete its work.
+ const int kNumForegroundThreads = 4 * base::SysInfo::NumberOfProcessors();
+ std::vector<std::unique_ptr<base::SimpleThread>> foreground_threads;
+ base::RepeatingClosure foreground_thread_ready_callback =
+ base::BarrierClosure(
+ kNumForegroundThreads,
+ base::BindOnce(&BlockingConstructor::CompleteConstructionNow));
+ for (int i = 0; i < kNumForegroundThreads; ++i) {
+ foreground_threads.push_back(std::make_unique<BlockingConstructorThread>(
+ base::ThreadPriority::NORMAL, &lazy_blocking,
+ foreground_thread_ready_callback));
+ foreground_threads.back()->Start();
+ }
+
+ // This test will hang if the foreground threads become stuck in
+ // LazyInstance::Get() per the background thread never being scheduled to
+ // complete construction.
+ for (auto& foreground_thread : foreground_threads)
+ foreground_thread->Join();
+ background_getter.Join();
+
+ // Fail if this test takes more than 5 seconds (it takes 5-10 seconds on a
+ // Z840 without r527445 but is expected to be fast (~30ms) with the fix).
+ EXPECT_LT(base::TimeTicks::Now() - test_begin,
+ base::TimeDelta::FromSeconds(5));
+}
diff --git a/chromium/base/logging.cc b/chromium/base/logging.cc
index eba1b2c51ad..7db189f846e 100644
--- a/chromium/base/logging.cc
+++ b/chromium/base/logging.cc
@@ -811,11 +811,9 @@ LogMessage::~LogMessage() {
// Ensure the first characters of the string are on the stack so they
// are contained in minidumps for diagnostic purposes.
- char str_stack[1024];
- str_newline.copy(str_stack, arraysize(str_stack));
- base::debug::Alias(str_stack);
+ DEBUG_ALIAS_FOR_CSTR(str_stack, str_newline.c_str(), 1024);
- if (!(log_assert_handler_stack == nullptr) &&
+ if (log_assert_handler_stack.IsCreated() &&
!log_assert_handler_stack.Get().empty()) {
LogAssertHandlerFunction log_assert_handler =
log_assert_handler_stack.Get().top();
diff --git a/chromium/base/logging.h b/chromium/base/logging.h
index 201de8d1c43..d020719a6aa 100644
--- a/chromium/base/logging.h
+++ b/chromium/base/logging.h
@@ -1021,7 +1021,7 @@ class BASE_EXPORT LogMessage {
// is not used" and "statement has no effect".
class LogMessageVoidify {
public:
- LogMessageVoidify() { }
+ LogMessageVoidify() = default;
// This has to be an operator with a precedence lower than << but
// higher than ?:
void operator&(std::ostream&) { }
diff --git a/chromium/base/logging_unittest.cc b/chromium/base/logging_unittest.cc
index 0e32b68519d..de87c6ca165 100644
--- a/chromium/base/logging_unittest.cc
+++ b/chromium/base/logging_unittest.cc
@@ -204,7 +204,13 @@ TEST_F(LoggingTest, LoggingIsLazyByDestination) {
// Official builds have CHECKs directly call BreakDebugger.
#if !defined(OFFICIAL_BUILD)
-TEST_F(LoggingTest, CheckStreamsAreLazy) {
+// https://crbug.com/709067 tracks test flakiness on iOS.
+#if defined(OS_IOS)
+#define MAYBE_CheckStreamsAreLazy DISABLED_CheckStreamsAreLazy
+#else
+#define MAYBE_CheckStreamsAreLazy CheckStreamsAreLazy
+#endif
+TEST_F(LoggingTest, MAYBE_CheckStreamsAreLazy) {
MockLogSource mock_log_source, uncalled_mock_log_source;
EXPECT_CALL(mock_log_source, Log()).Times(8).
WillRepeatedly(Return("check message"));
@@ -220,7 +226,7 @@ TEST_F(LoggingTest, CheckStreamsAreLazy) {
<< mock_log_source.Log();
}
-#endif // !defined(OFFICIAL_BUILD)
+#endif
#if defined(OFFICIAL_BUILD) && defined(OS_WIN)
NOINLINE void CheckContainingFunc(int death_location) {
@@ -420,7 +426,13 @@ class ScopedDcheckSeverity {
};
#endif // DCHECK_IS_ON() && defined(SYZYASAN)
-TEST_F(LoggingTest, Dcheck) {
+// https://crbug.com/709067 tracks test flakiness on iOS.
+#if defined(OS_IOS)
+#define MAYBE_Dcheck DISABLED_Dcheck
+#else
+#define MAYBE_Dcheck Dcheck
+#endif
+TEST_F(LoggingTest, MAYBE_Dcheck) {
#if DCHECK_IS_ON() && defined(SYZYASAN)
// When DCHECKs are enabled in SyzyASAN builds, LOG_DCHECK is mutable but
// defaults to non-fatal. Set it to LOG_FATAL to get the expected behavior
diff --git a/chromium/base/mac/scoped_dispatch_object.h b/chromium/base/mac/scoped_dispatch_object.h
index 5f5d517d9b0..cd2daf23342 100644
--- a/chromium/base/mac/scoped_dispatch_object.h
+++ b/chromium/base/mac/scoped_dispatch_object.h
@@ -15,7 +15,7 @@ namespace internal {
template <typename T>
struct ScopedDispatchObjectTraits {
- static T InvalidValue() { return nullptr; }
+ static constexpr T InvalidValue() { return nullptr; }
static T Retain(T object) {
dispatch_retain(object);
return object;
diff --git a/chromium/base/mac/scoped_nsobject.h b/chromium/base/mac/scoped_nsobject.h
index ecd8e78f9d3..d970d03e8bc 100644
--- a/chromium/base/mac/scoped_nsobject.h
+++ b/chromium/base/mac/scoped_nsobject.h
@@ -85,12 +85,12 @@ class scoped_nsprotocol
using Traits = internal::ScopedNSProtocolTraits<NST>;
#if !defined(__has_feature) || !__has_feature(objc_arc)
- explicit scoped_nsprotocol(
+ explicit constexpr scoped_nsprotocol(
NST object = Traits::InvalidValue(),
base::scoped_policy::OwnershipPolicy policy = base::scoped_policy::ASSUME)
: ScopedTypeRef<NST, Traits>(object, policy) {}
#else
- explicit scoped_nsprotocol(NST object = Traits::InvalidValue())
+ explicit constexpr scoped_nsprotocol(NST object = Traits::InvalidValue())
: ScopedTypeRef<NST, Traits>(object, base::scoped_policy::RETAIN) {}
#endif
@@ -149,12 +149,12 @@ class scoped_nsobject : public scoped_nsprotocol<NST*> {
using Traits = typename scoped_nsprotocol<NST*>::Traits;
#if !defined(__has_feature) || !__has_feature(objc_arc)
- explicit scoped_nsobject(
+ explicit constexpr scoped_nsobject(
NST* object = Traits::InvalidValue(),
base::scoped_policy::OwnershipPolicy policy = base::scoped_policy::ASSUME)
: scoped_nsprotocol<NST*>(object, policy) {}
#else
- explicit scoped_nsobject(NST* object = Traits::InvalidValue())
+ explicit constexpr scoped_nsobject(NST* object = Traits::InvalidValue())
: scoped_nsprotocol<NST*>(object) {}
#endif
@@ -198,12 +198,12 @@ class scoped_nsobject<id> : public scoped_nsprotocol<id> {
using Traits = typename scoped_nsprotocol<id>::Traits;
#if !defined(__has_feature) || !__has_feature(objc_arc)
- explicit scoped_nsobject(
+ explicit constexpr scoped_nsobject(
id object = Traits::InvalidValue(),
base::scoped_policy::OwnershipPolicy policy = base::scoped_policy::ASSUME)
: scoped_nsprotocol<id>(object, policy) {}
#else
- explicit scoped_nsobject(id object = Traits::InvalidValue())
+ explicit constexpr scoped_nsobject(id object = Traits::InvalidValue())
: scoped_nsprotocol<id>(object) {}
#endif
diff --git a/chromium/base/mac/scoped_typeref.h b/chromium/base/mac/scoped_typeref.h
index b8d8a142625..dd9841d734f 100644
--- a/chromium/base/mac/scoped_typeref.h
+++ b/chromium/base/mac/scoped_typeref.h
@@ -53,7 +53,7 @@ class ScopedTypeRef {
public:
typedef T element_type;
- explicit ScopedTypeRef(
+ explicit constexpr ScopedTypeRef(
__unsafe_unretained T object = Traits::InvalidValue(),
base::scoped_policy::OwnershipPolicy policy = base::scoped_policy::ASSUME)
: object_(object) {
diff --git a/chromium/base/macros.h b/chromium/base/macros.h
index b1c5fe75149..ca5ed5f5264 100644
--- a/chromium/base/macros.h
+++ b/chromium/base/macros.h
@@ -70,6 +70,16 @@ namespace base {
// Use these to declare and define a static local variable (static T;) so that
// it is leaked so that its destructors are not called at exit. This is
// thread-safe.
+//
+// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! DEPRECATED !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+// Please don't use this macro. Use a function-local static of type
+// base::NoDestructor<T> instead:
+//
+// Factory& Factory::GetInstance() {
+// static base::NoDestructor<Factory> instance;
+// return *instance;
+// }
+// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#define CR_DEFINE_STATIC_LOCAL(type, name, arguments) \
static type& name = *new type arguments
diff --git a/chromium/base/memory/OWNERS b/chromium/base/memory/OWNERS
index bcaf778a95e..9b7cbb19bf7 100644
--- a/chromium/base/memory/OWNERS
+++ b/chromium/base/memory/OWNERS
@@ -1,2 +1,4 @@
per-file *chromeos*=skuhne@chromium.org
per-file *chromeos*=oshima@chromium.org
+per-file *shared_memory*=set noparent
+per-file *shared_memory*=file://ipc/SECURITY_OWNERS
diff --git a/chromium/base/memory/discardable_memory_allocator.h b/chromium/base/memory/discardable_memory_allocator.h
index ad73f0890d6..8e7e6ccc93d 100644
--- a/chromium/base/memory/discardable_memory_allocator.h
+++ b/chromium/base/memory/discardable_memory_allocator.h
@@ -27,7 +27,7 @@ class BASE_EXPORT DiscardableMemoryAllocator {
size_t size) = 0;
protected:
- virtual ~DiscardableMemoryAllocator() {}
+ virtual ~DiscardableMemoryAllocator() = default;
};
} // namespace base
diff --git a/chromium/base/memory/discardable_shared_memory.cc b/chromium/base/memory/discardable_shared_memory.cc
index 7aeddab63d2..e9a2d6338b8 100644
--- a/chromium/base/memory/discardable_shared_memory.cc
+++ b/chromium/base/memory/discardable_shared_memory.cc
@@ -28,6 +28,7 @@
#endif
#if defined(OS_WIN)
+#include <windows.h>
#include "base/win/windows_version.h"
#endif
diff --git a/chromium/base/memory/memory_coordinator_client.h b/chromium/base/memory/memory_coordinator_client.h
index d24d3e7cd10..804f0a6b854 100644
--- a/chromium/base/memory/memory_coordinator_client.h
+++ b/chromium/base/memory/memory_coordinator_client.h
@@ -71,7 +71,7 @@ class BASE_EXPORT MemoryCoordinatorClient {
virtual void OnPurgeMemory() {}
protected:
- virtual ~MemoryCoordinatorClient() {}
+ virtual ~MemoryCoordinatorClient() = default;
};
} // namespace base
diff --git a/chromium/base/memory/memory_coordinator_proxy.h b/chromium/base/memory/memory_coordinator_proxy.h
index 2bb26c451c0..b6e7b3f6efd 100644
--- a/chromium/base/memory/memory_coordinator_proxy.h
+++ b/chromium/base/memory/memory_coordinator_proxy.h
@@ -16,7 +16,7 @@ namespace base {
// method descriptions.
class BASE_EXPORT MemoryCoordinator {
public:
- virtual ~MemoryCoordinator() {}
+ virtual ~MemoryCoordinator() = default;
virtual MemoryState GetCurrentMemoryState() const = 0;
};
diff --git a/chromium/base/memory/protected_memory.h b/chromium/base/memory/protected_memory.h
index 7ae5fda7ff3..2bfb75fd454 100644
--- a/chromium/base/memory/protected_memory.h
+++ b/chromium/base/memory/protected_memory.h
@@ -17,12 +17,15 @@
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/macros.h"
+#include "base/memory/protected_memory_flags.h"
#include "base/synchronization/lock.h"
#include "build/build_config.h"
#define PROTECTED_MEMORY_ENABLED 1
-#if defined(OS_LINUX)
+// Linking with lld is required to work around crbug.com/792777.
+// TODO(vtsyrklevich): Remove once support for gold on Android/CrOS is dropped.
+#if defined(OS_LINUX) && BUILDFLAG(USE_LLD)
// Define the section read-only
__asm__(".section protected_memory, \"a\"\n\t");
#define PROTECTED_MEMORY_SECTION __attribute__((section("protected_memory")))
diff --git a/chromium/base/memory/ref_counted.h b/chromium/base/memory/ref_counted.h
index 5e1e8ad847b..249f70e0548 100644
--- a/chromium/base/memory/ref_counted.h
+++ b/chromium/base/memory/ref_counted.h
@@ -148,8 +148,9 @@ class BASE_EXPORT RefCountedThreadSafeBase {
bool HasOneRef() const;
protected:
- explicit RefCountedThreadSafeBase(StartRefCountFromZeroTag) {}
- explicit RefCountedThreadSafeBase(StartRefCountFromOneTag) : ref_count_(1) {
+ explicit constexpr RefCountedThreadSafeBase(StartRefCountFromZeroTag) {}
+ explicit constexpr RefCountedThreadSafeBase(StartRefCountFromOneTag)
+ : ref_count_(1) {
#if DCHECK_IS_ON()
needs_adopt_ref_ = true;
#endif
diff --git a/chromium/base/memory/ref_counted_memory.cc b/chromium/base/memory/ref_counted_memory.cc
index 7eaaf034257..c46bb9abbbf 100644
--- a/chromium/base/memory/ref_counted_memory.cc
+++ b/chromium/base/memory/ref_counted_memory.cc
@@ -38,6 +38,8 @@ RefCountedBytes::RefCountedBytes(const std::vector<unsigned char>& initializer)
RefCountedBytes::RefCountedBytes(const unsigned char* p, size_t size)
: data_(p, p + size) {}
+RefCountedBytes::RefCountedBytes(size_t size) : data_(size, 0) {}
+
scoped_refptr<RefCountedBytes> RefCountedBytes::TakeVector(
std::vector<unsigned char>* to_destroy) {
scoped_refptr<RefCountedBytes> bytes(new RefCountedBytes);
diff --git a/chromium/base/memory/ref_counted_memory.h b/chromium/base/memory/ref_counted_memory.h
index aa22c9e525b..ca7c371681f 100644
--- a/chromium/base/memory/ref_counted_memory.h
+++ b/chromium/base/memory/ref_counted_memory.h
@@ -78,6 +78,10 @@ class BASE_EXPORT RefCountedBytes : public RefCountedMemory {
// Constructs a RefCountedBytes object by copying |size| bytes from |p|.
RefCountedBytes(const unsigned char* p, size_t size);
+ // Constructs a RefCountedBytes object by zero-initializing a new vector of
+ // |size| bytes.
+ explicit RefCountedBytes(size_t size);
+
// Constructs a RefCountedBytes object by performing a swap. (To non
// destructively build a RefCountedBytes, use the constructor that takes a
// vector.)
@@ -91,6 +95,14 @@ class BASE_EXPORT RefCountedBytes : public RefCountedMemory {
const std::vector<unsigned char>& data() const { return data_; }
std::vector<unsigned char>& data() { return data_; }
+ // Non-const versions of front() and front_as() that are simply shorthand for
+ // data().data().
+ unsigned char* front() { return data_.data(); }
+ template <typename T>
+ T* front_as() {
+ return reinterpret_cast<T*>(front());
+ }
+
private:
~RefCountedBytes() override;
diff --git a/chromium/base/memory/ref_counted_memory_unittest.cc b/chromium/base/memory/ref_counted_memory_unittest.cc
index 034f674b758..72046e52eb9 100644
--- a/chromium/base/memory/ref_counted_memory_unittest.cc
+++ b/chromium/base/memory/ref_counted_memory_unittest.cc
@@ -6,13 +6,16 @@
#include <stdint.h>
+#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
+using testing::Each;
+using testing::ElementsAre;
+
namespace base {
TEST(RefCountedMemoryUnitTest, RefCountedStaticMemory) {
- scoped_refptr<RefCountedMemory> mem = new RefCountedStaticMemory(
- "static mem00", 10);
+ auto mem = MakeRefCounted<RefCountedStaticMemory>("static mem00", 10);
EXPECT_EQ(10U, mem->size());
EXPECT_EQ("static mem", std::string(mem->front_as<char>(), mem->size()));
@@ -26,41 +29,52 @@ TEST(RefCountedMemoryUnitTest, RefCountedBytes) {
EXPECT_EQ(0U, data.size());
- EXPECT_EQ(2U, mem->size());
+ ASSERT_EQ(2U, mem->size());
EXPECT_EQ(45U, mem->front()[0]);
EXPECT_EQ(99U, mem->front()[1]);
scoped_refptr<RefCountedMemory> mem2;
{
- unsigned char data2[] = { 12, 11, 99 };
- mem2 = new RefCountedBytes(data2, 3);
+ const unsigned char kData[] = {12, 11, 99};
+ mem2 = MakeRefCounted<RefCountedBytes>(kData, arraysize(kData));
}
- EXPECT_EQ(3U, mem2->size());
+ ASSERT_EQ(3U, mem2->size());
EXPECT_EQ(12U, mem2->front()[0]);
EXPECT_EQ(11U, mem2->front()[1]);
EXPECT_EQ(99U, mem2->front()[2]);
}
+TEST(RefCountedMemoryUnitTest, RefCountedBytesMutable) {
+ auto mem = base::MakeRefCounted<RefCountedBytes>(10);
+
+ ASSERT_EQ(10U, mem->size());
+ EXPECT_THAT(mem->data(), Each(0U));
+
+ // Test non-const versions of data(), front() and front_as<>().
+ mem->data()[0] = 1;
+ mem->front()[1] = 2;
+ mem->front_as<char>()[2] = 3;
+
+ EXPECT_THAT(mem->data(), ElementsAre(1, 2, 3, 0, 0, 0, 0, 0, 0, 0));
+}
+
TEST(RefCountedMemoryUnitTest, RefCountedString) {
std::string s("destroy me");
scoped_refptr<RefCountedMemory> mem = RefCountedString::TakeString(&s);
EXPECT_EQ(0U, s.size());
- EXPECT_EQ(10U, mem->size());
+ ASSERT_EQ(10U, mem->size());
EXPECT_EQ('d', mem->front()[0]);
EXPECT_EQ('e', mem->front()[1]);
+ EXPECT_EQ('e', mem->front()[9]);
}
TEST(RefCountedMemoryUnitTest, Equals) {
std::string s1("same");
scoped_refptr<RefCountedMemory> mem1 = RefCountedString::TakeString(&s1);
- std::vector<unsigned char> d2;
- d2.push_back('s');
- d2.push_back('a');
- d2.push_back('m');
- d2.push_back('e');
+ std::vector<unsigned char> d2 = {'s', 'a', 'm', 'e'};
scoped_refptr<RefCountedMemory> mem2 = RefCountedBytes::TakeVector(&d2);
EXPECT_TRUE(mem1->Equals(mem2));
diff --git a/chromium/base/memory/ref_counted_unittest.cc b/chromium/base/memory/ref_counted_unittest.cc
index 96f588e66cd..71e75bce8e3 100644
--- a/chromium/base/memory/ref_counted_unittest.cc
+++ b/chromium/base/memory/ref_counted_unittest.cc
@@ -4,10 +4,10 @@
#include "base/memory/ref_counted.h"
+#include <type_traits>
#include <utility>
#include "base/test/gtest_util.h"
-#include "base/test/opaque_ref_counted.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace {
@@ -161,6 +161,12 @@ TEST(RefCountedUnitTest, TestSelfAssignment) {
scoped_refptr<SelfAssign> var(p);
var = var;
EXPECT_EQ(var.get(), p);
+ var = std::move(var);
+ EXPECT_EQ(var.get(), p);
+ var.swap(var);
+ EXPECT_EQ(var.get(), p);
+ swap(var, var);
+ EXPECT_EQ(var.get(), p);
}
TEST(RefCountedUnitTest, ScopedRefPtrMemberAccess) {
@@ -188,37 +194,6 @@ TEST(RefCountedUnitTest, ScopedRefPtrToSelfMoveAssignment) {
EXPECT_TRUE(ScopedRefPtrToSelf::was_destroyed());
}
-TEST(RefCountedUnitTest, ScopedRefPtrToOpaque) {
- scoped_refptr<base::OpaqueRefCounted> initial = base::MakeOpaqueRefCounted();
- base::TestOpaqueRefCounted(initial);
-
- scoped_refptr<base::OpaqueRefCounted> assigned;
- assigned = initial;
-
- scoped_refptr<base::OpaqueRefCounted> copied(initial);
-
- scoped_refptr<base::OpaqueRefCounted> moved(std::move(initial));
-
- scoped_refptr<base::OpaqueRefCounted> move_assigned;
- move_assigned = std::move(moved);
-}
-
-TEST(RefCountedUnitTest, ScopedRefPtrToOpaqueThreadSafe) {
- scoped_refptr<base::OpaqueRefCountedThreadSafe> initial =
- base::MakeOpaqueRefCountedThreadSafe();
- base::TestOpaqueRefCountedThreadSafe(initial);
-
- scoped_refptr<base::OpaqueRefCountedThreadSafe> assigned;
- assigned = initial;
-
- scoped_refptr<base::OpaqueRefCountedThreadSafe> copied(initial);
-
- scoped_refptr<base::OpaqueRefCountedThreadSafe> moved(std::move(initial));
-
- scoped_refptr<base::OpaqueRefCountedThreadSafe> move_assigned;
- move_assigned = std::move(moved);
-}
-
TEST(RefCountedUnitTest, BooleanTesting) {
scoped_refptr<SelfAssign> ptr_to_an_instance = new SelfAssign;
EXPECT_TRUE(ptr_to_an_instance);
@@ -563,21 +538,21 @@ TEST(RefCountedUnitTest, MoveConstructorDerived) {
}
TEST(RefCountedUnitTest, TestOverloadResolutionCopy) {
- scoped_refptr<Derived> derived(new Derived);
- scoped_refptr<SelfAssign> expected(derived);
+ const scoped_refptr<Derived> derived(new Derived);
+ const scoped_refptr<SelfAssign> expected(derived);
EXPECT_EQ(expected, Overloaded(derived));
- scoped_refptr<Other> other(new Other);
+ const scoped_refptr<Other> other(new Other);
EXPECT_EQ(other, Overloaded(other));
}
TEST(RefCountedUnitTest, TestOverloadResolutionMove) {
scoped_refptr<Derived> derived(new Derived);
- scoped_refptr<SelfAssign> expected(derived);
+ const scoped_refptr<SelfAssign> expected(derived);
EXPECT_EQ(expected, Overloaded(std::move(derived)));
scoped_refptr<Other> other(new Other);
- scoped_refptr<Other> other2(other);
+ const scoped_refptr<Other> other2(other);
EXPECT_EQ(other2, Overloaded(std::move(other)));
}
diff --git a/chromium/base/memory/scoped_refptr.h b/chromium/base/memory/scoped_refptr.h
index d56d423ee78..2b4a532c87c 100644
--- a/chromium/base/memory/scoped_refptr.h
+++ b/chromium/base/memory/scoped_refptr.h
@@ -9,6 +9,7 @@
#include <iosfwd>
#include <type_traits>
+#include <utility>
#include "base/compiler_specific.h"
#include "base/logging.h"
@@ -156,37 +157,33 @@ class scoped_refptr {
public:
typedef T element_type;
- scoped_refptr() {}
+ constexpr scoped_refptr() = default;
- scoped_refptr(T* p) : ptr_(p) {
+ // Constructs from raw pointer. constexpr if |p| is null.
+ constexpr scoped_refptr(T* p) : ptr_(p) {
if (ptr_)
AddRef(ptr_);
}
- // Copy constructor.
- scoped_refptr(const scoped_refptr<T>& r) : ptr_(r.ptr_) {
- if (ptr_)
- AddRef(ptr_);
- }
+ // Copy constructor. This is required in addition to the copy conversion
+ // constructor below.
+ scoped_refptr(const scoped_refptr& r) : scoped_refptr(r.ptr_) {}
// Copy conversion constructor.
template <typename U,
typename = typename std::enable_if<
std::is_convertible<U*, T*>::value>::type>
- scoped_refptr(const scoped_refptr<U>& r) : ptr_(r.get()) {
- if (ptr_)
- AddRef(ptr_);
- }
+ scoped_refptr(const scoped_refptr<U>& r) : scoped_refptr(r.ptr_) {}
- // Move constructor. This is required in addition to the conversion
- // constructor below in order for clang to warn about pessimizing moves.
- scoped_refptr(scoped_refptr&& r) : ptr_(r.get()) { r.ptr_ = nullptr; }
+ // Move constructor. This is required in addition to the move conversion
+ // constructor below.
+ scoped_refptr(scoped_refptr&& r) noexcept : ptr_(r.ptr_) { r.ptr_ = nullptr; }
// Move conversion constructor.
template <typename U,
typename = typename std::enable_if<
std::is_convertible<U*, T*>::value>::type>
- scoped_refptr(scoped_refptr<U>&& r) : ptr_(r.get()) {
+ scoped_refptr(scoped_refptr<U>&& r) noexcept : ptr_(r.ptr_) {
r.ptr_ = nullptr;
}
@@ -212,48 +209,15 @@ class scoped_refptr {
return ptr_;
}
- scoped_refptr<T>& operator=(T* p) {
- // AddRef first so that self assignment should work
- if (p)
- AddRef(p);
- T* old_ptr = ptr_;
- ptr_ = p;
- if (old_ptr)
- Release(old_ptr);
- return *this;
- }
-
- scoped_refptr<T>& operator=(const scoped_refptr<T>& r) {
- return *this = r.ptr_;
- }
+ scoped_refptr& operator=(T* p) { return *this = scoped_refptr(p); }
- template <typename U>
- scoped_refptr<T>& operator=(const scoped_refptr<U>& r) {
- return *this = r.get();
- }
-
- scoped_refptr<T>& operator=(scoped_refptr<T>&& r) {
- scoped_refptr<T> tmp(std::move(r));
- tmp.swap(*this);
+ // Unified assignment operator.
+ scoped_refptr& operator=(scoped_refptr r) noexcept {
+ swap(r);
return *this;
}
- template <typename U>
- scoped_refptr<T>& operator=(scoped_refptr<U>&& r) {
- // We swap with a temporary variable to guarantee that |ptr_| is released
- // immediately. A naive implementation which swaps |this| and |r| would
- // unintentionally extend the lifetime of |ptr_| to at least the lifetime of
- // |r|.
- scoped_refptr<T> tmp(std::move(r));
- tmp.swap(*this);
- return *this;
- }
-
- void swap(scoped_refptr<T>& r) {
- T* tmp = ptr_;
- ptr_ = r.ptr_;
- r.ptr_ = tmp;
- }
+ void swap(scoped_refptr& r) noexcept { std::swap(ptr_, r.ptr_); }
explicit operator bool() const { return ptr_ != nullptr; }
@@ -351,7 +315,7 @@ std::ostream& operator<<(std::ostream& out, const scoped_refptr<T>& p) {
}
template <typename T>
-void swap(scoped_refptr<T>& lhs, scoped_refptr<T>& rhs) {
+void swap(scoped_refptr<T>& lhs, scoped_refptr<T>& rhs) noexcept {
lhs.swap(rhs);
}
diff --git a/chromium/base/memory/shared_memory.h b/chromium/base/memory/shared_memory.h
index 47c33522625..9a4159855b6 100644
--- a/chromium/base/memory/shared_memory.h
+++ b/chromium/base/memory/shared_memory.h
@@ -208,7 +208,7 @@ class BASE_EXPORT SharedMemory {
// that takes ownership of the handle. As such, it's not valid to pass the
// sample handle to the IPC subsystem twice. Returns an invalid handle on
// failure.
- SharedMemoryHandle GetReadOnlyHandle();
+ SharedMemoryHandle GetReadOnlyHandle() const;
// Returns an ID for the mapped region. This is ID of the SharedMemoryHandle
// that was mapped. The ID is valid even after the SharedMemoryHandle is
diff --git a/chromium/base/memory/shared_memory_fuchsia.cc b/chromium/base/memory/shared_memory_fuchsia.cc
index 5262e16c8ad..15211d9fbac 100644
--- a/chromium/base/memory/shared_memory_fuchsia.cc
+++ b/chromium/base/memory/shared_memory_fuchsia.cc
@@ -149,7 +149,7 @@ SharedMemoryHandle SharedMemory::DuplicateHandle(
return handle.Duplicate();
}
-SharedMemoryHandle SharedMemory::GetReadOnlyHandle() {
+SharedMemoryHandle SharedMemory::GetReadOnlyHandle() const {
zx_handle_t duped_handle;
const int kNoWriteOrExec =
ZX_DEFAULT_VMO_RIGHTS &
diff --git a/chromium/base/memory/shared_memory_handle.h b/chromium/base/memory/shared_memory_handle.h
index c4e140e6126..22b9737b4b9 100644
--- a/chromium/base/memory/shared_memory_handle.h
+++ b/chromium/base/memory/shared_memory_handle.h
@@ -11,8 +11,8 @@
#include "build/build_config.h"
#if defined(OS_WIN)
-#include <windows.h>
#include "base/process/process_handle.h"
+#include "base/win/windows_types.h"
#elif defined(OS_MACOSX) && !defined(OS_IOS)
#include <mach/mach.h>
#include "base/base_export.h"
diff --git a/chromium/base/memory/shared_memory_handle_win.cc b/chromium/base/memory/shared_memory_handle_win.cc
index 8b728565a71..8c11d39c871 100644
--- a/chromium/base/memory/shared_memory_handle_win.cc
+++ b/chromium/base/memory/shared_memory_handle_win.cc
@@ -7,6 +7,8 @@
#include "base/logging.h"
#include "base/unguessable_token.h"
+#include <windows.h>
+
namespace base {
SharedMemoryHandle::SharedMemoryHandle() {}
diff --git a/chromium/base/memory/shared_memory_mac.cc b/chromium/base/memory/shared_memory_mac.cc
index d2bb5ec5b27..e2735f7f963 100644
--- a/chromium/base/memory/shared_memory_mac.cc
+++ b/chromium/base/memory/shared_memory_mac.cc
@@ -244,7 +244,7 @@ void SharedMemory::Close() {
}
}
-SharedMemoryHandle SharedMemory::GetReadOnlyHandle() {
+SharedMemoryHandle SharedMemory::GetReadOnlyHandle() const {
if (shm_.type_ == SharedMemoryHandle::POSIX) {
// We could imagine re-opening the file from /dev/fd, but that can't make it
// readonly on Mac: https://codereview.chromium.org/27265002/#msg10.
diff --git a/chromium/base/memory/shared_memory_nacl.cc b/chromium/base/memory/shared_memory_nacl.cc
index 148e6b0c485..442c0360f29 100644
--- a/chromium/base/memory/shared_memory_nacl.cc
+++ b/chromium/base/memory/shared_memory_nacl.cc
@@ -130,7 +130,7 @@ void SharedMemory::Close() {
}
}
-SharedMemoryHandle SharedMemory::GetReadOnlyHandle() {
+SharedMemoryHandle SharedMemory::GetReadOnlyHandle() const {
// Untrusted code can't create descriptors or handles, which is needed to
// drop permissions.
return SharedMemoryHandle();
diff --git a/chromium/base/memory/shared_memory_posix.cc b/chromium/base/memory/shared_memory_posix.cc
index c148d34d4ed..f0e4f95b84f 100644
--- a/chromium/base/memory/shared_memory_posix.cc
+++ b/chromium/base/memory/shared_memory_posix.cc
@@ -363,7 +363,7 @@ bool SharedMemory::FilePathForMemoryName(const std::string& mem_name,
}
#endif // !defined(OS_ANDROID)
-SharedMemoryHandle SharedMemory::GetReadOnlyHandle() {
+SharedMemoryHandle SharedMemory::GetReadOnlyHandle() const {
CHECK(readonly_shm_.IsValid());
return readonly_shm_.Duplicate();
}
diff --git a/chromium/base/memory/shared_memory_unittest.cc b/chromium/base/memory/shared_memory_unittest.cc
index 19a1245517d..d3c31dbd569 100644
--- a/chromium/base/memory/shared_memory_unittest.cc
+++ b/chromium/base/memory/shared_memory_unittest.cc
@@ -10,6 +10,8 @@
#include <memory>
#include "base/atomicops.h"
+#include "base/base_switches.h"
+#include "base/command_line.h"
#include "base/macros.h"
#include "base/memory/shared_memory_handle.h"
#include "base/process/kill.h"
@@ -100,12 +102,36 @@ const char MultipleThreadMain::s_test_name_[] =
"SharedMemoryOpenThreadTest";
#endif // !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
+enum class Mode {
+ Default,
+#if defined(OS_LINUX) && !defined(OS_CHROMEOS)
+ DisableDevShm = 1,
+#endif
+};
+
+class SharedMemoryTest : public ::testing::TestWithParam<Mode> {
+ public:
+ void SetUp() override {
+ switch (GetParam()) {
+ case Mode::Default:
+ break;
+#if defined(OS_LINUX) && !defined(OS_CHROMEOS)
+ case Mode::DisableDevShm:
+ CommandLine* cmdline = CommandLine::ForCurrentProcess();
+ cmdline->AppendSwitch(switches::kDisableDevShmUsage);
+ break;
+#endif // defined(OS_LINUX) && !defined(OS_CHROMEOS)
+ }
+ }
+};
+
} // namespace
// Android/Mac/Fuchsia doesn't support SharedMemory::Open/Delete/
// CreateNamedDeprecated(openExisting=true)
#if !defined(OS_ANDROID) && !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
-TEST(SharedMemoryTest, OpenClose) {
+
+TEST_P(SharedMemoryTest, OpenClose) {
const uint32_t kDataSize = 1024;
std::string test_name = "SharedMemoryOpenCloseTest";
@@ -153,7 +179,7 @@ TEST(SharedMemoryTest, OpenClose) {
EXPECT_TRUE(rv);
}
-TEST(SharedMemoryTest, OpenExclusive) {
+TEST_P(SharedMemoryTest, OpenExclusive) {
const uint32_t kDataSize = 1024;
const uint32_t kDataSize2 = 2048;
std::ostringstream test_name_stream;
@@ -221,7 +247,7 @@ TEST(SharedMemoryTest, OpenExclusive) {
#endif // !defined(OS_ANDROID) && !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
// Check that memory is still mapped after its closed.
-TEST(SharedMemoryTest, CloseNoUnmap) {
+TEST_P(SharedMemoryTest, CloseNoUnmap) {
const size_t kDataSize = 4096;
SharedMemory memory;
@@ -246,7 +272,7 @@ TEST(SharedMemoryTest, CloseNoUnmap) {
#if !defined(OS_MACOSX) && !defined(OS_FUCHSIA)
// Create a set of N threads to each open a shared memory segment and write to
// it. Verify that they are always reading/writing consistent data.
-TEST(SharedMemoryTest, MultipleThreads) {
+TEST_P(SharedMemoryTest, MultipleThreads) {
const int kNumThreads = 5;
MultipleThreadMain::CleanUp();
@@ -287,7 +313,7 @@ TEST(SharedMemoryTest, MultipleThreads) {
// Allocate private (unique) shared memory with an empty string for a
// name. Make sure several of them don't point to the same thing as
// we might expect if the names are equal.
-TEST(SharedMemoryTest, AnonymousPrivate) {
+TEST_P(SharedMemoryTest, AnonymousPrivate) {
int i, j;
int count = 4;
bool rv;
@@ -328,7 +354,7 @@ TEST(SharedMemoryTest, AnonymousPrivate) {
}
}
-TEST(SharedMemoryTest, GetReadOnlyHandle) {
+TEST_P(SharedMemoryTest, GetReadOnlyHandle) {
StringPiece contents = "Hello World";
SharedMemory writable_shmem;
@@ -430,7 +456,7 @@ TEST(SharedMemoryTest, GetReadOnlyHandle) {
#endif // defined(OS_POSIX) || defined(OS_WIN)
}
-TEST(SharedMemoryTest, ShareToSelf) {
+TEST_P(SharedMemoryTest, ShareToSelf) {
StringPiece contents = "Hello World";
SharedMemory shmem;
@@ -461,7 +487,7 @@ TEST(SharedMemoryTest, ShareToSelf) {
contents.size()));
}
-TEST(SharedMemoryTest, ShareWithMultipleInstances) {
+TEST_P(SharedMemoryTest, ShareWithMultipleInstances) {
static const StringPiece kContents = "Hello World";
SharedMemory shmem;
@@ -505,7 +531,7 @@ TEST(SharedMemoryTest, ShareWithMultipleInstances) {
ASSERT_EQ(StringPiece(ToLowerASCII(kContents)), readonly_contents);
}
-TEST(SharedMemoryTest, MapAt) {
+TEST_P(SharedMemoryTest, MapAt) {
ASSERT_TRUE(SysInfo::VMAllocationGranularity() >= sizeof(uint32_t));
const size_t kCount = SysInfo::VMAllocationGranularity();
const size_t kDataSize = kCount * sizeof(uint32_t);
@@ -531,7 +557,7 @@ TEST(SharedMemoryTest, MapAt) {
}
}
-TEST(SharedMemoryTest, MapTwice) {
+TEST_P(SharedMemoryTest, MapTwice) {
const uint32_t kDataSize = 1024;
SharedMemory memory;
bool rv = memory.CreateAndMapAnonymous(kDataSize);
@@ -548,7 +574,7 @@ TEST(SharedMemoryTest, MapTwice) {
// This test is not applicable for iOS (crbug.com/399384).
#if !defined(OS_IOS)
// Create a shared memory object, mmap it, and mprotect it to PROT_EXEC.
-TEST(SharedMemoryTest, AnonymousExecutable) {
+TEST_P(SharedMemoryTest, AnonymousExecutable) {
const uint32_t kTestSize = 1 << 16;
SharedMemory shared_memory;
@@ -588,7 +614,7 @@ class ScopedUmaskSetter {
};
// Create a shared memory object, check its permissions.
-TEST(SharedMemoryTest, FilePermissionsAnonymous) {
+TEST_P(SharedMemoryTest, FilePermissionsAnonymous) {
const uint32_t kTestSize = 1 << 8;
SharedMemory shared_memory;
@@ -614,7 +640,7 @@ TEST(SharedMemoryTest, FilePermissionsAnonymous) {
}
// Create a shared memory object, check its permissions.
-TEST(SharedMemoryTest, FilePermissionsNamed) {
+TEST_P(SharedMemoryTest, FilePermissionsNamed) {
const uint32_t kTestSize = 1 << 8;
SharedMemory shared_memory;
@@ -645,7 +671,7 @@ TEST(SharedMemoryTest, FilePermissionsNamed) {
// Map() will return addresses which are aligned to the platform page size, this
// varies from platform to platform though. Since we'd like to advertise a
// minimum alignment that callers can count on, test for it here.
-TEST(SharedMemoryTest, MapMinimumAlignment) {
+TEST_P(SharedMemoryTest, MapMinimumAlignment) {
static const int kDataSize = 8192;
SharedMemory shared_memory;
@@ -656,7 +682,7 @@ TEST(SharedMemoryTest, MapMinimumAlignment) {
}
#if defined(OS_WIN)
-TEST(SharedMemoryTest, UnsafeImageSection) {
+TEST_P(SharedMemoryTest, UnsafeImageSection) {
const char kTestSectionName[] = "UnsafeImageSection";
wchar_t path[MAX_PATH];
EXPECT_GT(::GetModuleFileName(nullptr, path, arraysize(path)), 0U);
@@ -784,7 +810,7 @@ MULTIPROCESS_TEST_MAIN(SharedMemoryTestMain) {
#endif // !defined(OS_IOS) && !defined(OS_ANDROID) && !defined(OS_MACOSX) &&
// !defined(OS_FUCHSIA)
-TEST(SharedMemoryTest, MappedId) {
+TEST_P(SharedMemoryTest, MappedId) {
const uint32_t kDataSize = 1024;
SharedMemory memory;
SharedMemoryCreateOptions options;
@@ -809,4 +835,13 @@ TEST(SharedMemoryTest, MappedId) {
EXPECT_TRUE(memory.mapped_id().is_empty());
}
+INSTANTIATE_TEST_CASE_P(Default,
+ SharedMemoryTest,
+ ::testing::Values(Mode::Default));
+#if defined(OS_LINUX) && !defined(OS_CHROMEOS)
+INSTANTIATE_TEST_CASE_P(SkipDevShm,
+ SharedMemoryTest,
+ ::testing::Values(Mode::DisableDevShm));
+#endif // defined(OS_LINUX) && !defined(OS_CHROMEOS)
+
} // namespace base
diff --git a/chromium/base/memory/shared_memory_win.cc b/chromium/base/memory/shared_memory_win.cc
index b05be755e0f..5540004730b 100644
--- a/chromium/base/memory/shared_memory_win.cc
+++ b/chromium/base/memory/shared_memory_win.cc
@@ -8,14 +8,17 @@
#include <stddef.h>
#include <stdint.h>
+#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/logging.h"
#include "base/memory/shared_memory_tracker.h"
+#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_macros.h"
#include "base/rand_util.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
#include "base/unguessable_token.h"
+namespace base {
namespace {
// Errors that can occur during Shared Memory construction.
@@ -41,7 +44,7 @@ void LogError(CreateError error, DWORD winerror) {
CREATE_ERROR_LAST + 1);
static_assert(ERROR_SUCCESS == 0, "Windows error code changed!");
if (winerror != ERROR_SUCCESS)
- UMA_HISTOGRAM_SPARSE_SLOWLY("SharedMemory.CreateWinError", winerror);
+ UmaHistogramSparse("SharedMemory.CreateWinError", winerror);
}
typedef enum _SECTION_INFORMATION_CLASS {
@@ -135,8 +138,6 @@ HANDLE CreateFileMappingWithReducedPermissions(SECURITY_ATTRIBUTES* sa,
} // namespace.
-namespace base {
-
SharedMemory::SharedMemory() {}
SharedMemory::SharedMemory(const string16& name) : name_(name) {}
@@ -310,10 +311,17 @@ bool SharedMemory::MapAt(off_t offset, size_t bytes) {
return false;
}
- memory_ = MapViewOfFile(
- shm_.GetHandle(),
- read_only_ ? FILE_MAP_READ : FILE_MAP_READ | FILE_MAP_WRITE,
- static_cast<uint64_t>(offset) >> 32, static_cast<DWORD>(offset), bytes);
+ // Try to map the shared memory. On the first failure, release any reserved
+ // address space for a single retry.
+ for (int i = 0; i < 2; ++i) {
+ memory_ = MapViewOfFile(
+ shm_.GetHandle(),
+ read_only_ ? FILE_MAP_READ : FILE_MAP_READ | FILE_MAP_WRITE,
+ static_cast<uint64_t>(offset) >> 32, static_cast<DWORD>(offset), bytes);
+ if (memory_)
+ break;
+ ReleaseReservation();
+ }
if (!memory_) {
DPLOG(ERROR) << "Failed executing MapViewOfFile";
return false;
@@ -338,7 +346,7 @@ bool SharedMemory::Unmap() {
return true;
}
-SharedMemoryHandle SharedMemory::GetReadOnlyHandle() {
+SharedMemoryHandle SharedMemory::GetReadOnlyHandle() const {
HANDLE result;
ProcessHandle process = GetCurrentProcess();
if (!::DuplicateHandle(process, shm_.GetHandle(), process, &result,
diff --git a/chromium/base/memory/singleton.cc b/chromium/base/memory/singleton.cc
deleted file mode 100644
index f68ecaa8da9..00000000000
--- a/chromium/base/memory/singleton.cc
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/singleton.h"
-#include "base/threading/platform_thread.h"
-
-namespace base {
-namespace internal {
-
-subtle::AtomicWord WaitForInstance(subtle::AtomicWord* instance) {
- // Handle the race. Another thread beat us and either:
- // - Has the object in BeingCreated state
- // - Already has the object created...
- // We know value != NULL. It could be kBeingCreatedMarker, or a valid ptr.
- // Unless your constructor can be very time consuming, it is very unlikely
- // to hit this race. When it does, we just spin and yield the thread until
- // the object has been created.
- subtle::AtomicWord value;
- while (true) {
- // The load has acquire memory ordering as the thread which reads the
- // instance pointer must acquire visibility over the associated data.
- // The pairing Release_Store operation is in Singleton::get().
- value = subtle::Acquire_Load(instance);
- if (value != kBeingCreatedMarker)
- break;
- PlatformThread::YieldCurrentThread();
- }
- return value;
-}
-
-} // namespace internal
-} // namespace base
-
diff --git a/chromium/base/memory/singleton.h b/chromium/base/memory/singleton.h
index d6049100240..ba1190d65c7 100644
--- a/chromium/base/memory/singleton.h
+++ b/chromium/base/memory/singleton.h
@@ -1,8 +1,16 @@
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-
-// PLEASE READ: Do you really need a singleton?
+//
+// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+// PLEASE READ: Do you really need a singleton? If possible, use a
+// function-local static of type base::NoDestructor<T> instead:
+//
+// Factory& Factory::GetInstance() {
+// static base::NoDestructor<Factory> instance;
+// return *instance;
+// }
+// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
//
// Singletons make it hard to determine the lifetime of an object, which can
// lead to buggy code and spurious crashes.
@@ -22,26 +30,17 @@
#include "base/at_exit.h"
#include "base/atomicops.h"
#include "base/base_export.h"
+#include "base/lazy_instance_helpers.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/threading/thread_restrictions.h"
namespace base {
-namespace internal {
-
-// Our AtomicWord doubles as a spinlock, where a value of
-// kBeingCreatedMarker means the spinlock is being held for creation.
-static const subtle::AtomicWord kBeingCreatedMarker = 1;
-
-// We pull out some of the functionality into a non-templated function, so that
-// we can implement the more complicated pieces out of line in the .cc file.
-BASE_EXPORT subtle::AtomicWord WaitForInstance(subtle::AtomicWord* instance);
+namespace internal {
class DeleteTraceLogForTesting;
-
} // namespace internal
-
// Default traits for Singleton<Type>. Calls operator new and operator delete on
// the object. Registers automatic deletion at process exit.
// Overload if you need arguments or another memory allocation function.
@@ -83,11 +82,10 @@ struct LeakySingletonTraits : public DefaultSingletonTraits<Type> {
#endif
};
-
// Alternate traits for use with the Singleton<Type>. Allocates memory
// for the singleton instance from a static buffer. The singleton will
// be cleaned up at exit, but can't be revived after destruction unless
-// the Resurrect() method is called.
+// the ResurrectForTesting() method is called.
//
// This is useful for a certain category of things, notably logging and
// tracing, where the singleton instance is of a type carefully constructed to
@@ -107,26 +105,27 @@ struct LeakySingletonTraits : public DefaultSingletonTraits<Type> {
// process once you've unloaded.
template <typename Type>
struct StaticMemorySingletonTraits {
- // WARNING: User has to deal with get() in the singleton class
- // this is traits for returning NULL.
+ // WARNING: User has to support a New() which returns null.
static Type* New() {
- // Only constructs once and returns pointer; otherwise returns NULL.
+ // Only constructs once and returns pointer; otherwise returns null.
if (subtle::NoBarrier_AtomicExchange(&dead_, 1))
- return NULL;
+ return nullptr;
return new (buffer_) Type();
}
static void Delete(Type* p) {
- if (p != NULL)
+ if (p)
p->Type::~Type();
}
static const bool kRegisterAtExit = true;
+
+#if DCHECK_IS_ON()
static const bool kAllowedToAccessOnNonjoinableThread = true;
+#endif
- // Exposed for unittesting.
- static void Resurrect() { subtle::NoBarrier_Store(&dead_, 0); }
+ static void ResurrectForTesting() { subtle::NoBarrier_Store(&dead_, 0); }
private:
alignas(Type) static char buffer_[sizeof(Type)];
@@ -237,40 +236,13 @@ class Singleton {
// Return a pointer to the one true instance of the class.
static Type* get() {
#if DCHECK_IS_ON()
- // Avoid making TLS lookup on release builds.
if (!Traits::kAllowedToAccessOnNonjoinableThread)
ThreadRestrictions::AssertSingletonAllowed();
#endif
- // The load has acquire memory ordering as the thread which reads the
- // instance_ pointer must acquire visibility over the singleton data.
- subtle::AtomicWord value = subtle::Acquire_Load(&instance_);
- if (value != 0 && value != internal::kBeingCreatedMarker) {
- return reinterpret_cast<Type*>(value);
- }
-
- // Object isn't created yet, maybe we will get to create it, let's try...
- if (subtle::Acquire_CompareAndSwap(&instance_, 0,
- internal::kBeingCreatedMarker) == 0) {
- // instance_ was NULL and is now kBeingCreatedMarker. Only one thread
- // will ever get here. Threads might be spinning on us, and they will
- // stop right after we do this store.
- Type* newval = Traits::New();
-
- // Releases the visibility over instance_ to the readers.
- subtle::Release_Store(&instance_,
- reinterpret_cast<subtle::AtomicWord>(newval));
-
- if (newval != NULL && Traits::kRegisterAtExit)
- AtExitManager::RegisterCallback(OnExit, NULL);
-
- return newval;
- }
-
- // We hit a race. Wait for the other thread to complete it.
- value = internal::WaitForInstance(&instance_);
-
- return reinterpret_cast<Type*>(value);
+ return static_cast<Type*>(internal::GetOrCreateLazyPointer(
+ &instance_, &Traits::New, Traits::kRegisterAtExit ? OnExit : nullptr,
+ nullptr));
}
// Adapter function for use with AtExit(). This should be called single
diff --git a/chromium/base/memory/singleton_unittest.cc b/chromium/base/memory/singleton_unittest.cc
index 50b862d3eb0..06e53b24cd8 100644
--- a/chromium/base/memory/singleton_unittest.cc
+++ b/chromium/base/memory/singleton_unittest.cc
@@ -265,7 +265,7 @@ TEST_F(SingletonTest, Basic) {
{
// Resurrect the static singleton, and assert that it
// still points to the same (static) memory.
- CallbackSingletonWithStaticTrait::Trait::Resurrect();
+ CallbackSingletonWithStaticTrait::Trait::ResurrectForTesting();
EXPECT_EQ(GetStaticSingleton(), static_singleton);
}
}
diff --git a/chromium/base/memory/weak_ptr.h b/chromium/base/memory/weak_ptr.h
index 1737a674d36..9d35ff3ad26 100644
--- a/chromium/base/memory/weak_ptr.h
+++ b/chromium/base/memory/weak_ptr.h
@@ -215,7 +215,7 @@ template <typename T> class WeakPtrFactory;
template <typename T>
class WeakPtr : public internal::WeakPtrBase {
public:
- WeakPtr() {}
+ WeakPtr() = default;
WeakPtr(std::nullptr_t) {}
@@ -306,7 +306,7 @@ class WeakPtrFactory : public internal::WeakPtrFactoryBase {
explicit WeakPtrFactory(T* ptr)
: WeakPtrFactoryBase(reinterpret_cast<uintptr_t>(ptr)) {}
- ~WeakPtrFactory() {}
+ ~WeakPtrFactory() = default;
WeakPtr<T> GetWeakPtr() {
DCHECK(ptr_);
@@ -338,14 +338,14 @@ class WeakPtrFactory : public internal::WeakPtrFactoryBase {
template <class T>
class SupportsWeakPtr : public internal::SupportsWeakPtrBase {
public:
- SupportsWeakPtr() {}
+ SupportsWeakPtr() = default;
WeakPtr<T> AsWeakPtr() {
return WeakPtr<T>(weak_reference_owner_.GetRef(), static_cast<T*>(this));
}
protected:
- ~SupportsWeakPtr() {}
+ ~SupportsWeakPtr() = default;
private:
internal::WeakReferenceOwner weak_reference_owner_;
diff --git a/chromium/base/message_loop/incoming_task_queue.cc b/chromium/base/message_loop/incoming_task_queue.cc
index 941cbd8dabb..9f5f855a4b8 100644
--- a/chromium/base/message_loop/incoming_task_queue.cc
+++ b/chromium/base/message_loop/incoming_task_queue.cc
@@ -85,11 +85,6 @@ bool IncomingTaskQueue::AddToIncomingQueue(const Location& from_here,
return PostPendingTask(&pending_task);
}
-bool IncomingTaskQueue::IsIdleForTesting() {
- AutoLock lock(incoming_queue_lock_);
- return incoming_queue_.empty();
-}
-
void IncomingTaskQueue::WillDestroyCurrentMessageLoop() {
{
AutoLock auto_lock(incoming_queue_lock_);
@@ -109,11 +104,12 @@ void IncomingTaskQueue::StartScheduling() {
DCHECK(!message_loop_scheduled_);
is_ready_for_scheduling_ = true;
schedule_work = !incoming_queue_.empty();
+ if (schedule_work)
+ message_loop_scheduled_ = true;
}
if (schedule_work) {
DCHECK(message_loop_);
- // Don't need to lock |message_loop_lock_| here because this function is
- // called by MessageLoop on its thread.
+ AutoLock auto_lock(message_loop_lock_);
message_loop_->ScheduleWork();
}
}
@@ -181,6 +177,7 @@ void IncomingTaskQueue::TriageQueue::Clear() {
}
void IncomingTaskQueue::TriageQueue::ReloadFromIncomingQueueIfEmpty() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(outer_->sequence_checker_);
if (queue_.empty()) {
// TODO(robliao): Since these high resolution tasks aren't yet in the
// delayed queue, they technically shouldn't trigger high resolution timers
@@ -351,6 +348,8 @@ bool IncomingTaskQueue::PostPendingTaskLockRequired(PendingTask* pending_task) {
}
int IncomingTaskQueue::ReloadWorkQueue(TaskQueue* work_queue) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+
// Make sure no tasks are lost.
DCHECK(work_queue->empty());
diff --git a/chromium/base/message_loop/incoming_task_queue.h b/chromium/base/message_loop/incoming_task_queue.h
index 861f0fc1796..f158d2a93ff 100644
--- a/chromium/base/message_loop/incoming_task_queue.h
+++ b/chromium/base/message_loop/incoming_task_queue.h
@@ -77,9 +77,6 @@ class BASE_EXPORT IncomingTaskQueue
TimeDelta delay,
Nestable nestable);
- // Returns true if the message loop is "idle". Provided for testing.
- bool IsIdleForTesting();
-
// Disconnects |this| from the parent message loop.
void WillDestroyCurrentMessageLoop();
@@ -109,8 +106,9 @@ class BASE_EXPORT IncomingTaskQueue
// maintaining three queue queues to process tasks:
//
// TriageQueue
- // The first queue to receive all tasks for the processing sequence. Tasks are
- // generally either dispatched immediately or sent to the queues below.
+ // The first queue to receive all tasks for the processing sequence (when
+ // reloading from the thread-safe |incoming_queue_|). Tasks are generally
+ // either dispatched immediately or sent to the queues below.
//
// DelayedQueue
// The queue for holding tasks that should be run later and sorted by expected
@@ -226,8 +224,8 @@ class BASE_EXPORT IncomingTaskQueue
// Number of high resolution tasks in the sequence affine queues above.
int pending_high_res_tasks_ = 0;
- // Lock that protects |message_loop_| to prevent it from being deleted while
- // a request is made to schedule work.
+ // Lock that serializes |message_loop_->ScheduleWork()| calls as well as
+ // prevents |message_loop_| from being made nullptr during such a call.
base::Lock message_loop_lock_;
// Points to the message loop that owns |this|.
@@ -242,7 +240,7 @@ class BASE_EXPORT IncomingTaskQueue
// An incoming queue of tasks that are acquired under a mutex for processing
// on this instance's thread. These tasks have not yet been been pushed to
- // |message_loop_|.
+ // |triage_tasks_|.
TaskQueue incoming_queue_;
// True if new tasks should be accepted.
diff --git a/chromium/base/message_loop/message_loop.cc b/chromium/base/message_loop/message_loop.cc
index 2c8899bf05e..a51db8ba54e 100644
--- a/chromium/base/message_loop/message_loop.cc
+++ b/chromium/base/message_loop/message_loop.cc
@@ -258,9 +258,17 @@ void MessageLoop::RemoveTaskObserver(TaskObserver* task_observer) {
}
bool MessageLoop::IsIdleForTesting() {
- // We only check the incoming queue, since we don't want to lock the work
- // queue.
- return incoming_task_queue_->IsIdleForTesting();
+ // Have unprocessed tasks? (this reloads the work queue if necessary)
+ if (incoming_task_queue_->triage_tasks().HasTasks())
+ return false;
+
+ // Have unprocessed deferred tasks which can be processed at this run-level?
+ if (incoming_task_queue_->deferred_tasks().HasTasks() &&
+ !RunLoop::IsNestedOnCurrentThread()) {
+ return false;
+ }
+
+ return true;
}
//------------------------------------------------------------------------------
@@ -303,7 +311,7 @@ void MessageLoop::BindToCurrentThread() {
internal::ScopedSetSequenceLocalStorageMapForCurrentThread>(
&sequence_local_storage_map_);
- run_loop_client_ = RunLoop::RegisterDelegateForCurrentThread(this);
+ RunLoop::RegisterDelegateForCurrentThread(this);
}
std::string MessageLoop::GetThreadName() const {
@@ -334,7 +342,7 @@ void MessageLoop::Run(bool application_tasks_allowed) {
DCHECK_EQ(this, current());
if (application_tasks_allowed && !task_execution_allowed_) {
// Allow nested task execution as explicitly requested.
- DCHECK(run_loop_client_->IsNested());
+ DCHECK(RunLoop::IsNestedOnCurrentThread());
task_execution_allowed_ = true;
pump_->Run(this);
task_execution_allowed_ = false;
@@ -363,7 +371,7 @@ void MessageLoop::SetThreadTaskRunnerHandle() {
}
bool MessageLoop::ProcessNextDelayedNonNestableTask() {
- if (run_loop_client_->IsNested())
+ if (RunLoop::IsNestedOnCurrentThread())
return false;
while (incoming_task_queue_->deferred_tasks().HasTasks()) {
@@ -399,7 +407,7 @@ void MessageLoop::RunTask(PendingTask* pending_task) {
bool MessageLoop::DeferOrRunPendingTask(PendingTask pending_task) {
if (pending_task.nestable == Nestable::kNestable ||
- !run_loop_client_->IsNested()) {
+ !RunLoop::IsNestedOnCurrentThread()) {
RunTask(&pending_task);
// Show that we ran a task (Note: a new one might arrive as a
// consequence!).
@@ -491,7 +499,7 @@ bool MessageLoop::DoIdleWork() {
if (ProcessNextDelayedNonNestableTask())
return true;
- if (run_loop_client_->ShouldQuitWhenIdle())
+ if (ShouldQuitWhenIdle())
pump_->Quit();
// When we return we will do a kernel wait for more tasks.
diff --git a/chromium/base/message_loop/message_loop.h b/chromium/base/message_loop/message_loop.h
index bff60ef1102..27ee7fe8155 100644
--- a/chromium/base/message_loop/message_loop.h
+++ b/chromium/base/message_loop/message_loop.h
@@ -266,7 +266,11 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate,
void AddTaskObserver(TaskObserver* task_observer);
void RemoveTaskObserver(TaskObserver* task_observer);
- // Returns true if the message loop is "idle". Provided for testing.
+ // Returns true if the message loop is idle (ignoring delayed tasks). This is
+ // the same condition which triggers DoWork() to return false: i.e.
+ // out of tasks which can be processed at the current run-level -- there might
+ // be deferred non-nestable tasks remaining if currently in a nested run
+ // level.
bool IsIdleForTesting();
// Runs the specified PendingTask.
@@ -398,9 +402,6 @@ class BASE_EXPORT MessageLoop : public MessagePump::Delegate,
// Whether task observers are allowed.
bool allow_task_observers_ = true;
- // An interface back to RunLoop state accessible by this RunLoop::Delegate.
- RunLoop::Delegate::Client* run_loop_client_ = nullptr;
-
// Holds data stored through the SequenceLocalStorageSlot API.
internal::SequenceLocalStorageMap sequence_local_storage_map_;
diff --git a/chromium/base/message_loop/message_loop_io_posix_unittest.cc b/chromium/base/message_loop/message_loop_io_posix_unittest.cc
index f98d4668d3a..89962a3efc5 100644
--- a/chromium/base/message_loop/message_loop_io_posix_unittest.cc
+++ b/chromium/base/message_loop/message_loop_io_posix_unittest.cc
@@ -145,7 +145,6 @@ TEST_F(MessageLoopForIoPosixTest, FileDescriptorWatcherOutlivesMessageLoop) {
TEST_F(MessageLoopForIoPosixTest, FileDescriptorWatcherDoubleStop) {
// Verify that it's ok to call StopWatchingFileDescriptor().
- // (Errors only showed up in valgrind.)
// Arrange for message loop to live longer than watcher.
MessageLoopForIO message_loop;
diff --git a/chromium/base/message_loop/message_pump.h b/chromium/base/message_loop/message_pump.h
index cc7266304b2..dec0c94f620 100644
--- a/chromium/base/message_loop/message_pump.h
+++ b/chromium/base/message_loop/message_pump.h
@@ -19,7 +19,7 @@ class BASE_EXPORT MessagePump {
// these delegate methods are used.
class BASE_EXPORT Delegate {
public:
- virtual ~Delegate() {}
+ virtual ~Delegate() = default;
// Called from within Run in response to ScheduleWork or when the message
// pump would otherwise call DoDelayedWork. Returns true to indicate that
diff --git a/chromium/base/message_loop/message_pump_default.cc b/chromium/base/message_loop/message_pump_default.cc
index dba0f5ba4e2..4104e734655 100644
--- a/chromium/base/message_loop/message_pump_default.cc
+++ b/chromium/base/message_loop/message_pump_default.cc
@@ -89,7 +89,7 @@ void MessagePumpDefault::ScheduleDelayedWork(
void MessagePumpDefault::SetTimerSlack(TimerSlack timer_slack) {
thread_latency_qos_policy_data_t policy{};
policy.thread_latency_qos_tier = timer_slack == TIMER_SLACK_MAXIMUM
- ? LATENCY_QOS_TIER_5
+ ? LATENCY_QOS_TIER_3
: LATENCY_QOS_TIER_UNSPECIFIED;
mac::ScopedMachSendRight thread_port(mach_thread_self());
kern_return_t kr =
diff --git a/chromium/base/message_loop/message_pump_libevent.h b/chromium/base/message_loop/message_pump_libevent.h
index e14b58460b5..f4b58f59e43 100644
--- a/chromium/base/message_loop/message_pump_libevent.h
+++ b/chromium/base/message_loop/message_pump_libevent.h
@@ -32,7 +32,7 @@ class BASE_EXPORT MessagePumpLibevent : public MessagePump {
virtual void OnFileCanWriteWithoutBlocking(int fd) = 0;
protected:
- virtual ~Watcher() {}
+ virtual ~Watcher() = default;
};
// Object returned by WatchFileDescriptor to manage further watching.
diff --git a/chromium/base/message_loop/message_pump_libevent_unittest.cc b/chromium/base/message_loop/message_pump_libevent_unittest.cc
index da7c06ef820..8deea7fdf1f 100644
--- a/chromium/base/message_loop/message_pump_libevent_unittest.cc
+++ b/chromium/base/message_loop/message_pump_libevent_unittest.cc
@@ -21,6 +21,7 @@
#include "base/synchronization/waitable_event_watcher.h"
#include "base/test/gtest_util.h"
#include "base/third_party/libevent/event.h"
+#include "base/threading/sequenced_task_runner_handle.h"
#include "base/threading/thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "build/build_config.h"
@@ -259,9 +260,9 @@ TEST_F(MessagePumpLibeventTest, QuitWatcher) {
WaitableEventWatcher::EventCallback write_fd_task =
BindOnce(&WriteFDWrapper, pipefds_[1], &buf, 1);
io_loop()->task_runner()->PostTask(
- FROM_HERE,
- BindOnce(IgnoreResult(&WaitableEventWatcher::StartWatching),
- Unretained(watcher.get()), &event, std::move(write_fd_task)));
+ FROM_HERE, BindOnce(IgnoreResult(&WaitableEventWatcher::StartWatching),
+ Unretained(watcher.get()), &event,
+ std::move(write_fd_task), io_loop()->task_runner()));
// Queue |event| to signal on |loop|.
loop.task_runner()->PostTask(
diff --git a/chromium/base/message_loop/message_pump_mac.mm b/chromium/base/message_loop/message_pump_mac.mm
index 0451e33ab21..1a72da62abc 100644
--- a/chromium/base/message_loop/message_pump_mac.mm
+++ b/chromium/base/message_loop/message_pump_mac.mm
@@ -8,6 +8,7 @@
#include <limits>
+#include "base/auto_reset.h"
#include "base/logging.h"
#include "base/mac/call_with_eh_frame.h"
#include "base/mac/scoped_cftyperef.h"
@@ -719,13 +720,13 @@ MessagePumpNSRunLoop::~MessagePumpNSRunLoop() {
}
void MessagePumpNSRunLoop::DoRun(Delegate* delegate) {
+ AutoReset<bool> auto_reset_keep_running(&keep_running_, true);
+
while (keep_running_) {
// NSRunLoop manages autorelease pools itself.
[[NSRunLoop currentRunLoop] runMode:NSDefaultRunLoopMode
beforeDate:[NSDate distantFuture]];
}
-
- keep_running_ = true;
}
void MessagePumpNSRunLoop::Quit() {
@@ -789,6 +790,7 @@ MessagePumpNSApplication::~MessagePumpNSApplication() {
}
void MessagePumpNSApplication::DoRun(Delegate* delegate) {
+ AutoReset<bool> auto_reset_keep_running(&keep_running_, true);
bool last_running_own_loop_ = running_own_loop_;
// NSApp must be initialized by calling:
@@ -815,7 +817,6 @@ void MessagePumpNSApplication::DoRun(Delegate* delegate) {
[NSApp sendEvent:event];
}
}
- keep_running_ = true;
}
running_own_loop_ = last_running_own_loop_;
diff --git a/chromium/base/metrics/field_trial.cc b/chromium/base/metrics/field_trial.cc
index 72f25a9013e..7f7b372da00 100644
--- a/chromium/base/metrics/field_trial.cc
+++ b/chromium/base/metrics/field_trial.cc
@@ -199,7 +199,7 @@ void AddFeatureAndFieldTrialFlags(const char* enable_features_switch,
cmd_line->AppendSwitchASCII(disable_features_switch, disabled_features);
std::string field_trial_states;
- FieldTrialList::AllStatesToString(&field_trial_states);
+ FieldTrialList::AllStatesToString(&field_trial_states, false);
if (!field_trial_states.empty()) {
cmd_line->AppendSwitchASCII(switches::kForceFieldTrials,
field_trial_states);
@@ -456,18 +456,9 @@ bool FieldTrial::GetActiveGroup(ActiveGroup* active_group) const {
return true;
}
-bool FieldTrial::GetState(State* field_trial_state) {
- if (!enable_field_trial_)
- return false;
- FinalizeGroupChoice();
- field_trial_state->trial_name = &trial_name_;
- field_trial_state->group_name = &group_name_;
- field_trial_state->activated = group_reported_;
- return true;
-}
-
-bool FieldTrial::GetStateWhileLocked(State* field_trial_state) {
- if (!enable_field_trial_)
+bool FieldTrial::GetStateWhileLocked(State* field_trial_state,
+ bool include_expired) {
+ if (!include_expired && !enable_field_trial_)
return false;
FinalizeGroupChoiceImpl(true);
field_trial_state->trial_name = &trial_name_;
@@ -648,14 +639,15 @@ void FieldTrialList::StatesToString(std::string* output) {
}
// static
-void FieldTrialList::AllStatesToString(std::string* output) {
+void FieldTrialList::AllStatesToString(std::string* output,
+ bool include_expired) {
if (!global_)
return;
AutoLock auto_lock(global_->lock_);
for (const auto& registered : global_->registered_) {
FieldTrial::State trial;
- if (!registered.second->GetStateWhileLocked(&trial))
+ if (!registered.second->GetStateWhileLocked(&trial, include_expired))
continue;
DCHECK_EQ(std::string::npos,
trial.trial_name->find(kPersistentStringSeparator));
@@ -1335,7 +1327,7 @@ void FieldTrialList::AddToAllocatorWhileLocked(
return;
FieldTrial::State trial_state;
- if (!field_trial->GetStateWhileLocked(&trial_state))
+ if (!field_trial->GetStateWhileLocked(&trial_state, false))
return;
// Or if we've already added it. We must check after GetState since it can
diff --git a/chromium/base/metrics/field_trial.h b/chromium/base/metrics/field_trial.h
index 19387a5f4c7..f794803f315 100644
--- a/chromium/base/metrics/field_trial.h
+++ b/chromium/base/metrics/field_trial.h
@@ -320,15 +320,13 @@ class BASE_EXPORT FieldTrial : public RefCounted<FieldTrial> {
bool GetActiveGroup(ActiveGroup* active_group) const;
// Returns the trial name and selected group name for this field trial via
- // the output parameter |field_trial_state|, but only if the trial has not
- // been disabled. In that case, true is returned and |field_trial_state| is
- // filled in; otherwise, the result is false and |field_trial_state| is left
- // untouched.
- bool GetState(State* field_trial_state);
-
- // Does the same thing as above, but is deadlock-free if the caller is holding
- // a lock.
- bool GetStateWhileLocked(State* field_trial_state);
+ // the output parameter |field_trial_state| for all the studies when
+ // |bool include_expired| is true. In case when |bool include_expired| is
+ // false, if the trial has not been disabled true is returned and
+ // |field_trial_state| is filled in; otherwise, the result is false and
+ // |field_trial_state| is left untouched.
+ // This function is deadlock-free if the caller is holding a lock.
+ bool GetStateWhileLocked(State* field_trial_state, bool include_expired);
// Returns the group_name. A winner need not have been chosen.
std::string group_name_internal() const { return group_name_; }
@@ -506,11 +504,11 @@ class BASE_EXPORT FieldTrialList {
// resurrection in another process. This allows randomization to be done in
// one process, and secondary processes can be synchronized on the result.
// The resulting string contains the name and group name pairs of all
- // registered FieldTrials which have not been disabled, with "/" used
- // to separate all names and to terminate the string. All activated trials
- // have their name prefixed with "*". This string is parsed by
- // |CreateTrialsFromString()|.
- static void AllStatesToString(std::string* output);
+ // registered FieldTrials including disabled based on |include_expired|,
+ // with "/" used to separate all names and to terminate the string. All
+ // activated trials have their name prefixed with "*". This string is parsed
+ // by |CreateTrialsFromString()|.
+ static void AllStatesToString(std::string* output, bool include_expired);
// Fills in the supplied vector |active_groups| (which must be empty when
// called) with a snapshot of all registered FieldTrials for which the group
diff --git a/chromium/base/metrics/field_trial_unittest.cc b/chromium/base/metrics/field_trial_unittest.cc
index 42c3ba1e436..56f727840d2 100644
--- a/chromium/base/metrics/field_trial_unittest.cc
+++ b/chromium/base/metrics/field_trial_unittest.cc
@@ -86,8 +86,7 @@ class FieldTrialTest : public testing::Test {
DISALLOW_COPY_AND_ASSIGN(FieldTrialTest);
};
-// Test registration, and also check that destructors are called for trials
-// (and that Valgrind doesn't catch us leaking).
+// Test registration, and also check that destructors are called for trials.
TEST_F(FieldTrialTest, Registration) {
const char name1[] = "name 1 test";
const char name2[] = "name 2 test";
@@ -334,36 +333,6 @@ TEST_F(FieldTrialTest, GetActiveFieldTrialGroupsFromString) {
EXPECT_EQ("Z", active_groups[1].group_name);
}
-TEST_F(FieldTrialTest, AllGroups) {
- FieldTrial::State field_trial_state;
- std::string one_winner("One Winner");
- scoped_refptr<FieldTrial> trial =
- CreateFieldTrial(one_winner, 10, "Default", nullptr);
- std::string winner("Winner");
- trial->AppendGroup(winner, 10);
- EXPECT_TRUE(trial->GetState(&field_trial_state));
- EXPECT_EQ(one_winner, *field_trial_state.trial_name);
- EXPECT_EQ(winner, *field_trial_state.group_name);
- trial->group();
- EXPECT_TRUE(trial->GetState(&field_trial_state));
- EXPECT_EQ(one_winner, *field_trial_state.trial_name);
- EXPECT_EQ(winner, *field_trial_state.group_name);
-
- std::string multi_group("MultiGroup");
- scoped_refptr<FieldTrial> multi_group_trial =
- CreateFieldTrial(multi_group, 9, "Default", nullptr);
-
- multi_group_trial->AppendGroup("Me", 3);
- multi_group_trial->AppendGroup("You", 3);
- multi_group_trial->AppendGroup("Them", 3);
- EXPECT_TRUE(multi_group_trial->GetState(&field_trial_state));
- // Finalize the group selection by accessing the selected group.
- multi_group_trial->group();
- EXPECT_TRUE(multi_group_trial->GetState(&field_trial_state));
- EXPECT_EQ(multi_group, *field_trial_state.trial_name);
- EXPECT_EQ(multi_group_trial->group_name(), *field_trial_state.group_name);
-}
-
TEST_F(FieldTrialTest, ActiveGroupsNotFinalized) {
const char kTrialName[] = "TestTrial";
const char kSecondaryGroupName[] = "SecondaryGroup";
@@ -469,7 +438,7 @@ TEST_F(FieldTrialTest, SaveAll) {
scoped_refptr<FieldTrial> trial =
CreateFieldTrial("Some name", 10, "Default some name", nullptr);
EXPECT_EQ("", trial->group_name_internal());
- FieldTrialList::AllStatesToString(&save_string);
+ FieldTrialList::AllStatesToString(&save_string, false);
EXPECT_EQ("Some name/Default some name/", save_string);
// Getting all states should have finalized the trial.
EXPECT_EQ("Default some name", trial->group_name_internal());
@@ -480,7 +449,7 @@ TEST_F(FieldTrialTest, SaveAll) {
trial->AppendGroup("Winner", 10);
// Finalize the group selection by accessing the selected group.
trial->group();
- FieldTrialList::AllStatesToString(&save_string);
+ FieldTrialList::AllStatesToString(&save_string, false);
EXPECT_EQ("Some name/Default some name/*trial2/Winner/", save_string);
save_string.clear();
@@ -491,7 +460,7 @@ TEST_F(FieldTrialTest, SaveAll) {
// Finalize the group selection by accessing the selected group.
trial2->group();
- FieldTrialList::AllStatesToString(&save_string);
+ FieldTrialList::AllStatesToString(&save_string, false);
// We assume names are alphabetized... though this is not critical.
EXPECT_EQ("Some name/Default some name/*trial2/Winner/*xxx/yyyy/",
save_string);
@@ -501,9 +470,29 @@ TEST_F(FieldTrialTest, SaveAll) {
scoped_refptr<FieldTrial> trial3 =
CreateFieldTrial("zzz", 10, "default", nullptr);
- FieldTrialList::AllStatesToString(&save_string);
+ FieldTrialList::AllStatesToString(&save_string, false);
+ EXPECT_EQ("Some name/Default some name/*trial2/Winner/*xxx/yyyy/zzz/default/",
+ save_string);
+
+ // Create expired study.
+ int default_group_number = -1;
+ scoped_refptr<FieldTrial> expired_trial =
+ FieldTrialList::FactoryGetFieldTrial(
+ "Expired trial name", 1000000000, "Default group",
+ OneYearBeforeBuildTime(), 1, 1, base::FieldTrial::SESSION_RANDOMIZED,
+ &default_group_number);
+ expired_trial->AppendGroup("Expired trial group name", 999999999);
+
+ save_string.clear();
+ FieldTrialList::AllStatesToString(&save_string, false);
EXPECT_EQ("Some name/Default some name/*trial2/Winner/*xxx/yyyy/zzz/default/",
save_string);
+ save_string.clear();
+ FieldTrialList::AllStatesToString(&save_string, true);
+ EXPECT_EQ(
+ "Expired trial name/Default group/"
+ "Some name/Default some name/*trial2/Winner/*xxx/yyyy/zzz/default/",
+ save_string);
}
TEST_F(FieldTrialTest, Restore) {
@@ -1109,7 +1098,7 @@ TEST(FieldTrialTestWithoutList, StatesStringFormat) {
scoped_refptr<FieldTrial> trial3 =
CreateFieldTrial("zzz", 10, "default", nullptr);
- FieldTrialList::AllStatesToString(&save_string);
+ FieldTrialList::AllStatesToString(&save_string, false);
}
// Starting with a new blank FieldTrialList.
@@ -1192,7 +1181,7 @@ TEST(FieldTrialListTest, AddTrialsToAllocator) {
FieldTrialList field_trial_list(nullptr);
FieldTrialList::CreateFieldTrial("Trial1", "Group1");
FieldTrialList::InstantiateFieldTrialAllocatorIfNeeded();
- FieldTrialList::AllStatesToString(&save_string);
+ FieldTrialList::AllStatesToString(&save_string, false);
handle = base::SharedMemory::DuplicateHandle(
field_trial_list.field_trial_allocator_->shared_memory()->handle());
}
@@ -1203,7 +1192,7 @@ TEST(FieldTrialListTest, AddTrialsToAllocator) {
shm.get()->Map(4 << 10);
FieldTrialList::CreateTrialsFromSharedMemory(std::move(shm));
std::string check_string;
- FieldTrialList::AllStatesToString(&check_string);
+ FieldTrialList::AllStatesToString(&check_string, false);
EXPECT_EQ(save_string, check_string);
}
@@ -1241,7 +1230,7 @@ TEST(FieldTrialListTest, DoNotAddSimulatedFieldTrialsToAllocator) {
shm.get()->Map(4 << 10);
FieldTrialList::CreateTrialsFromSharedMemory(std::move(shm));
std::string check_string;
- FieldTrialList::AllStatesToString(&check_string);
+ FieldTrialList::AllStatesToString(&check_string, false);
ASSERT_EQ(check_string.find("Simulated"), std::string::npos);
}
@@ -1325,7 +1314,7 @@ TEST(FieldTrialListTest, ClearParamsFromSharedMemory) {
shm.get()->Map(4 << 10);
FieldTrialList::CreateTrialsFromSharedMemory(std::move(shm));
std::string check_string;
- FieldTrialList::AllStatesToString(&check_string);
+ FieldTrialList::AllStatesToString(&check_string, false);
EXPECT_EQ("*Trial1/Group1/", check_string);
}
diff --git a/chromium/base/metrics/histogram.cc b/chromium/base/metrics/histogram.cc
index 40e7bcc860a..488facd066d 100644
--- a/chromium/base/metrics/histogram.cc
+++ b/chromium/base/metrics/histogram.cc
@@ -19,10 +19,9 @@
#include "base/compiler_specific.h"
#include "base/debug/alias.h"
-#include "base/debug/crash_logging.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
-#include "base/metrics/histogram_macros.h"
+#include "base/metrics/histogram_functions.h"
#include "base/metrics/metrics_hashes.h"
#include "base/metrics/persistent_histogram_allocator.h"
#include "base/metrics/persistent_memory_allocator.h"
@@ -438,8 +437,8 @@ bool Histogram::InspectConstructionArguments(StringPiece name,
}
if (!check_okay) {
- UMA_HISTOGRAM_SPARSE_SLOWLY("Histogram.BadConstructionArguments",
- static_cast<Sample>(HashMetricName(name)));
+ UmaHistogramSparse("Histogram.BadConstructionArguments",
+ static_cast<Sample>(HashMetricName(name)));
}
return check_okay;
@@ -575,9 +574,6 @@ bool Histogram::ValidateHistogramContents(bool crash_if_invalid,
// Abort if a problem is found (except "flags", which could legally be zero).
std::string debug_string = base::StringPrintf(
"%s/%" PRIu32 "#%d", histogram_name(), bad_fields, identifier);
-#if !defined(OS_NACL)
- base::debug::ScopedCrashKey crash_key("bad_histogram", debug_string);
-#endif
CHECK(false) << debug_string;
debug::Alias(&bad_fields);
return false;
diff --git a/chromium/base/metrics/histogram_flattener.h b/chromium/base/metrics/histogram_flattener.h
index 22d9a92d32b..6a5e3f42988 100644
--- a/chromium/base/metrics/histogram_flattener.h
+++ b/chromium/base/metrics/histogram_flattener.h
@@ -23,8 +23,8 @@ class BASE_EXPORT HistogramFlattener {
const HistogramSamples& snapshot) = 0;
protected:
- HistogramFlattener() {}
- virtual ~HistogramFlattener() {}
+ HistogramFlattener() = default;
+ virtual ~HistogramFlattener() = default;
private:
DISALLOW_COPY_AND_ASSIGN(HistogramFlattener);
diff --git a/chromium/base/metrics/histogram_functions.cc b/chromium/base/metrics/histogram_functions.cc
index 4c1a4b57ed2..47eec7dbbfd 100644
--- a/chromium/base/metrics/histogram_functions.cc
+++ b/chromium/base/metrics/histogram_functions.cc
@@ -6,13 +6,14 @@
#include "base/metrics/histogram.h"
#include "base/metrics/histogram_base.h"
+#include "base/metrics/sparse_histogram.h"
#include "base/time/time.h"
namespace base {
void UmaHistogramBoolean(const std::string& name, bool sample) {
HistogramBase* histogram = BooleanHistogram::FactoryGet(
- name, base::HistogramBase::kUmaTargetedHistogramFlag);
+ name, HistogramBase::kUmaTargetedHistogramFlag);
histogram->Add(sample);
}
@@ -100,4 +101,10 @@ void UmaHistogramMemoryLargeMB(const std::string& name, int sample) {
UmaHistogramCustomCounts(name, sample, 1, 64000, 100);
}
+void UmaHistogramSparse(const std::string& name, int sample) {
+ HistogramBase* histogram = SparseHistogram::FactoryGet(
+ name, HistogramBase::kUmaTargetedHistogramFlag);
+ histogram->Add(sample);
+}
+
} // namespace base
diff --git a/chromium/base/metrics/histogram_functions.h b/chromium/base/metrics/histogram_functions.h
index 46986283880..c9632fa26f0 100644
--- a/chromium/base/metrics/histogram_functions.h
+++ b/chromium/base/metrics/histogram_functions.h
@@ -110,6 +110,35 @@ BASE_EXPORT void UmaHistogramMemoryMB(const std::string& name, int sample);
// Used to measure common MB-granularity memory stats. Range is up to ~64G.
BASE_EXPORT void UmaHistogramMemoryLargeMB(const std::string& name, int sample);
+// For recording sparse histograms.
+// The |sample| can be a negative or non-negative number.
+//
+// Sparse histograms are well suited for recording counts of exact sample values
+// that are sparsely distributed over a relatively large range, in cases where
+// ultra-fast performance is not critical. For instance, Sqlite.Version.* are
+// sparse because for any given database, there's going to be exactly one
+// version logged.
+//
+// Performance:
+// ------------
+// Sparse histograms are typically more memory-efficient but less time-efficient
+// than other histograms. Essentially, they sparse histograms use a map rather
+// than a vector for their backing storage; they also require lock acquisition
+// to increment a sample, whereas other histogram do not. Hence, each increment
+// operation is a bit slower than for other histograms. But, if the data is
+// sparse, then they use less memory client-side, because they allocate buckets
+// on demand rather than preallocating.
+//
+// Data size:
+// ----------
+// Note that server-side, we still need to load all buckets, across all users,
+// at once. Thus, please avoid exploding such histograms, i.e. uploading many
+// many distinct values to the server (across all users). Concretely, keep the
+// number of distinct values <= 100 ideally, definitely <= 1000. If you have no
+// guarantees on the range of your data, use clamping, e.g.:
+// UmaHistogramSparse("MyHistogram", ClampToRange(value, 0, 200));
+BASE_EXPORT void UmaHistogramSparse(const std::string& name, int sample);
+
} // namespace base
#endif // BASE_METRICS_HISTOGRAM_FUNCTIONS_H_
diff --git a/chromium/base/metrics/histogram_functions_unittest.cc b/chromium/base/metrics/histogram_functions_unittest.cc
index 7bfd202e2dc..37206747572 100644
--- a/chromium/base/metrics/histogram_functions_unittest.cc
+++ b/chromium/base/metrics/histogram_functions_unittest.cc
@@ -7,6 +7,7 @@
#include "base/metrics/histogram_macros.h"
#include "base/test/histogram_tester.h"
#include "base/time/time.h"
+#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
@@ -17,7 +18,7 @@ enum UmaHistogramTestingEnum {
UMA_HISTOGRAM_TESTING_ENUM_THIRD
};
-TEST(HistogramFunctionsTest, HistogramExactLinear) {
+TEST(HistogramFunctionsTest, ExactLinear) {
std::string histogram("Testing.UMA.HistogramExactLinear");
HistogramTester tester;
UmaHistogramExactLinear(histogram, 10, 100);
@@ -37,7 +38,7 @@ TEST(HistogramFunctionsTest, HistogramExactLinear) {
tester.ExpectTotalCount(histogram, 5);
}
-TEST(HistogramFunctionsTest, HistogramEnumeration) {
+TEST(HistogramFunctionsTest, Enumeration) {
std::string histogram("Testing.UMA.HistogramEnumeration");
HistogramTester tester;
UmaHistogramEnumeration(histogram, UMA_HISTOGRAM_TESTING_ENUM_FIRST,
@@ -53,7 +54,7 @@ TEST(HistogramFunctionsTest, HistogramEnumeration) {
tester.ExpectTotalCount(histogram, 2);
}
-TEST(HistogramFunctionsTest, HistogramBoolean) {
+TEST(HistogramFunctionsTest, Boolean) {
std::string histogram("Testing.UMA.HistogramBoolean");
HistogramTester tester;
UmaHistogramBoolean(histogram, true);
@@ -63,7 +64,7 @@ TEST(HistogramFunctionsTest, HistogramBoolean) {
tester.ExpectTotalCount(histogram, 2);
}
-TEST(HistogramFunctionsTest, HistogramPercentage) {
+TEST(HistogramFunctionsTest, Percentage) {
std::string histogram("Testing.UMA.HistogramPercentage");
HistogramTester tester;
UmaHistogramPercentage(histogram, 50);
@@ -74,7 +75,7 @@ TEST(HistogramFunctionsTest, HistogramPercentage) {
tester.ExpectTotalCount(histogram, 2);
}
-TEST(HistogramFunctionsTest, HistogramCounts) {
+TEST(HistogramFunctionsTest, Counts) {
std::string histogram("Testing.UMA.HistogramCount.Custom");
HistogramTester tester;
UmaHistogramCustomCounts(histogram, 10, 1, 100, 10);
@@ -89,7 +90,7 @@ TEST(HistogramFunctionsTest, HistogramCounts) {
tester.ExpectTotalCount(histogram, 5);
}
-TEST(HistogramFunctionsTest, HistogramTimes) {
+TEST(HistogramFunctionsTest, Times) {
std::string histogram("Testing.UMA.HistogramTimes");
HistogramTester tester;
UmaHistogramTimes(histogram, TimeDelta::FromSeconds(1));
@@ -106,4 +107,21 @@ TEST(HistogramFunctionsTest, HistogramTimes) {
tester.ExpectTotalCount(histogram, 4);
}
+TEST(HistogramFunctionsTest, Sparse_SupportsLargeRange) {
+ std::string histogram("Testing.UMA.HistogramSparse");
+ HistogramTester tester;
+ UmaHistogramSparse(histogram, 0);
+ UmaHistogramSparse(histogram, 123456789);
+ UmaHistogramSparse(histogram, 123456789);
+ EXPECT_THAT(tester.GetAllSamples(histogram),
+ testing::ElementsAre(Bucket(0, 1), Bucket(123456789, 2)));
+}
+
+TEST(HistogramFunctionsTest, Sparse_SupportsNegativeValues) {
+ std::string histogram("Testing.UMA.HistogramSparse");
+ HistogramTester tester;
+ UmaHistogramSparse(histogram, -1);
+ tester.ExpectUniqueSample(histogram, -1, 1);
+}
+
} // namespace base.
diff --git a/chromium/base/metrics/histogram_macros.h b/chromium/base/metrics/histogram_macros.h
index 891d2a40bff..083bae753cb 100644
--- a/chromium/base/metrics/histogram_macros.h
+++ b/chromium/base/metrics/histogram_macros.h
@@ -249,32 +249,6 @@
base::HistogramBase::kUmaStabilityHistogramFlag)
//------------------------------------------------------------------------------
-// Sparse histograms.
-
-// Sparse histograms are well suited for recording counts of exact sample values
-// that are sparsely distributed over a large range.
-//
-// UMA_HISTOGRAM_SPARSE_SLOWLY is good for sparsely distributed and/or
-// infrequently recorded values since the implementation is slower
-// and takes more memory. For sparse data, sparse histograms have the advantage
-// of using less memory client-side, because they allocate buckets on demand
-// rather than preallocating. However, server-side, we still need to load all
-// buckets, across all users, at once.
-
-// Thus, please avoid exploding such histograms, i.e. uploading many many
-// distinct values to the server (across all users). Concretely, keep the number
-// of distinct values <= 100 at best, definitely <= 1000. If you have no
-// guarantees on the range of your data, use capping, e.g.:
-// UMA_HISTOGRAM_SPARSE_SLOWLY("MyHistogram",
-// std::max(0, std::min(200, value)));
-//
-// For instance, Sqlite.Version.* are sparse because for any given database,
-// there's going to be exactly one version logged.
-// The |sample| can be a negative or non-negative number.
-#define UMA_HISTOGRAM_SPARSE_SLOWLY(name, sample) \
- INTERNAL_HISTOGRAM_SPARSE_SLOWLY(name, sample)
-
-//------------------------------------------------------------------------------
// Histogram instantiation helpers.
// Support a collection of histograms, perhaps one for each entry in an
diff --git a/chromium/base/metrics/histogram_macros_internal.h b/chromium/base/metrics/histogram_macros_internal.h
index 15e191712ac..84defae32f6 100644
--- a/chromium/base/metrics/histogram_macros_internal.h
+++ b/chromium/base/metrics/histogram_macros_internal.h
@@ -180,16 +180,4 @@
base::TimeTicks constructed_; \
} scoped_histogram_timer_##key
-// Macro for sparse histogram.
-// The implementation is more costly to add values to, and each value
-// stored has more overhead, compared to the other histogram types. However it
-// may be more efficient in memory if the total number of sample values is small
-// compared to the range of their values.
-#define INTERNAL_HISTOGRAM_SPARSE_SLOWLY(name, sample) \
- do { \
- base::HistogramBase* histogram = base::SparseHistogram::FactoryGet( \
- name, base::HistogramBase::kUmaTargetedHistogramFlag); \
- histogram->Add(sample); \
- } while (0)
-
#endif // BASE_METRICS_HISTOGRAM_MACROS_INTERNAL_H_
diff --git a/chromium/base/metrics/histogram_samples.cc b/chromium/base/metrics/histogram_samples.cc
index 7703580538f..6830637c06d 100644
--- a/chromium/base/metrics/histogram_samples.cc
+++ b/chromium/base/metrics/histogram_samples.cc
@@ -7,6 +7,7 @@
#include <limits>
#include "base/compiler_specific.h"
+#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_macros.h"
#include "base/numerics/safe_conversions.h"
#include "base/numerics/safe_math.h"
@@ -258,8 +259,8 @@ void HistogramSamples::RecordNegativeSample(NegativeSampleReason reason,
MAX_NEGATIVE_SAMPLE_REASONS);
UMA_HISTOGRAM_CUSTOM_COUNTS("UMA.NegativeSamples.Increment", increment, 1,
1 << 30, 100);
- UMA_HISTOGRAM_SPARSE_SLOWLY("UMA.NegativeSamples.Histogram",
- static_cast<int32_t>(id()));
+ UmaHistogramSparse("UMA.NegativeSamples.Histogram",
+ static_cast<int32_t>(id()));
}
SampleCountIterator::~SampleCountIterator() = default;
diff --git a/chromium/base/metrics/histogram_snapshot_manager.h b/chromium/base/metrics/histogram_snapshot_manager.h
index 51bf92c7b55..e2a404fa541 100644
--- a/chromium/base/metrics/histogram_snapshot_manager.h
+++ b/chromium/base/metrics/histogram_snapshot_manager.h
@@ -28,10 +28,10 @@ class HistogramFlattener;
// corruption, this class also validates as much redundancy as it can before
// calling for the marginal change (a.k.a., delta) in a histogram to be
// recorded.
-class BASE_EXPORT HistogramSnapshotManager {
+class BASE_EXPORT HistogramSnapshotManager final {
public:
explicit HistogramSnapshotManager(HistogramFlattener* histogram_flattener);
- virtual ~HistogramSnapshotManager();
+ ~HistogramSnapshotManager();
// Snapshot all histograms, and ask |histogram_flattener_| to record the
// delta. |flags_to_set| is used to set flags for each histogram.
@@ -39,15 +39,13 @@ class BASE_EXPORT HistogramSnapshotManager {
// Only histograms that have all the flags specified by the argument will be
// chosen. If all histograms should be recorded, set it to
// |Histogram::kNoFlags|.
- template <class ForwardHistogramIterator>
- void PrepareDeltas(ForwardHistogramIterator begin,
- ForwardHistogramIterator end,
+ void PrepareDeltas(const std::vector<HistogramBase*>& histograms,
HistogramBase::Flags flags_to_set,
HistogramBase::Flags required_flags) {
- for (ForwardHistogramIterator it = begin; it != end; ++it) {
- (*it)->SetFlags(flags_to_set);
- if (((*it)->flags() & required_flags) == required_flags)
- PrepareDelta(*it);
+ for (HistogramBase* const histogram : histograms) {
+ histogram->SetFlags(flags_to_set);
+ if ((histogram->flags() & required_flags) == required_flags)
+ PrepareDelta(histogram);
}
}
diff --git a/chromium/base/metrics/persistent_histogram_allocator.cc b/chromium/base/metrics/persistent_histogram_allocator.cc
index 79a903eb183..6178b21a48d 100644
--- a/chromium/base/metrics/persistent_histogram_allocator.cc
+++ b/chromium/base/metrics/persistent_histogram_allocator.cc
@@ -1090,9 +1090,6 @@ GlobalHistogramAllocator::GlobalHistogramAllocator(
std::unique_ptr<PersistentMemoryAllocator> memory)
: PersistentHistogramAllocator(std::move(memory)),
import_iterator_(this) {
- // Make sure the StatisticsRecorder is initialized to prevent duplicate
- // histograms from being created. It's safe to call this multiple times.
- StatisticsRecorder::Initialize();
}
void GlobalHistogramAllocator::ImportHistogramsToStatisticsRecorder() {
diff --git a/chromium/base/metrics/persistent_memory_allocator.cc b/chromium/base/metrics/persistent_memory_allocator.cc
index be107c39474..065c0282921 100644
--- a/chromium/base/metrics/persistent_memory_allocator.cc
+++ b/chromium/base/metrics/persistent_memory_allocator.cc
@@ -8,6 +8,7 @@
#include <algorithm>
#if defined(OS_WIN)
+#include <windows.h>
#include "winbase.h"
#elif defined(OS_POSIX)
#include <sys/mman.h>
@@ -16,7 +17,7 @@
#include "base/files/memory_mapped_file.h"
#include "base/logging.h"
#include "base/memory/shared_memory.h"
-#include "base/metrics/histogram_macros.h"
+#include "base/metrics/histogram_functions.h"
#include "base/metrics/sparse_histogram.h"
#include "base/numerics/safe_conversions.h"
#include "base/sys_info.h"
@@ -966,8 +967,8 @@ LocalPersistentMemoryAllocator::AllocateLocalMemory(size_t size) {
::VirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
if (address)
return Memory(address, MEM_VIRTUAL);
- UMA_HISTOGRAM_SPARSE_SLOWLY("UMA.LocalPersistentMemoryAllocator.Failures.Win",
- ::GetLastError());
+ UmaHistogramSparse("UMA.LocalPersistentMemoryAllocator.Failures.Win",
+ ::GetLastError());
#elif defined(OS_POSIX)
// MAP_ANON is deprecated on Linux but MAP_ANONYMOUS is not universal on Mac.
// MAP_SHARED is not available on Linux <2.4 but required on Mac.
@@ -975,8 +976,8 @@ LocalPersistentMemoryAllocator::AllocateLocalMemory(size_t size) {
MAP_ANON | MAP_SHARED, -1, 0);
if (address != MAP_FAILED)
return Memory(address, MEM_VIRTUAL);
- UMA_HISTOGRAM_SPARSE_SLOWLY(
- "UMA.LocalPersistentMemoryAllocator.Failures.Posix", errno);
+ UmaHistogramSparse("UMA.LocalPersistentMemoryAllocator.Failures.Posix",
+ errno);
#else
#error This architecture is not (yet) supported.
#endif
diff --git a/chromium/base/metrics/record_histogram_checker.h b/chromium/base/metrics/record_histogram_checker.h
index 686bf676c63..75bc336d18e 100644
--- a/chromium/base/metrics/record_histogram_checker.h
+++ b/chromium/base/metrics/record_histogram_checker.h
@@ -15,7 +15,7 @@ namespace base {
// the given histogram should be recorded.
class BASE_EXPORT RecordHistogramChecker {
public:
- virtual ~RecordHistogramChecker() {}
+ virtual ~RecordHistogramChecker() = default;
// Returns true iff the given histogram should be recorded.
// This method may be called on any thread, so it should not mutate any state.
diff --git a/chromium/base/metrics/single_sample_metrics.h b/chromium/base/metrics/single_sample_metrics.h
index 6bfd7cb0db1..b966cb1ac5f 100644
--- a/chromium/base/metrics/single_sample_metrics.h
+++ b/chromium/base/metrics/single_sample_metrics.h
@@ -17,7 +17,7 @@ namespace base {
// and destroyed from the same thread as construction.
class BASE_EXPORT SingleSampleMetric {
public:
- virtual ~SingleSampleMetric() {}
+ virtual ~SingleSampleMetric() = default;
virtual void SetSample(HistogramBase::Sample sample) = 0;
};
@@ -34,7 +34,7 @@ class BASE_EXPORT SingleSampleMetric {
// base/metrics/histogram.h for full parameter definitions.
class BASE_EXPORT SingleSampleMetricsFactory {
public:
- virtual ~SingleSampleMetricsFactory() {}
+ virtual ~SingleSampleMetricsFactory() = default;
// Returns the factory provided by SetFactory(), or if no factory has been set
// a default factory will be provided (future calls to SetFactory() will fail
@@ -63,8 +63,8 @@ class BASE_EXPORT SingleSampleMetricsFactory {
class BASE_EXPORT DefaultSingleSampleMetricsFactory
: public SingleSampleMetricsFactory {
public:
- DefaultSingleSampleMetricsFactory() {}
- ~DefaultSingleSampleMetricsFactory() override {}
+ DefaultSingleSampleMetricsFactory() = default;
+ ~DefaultSingleSampleMetricsFactory() override = default;
// SingleSampleMetricsFactory:
std::unique_ptr<SingleSampleMetric> CreateCustomCountsMetric(
diff --git a/chromium/base/metrics/sparse_histogram_unittest.cc b/chromium/base/metrics/sparse_histogram_unittest.cc
index eeba150bf14..5f333e07eef 100644
--- a/chromium/base/metrics/sparse_histogram_unittest.cc
+++ b/chromium/base/metrics/sparse_histogram_unittest.cc
@@ -8,7 +8,7 @@
#include <string>
#include "base/metrics/histogram_base.h"
-#include "base/metrics/histogram_macros.h"
+#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_samples.h"
#include "base/metrics/metrics_hashes.h"
#include "base/metrics/persistent_histogram_allocator.h"
@@ -17,6 +17,7 @@
#include "base/metrics/statistics_recorder.h"
#include "base/pickle.h"
#include "base/strings/stringprintf.h"
+#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
@@ -172,15 +173,15 @@ TEST_P(SparseHistogramTest, AddCount_LargeCountsDontOverflow) {
}
TEST_P(SparseHistogramTest, MacroBasicTest) {
- UMA_HISTOGRAM_SPARSE_SLOWLY("Sparse", 100);
- UMA_HISTOGRAM_SPARSE_SLOWLY("Sparse", 200);
- UMA_HISTOGRAM_SPARSE_SLOWLY("Sparse", 100);
+ UmaHistogramSparse("Sparse", 100);
+ UmaHistogramSparse("Sparse", 200);
+ UmaHistogramSparse("Sparse", 100);
- StatisticsRecorder::Histograms histograms;
- StatisticsRecorder::GetHistograms(&histograms);
+ const StatisticsRecorder::Histograms histograms =
+ StatisticsRecorder::GetHistograms();
- ASSERT_EQ(1U, histograms.size());
- HistogramBase* sparse_histogram = histograms[0];
+ ASSERT_THAT(histograms, testing::SizeIs(1));
+ const HistogramBase* const sparse_histogram = histograms[0];
EXPECT_EQ(SPARSE_HISTOGRAM, sparse_histogram->GetHistogramType());
EXPECT_EQ("Sparse", StringPiece(sparse_histogram->histogram_name()));
@@ -201,18 +202,14 @@ TEST_P(SparseHistogramTest, MacroInLoopTest) {
// Unlike the macros in histogram.h, SparseHistogram macros can have a
// variable as histogram name.
for (int i = 0; i < 2; i++) {
- std::string name = StringPrintf("Sparse%d", i + 1);
- UMA_HISTOGRAM_SPARSE_SLOWLY(name, 100);
+ UmaHistogramSparse(StringPrintf("Sparse%d", i), 100);
}
- StatisticsRecorder::Histograms histograms;
- StatisticsRecorder::GetHistograms(&histograms);
- ASSERT_EQ(2U, histograms.size());
-
- std::string name1 = histograms[0]->histogram_name();
- std::string name2 = histograms[1]->histogram_name();
- EXPECT_TRUE(("Sparse1" == name1 && "Sparse2" == name2) ||
- ("Sparse2" == name1 && "Sparse1" == name2));
+ const StatisticsRecorder::Histograms histograms =
+ StatisticsRecorder::GetHistograms();
+ ASSERT_THAT(histograms, testing::SizeIs(2));
+ EXPECT_STREQ(histograms[0]->histogram_name(), "Sparse0");
+ EXPECT_STREQ(histograms[1]->histogram_name(), "Sparse1");
}
TEST_P(SparseHistogramTest, Serialize) {
diff --git a/chromium/base/metrics/statistics_recorder.cc b/chromium/base/metrics/statistics_recorder.cc
index a21adc09a0c..60e430eb72a 100644
--- a/chromium/base/metrics/statistics_recorder.cc
+++ b/chromium/base/metrics/statistics_recorder.cc
@@ -20,184 +20,138 @@
#include "base/strings/stringprintf.h"
#include "base/values.h"
+namespace base {
namespace {
-// Initialize histogram statistics gathering system.
-base::LazyInstance<base::StatisticsRecorder>::Leaky g_statistics_recorder_ =
- LAZY_INSTANCE_INITIALIZER;
-
bool HistogramNameLesser(const base::HistogramBase* a,
const base::HistogramBase* b) {
- return a->histogram_name() < b->histogram_name();
+ return strcmp(a->histogram_name(), b->histogram_name()) < 0;
}
} // namespace
-namespace base {
+// static
+LazyInstance<Lock>::Leaky StatisticsRecorder::lock_;
-StatisticsRecorder::~StatisticsRecorder() {
- DCHECK(histograms_);
- DCHECK(ranges_);
+// static
+StatisticsRecorder* StatisticsRecorder::top_ = nullptr;
- // Clean out what this object created and then restore what existed before.
- Reset();
- base::AutoLock auto_lock(lock_.Get());
- histograms_ = existing_histograms_.release();
- callbacks_ = existing_callbacks_.release();
- ranges_ = existing_ranges_.release();
- providers_ = existing_providers_.release();
- record_checker_ = existing_record_checker_.release();
+// static
+bool StatisticsRecorder::is_vlog_initialized_ = false;
+
+size_t StatisticsRecorder::BucketRangesHash::operator()(
+ const BucketRanges* const a) const {
+ return a->checksum();
}
-// static
-void StatisticsRecorder::Initialize() {
- // Tests sometimes create local StatisticsRecorders in order to provide a
- // contained environment of histograms that can be later discarded. If a
- // true global instance gets created in this environment then it will
- // eventually get disconnected when the local instance destructs and
- // restores the previous state, resulting in no StatisticsRecorder at all.
- // The global lazy instance, however, will remain valid thus ensuring that
- // another never gets installed via this method. If a |histograms_| map
- // exists then assume the StatisticsRecorder is already "initialized".
- if (histograms_)
- return;
+bool StatisticsRecorder::BucketRangesEqual::operator()(
+ const BucketRanges* const a,
+ const BucketRanges* const b) const {
+ return a->Equals(b);
+}
- // Ensure that an instance of the StatisticsRecorder object is created.
- g_statistics_recorder_.Get();
+StatisticsRecorder::~StatisticsRecorder() {
+ const AutoLock auto_lock(lock_.Get());
+ DCHECK_EQ(this, top_);
+ top_ = previous_;
}
// static
-bool StatisticsRecorder::IsActive() {
- base::AutoLock auto_lock(lock_.Get());
- return histograms_ != nullptr;
+void StatisticsRecorder::EnsureGlobalRecorderWhileLocked() {
+ lock_.Get().AssertAcquired();
+ if (top_)
+ return;
+
+ const StatisticsRecorder* const p = new StatisticsRecorder;
+ // The global recorder is never deleted.
+ ANNOTATE_LEAKING_OBJECT_PTR(p);
+ DCHECK_EQ(p, top_);
}
// static
void StatisticsRecorder::RegisterHistogramProvider(
const WeakPtr<HistogramProvider>& provider) {
- providers_->push_back(provider);
+ const AutoLock auto_lock(lock_.Get());
+ EnsureGlobalRecorderWhileLocked();
+ top_->providers_.push_back(provider);
}
// static
HistogramBase* StatisticsRecorder::RegisterOrDeleteDuplicate(
HistogramBase* histogram) {
- HistogramBase* histogram_to_delete = nullptr;
- HistogramBase* histogram_to_return = nullptr;
- {
- base::AutoLock auto_lock(lock_.Get());
- if (!histograms_) {
- histogram_to_return = histogram;
-
- // As per crbug.com/79322 the histograms are intentionally leaked, so we
- // need to annotate them. Because ANNOTATE_LEAKING_OBJECT_PTR may be used
- // only once for an object, the duplicates should not be annotated.
- // Callers are responsible for not calling RegisterOrDeleteDuplicate(ptr)
- // twice |if (!histograms_)|.
- ANNOTATE_LEAKING_OBJECT_PTR(histogram); // see crbug.com/79322
- } else {
- const char* name = histogram->histogram_name();
- StringPiece name_piece(name);
- HistogramMap::iterator it = histograms_->find(name_piece);
- if (histograms_->end() == it) {
- // |name_piece| is guaranteed to never change or be deallocated so long
- // as the histogram is alive (which is forever).
- (*histograms_)[name_piece] = histogram;
- ANNOTATE_LEAKING_OBJECT_PTR(histogram); // see crbug.com/79322
- // If there are callbacks for this histogram, we set the kCallbackExists
- // flag.
- auto callback_iterator = callbacks_->find(name);
- if (callback_iterator != callbacks_->end()) {
- if (!callback_iterator->second.is_null())
- histogram->SetFlags(HistogramBase::kCallbackExists);
- else
- histogram->ClearFlags(HistogramBase::kCallbackExists);
- }
- histogram_to_return = histogram;
- } else if (histogram == it->second) {
- // The histogram was registered before.
- histogram_to_return = histogram;
- } else {
- // We already have one histogram with this name.
- DCHECK_EQ(StringPiece(histogram->histogram_name()),
- StringPiece(it->second->histogram_name()))
- << "hash collision";
- histogram_to_return = it->second;
- histogram_to_delete = histogram;
- }
+ // Declared before |auto_lock| to ensure correct destruction order.
+ std::unique_ptr<HistogramBase> histogram_deleter;
+ const AutoLock auto_lock(lock_.Get());
+ EnsureGlobalRecorderWhileLocked();
+
+ const char* const name = histogram->histogram_name();
+ HistogramBase*& registered = top_->histograms_[name];
+
+ if (!registered) {
+ // |name| is guaranteed to never change or be deallocated so long
+ // as the histogram is alive (which is forever).
+ registered = histogram;
+ ANNOTATE_LEAKING_OBJECT_PTR(histogram); // see crbug.com/79322
+ // If there are callbacks for this histogram, we set the kCallbackExists
+ // flag.
+ const auto callback_iterator = top_->callbacks_.find(name);
+ if (callback_iterator != top_->callbacks_.end()) {
+ if (!callback_iterator->second.is_null())
+ histogram->SetFlags(HistogramBase::kCallbackExists);
+ else
+ histogram->ClearFlags(HistogramBase::kCallbackExists);
}
+ return histogram;
+ }
+
+ if (histogram == registered) {
+ // The histogram was registered before.
+ return histogram;
}
- delete histogram_to_delete;
- return histogram_to_return;
+
+ // We already have one histogram with this name.
+ histogram_deleter.reset(histogram);
+ return registered;
}
// static
const BucketRanges* StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
const BucketRanges* ranges) {
DCHECK(ranges->HasValidChecksum());
+
+ // Declared before |auto_lock| to ensure correct destruction order.
std::unique_ptr<const BucketRanges> ranges_deleter;
+ const AutoLock auto_lock(lock_.Get());
+ EnsureGlobalRecorderWhileLocked();
- base::AutoLock auto_lock(lock_.Get());
- if (!ranges_) {
+ const BucketRanges* const registered = *top_->ranges_.insert(ranges).first;
+ if (registered == ranges) {
ANNOTATE_LEAKING_OBJECT_PTR(ranges);
- return ranges;
- }
-
- std::list<const BucketRanges*>* checksum_matching_list;
- RangesMap::iterator ranges_it = ranges_->find(ranges->checksum());
- if (ranges_->end() == ranges_it) {
- // Add a new matching list to map.
- checksum_matching_list = new std::list<const BucketRanges*>();
- ANNOTATE_LEAKING_OBJECT_PTR(checksum_matching_list);
- (*ranges_)[ranges->checksum()] = checksum_matching_list;
} else {
- checksum_matching_list = ranges_it->second;
+ ranges_deleter.reset(ranges);
}
- for (const BucketRanges* existing_ranges : *checksum_matching_list) {
- if (existing_ranges->Equals(ranges)) {
- if (existing_ranges == ranges) {
- return ranges;
- } else {
- ranges_deleter.reset(ranges);
- return existing_ranges;
- }
- }
- }
- // We haven't found a BucketRanges which has the same ranges. Register the
- // new BucketRanges.
- checksum_matching_list->push_front(ranges);
- return ranges;
+ return registered;
}
// static
void StatisticsRecorder::WriteHTMLGraph(const std::string& query,
std::string* output) {
- if (!IsActive())
- return;
-
- Histograms snapshot;
- GetSnapshot(query, &snapshot);
- std::sort(snapshot.begin(), snapshot.end(), &HistogramNameLesser);
- for (const HistogramBase* histogram : snapshot) {
+ for (const HistogramBase* const histogram : GetSnapshot(query)) {
histogram->WriteHTMLGraph(output);
- output->append("<br><hr><br>");
+ *output += "<br><hr><br>";
}
}
// static
void StatisticsRecorder::WriteGraph(const std::string& query,
std::string* output) {
- if (!IsActive())
- return;
if (query.length())
StringAppendF(output, "Collections of histograms for %s\n", query.c_str());
else
output->append("Collections of all histograms\n");
- Histograms snapshot;
- GetSnapshot(query, &snapshot);
- std::sort(snapshot.begin(), snapshot.end(), &HistogramNameLesser);
- for (const HistogramBase* histogram : snapshot) {
+ for (const HistogramBase* const histogram : GetSnapshot(query)) {
histogram->WriteAscii(output);
output->append("\n");
}
@@ -205,19 +159,11 @@ void StatisticsRecorder::WriteGraph(const std::string& query,
// static
std::string StatisticsRecorder::ToJSON(JSONVerbosityLevel verbosity_level) {
- if (!IsActive())
- return std::string();
-
- std::string output("{");
- Histograms snapshot;
- GetSnapshot(std::string(), &snapshot);
- output += "\"histograms\":[";
- bool first_histogram = true;
- for (const HistogramBase* histogram : snapshot) {
- if (first_histogram)
- first_histogram = false;
- else
- output += ",";
+ std::string output = "{\"histograms\":[";
+ const char* sep = "";
+ for (const HistogramBase* const histogram : GetHistograms()) {
+ output += sep;
+ sep = ",";
std::string json;
histogram->WriteJSON(&json, verbosity_level);
output += json;
@@ -227,28 +173,13 @@ std::string StatisticsRecorder::ToJSON(JSONVerbosityLevel verbosity_level) {
}
// static
-void StatisticsRecorder::GetHistograms(Histograms* output) {
- base::AutoLock auto_lock(lock_.Get());
- if (!histograms_)
- return;
-
- for (const auto& entry : *histograms_) {
- output->push_back(entry.second);
- }
-}
-
-// static
-void StatisticsRecorder::GetBucketRanges(
- std::vector<const BucketRanges*>* output) {
- base::AutoLock auto_lock(lock_.Get());
- if (!ranges_)
- return;
-
- for (const auto& entry : *ranges_) {
- for (auto* range_entry : *entry.second) {
- output->push_back(range_entry);
- }
- }
+std::vector<const BucketRanges*> StatisticsRecorder::GetBucketRanges() {
+ std::vector<const BucketRanges*> out;
+ const AutoLock auto_lock(lock_.Get());
+ EnsureGlobalRecorderWhileLocked();
+ out.reserve(top_->ranges_.size());
+ out.assign(top_->ranges_.begin(), top_->ranges_.end());
+ return out;
}
// static
@@ -258,23 +189,25 @@ HistogramBase* StatisticsRecorder::FindHistogram(base::StringPiece name) {
// will acquire the lock at that time.
ImportGlobalPersistentHistograms();
- base::AutoLock auto_lock(lock_.Get());
- if (!histograms_)
- return nullptr;
+ const AutoLock auto_lock(lock_.Get());
+ EnsureGlobalRecorderWhileLocked();
- HistogramMap::iterator it = histograms_->find(name);
- if (histograms_->end() == it)
- return nullptr;
- return it->second;
+ const HistogramMap::const_iterator it = top_->histograms_.find(name);
+ return it != top_->histograms_.end() ? it->second : nullptr;
}
// static
-void StatisticsRecorder::ImportProvidedHistograms() {
- if (!providers_)
- return;
+StatisticsRecorder::HistogramProviders
+StatisticsRecorder::GetHistogramProviders() {
+ const AutoLock auto_lock(lock_.Get());
+ EnsureGlobalRecorderWhileLocked();
+ return top_->providers_;
+}
+// static
+void StatisticsRecorder::ImportProvidedHistograms() {
// Merge histogram data from each provider in turn.
- for (const WeakPtr<HistogramProvider>& provider : *providers_) {
+ for (const WeakPtr<HistogramProvider>& provider : GetHistogramProviders()) {
// Weak-pointer may be invalid if the provider was destructed, though they
// generally never are.
if (provider)
@@ -288,42 +221,14 @@ void StatisticsRecorder::PrepareDeltas(
HistogramBase::Flags flags_to_set,
HistogramBase::Flags required_flags,
HistogramSnapshotManager* snapshot_manager) {
- if (include_persistent)
- ImportGlobalPersistentHistograms();
-
- auto known = GetKnownHistograms(include_persistent);
- snapshot_manager->PrepareDeltas(known.begin(), known.end(), flags_to_set,
- required_flags);
+ snapshot_manager->PrepareDeltas(GetKnownHistograms(include_persistent),
+ flags_to_set, required_flags);
}
// static
void StatisticsRecorder::InitLogOnShutdown() {
- if (!histograms_)
- return;
-
- base::AutoLock auto_lock(lock_.Get());
- g_statistics_recorder_.Get().InitLogOnShutdownWithoutLock();
-}
-
-// static
-void StatisticsRecorder::GetSnapshot(const std::string& query,
- Histograms* snapshot) {
- // This must be called *before* the lock is acquired below because it will
- // call back into this object to register histograms. Those called methods
- // will acquire the lock at that time.
- ImportGlobalPersistentHistograms();
-
- base::AutoLock auto_lock(lock_.Get());
- if (!histograms_)
- return;
-
- // Need a c-string query for comparisons against c-string histogram name.
- const char* query_string = query.c_str();
-
- for (const auto& entry : *histograms_) {
- if (strstr(entry.second->histogram_name(), query_string) != nullptr)
- snapshot->push_back(entry.second);
- }
+ const AutoLock auto_lock(lock_.Get());
+ InitLogOnShutdownWhileLocked();
}
// static
@@ -331,16 +236,14 @@ bool StatisticsRecorder::SetCallback(
const std::string& name,
const StatisticsRecorder::OnSampleCallback& cb) {
DCHECK(!cb.is_null());
- base::AutoLock auto_lock(lock_.Get());
- if (!histograms_)
- return false;
+ const AutoLock auto_lock(lock_.Get());
+ EnsureGlobalRecorderWhileLocked();
- if (ContainsKey(*callbacks_, name))
+ if (!top_->callbacks_.insert({name, cb}).second)
return false;
- callbacks_->insert(std::make_pair(name, cb));
- auto it = histograms_->find(name);
- if (it != histograms_->end())
+ const HistogramMap::const_iterator it = top_->histograms_.find(name);
+ if (it != top_->histograms_.end())
it->second->SetFlags(HistogramBase::kCallbackExists);
return true;
@@ -348,199 +251,162 @@ bool StatisticsRecorder::SetCallback(
// static
void StatisticsRecorder::ClearCallback(const std::string& name) {
- base::AutoLock auto_lock(lock_.Get());
- if (!histograms_)
- return;
+ const AutoLock auto_lock(lock_.Get());
+ EnsureGlobalRecorderWhileLocked();
- callbacks_->erase(name);
+ top_->callbacks_.erase(name);
// We also clear the flag from the histogram (if it exists).
- auto it = histograms_->find(name);
- if (it != histograms_->end())
+ const HistogramMap::const_iterator it = top_->histograms_.find(name);
+ if (it != top_->histograms_.end())
it->second->ClearFlags(HistogramBase::kCallbackExists);
}
// static
StatisticsRecorder::OnSampleCallback StatisticsRecorder::FindCallback(
const std::string& name) {
- base::AutoLock auto_lock(lock_.Get());
- if (!histograms_)
- return OnSampleCallback();
-
- auto callback_iterator = callbacks_->find(name);
- return callback_iterator != callbacks_->end() ? callback_iterator->second
- : OnSampleCallback();
+ const AutoLock auto_lock(lock_.Get());
+ EnsureGlobalRecorderWhileLocked();
+ const auto it = top_->callbacks_.find(name);
+ return it != top_->callbacks_.end() ? it->second : OnSampleCallback();
}
// static
size_t StatisticsRecorder::GetHistogramCount() {
- base::AutoLock auto_lock(lock_.Get());
- if (!histograms_)
- return 0;
- return histograms_->size();
+ const AutoLock auto_lock(lock_.Get());
+ EnsureGlobalRecorderWhileLocked();
+ return top_->histograms_.size();
}
// static
void StatisticsRecorder::ForgetHistogramForTesting(base::StringPiece name) {
- if (!histograms_)
- return;
+ const AutoLock auto_lock(lock_.Get());
+ EnsureGlobalRecorderWhileLocked();
- HistogramMap::iterator found = histograms_->find(name);
- if (found == histograms_->end())
+ const HistogramMap::iterator found = top_->histograms_.find(name);
+ if (found == top_->histograms_.end())
return;
- HistogramBase* base = found->second;
+ HistogramBase* const base = found->second;
if (base->GetHistogramType() != SPARSE_HISTOGRAM) {
// When forgetting a histogram, it's likely that other information is
// also becoming invalid. Clear the persistent reference that may no
// longer be valid. There's no danger in this as, at worst, duplicates
// will be created in persistent memory.
- Histogram* histogram = static_cast<Histogram*>(base);
- histogram->bucket_ranges()->set_persistent_reference(0);
+ static_cast<Histogram*>(base)->bucket_ranges()->set_persistent_reference(0);
}
- histograms_->erase(found);
+ top_->histograms_.erase(found);
}
// static
std::unique_ptr<StatisticsRecorder>
StatisticsRecorder::CreateTemporaryForTesting() {
+ const AutoLock auto_lock(lock_.Get());
return WrapUnique(new StatisticsRecorder());
}
// static
-void StatisticsRecorder::UninitializeForTesting() {
- // Stop now if it's never been initialized.
- if (!histograms_)
- return;
-
- // Get the global instance and destruct it. It's held in static memory so
- // can't "delete" it; call the destructor explicitly.
- DCHECK(g_statistics_recorder_.private_instance_);
- g_statistics_recorder_.Get().~StatisticsRecorder();
-
- // Now the ugly part. There's no official way to release a LazyInstance once
- // created so it's necessary to clear out an internal variable which
- // shouldn't be publicly visible but is for initialization reasons.
- g_statistics_recorder_.private_instance_ = 0;
-}
-
-// static
void StatisticsRecorder::SetRecordChecker(
std::unique_ptr<RecordHistogramChecker> record_checker) {
- record_checker_ = record_checker.release();
+ const AutoLock auto_lock(lock_.Get());
+ EnsureGlobalRecorderWhileLocked();
+ top_->record_checker_ = std::move(record_checker);
}
// static
bool StatisticsRecorder::ShouldRecordHistogram(uint64_t histogram_hash) {
- return !record_checker_ || record_checker_->ShouldRecord(histogram_hash);
+ const AutoLock auto_lock(lock_.Get());
+ EnsureGlobalRecorderWhileLocked();
+ return !top_->record_checker_ ||
+ top_->record_checker_->ShouldRecord(histogram_hash);
}
// static
-std::vector<HistogramBase*> StatisticsRecorder::GetKnownHistograms(
- bool include_persistent) {
- std::vector<HistogramBase*> known;
- base::AutoLock auto_lock(lock_.Get());
- if (!histograms_ || histograms_->empty())
- return known;
-
- known.reserve(histograms_->size());
- for (const auto& h : *histograms_) {
- if (!include_persistent &&
- (h.second->flags() & HistogramBase::kIsPersistent)) {
- continue;
+template <typename Predicate>
+StatisticsRecorder::Histograms StatisticsRecorder::GetHistogramsWithPredicate(
+ const Predicate predicate) {
+ // This must be called *before* the lock is acquired below because it will
+ // call back into this object to register histograms. Those called methods
+ // will acquire the lock at that time.
+ ImportGlobalPersistentHistograms();
+
+ Histograms out;
+
+ {
+ const AutoLock auto_lock(lock_.Get());
+ EnsureGlobalRecorderWhileLocked();
+ out.reserve(top_->histograms_.size());
+ for (const auto& entry : top_->histograms_) {
+ const HistogramBase* const histogram = entry.second;
+ DCHECK(histogram);
+ if (predicate(*histogram))
+ out.push_back(entry.second);
}
- known.push_back(h.second);
}
- return known;
+ std::sort(out.begin(), out.end(), &HistogramNameLesser);
+ return out;
}
// static
-void StatisticsRecorder::ImportGlobalPersistentHistograms() {
- if (!histograms_)
- return;
-
- // Import histograms from known persistent storage. Histograms could have
- // been added by other processes and they must be fetched and recognized
- // locally. If the persistent memory segment is not shared between processes,
- // this call does nothing.
- GlobalHistogramAllocator* allocator = GlobalHistogramAllocator::Get();
- if (allocator)
- allocator->ImportHistogramsToStatisticsRecorder();
+StatisticsRecorder::Histograms StatisticsRecorder::GetHistograms() {
+ return GetHistogramsWithPredicate([](const HistogramBase&) { return true; });
}
-// This singleton instance should be started during the single threaded portion
-// of main(), and hence it is not thread safe. It initializes globals to
-// provide support for all future calls.
-StatisticsRecorder::StatisticsRecorder() {
- base::AutoLock auto_lock(lock_.Get());
-
- existing_histograms_.reset(histograms_);
- existing_callbacks_.reset(callbacks_);
- existing_ranges_.reset(ranges_);
- existing_providers_.reset(providers_);
- existing_record_checker_.reset(record_checker_);
-
- histograms_ = new HistogramMap;
- callbacks_ = new CallbackMap;
- ranges_ = new RangesMap;
- providers_ = new HistogramProviders;
- record_checker_ = nullptr;
-
- InitLogOnShutdownWithoutLock();
-}
-
-void StatisticsRecorder::InitLogOnShutdownWithoutLock() {
- if (!vlog_initialized_ && VLOG_IS_ON(1)) {
- vlog_initialized_ = true;
- AtExitManager::RegisterCallback(&DumpHistogramsToVlog, this);
- }
+// static
+StatisticsRecorder::Histograms StatisticsRecorder::GetKnownHistograms(
+ bool include_persistent) {
+ return GetHistogramsWithPredicate(
+ [include_persistent](const HistogramBase& histogram) {
+ return include_persistent ||
+ (histogram.flags() & HistogramBase::kIsPersistent) == 0;
+ });
}
// static
-void StatisticsRecorder::Reset() {
- std::unique_ptr<HistogramMap> histograms_deleter;
- std::unique_ptr<CallbackMap> callbacks_deleter;
- std::unique_ptr<RangesMap> ranges_deleter;
- std::unique_ptr<HistogramProviders> providers_deleter;
- std::unique_ptr<RecordHistogramChecker> record_checker_deleter;
- {
- base::AutoLock auto_lock(lock_.Get());
- histograms_deleter.reset(histograms_);
- callbacks_deleter.reset(callbacks_);
- ranges_deleter.reset(ranges_);
- providers_deleter.reset(providers_);
- record_checker_deleter.reset(record_checker_);
- histograms_ = nullptr;
- callbacks_ = nullptr;
- ranges_ = nullptr;
- providers_ = nullptr;
- record_checker_ = nullptr;
- }
- // We are going to leak the histograms and the ranges.
+StatisticsRecorder::Histograms StatisticsRecorder::GetSnapshot(
+ const std::string& query) {
+ // Need a C-string query for comparisons against C-string histogram name.
+ const char* const query_string = query.c_str();
+ return GetHistogramsWithPredicate(
+ [query_string](const HistogramBase& histogram) {
+ return strstr(histogram.histogram_name(), query_string) != nullptr;
+ });
}
// static
-void StatisticsRecorder::DumpHistogramsToVlog(void* instance) {
- std::string output;
- StatisticsRecorder::WriteGraph(std::string(), &output);
- VLOG(1) << output;
+void StatisticsRecorder::ImportGlobalPersistentHistograms() {
+ // Import histograms from known persistent storage. Histograms could have been
+ // added by other processes and they must be fetched and recognized locally.
+ // If the persistent memory segment is not shared between processes, this call
+ // does nothing.
+ if (GlobalHistogramAllocator* allocator = GlobalHistogramAllocator::Get())
+ allocator->ImportHistogramsToStatisticsRecorder();
}
-
-// static
-StatisticsRecorder::HistogramMap* StatisticsRecorder::histograms_ = nullptr;
-// static
-StatisticsRecorder::CallbackMap* StatisticsRecorder::callbacks_ = nullptr;
-// static
-StatisticsRecorder::RangesMap* StatisticsRecorder::ranges_ = nullptr;
-// static
-StatisticsRecorder::HistogramProviders* StatisticsRecorder::providers_;
-// static
-RecordHistogramChecker* StatisticsRecorder::record_checker_ = nullptr;
-// static
-base::LazyInstance<base::Lock>::Leaky StatisticsRecorder::lock_ =
- LAZY_INSTANCE_INITIALIZER;
+// This singleton instance should be started during the single threaded portion
+// of main(), and hence it is not thread safe. It initializes globals to provide
+// support for all future calls.
+StatisticsRecorder::StatisticsRecorder() {
+ lock_.Get().AssertAcquired();
+ previous_ = top_;
+ top_ = this;
+ InitLogOnShutdownWhileLocked();
+}
+
+// static
+void StatisticsRecorder::InitLogOnShutdownWhileLocked() {
+ lock_.Get().AssertAcquired();
+ if (!is_vlog_initialized_ && VLOG_IS_ON(1)) {
+ is_vlog_initialized_ = true;
+ const auto dump_to_vlog = [](void*) {
+ std::string output;
+ WriteGraph("", &output);
+ VLOG(1) << output;
+ };
+ AtExitManager::RegisterCallback(dump_to_vlog, nullptr);
+ }
+}
} // namespace base
diff --git a/chromium/base/metrics/statistics_recorder.h b/chromium/base/metrics/statistics_recorder.h
index 49ccaf51917..c994b7e257d 100644
--- a/chromium/base/metrics/statistics_recorder.h
+++ b/chromium/base/metrics/statistics_recorder.h
@@ -12,10 +12,10 @@
#include <stdint.h>
-#include <list>
-#include <map>
#include <memory>
#include <string>
+#include <unordered_map>
+#include <unordered_set>
#include <vector>
#include "base/base_export.h"
@@ -34,37 +34,19 @@ namespace base {
class BucketRanges;
class HistogramSnapshotManager;
+// In-memory recorder of usage statistics (aka metrics, aka histograms).
+//
+// All the public methods are static and act on a global recorder. This global
+// recorder is internally synchronized and all the static methods are thread
+// safe.
+//
+// StatisticsRecorder doesn't have any public constructor. For testing purpose,
+// you can create a temporary recorder using the factory method
+// CreateTemporaryForTesting(). This temporary recorder becomes the global one
+// until deleted. When this temporary recorder is deleted, it restores the
+// previous global one.
class BASE_EXPORT StatisticsRecorder {
public:
- // A class used as a key for the histogram map below. It always references
- // a string owned outside of this class, likely in the value of the map.
- class StringKey : public StringPiece {
- public:
- // Constructs the StringKey using various sources. The source must live
- // at least as long as the created object.
- StringKey(const std::string& str) : StringPiece(str) {}
- StringKey(StringPiece str) : StringPiece(str) {}
-
- // Though StringPiece is better passed by value than by reference, in
- // this case it's being passed many times and likely already been stored
- // in memory (not just registers) so the benefit of pass-by-value is
- // negated.
- bool operator<(const StringKey& rhs) const {
- // Since order is unimportant in the map and string comparisons can be
- // slow, use the length as the primary sort value.
- if (length() < rhs.length())
- return true;
- if (length() > rhs.length())
- return false;
-
- // Fall back to an actual string comparison. The lengths are the same
- // so a simple memory-compare is sufficient. This is slightly more
- // efficient than calling operator<() for StringPiece which would
- // again have to check lengths before calling wordmemcmp().
- return wordmemcmp(data(), rhs.data(), length()) < 0;
- }
- };
-
// An interface class that allows the StatisticsRecorder to forcibly merge
// histograms from providers when necessary.
class HistogramProvider {
@@ -73,58 +55,82 @@ class BASE_EXPORT StatisticsRecorder {
virtual void MergeHistogramDeltas() = 0;
};
- typedef std::map<StringKey, HistogramBase*> HistogramMap;
typedef std::vector<HistogramBase*> Histograms;
- typedef std::vector<WeakPtr<HistogramProvider>> HistogramProviders;
+ // Restores the previous global recorder.
+ //
+ // When several temporary recorders are created using
+ // CreateTemporaryForTesting(), these recorders must be deleted in reverse
+ // order of creation.
+ //
+ // This method is thread safe.
+ //
+ // Precondition: The recorder being deleted is the current global recorder.
~StatisticsRecorder();
- // Initializes the StatisticsRecorder system. Safe to call multiple times.
- static void Initialize();
-
- // Find out if histograms can now be registered into our list.
- static bool IsActive();
-
- // Register a provider of histograms that can be called to merge those into
- // the global StatisticsRecorder. Calls to ImportProvidedHistograms() will
- // fetch from registered providers.
+ // Registers a provider of histograms that can be called to merge those into
+ // the global recorder. Calls to ImportProvidedHistograms() will fetch from
+ // registered providers.
+ //
+ // This method is thread safe.
static void RegisterHistogramProvider(
const WeakPtr<HistogramProvider>& provider);
- // Register, or add a new histogram to the collection of statistics. If an
+ // Registers or adds a new histogram to the collection of statistics. If an
// identically named histogram is already registered, then the argument
- // |histogram| will deleted. The returned value is always the registered
+ // |histogram| will be deleted. The returned value is always the registered
// histogram (either the argument, or the pre-existing registered histogram).
+ //
+ // This method is thread safe.
static HistogramBase* RegisterOrDeleteDuplicate(HistogramBase* histogram);
- // Register, or add a new BucketRanges. If an identically BucketRanges is
- // already registered, then the argument |ranges| will deleted. The returned
- // value is always the registered BucketRanges (either the argument, or the
- // pre-existing one).
+ // Registers or adds a new BucketRanges. If an equivalent BucketRanges is
+ // already registered, then the argument |ranges| will be deleted. The
+ // returned value is always the registered BucketRanges (either the argument,
+ // or the pre-existing one).
+ //
+ // This method is thread safe.
static const BucketRanges* RegisterOrDeleteDuplicateRanges(
const BucketRanges* ranges);
// Methods for appending histogram data to a string. Only histograms which
// have |query| as a substring are written to |output| (an empty string will
// process all registered histograms).
+ //
+ // These methods are thread safe.
static void WriteHTMLGraph(const std::string& query, std::string* output);
static void WriteGraph(const std::string& query, std::string* output);
// Returns the histograms with |verbosity_level| as the serialization
// verbosity.
+ //
+ // This method is thread safe.
static std::string ToJSON(JSONVerbosityLevel verbosity_level);
- // Method for extracting histograms which were marked for use by UMA.
- static void GetHistograms(Histograms* output);
-
- // Method for extracting BucketRanges used by all histograms registered.
- static void GetBucketRanges(std::vector<const BucketRanges*>* output);
-
- // Find a histogram by name. It matches the exact name. This method is thread
- // safe. It returns NULL if a matching histogram is not found.
+ // Gets existing histograms.
+ //
+ // The returned histograms are sorted by name.
+ //
+ // Ownership of the individual histograms remains with the StatisticsRecorder.
+ //
+ // This method is thread safe.
+ static Histograms GetHistograms();
+
+ // Gets BucketRanges used by all histograms registered. The order of returned
+ // BucketRanges is not guaranteed.
+ //
+ // This method is thread safe.
+ static std::vector<const BucketRanges*> GetBucketRanges();
+
+ // Finds a histogram by name. Matches the exact name. Returns a null pointer
+ // if a matching histogram is not found.
+ //
+ // This method is thread safe.
static HistogramBase* FindHistogram(base::StringPiece name);
- // Imports histograms from providers. This must be called on the UI thread.
+ // Imports histograms from providers.
+ //
+ // This method must be called on the UI thread.
static void ImportProvidedHistograms();
// Snapshots all histograms via |snapshot_manager|. |flags_to_set| is used to
@@ -137,128 +143,180 @@ class BASE_EXPORT StatisticsRecorder {
HistogramBase::Flags required_flags,
HistogramSnapshotManager* snapshot_manager);
- // GetSnapshot copies some of the pointers to registered histograms into the
- // caller supplied vector (Histograms). Only histograms which have |query| as
- // a substring are copied (an empty string will process all registered
- // histograms).
- static void GetSnapshot(const std::string& query, Histograms* snapshot);
+ // Gets registered histograms. Only histograms which have |query| as a
+ // substring in their name are extracted. An empty query returns all
+ // registered histograms.
+ //
+ // The returned histograms are sorted by name.
+ //
+ // Ownership of the individual histograms remains with the StatisticsRecorder.
+ //
+ // This method is thread safe.
+ static Histograms GetSnapshot(const std::string& query);
typedef base::Callback<void(HistogramBase::Sample)> OnSampleCallback;
- // SetCallback sets the callback to notify when a new sample is recorded on
- // the histogram referred to by |histogram_name|. The call to this method can
- // be be done before or after the histogram is created. This method is thread
- // safe. The return value is whether or not the callback was successfully set.
+ // Sets the callback to notify when a new sample is recorded on the histogram
+ // referred to by |histogram_name|. Can be called before or after the
+ // histogram is created. Returns whether the callback was successfully set.
+ //
+ // This method is thread safe.
static bool SetCallback(const std::string& histogram_name,
const OnSampleCallback& callback);
- // ClearCallback clears any callback set on the histogram referred to by
- // |histogram_name|. This method is thread safe.
+ // Clears any callback set on the histogram referred to by |histogram_name|.
+ //
+ // This method is thread safe.
static void ClearCallback(const std::string& histogram_name);
- // FindCallback retrieves the callback for the histogram referred to by
- // |histogram_name|, or a null callback if no callback exists for this
- // histogram. This method is thread safe.
+ // Retrieves the callback for the histogram referred to by |histogram_name|,
+ // or a null callback if no callback exists for this histogram.
+ //
+ // This method is thread safe.
static OnSampleCallback FindCallback(const std::string& histogram_name);
// Returns the number of known histograms.
+ //
+ // This method is thread safe.
static size_t GetHistogramCount();
// Initializes logging histograms with --v=1. Safe to call multiple times.
// Is called from ctor but for browser it seems that it is more useful to
// start logging after statistics recorder, so we need to init log-on-shutdown
// later.
+ //
+ // This method is thread safe.
static void InitLogOnShutdown();
// Removes a histogram from the internal set of known ones. This can be
// necessary during testing persistent histograms where the underlying
// memory is being released.
+ //
+ // This method is thread safe.
static void ForgetHistogramForTesting(base::StringPiece name);
- // Creates a local StatisticsRecorder object for testing purposes. All new
- // histograms will be registered in it until it is destructed or pushed
- // aside for the lifetime of yet another SR object. The destruction of the
- // returned object will re-activate the previous one. Always release SR
- // objects in the opposite order to which they're created.
+ // Creates a temporary StatisticsRecorder object for testing purposes. All new
+ // histograms will be registered in it until it is destructed or pushed aside
+ // for the lifetime of yet another StatisticsRecorder object. The destruction
+ // of the returned object will re-activate the previous one.
+ // StatisticsRecorder objects must be deleted in the opposite order to which
+ // they're created.
+ //
+ // This method is thread safe.
static std::unique_ptr<StatisticsRecorder> CreateTemporaryForTesting()
WARN_UNUSED_RESULT;
- // Resets any global instance of the statistics-recorder that was created
- // by a call to Initialize().
- static void UninitializeForTesting();
-
// Sets the record checker for determining if a histogram should be recorded.
// Record checker doesn't affect any already recorded histograms, so this
// method must be called very early, before any threads have started.
// Record checker methods can be called on any thread, so they shouldn't
// mutate any state.
+ //
// TODO(iburak): This is not yet hooked up to histogram recording
// infrastructure.
static void SetRecordChecker(
std::unique_ptr<RecordHistogramChecker> record_checker);
- // Returns true iff the given histogram should be recorded based on
- // the ShouldRecord() method of the record checker.
- // If the record checker is not set, returns true.
+ // Checks if the given histogram should be recorded based on the
+ // ShouldRecord() method of the record checker. If the record checker is not
+ // set, returns true.
+ //
+ // This method is thread safe.
static bool ShouldRecordHistogram(uint64_t histogram_hash);
private:
+ typedef std::vector<WeakPtr<HistogramProvider>> HistogramProviders;
+
+ typedef std::unordered_map<StringPiece, HistogramBase*, StringPieceHash>
+ HistogramMap;
+
// We keep a map of callbacks to histograms, so that as histograms are
// created, we can set the callback properly.
- typedef std::map<std::string, OnSampleCallback> CallbackMap;
+ typedef std::unordered_map<std::string, OnSampleCallback> CallbackMap;
+
+ struct BucketRangesHash {
+ size_t operator()(const BucketRanges* a) const;
+ };
+
+ struct BucketRangesEqual {
+ bool operator()(const BucketRanges* a, const BucketRanges* b) const;
+ };
- // We keep all |bucket_ranges_| in a map, from checksum to a list of
- // |bucket_ranges_|. Checksum is calculated from the |ranges_| in
- // |bucket_ranges_|.
- typedef std::map<uint32_t, std::list<const BucketRanges*>*> RangesMap;
+ typedef std::
+ unordered_set<const BucketRanges*, BucketRangesHash, BucketRangesEqual>
+ RangesMap;
- friend struct LazyInstanceTraitsBase<StatisticsRecorder>;
friend class StatisticsRecorderTest;
FRIEND_TEST_ALL_PREFIXES(StatisticsRecorderTest, IterationTest);
- // Fetch set of existing histograms. Ownership of the individual histograms
- // remains with the StatisticsRecorder.
- static std::vector<HistogramBase*> GetKnownHistograms(
- bool include_persistent);
-
- // Imports histograms from global persistent memory. The global lock must
- // not be held during this call.
+ // Initializes the global recorder if it doesn't already exist. Safe to call
+ // multiple times.
+ //
+ // Precondition: The global lock is already acquired.
+ static void EnsureGlobalRecorderWhileLocked();
+
+ // Gets existing histograms matching |predicate|. |Predicate| must have the
+ // signature bool(const HistogramBase&).
+ //
+ // The returned histograms are sorted by name.
+ //
+ // Ownership of the individual histograms remains with the StatisticsRecorder.
+ //
+ // This method is thread safe.
+ template <typename Predicate>
+ static Histograms GetHistogramsWithPredicate(Predicate predicate);
+
+ // Gets existing histograms.
+ //
+ // The returned histograms are sorted by name.
+ //
+ // Ownership of the individual histograms remains with the StatisticsRecorder.
+ //
+ // This method is thread safe.
+ static Histograms GetKnownHistograms(bool include_persistent);
+
+ // Gets histogram providers.
+ //
+ // This method is thread safe.
+ static HistogramProviders GetHistogramProviders();
+
+ // Imports histograms from global persistent memory.
+ //
+ // Precondition: The global lock must not be held during this call.
static void ImportGlobalPersistentHistograms();
- // The constructor just initializes static members. Usually client code should
- // use Initialize to do this. But in test code, you can friend this class and
- // call the constructor to get a clean StatisticsRecorder.
+ // Constructs a new StatisticsRecorder and sets it as the current global
+ // recorder.
+ //
+ // Precondition: The global lock is already acquired.
StatisticsRecorder();
// Initialize implementation but without lock. Caller should guard
// StatisticsRecorder by itself if needed (it isn't in unit tests).
- void InitLogOnShutdownWithoutLock();
-
- // These are copies of everything that existed when the (test) Statistics-
- // Recorder was created. The global ones have to be moved aside to create a
- // clean environment.
- std::unique_ptr<HistogramMap> existing_histograms_;
- std::unique_ptr<CallbackMap> existing_callbacks_;
- std::unique_ptr<RangesMap> existing_ranges_;
- std::unique_ptr<HistogramProviders> existing_providers_;
- std::unique_ptr<RecordHistogramChecker> existing_record_checker_;
-
- bool vlog_initialized_ = false;
-
- static void Reset();
- static void DumpHistogramsToVlog(void* instance);
-
- static HistogramMap* histograms_;
- static CallbackMap* callbacks_;
- static RangesMap* ranges_;
- static HistogramProviders* providers_;
- static RecordHistogramChecker* record_checker_;
-
- // Lock protects access to above maps. This is a LazyInstance to avoid races
- // when the above methods are used before Initialize(). Previously each method
- // would do |if (!lock_) return;| which would race with
- // |lock_ = new Lock;| in StatisticsRecorder(). http://crbug.com/672852.
- static base::LazyInstance<base::Lock>::Leaky lock_;
+ //
+ // Precondition: The global lock is already acquired.
+ static void InitLogOnShutdownWhileLocked();
+
+ HistogramMap histograms_;
+ CallbackMap callbacks_;
+ RangesMap ranges_;
+ HistogramProviders providers_;
+ std::unique_ptr<RecordHistogramChecker> record_checker_;
+
+ // Previous global recorder that existed when this one was created.
+ StatisticsRecorder* previous_ = nullptr;
+
+ // Global lock for internal synchronization.
+ static LazyInstance<Lock>::Leaky lock_;
+
+ // Current global recorder. This recorder is used by static methods. When a
+ // new global recorder is created by CreateTemporaryForTesting(), then the
+ // previous global recorder is referenced by top_->previous_.
+ static StatisticsRecorder* top_;
+
+ // Tracks whether InitLogOnShutdownWhileLocked() has registered a logging
+ // function that will be called when the program finishes.
+ static bool is_vlog_initialized_;
DISALLOW_COPY_AND_ASSIGN(StatisticsRecorder);
};
diff --git a/chromium/base/metrics/statistics_recorder_unittest.cc b/chromium/base/metrics/statistics_recorder_unittest.cc
index 92d1bba602e..79ab7bf6b13 100644
--- a/chromium/base/metrics/statistics_recorder_unittest.cc
+++ b/chromium/base/metrics/statistics_recorder_unittest.cc
@@ -20,6 +20,7 @@
#include "base/metrics/record_histogram_checker.h"
#include "base/metrics/sparse_histogram.h"
#include "base/values.h"
+#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace {
@@ -30,9 +31,7 @@ class LogStateSaver {
public:
LogStateSaver() : old_min_log_level_(logging::GetMinLogLevel()) {}
- ~LogStateSaver() {
- logging::SetMinLogLevel(old_min_log_level_);
- }
+ ~LogStateSaver() { logging::SetMinLogLevel(old_min_log_level_); }
private:
int old_min_log_level_;
@@ -55,6 +54,11 @@ class OddRecordHistogramChecker : public base::RecordHistogramChecker {
namespace base {
+using testing::ElementsAre;
+using testing::IsEmpty;
+using testing::SizeIs;
+using testing::UnorderedElementsAre;
+
class StatisticsRecorderTest : public testing::TestWithParam<bool> {
protected:
const int32_t kAllocatorMemorySize = 64 << 10; // 64 KiB
@@ -70,8 +74,8 @@ class StatisticsRecorderTest : public testing::TestWithParam<bool> {
// Use persistent memory for histograms if so indicated by test parameter.
if (use_persistent_histogram_allocator_) {
- GlobalHistogramAllocator::CreateWithLocalMemory(
- kAllocatorMemorySize, 0, "StatisticsRecorderTest");
+ GlobalHistogramAllocator::CreateWithLocalMemory(kAllocatorMemorySize, 0,
+ "StatisticsRecorderTest");
}
}
@@ -82,15 +86,19 @@ class StatisticsRecorderTest : public testing::TestWithParam<bool> {
void InitializeStatisticsRecorder() {
DCHECK(!statistics_recorder_);
- StatisticsRecorder::UninitializeForTesting();
statistics_recorder_ = StatisticsRecorder::CreateTemporaryForTesting();
}
+ // Deletes the global recorder if there is any. This is used by test
+ // NotInitialized to ensure a clean global state.
void UninitializeStatisticsRecorder() {
statistics_recorder_.reset();
- StatisticsRecorder::UninitializeForTesting();
+ delete StatisticsRecorder::top_;
+ DCHECK(!StatisticsRecorder::top_);
}
+ bool HasGlobalRecorder() { return StatisticsRecorder::top_ != nullptr; }
+
Histogram* CreateHistogram(const char* name,
HistogramBase::Sample min,
HistogramBase::Sample max,
@@ -102,18 +110,13 @@ class StatisticsRecorderTest : public testing::TestWithParam<bool> {
return new Histogram(name, min, max, registered_ranges);
}
- void DeleteHistogram(HistogramBase* histogram) {
- delete histogram;
- }
+ void InitLogOnShutdown() { StatisticsRecorder::InitLogOnShutdown(); }
- void InitLogOnShutdown() {
- DCHECK(statistics_recorder_);
- statistics_recorder_->InitLogOnShutdownWithoutLock();
- }
+ bool IsVLogInitialized() { return StatisticsRecorder::is_vlog_initialized_; }
- bool VLogInitialized() {
- DCHECK(statistics_recorder_);
- return statistics_recorder_->vlog_initialized_;
+ void ResetVLogInitialized() {
+ UninitializeStatisticsRecorder();
+ StatisticsRecorder::is_vlog_initialized_ = false;
}
const bool use_persistent_histogram_allocator_;
@@ -132,30 +135,25 @@ INSTANTIATE_TEST_CASE_P(Allocator, StatisticsRecorderTest, testing::Bool());
TEST_P(StatisticsRecorderTest, NotInitialized) {
UninitializeStatisticsRecorder();
+ EXPECT_FALSE(HasGlobalRecorder());
- ASSERT_FALSE(StatisticsRecorder::IsActive());
+ HistogramBase* const histogram =
+ CreateHistogram("TestHistogram", 1, 1000, 10);
+ EXPECT_EQ(StatisticsRecorder::RegisterOrDeleteDuplicate(histogram),
+ histogram);
+ EXPECT_TRUE(HasGlobalRecorder());
+ EXPECT_THAT(StatisticsRecorder::GetHistograms(), ElementsAre(histogram));
- StatisticsRecorder::Histograms registered_histograms;
- std::vector<const BucketRanges*> registered_ranges;
-
- StatisticsRecorder::GetHistograms(&registered_histograms);
- EXPECT_EQ(0u, registered_histograms.size());
-
- Histogram* histogram = CreateHistogram("TestHistogram", 1, 1000, 10);
-
- // When StatisticsRecorder is not initialized, register is a noop.
- EXPECT_EQ(histogram,
- StatisticsRecorder::RegisterOrDeleteDuplicate(histogram));
- // Manually delete histogram that was not registered.
- DeleteHistogram(histogram);
+ UninitializeStatisticsRecorder();
+ EXPECT_FALSE(HasGlobalRecorder());
- // RegisterOrDeleteDuplicateRanges is a no-op.
- BucketRanges* ranges = new BucketRanges(3);
+ BucketRanges* const ranges = new BucketRanges(3);
ranges->ResetChecksum();
- EXPECT_EQ(ranges,
- StatisticsRecorder::RegisterOrDeleteDuplicateRanges(ranges));
- StatisticsRecorder::GetBucketRanges(&registered_ranges);
- EXPECT_EQ(0u, registered_ranges.size());
+ EXPECT_EQ(StatisticsRecorder::RegisterOrDeleteDuplicateRanges(ranges),
+ ranges);
+ EXPECT_TRUE(HasGlobalRecorder());
+ EXPECT_THAT(StatisticsRecorder::GetBucketRanges(),
+ UnorderedElementsAre(ranges));
}
TEST_P(StatisticsRecorderTest, RegisterBucketRanges) {
@@ -171,15 +169,15 @@ TEST_P(StatisticsRecorderTest, RegisterBucketRanges) {
StatisticsRecorder::RegisterOrDeleteDuplicateRanges(ranges1));
EXPECT_EQ(ranges2,
StatisticsRecorder::RegisterOrDeleteDuplicateRanges(ranges2));
- StatisticsRecorder::GetBucketRanges(&registered_ranges);
- ASSERT_EQ(2u, registered_ranges.size());
+ EXPECT_THAT(StatisticsRecorder::GetBucketRanges(),
+ UnorderedElementsAre(ranges1, ranges2));
// Register some ranges again.
EXPECT_EQ(ranges1,
StatisticsRecorder::RegisterOrDeleteDuplicateRanges(ranges1));
- registered_ranges.clear();
- StatisticsRecorder::GetBucketRanges(&registered_ranges);
- ASSERT_EQ(2u, registered_ranges.size());
+ EXPECT_THAT(StatisticsRecorder::GetBucketRanges(),
+ UnorderedElementsAre(ranges1, ranges2));
+
// Make sure the ranges is still the one we know.
ASSERT_EQ(3u, ranges1->size());
EXPECT_EQ(0, ranges1->range(0));
@@ -191,31 +189,40 @@ TEST_P(StatisticsRecorderTest, RegisterBucketRanges) {
ranges3->ResetChecksum();
EXPECT_EQ(ranges1, // returning ranges1
StatisticsRecorder::RegisterOrDeleteDuplicateRanges(ranges3));
- registered_ranges.clear();
- StatisticsRecorder::GetBucketRanges(&registered_ranges);
- ASSERT_EQ(2u, registered_ranges.size());
+ EXPECT_THAT(StatisticsRecorder::GetBucketRanges(),
+ UnorderedElementsAre(ranges1, ranges2));
}
TEST_P(StatisticsRecorderTest, RegisterHistogram) {
// Create a Histogram that was not registered.
- Histogram* histogram = CreateHistogram("TestHistogram", 1, 1000, 10);
+ Histogram* const histogram1 = CreateHistogram("TestHistogram1", 1, 1000, 10);
- StatisticsRecorder::Histograms registered_histograms;
- StatisticsRecorder::GetHistograms(&registered_histograms);
- EXPECT_EQ(0u, registered_histograms.size());
+ EXPECT_THAT(StatisticsRecorder::GetHistograms(), IsEmpty());
// Register the Histogram.
- EXPECT_EQ(histogram,
- StatisticsRecorder::RegisterOrDeleteDuplicate(histogram));
- StatisticsRecorder::GetHistograms(&registered_histograms);
- EXPECT_EQ(1u, registered_histograms.size());
+ EXPECT_EQ(histogram1,
+ StatisticsRecorder::RegisterOrDeleteDuplicate(histogram1));
+ EXPECT_THAT(StatisticsRecorder::GetHistograms(), ElementsAre(histogram1));
// Register the same Histogram again.
- EXPECT_EQ(histogram,
- StatisticsRecorder::RegisterOrDeleteDuplicate(histogram));
- registered_histograms.clear();
- StatisticsRecorder::GetHistograms(&registered_histograms);
- EXPECT_EQ(1u, registered_histograms.size());
+ EXPECT_EQ(histogram1,
+ StatisticsRecorder::RegisterOrDeleteDuplicate(histogram1));
+ EXPECT_THAT(StatisticsRecorder::GetHistograms(), ElementsAre(histogram1));
+
+ // Register another Histogram with the same name.
+ Histogram* const histogram2 = CreateHistogram("TestHistogram1", 1, 1000, 10);
+ EXPECT_NE(histogram1, histogram2);
+ EXPECT_EQ(histogram1,
+ StatisticsRecorder::RegisterOrDeleteDuplicate(histogram2));
+ EXPECT_THAT(StatisticsRecorder::GetHistograms(), ElementsAre(histogram1));
+
+ // Register another Histogram with a different name.
+ Histogram* const histogram3 = CreateHistogram("TestHistogram0", 1, 1000, 10);
+ EXPECT_NE(histogram1, histogram3);
+ EXPECT_EQ(histogram3,
+ StatisticsRecorder::RegisterOrDeleteDuplicate(histogram3));
+ EXPECT_THAT(StatisticsRecorder::GetHistograms(),
+ ElementsAre(histogram3, histogram1));
}
TEST_P(StatisticsRecorderTest, FindHistogram) {
@@ -257,63 +264,45 @@ TEST_P(StatisticsRecorderTest, GetSnapshot) {
Histogram::FactoryGet("TestHistogram2", 1, 1000, 10, Histogram::kNoFlags);
Histogram::FactoryGet("TestHistogram3", 1, 1000, 10, Histogram::kNoFlags);
- StatisticsRecorder::Histograms snapshot;
- StatisticsRecorder::GetSnapshot("Test", &snapshot);
- EXPECT_EQ(3u, snapshot.size());
-
- snapshot.clear();
- StatisticsRecorder::GetSnapshot("1", &snapshot);
- EXPECT_EQ(1u, snapshot.size());
-
- snapshot.clear();
- StatisticsRecorder::GetSnapshot("hello", &snapshot);
- EXPECT_EQ(0u, snapshot.size());
+ EXPECT_THAT(StatisticsRecorder::GetSnapshot("Test"), SizeIs(3));
+ EXPECT_THAT(StatisticsRecorder::GetSnapshot("1"), SizeIs(1));
+ EXPECT_THAT(StatisticsRecorder::GetSnapshot("hello"), IsEmpty());
}
TEST_P(StatisticsRecorderTest, RegisterHistogramWithFactoryGet) {
- StatisticsRecorder::Histograms registered_histograms;
-
- StatisticsRecorder::GetHistograms(&registered_histograms);
- ASSERT_EQ(0u, registered_histograms.size());
+ EXPECT_THAT(StatisticsRecorder::GetHistograms(), IsEmpty());
// Create a histogram.
- HistogramBase* histogram = Histogram::FactoryGet(
+ HistogramBase* const histogram1 = Histogram::FactoryGet(
"TestHistogram", 1, 1000, 10, HistogramBase::kNoFlags);
- registered_histograms.clear();
- StatisticsRecorder::GetHistograms(&registered_histograms);
- EXPECT_EQ(1u, registered_histograms.size());
+ EXPECT_THAT(StatisticsRecorder::GetHistograms(), ElementsAre(histogram1));
// Get an existing histogram.
- HistogramBase* histogram2 = Histogram::FactoryGet(
+ HistogramBase* const histogram2 = Histogram::FactoryGet(
"TestHistogram", 1, 1000, 10, HistogramBase::kNoFlags);
- registered_histograms.clear();
- StatisticsRecorder::GetHistograms(&registered_histograms);
- EXPECT_EQ(1u, registered_histograms.size());
- EXPECT_EQ(histogram, histogram2);
+ EXPECT_EQ(histogram1, histogram2);
+ EXPECT_THAT(StatisticsRecorder::GetHistograms(), ElementsAre(histogram1));
// Create a LinearHistogram.
- histogram = LinearHistogram::FactoryGet(
+ HistogramBase* const histogram3 = LinearHistogram::FactoryGet(
"TestLinearHistogram", 1, 1000, 10, HistogramBase::kNoFlags);
- registered_histograms.clear();
- StatisticsRecorder::GetHistograms(&registered_histograms);
- EXPECT_EQ(2u, registered_histograms.size());
+ EXPECT_THAT(StatisticsRecorder::GetHistograms(),
+ ElementsAre(histogram1, histogram3));
// Create a BooleanHistogram.
- histogram = BooleanHistogram::FactoryGet(
+ HistogramBase* const histogram4 = BooleanHistogram::FactoryGet(
"TestBooleanHistogram", HistogramBase::kNoFlags);
- registered_histograms.clear();
- StatisticsRecorder::GetHistograms(&registered_histograms);
- EXPECT_EQ(3u, registered_histograms.size());
+ EXPECT_THAT(StatisticsRecorder::GetHistograms(),
+ ElementsAre(histogram4, histogram1, histogram3));
// Create a CustomHistogram.
std::vector<int> custom_ranges;
custom_ranges.push_back(1);
custom_ranges.push_back(5);
- histogram = CustomHistogram::FactoryGet(
+ HistogramBase* const histogram5 = CustomHistogram::FactoryGet(
"TestCustomHistogram", custom_ranges, HistogramBase::kNoFlags);
- registered_histograms.clear();
- StatisticsRecorder::GetHistograms(&registered_histograms);
- EXPECT_EQ(4u, registered_histograms.size());
+ EXPECT_THAT(StatisticsRecorder::GetHistograms(),
+ ElementsAre(histogram4, histogram5, histogram1, histogram3));
}
TEST_P(StatisticsRecorderTest, RegisterHistogramWithMacros) {
@@ -331,35 +320,25 @@ TEST_P(StatisticsRecorderTest, RegisterHistogramWithMacros) {
// The histogram we got from macro is the same as from FactoryGet.
LOCAL_HISTOGRAM_COUNTS("TestHistogramCounts", 30);
- registered_histograms.clear();
- StatisticsRecorder::GetHistograms(&registered_histograms);
+ registered_histograms = StatisticsRecorder::GetHistograms();
ASSERT_EQ(1u, registered_histograms.size());
EXPECT_EQ(histogram, registered_histograms[0]);
LOCAL_HISTOGRAM_TIMES("TestHistogramTimes", TimeDelta::FromDays(1));
LOCAL_HISTOGRAM_ENUMERATION("TestHistogramEnumeration", 20, 200);
- registered_histograms.clear();
- StatisticsRecorder::GetHistograms(&registered_histograms);
- EXPECT_EQ(3u, registered_histograms.size());
+ EXPECT_THAT(StatisticsRecorder::GetHistograms(), SizeIs(3));
}
TEST_P(StatisticsRecorderTest, BucketRangesSharing) {
- std::vector<const BucketRanges*> ranges;
- StatisticsRecorder::GetBucketRanges(&ranges);
- EXPECT_EQ(0u, ranges.size());
+ EXPECT_THAT(StatisticsRecorder::GetBucketRanges(), IsEmpty());
Histogram::FactoryGet("Histogram", 1, 64, 8, HistogramBase::kNoFlags);
Histogram::FactoryGet("Histogram2", 1, 64, 8, HistogramBase::kNoFlags);
-
- StatisticsRecorder::GetBucketRanges(&ranges);
- EXPECT_EQ(1u, ranges.size());
+ EXPECT_THAT(StatisticsRecorder::GetBucketRanges(), SizeIs(1));
Histogram::FactoryGet("Histogram3", 1, 64, 16, HistogramBase::kNoFlags);
-
- ranges.clear();
- StatisticsRecorder::GetBucketRanges(&ranges);
- EXPECT_EQ(2u, ranges.size());
+ EXPECT_THAT(StatisticsRecorder::GetBucketRanges(), SizeIs(2));
}
TEST_P(StatisticsRecorderTest, ToJSON) {
@@ -423,9 +402,9 @@ TEST_P(StatisticsRecorderTest, IterationTest) {
Histogram::FactoryGet("IterationTest1", 1, 64, 16, HistogramBase::kNoFlags);
Histogram::FactoryGet("IterationTest2", 1, 64, 16, HistogramBase::kNoFlags);
- EXPECT_EQ(2U, StatisticsRecorder::GetKnownHistograms(true).size());
- EXPECT_EQ(use_persistent_histogram_allocator_ ? 0U : 2U,
- StatisticsRecorder::GetKnownHistograms(false).size());
+ EXPECT_THAT(StatisticsRecorder::GetKnownHistograms(true), SizeIs(2));
+ EXPECT_THAT(StatisticsRecorder::GetKnownHistograms(false),
+ SizeIs(use_persistent_histogram_allocator_ ? 0 : 2));
// Create a new global allocator using the same memory as the old one. Any
// old one is kept around so the memory doesn't get released.
@@ -442,11 +421,11 @@ TEST_P(StatisticsRecorderTest, IterationTest) {
InitializeStatisticsRecorder();
StatisticsRecorder::ImportGlobalPersistentHistograms();
- EXPECT_EQ(use_persistent_histogram_allocator_ ? 2U : 0U,
- StatisticsRecorder::GetKnownHistograms(true).size());
+ EXPECT_THAT(StatisticsRecorder::GetKnownHistograms(true),
+ SizeIs(use_persistent_histogram_allocator_ ? 2 : 0));
StatisticsRecorder::ImportGlobalPersistentHistograms();
- EXPECT_EQ(0U, StatisticsRecorder::GetKnownHistograms(false).size());
+ EXPECT_THAT(StatisticsRecorder::GetKnownHistograms(false), IsEmpty());
}
namespace {
@@ -624,33 +603,33 @@ TEST_P(StatisticsRecorderTest, CallbackUsedBeforeHistogramCreatedTest) {
}
TEST_P(StatisticsRecorderTest, LogOnShutdownNotInitialized) {
- UninitializeStatisticsRecorder();
+ ResetVLogInitialized();
logging::SetMinLogLevel(logging::LOG_WARNING);
InitializeStatisticsRecorder();
EXPECT_FALSE(VLOG_IS_ON(1));
- EXPECT_FALSE(VLogInitialized());
+ EXPECT_FALSE(IsVLogInitialized());
InitLogOnShutdown();
- EXPECT_FALSE(VLogInitialized());
+ EXPECT_FALSE(IsVLogInitialized());
}
TEST_P(StatisticsRecorderTest, LogOnShutdownInitializedExplicitly) {
- UninitializeStatisticsRecorder();
+ ResetVLogInitialized();
logging::SetMinLogLevel(logging::LOG_WARNING);
InitializeStatisticsRecorder();
EXPECT_FALSE(VLOG_IS_ON(1));
- EXPECT_FALSE(VLogInitialized());
+ EXPECT_FALSE(IsVLogInitialized());
logging::SetMinLogLevel(logging::LOG_VERBOSE);
EXPECT_TRUE(VLOG_IS_ON(1));
InitLogOnShutdown();
- EXPECT_TRUE(VLogInitialized());
+ EXPECT_TRUE(IsVLogInitialized());
}
TEST_P(StatisticsRecorderTest, LogOnShutdownInitialized) {
- UninitializeStatisticsRecorder();
+ ResetVLogInitialized();
logging::SetMinLogLevel(logging::LOG_VERBOSE);
InitializeStatisticsRecorder();
EXPECT_TRUE(VLOG_IS_ON(1));
- EXPECT_TRUE(VLogInitialized());
+ EXPECT_TRUE(IsVLogInitialized());
}
class TestHistogramProvider : public StatisticsRecorder::HistogramProvider {
diff --git a/chromium/base/nix/xdg_util.cc b/chromium/base/nix/xdg_util.cc
index 7d1a8795ebb..109624a1e5a 100644
--- a/chromium/base/nix/xdg_util.cc
+++ b/chromium/base/nix/xdg_util.cc
@@ -71,6 +71,8 @@ DesktopEnvironment GetDesktopEnvironment(Environment* env) {
}
if (xdg_current_desktop == "GNOME")
return DESKTOP_ENVIRONMENT_GNOME;
+ if (xdg_current_desktop == "X-Cinnamon")
+ return DESKTOP_ENVIRONMENT_CINNAMON;
if (xdg_current_desktop == "KDE") {
std::string kde_session;
if (env->GetVar(kKDESessionEnvVar, &kde_session)) {
@@ -120,6 +122,8 @@ const char* GetDesktopEnvironmentName(DesktopEnvironment env) {
switch (env) {
case DESKTOP_ENVIRONMENT_OTHER:
return nullptr;
+ case DESKTOP_ENVIRONMENT_CINNAMON:
+ return "CINNAMON";
case DESKTOP_ENVIRONMENT_GNOME:
return "GNOME";
case DESKTOP_ENVIRONMENT_KDE3:
diff --git a/chromium/base/nix/xdg_util.h b/chromium/base/nix/xdg_util.h
index c012f9ff06d..65f7d157052 100644
--- a/chromium/base/nix/xdg_util.h
+++ b/chromium/base/nix/xdg_util.h
@@ -47,6 +47,7 @@ BASE_EXPORT FilePath GetXDGUserDirectory(const char* dir_name,
enum DesktopEnvironment {
DESKTOP_ENVIRONMENT_OTHER,
+ DESKTOP_ENVIRONMENT_CINNAMON,
DESKTOP_ENVIRONMENT_GNOME,
// KDE3, KDE4 and KDE5 are sufficiently different that we count
// them as different desktop environments here.
diff --git a/chromium/base/nix/xdg_util_unittest.cc b/chromium/base/nix/xdg_util_unittest.cc
index 6f72a84c118..e195303ca3c 100644
--- a/chromium/base/nix/xdg_util_unittest.cc
+++ b/chromium/base/nix/xdg_util_unittest.cc
@@ -32,6 +32,7 @@ const char* const kDesktopMATE = "mate";
const char* const kDesktopKDE4 = "kde4";
const char* const kDesktopKDE = "kde";
const char* const kDesktopXFCE = "xfce";
+const char* const kXdgDesktopCinnamon = "X-Cinnamon";
const char* const kXdgDesktopGNOME = "GNOME";
const char* const kXdgDesktopKDE = "KDE";
const char* const kXdgDesktopPantheon = "Pantheon";
@@ -91,6 +92,15 @@ TEST(XDGUtilTest, GetDesktopEnvironmentXFCE) {
EXPECT_EQ(DESKTOP_ENVIRONMENT_XFCE, GetDesktopEnvironment(&getter));
}
+TEST(XDGUtilTest, GetXdgDesktopCinnamon) {
+ MockEnvironment getter;
+ EXPECT_CALL(getter, GetVar(_, _)).WillRepeatedly(Return(false));
+ EXPECT_CALL(getter, GetVar(Eq(kXdgDesktop), _))
+ .WillOnce(DoAll(SetArgPointee<1>(kXdgDesktopCinnamon), Return(true)));
+
+ EXPECT_EQ(DESKTOP_ENVIRONMENT_CINNAMON, GetDesktopEnvironment(&getter));
+}
+
TEST(XDGUtilTest, GetXdgDesktopGnome) {
MockEnvironment getter;
EXPECT_CALL(getter, GetVar(_, _)).WillRepeatedly(Return(false));
diff --git a/chromium/base/no_destructor.h b/chromium/base/no_destructor.h
new file mode 100644
index 00000000000..f4ca04b069d
--- /dev/null
+++ b/chromium/base/no_destructor.h
@@ -0,0 +1,72 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_NO_DESTRUCTOR_H_
+#define BASE_NO_DESTRUCTOR_H_
+
+#include <utility>
+
+namespace base {
+
+// A wrapper that makes it easy to create an object of type T with static
+// storage duration that:
+// - is only constructed on first access
+// - never invokes the destructor
+// in order to satisfy the styleguide ban on global constructors and
+// destructors.
+//
+// Runtime constant example:
+// const std::string& GetLineSeparator() {
+// // Forwards to std::string(size_t, char, const Allocator&) constructor.
+// static const base::NoDestructor<std::string> s(5, '-');
+// return s;
+// }
+//
+// More complex initialization with a lambda:
+// const std::string& GetSessionNonce() {
+// static const base::NoDestructor<std::string> nonce([] {
+// std::string s(16);
+// crypto::RandString(s.data(), s.size());
+// return s;
+// })());
+// return *nonce;
+// }
+//
+// NoDestructor<T> stores the object inline, so it also avoids a pointer
+// indirection and a malloc. Code should prefer to use NoDestructor<T> over:
+// - The CR_DEFINE_STATIC_LOCAL() helper macro.
+// - A function scoped static T* or T& that is dynamically initialized.
+// - A global base::LazyInstance<T>.
+//
+// Note that since the destructor is never run, this *will* leak memory if used
+// as a stack or member variable. Furthermore, a NoDestructor<T> should never
+// have global scope as that may require a static initializer.
+template <typename T>
+class NoDestructor {
+ public:
+ // Not constexpr; just write static constexpr T x = ...; if the value should
+ // be a constexpr.
+ template <typename... Args>
+ explicit NoDestructor(Args&&... args) {
+ new (get()) T(std::forward<Args>(args)...);
+ }
+
+ ~NoDestructor() = default;
+
+ const T& operator*() const { return *get(); }
+ T& operator*() { return *get(); }
+
+ const T* operator->() const { return get(); }
+ T* operator->() { return get(); }
+
+ const T* get() const { return reinterpret_cast<const T*>(&storage_); }
+ T* get() { return reinterpret_cast<T*>(&storage_); }
+
+ private:
+ alignas(T) char storage_[sizeof(T)];
+};
+
+} // namespace base
+
+#endif // BASE_NO_DESTRUCTOR_H_
diff --git a/chromium/base/no_destructor_unittest.cc b/chromium/base/no_destructor_unittest.cc
new file mode 100644
index 00000000000..0cb13388099
--- /dev/null
+++ b/chromium/base/no_destructor_unittest.cc
@@ -0,0 +1,67 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/no_destructor.h"
+
+#include <string>
+#include <utility>
+
+#include "base/logging.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+struct CheckOnDestroy {
+ ~CheckOnDestroy() { CHECK(false); }
+};
+
+TEST(NoDestructorTest, SkipsDestructors) {
+ NoDestructor<CheckOnDestroy> destructor_should_not_run;
+}
+
+struct CopyOnly {
+ CopyOnly() = default;
+
+ CopyOnly(const CopyOnly&) = default;
+ CopyOnly& operator=(const CopyOnly&) = default;
+
+ CopyOnly(CopyOnly&&) = delete;
+ CopyOnly& operator=(CopyOnly&&) = delete;
+};
+
+struct MoveOnly {
+ MoveOnly() = default;
+
+ MoveOnly(const MoveOnly&) = delete;
+ MoveOnly& operator=(const MoveOnly&) = delete;
+
+ MoveOnly(MoveOnly&&) = default;
+ MoveOnly& operator=(MoveOnly&&) = default;
+};
+
+struct ForwardingTestStruct {
+ ForwardingTestStruct(const CopyOnly&, MoveOnly&&) {}
+};
+
+TEST(NoDestructorTest, ForwardsArguments) {
+ CopyOnly copy_only;
+ MoveOnly move_only;
+
+ static NoDestructor<ForwardingTestStruct> test_forwarding(
+ copy_only, std::move(move_only));
+}
+
+TEST(NoDestructorTest, Accessors) {
+ static NoDestructor<std::string> awesome("awesome");
+
+ EXPECT_EQ("awesome", *awesome);
+ EXPECT_EQ(0, awesome->compare("awesome"));
+ EXPECT_EQ(0, awesome.get()->compare("awesome"));
+}
+
+} // namespace
+
+} // namespace base
diff --git a/chromium/base/numerics/checked_math.h b/chromium/base/numerics/checked_math.h
index 5294289e2ed..ede3344f82d 100644
--- a/chromium/base/numerics/checked_math.h
+++ b/chromium/base/numerics/checked_math.h
@@ -23,7 +23,7 @@ class CheckedNumeric {
public:
using type = T;
- constexpr CheckedNumeric() {}
+ constexpr CheckedNumeric() = default;
// Copy constructor.
template <typename Src>
diff --git a/chromium/base/observer_list.h b/chromium/base/observer_list.h
index ec1abbdd216..adf3e67d48b 100644
--- a/chromium/base/observer_list.h
+++ b/chromium/base/observer_list.h
@@ -8,7 +8,9 @@
#include <stddef.h>
#include <algorithm>
+#include <iterator>
#include <limits>
+#include <utility>
#include <vector>
#include "base/gtest_prod_util.h"
@@ -94,6 +96,12 @@ class ObserverList
// An iterator class that can be used to access the list of observers.
class Iter {
public:
+ using iterator_category = std::forward_iterator_tag;
+ using value_type = ObserverType;
+ using difference_type = ptrdiff_t;
+ using pointer = ObserverType*;
+ using reference = ObserverType&;
+
Iter() : index_(0), max_index_(0) {}
explicit Iter(const ObserverList* list)
@@ -147,6 +155,12 @@ class ObserverList
return *this;
}
+ Iter operator++(int) {
+ Iter it(*this);
+ ++(*this);
+ return it;
+ }
+
ObserverType* operator->() const {
ObserverType* const current = GetCurrent();
DCHECK(current);
@@ -201,7 +215,7 @@ class ObserverList
const_iterator end() const { return const_iterator(); }
- ObserverList() {}
+ ObserverList() = default;
explicit ObserverList(ObserverListPolicy policy) : policy_(policy) {}
~ObserverList() {
diff --git a/chromium/base/observer_list_unittest.cc b/chromium/base/observer_list_unittest.cc
index fd88a2d5e8a..9cbb168b746 100644
--- a/chromium/base/observer_list_unittest.cc
+++ b/chromium/base/observer_list_unittest.cc
@@ -257,6 +257,11 @@ TEST(ObserverListTest, BasicTest) {
it3 = it3;
EXPECT_EQ(it3, it1);
EXPECT_EQ(it3, it2);
+ // Iterator post increment.
+ ObserverList<Foo>::const_iterator it4 = it3++;
+ EXPECT_EQ(it4, it1);
+ EXPECT_EQ(it4, it2);
+ EXPECT_NE(it4, it3);
}
{
@@ -275,6 +280,11 @@ TEST(ObserverListTest, BasicTest) {
it3 = it3;
EXPECT_EQ(it3, it1);
EXPECT_EQ(it3, it2);
+ // Iterator post increment.
+ ObserverList<Foo>::iterator it4 = it3++;
+ EXPECT_EQ(it4, it1);
+ EXPECT_EQ(it4, it2);
+ EXPECT_NE(it4, it3);
}
for (auto& observer : observer_list)
@@ -891,8 +901,10 @@ TEST(ObserverListTest, IteratorOutlivesList) {
for (auto& observer : *observer_list)
observer.Observe(0);
- // If this test fails, there'll be Valgrind errors when this function goes out
- // of scope.
+
+ // There are no EXPECT* statements for this test, if we catch
+ // use-after-free errors for observer_list (eg with ASan) then
+ // this test has failed. See http://crbug.com/85296.
}
TEST(ObserverListTest, BasicStdIterator) {
diff --git a/chromium/base/optional.h b/chromium/base/optional.h
index 518cc71fc1b..f6619a57581 100644
--- a/chromium/base/optional.h
+++ b/chromium/base/optional.h
@@ -6,6 +6,7 @@
#define BASE_OPTIONAL_H_
#include <type_traits>
+#include <utility>
#include "base/logging.h"
@@ -32,28 +33,29 @@ constexpr nullopt_t nullopt(0);
namespace internal {
template <typename T, bool = std::is_trivially_destructible<T>::value>
-struct OptionalStorage {
+struct OptionalStorageBase {
// Initializing |empty_| here instead of using default member initializing
// to avoid errors in g++ 4.8.
- constexpr OptionalStorage() : empty_('\0') {}
-
- constexpr explicit OptionalStorage(const T& value)
- : is_null_(false), value_(value) {}
-
- constexpr explicit OptionalStorage(T&& value)
- : is_null_(false), value_(std::move(value)) {}
+ constexpr OptionalStorageBase() : empty_('\0') {}
template <class... Args>
- constexpr explicit OptionalStorage(base::in_place_t, Args&&... args)
+ constexpr explicit OptionalStorageBase(in_place_t, Args&&... args)
: is_null_(false), value_(std::forward<Args>(args)...) {}
// When T is not trivially destructible we must call its
// destructor before deallocating its memory.
- ~OptionalStorage() {
+ ~OptionalStorageBase() {
if (!is_null_)
value_.~T();
}
+ template <class... Args>
+ void Init(Args&&... args) {
+ DCHECK(is_null_);
+ ::new (&value_) T(std::forward<Args>(args)...);
+ is_null_ = false;
+ }
+
bool is_null_ = true;
union {
// |empty_| exists so that the union will always be initialized, even when
@@ -65,25 +67,26 @@ struct OptionalStorage {
};
template <typename T>
-struct OptionalStorage<T, true> {
+struct OptionalStorageBase<T, true /* trivially destructible */> {
// Initializing |empty_| here instead of using default member initializing
// to avoid errors in g++ 4.8.
- constexpr OptionalStorage() : empty_('\0') {}
-
- constexpr explicit OptionalStorage(const T& value)
- : is_null_(false), value_(value) {}
-
- constexpr explicit OptionalStorage(T&& value)
- : is_null_(false), value_(std::move(value)) {}
+ constexpr OptionalStorageBase() : empty_('\0') {}
template <class... Args>
- constexpr explicit OptionalStorage(base::in_place_t, Args&&... args)
+ constexpr explicit OptionalStorageBase(in_place_t, Args&&... args)
: is_null_(false), value_(std::forward<Args>(args)...) {}
// When T is trivially destructible (i.e. its destructor does nothing) there
// is no need to call it. Explicitly defaulting the destructor means it's not
// user-provided. Those two together make this destructor trivial.
- ~OptionalStorage() = default;
+ ~OptionalStorageBase() = default;
+
+ template <class... Args>
+ void Init(Args&&... args) {
+ DCHECK(is_null_);
+ ::new (&value_) T(std::forward<Args>(args)...);
+ is_null_ = false;
+ }
bool is_null_ = true;
union {
@@ -95,6 +98,155 @@ struct OptionalStorage<T, true> {
};
};
+// Implement conditional constexpr copy and move constructors. These are
+// constexpr if is_trivially_{copy,move}_constructible<T>::value is true
+// respectively. If each is true, the corresponding constructor is defined as
+// "= default;", which generates a constexpr constructor (In this case,
+// the condition of constexpr-ness is satisfied because the base class also has
+// compiler generated constexpr {copy,move} constructors). Note that
+// placement-new is prohibited in constexpr.
+template <typename T,
+ bool = std::is_trivially_copy_constructible<T>::value,
+ bool = std::is_trivially_move_constructible<T>::value>
+struct OptionalStorage : OptionalStorageBase<T> {
+ // This is no trivially {copy,move} constructible case. Other cases are
+ // defined below as specializations.
+
+ // Accessing the members of template base class requires explicit
+ // declaration.
+ using OptionalStorageBase<T>::is_null_;
+ using OptionalStorageBase<T>::value_;
+ using OptionalStorageBase<T>::Init;
+
+ // Inherit constructors (specifically, the in_place constructor).
+ using OptionalStorageBase<T>::OptionalStorageBase;
+
+ // User defined constructor deletes the default constructor.
+ // Define it explicitly.
+ OptionalStorage() = default;
+
+ OptionalStorage(const OptionalStorage& other) {
+ if (!other.is_null_)
+ Init(other.value_);
+ }
+
+ OptionalStorage(OptionalStorage&& other) {
+ if (!other.is_null_)
+ Init(std::move(other.value_));
+ }
+};
+
+template <typename T>
+struct OptionalStorage<T,
+ true /* trivially copy constructible */,
+ false /* trivially move constructible */>
+ : OptionalStorageBase<T> {
+ using OptionalStorageBase<T>::is_null_;
+ using OptionalStorageBase<T>::value_;
+ using OptionalStorageBase<T>::Init;
+ using OptionalStorageBase<T>::OptionalStorageBase;
+
+ OptionalStorage() = default;
+ OptionalStorage(const OptionalStorage& other) = default;
+
+ OptionalStorage(OptionalStorage&& other) {
+ if (!other.is_null_)
+ Init(std::move(other.value_));
+ }
+};
+
+template <typename T>
+struct OptionalStorage<T,
+ false /* trivially copy constructible */,
+ true /* trivially move constructible */>
+ : OptionalStorageBase<T> {
+ using OptionalStorageBase<T>::is_null_;
+ using OptionalStorageBase<T>::value_;
+ using OptionalStorageBase<T>::Init;
+ using OptionalStorageBase<T>::OptionalStorageBase;
+
+ OptionalStorage() = default;
+ OptionalStorage(OptionalStorage&& other) = default;
+
+ OptionalStorage(const OptionalStorage& other) {
+ if (!other.is_null_)
+ Init(other.value_);
+ }
+};
+
+template <typename T>
+struct OptionalStorage<T,
+ true /* trivially copy constructible */,
+ true /* trivially move constructible */>
+ : OptionalStorageBase<T> {
+ // If both trivially {copy,move} constructible are true, it is not necessary
+ // to use user-defined constructors. So, just inheriting constructors
+ // from the base class works.
+ using OptionalStorageBase<T>::OptionalStorageBase;
+};
+
+// Base class to support conditionally usable copy-/move- constructors
+// and assign operators.
+template <typename T>
+class OptionalBase {
+ // This class provides implementation rather than public API, so everything
+ // should be hidden. Often we use composition, but we cannot in this case
+ // because of C++ language restriction.
+ protected:
+ constexpr OptionalBase() = default;
+ constexpr OptionalBase(const OptionalBase& other) = default;
+ constexpr OptionalBase(OptionalBase&& other) = default;
+
+ template <class... Args>
+ constexpr explicit OptionalBase(in_place_t, Args&&... args)
+ : storage_(in_place, std::forward<Args>(args)...) {}
+
+ ~OptionalBase() = default;
+
+ OptionalBase& operator=(const OptionalBase& other) {
+ if (other.storage_.is_null_) {
+ FreeIfNeeded();
+ return *this;
+ }
+
+ InitOrAssign(other.storage_.value_);
+ return *this;
+ }
+
+ OptionalBase& operator=(OptionalBase&& other) {
+ if (other.storage_.is_null_) {
+ FreeIfNeeded();
+ return *this;
+ }
+
+ InitOrAssign(std::move(other.storage_.value_));
+ return *this;
+ }
+
+ void InitOrAssign(const T& value) {
+ if (storage_.is_null_)
+ storage_.Init(value);
+ else
+ storage_.value_ = value;
+ }
+
+ void InitOrAssign(T&& value) {
+ if (storage_.is_null_)
+ storage_.Init(std::move(value));
+ else
+ storage_.value_ = std::move(value);
+ }
+
+ void FreeIfNeeded() {
+ if (storage_.is_null_)
+ return;
+ storage_.value_.~T();
+ storage_.is_null_ = true;
+ }
+
+ OptionalStorage<T> storage_;
+};
+
} // namespace internal
// base::Optional is a Chromium version of the C++17 optional class:
@@ -104,64 +256,53 @@ struct OptionalStorage<T, true> {
// https://chromium.googlesource.com/chromium/src/+/master/docs/optional.md
//
// These are the differences between the specification and the implementation:
-// - The constructor and emplace method using initializer_list are not
-// implemented because 'initializer_list' is banned from Chromium.
// - Constructors do not use 'constexpr' as it is a C++14 extension.
// - 'constexpr' might be missing in some places for reasons specified locally.
// - No exceptions are thrown, because they are banned from Chromium.
// - All the non-members are in the 'base' namespace instead of 'std'.
template <typename T>
-class Optional {
+class Optional : public internal::OptionalBase<T> {
public:
using value_type = T;
- constexpr Optional() {}
+ // Defer default/copy/move constructor implementation to OptionalBase.
+ // TODO(hidehiko): Implement conditional enabling.
+ constexpr Optional() = default;
+ constexpr Optional(const Optional& other) = default;
+ constexpr Optional(Optional&& other) = default;
- constexpr Optional(base::nullopt_t) {}
+ constexpr Optional(nullopt_t) {}
- // TODO(dcheng): Make these constexpr iff T is trivially constructible.
- Optional(const Optional& other) {
- if (!other.storage_.is_null_)
- Init(other.value());
- }
-
- Optional(Optional&& other) {
- if (!other.storage_.is_null_)
- Init(std::move(other.value()));
- }
+ constexpr Optional(const T& value)
+ : internal::OptionalBase<T>(in_place, value) {}
- constexpr Optional(const T& value) : storage_(value) {}
-
- constexpr Optional(T&& value) : storage_(std::move(value)) {}
+ constexpr Optional(T&& value)
+ : internal::OptionalBase<T>(in_place, std::move(value)) {}
template <class... Args>
- constexpr explicit Optional(base::in_place_t, Args&&... args)
- : storage_(base::in_place, std::forward<Args>(args)...) {}
+ constexpr explicit Optional(in_place_t, Args&&... args)
+ : internal::OptionalBase<T>(in_place, std::forward<Args>(args)...) {}
+
+ template <
+ class U,
+ class... Args,
+ class = std::enable_if_t<std::is_constructible<value_type,
+ std::initializer_list<U>&,
+ Args...>::value>>
+ constexpr explicit Optional(in_place_t,
+ std::initializer_list<U> il,
+ Args&&... args)
+ : internal::OptionalBase<T>(in_place, il, std::forward<Args>(args)...) {}
~Optional() = default;
- Optional& operator=(base::nullopt_t) {
- FreeIfNeeded();
- return *this;
- }
+ // Defer copy-/move- assign operator implementation to OptionalBase.
+ // TODO(hidehiko): Implement conditional enabling.
+ Optional& operator=(const Optional& other) = default;
+ Optional& operator=(Optional&& other) = default;
- Optional& operator=(const Optional& other) {
- if (other.storage_.is_null_) {
- FreeIfNeeded();
- return *this;
- }
-
- InitOrAssign(other.value());
- return *this;
- }
-
- Optional& operator=(Optional&& other) {
- if (other.storage_.is_null_) {
- FreeIfNeeded();
- return *this;
- }
-
- InitOrAssign(std::move(other.value()));
+ Optional& operator=(nullopt_t) {
+ FreeIfNeeded();
return *this;
}
@@ -243,10 +384,10 @@ class Optional {
if (storage_.is_null_ != other.storage_.is_null_) {
if (storage_.is_null_) {
- Init(std::move(other.storage_.value_));
+ storage_.Init(std::move(other.storage_.value_));
other.FreeIfNeeded();
} else {
- other.Init(std::move(storage_.value_));
+ other.storage_.Init(std::move(storage_.value_));
FreeIfNeeded();
}
return;
@@ -264,201 +405,205 @@ class Optional {
template <class... Args>
void emplace(Args&&... args) {
FreeIfNeeded();
- Init(std::forward<Args>(args)...);
- }
-
- private:
- void Init(const T& value) {
- DCHECK(storage_.is_null_);
- new (&storage_.value_) T(value);
- storage_.is_null_ = false;
- }
-
- void Init(T&& value) {
- DCHECK(storage_.is_null_);
- new (&storage_.value_) T(std::move(value));
- storage_.is_null_ = false;
+ storage_.Init(std::forward<Args>(args)...);
}
- template <class... Args>
- void Init(Args&&... args) {
- DCHECK(storage_.is_null_);
- new (&storage_.value_) T(std::forward<Args>(args)...);
- storage_.is_null_ = false;
- }
-
- void InitOrAssign(const T& value) {
- if (storage_.is_null_)
- Init(value);
- else
- storage_.value_ = value;
- }
-
- void InitOrAssign(T&& value) {
- if (storage_.is_null_)
- Init(std::move(value));
- else
- storage_.value_ = std::move(value);
- }
-
- void FreeIfNeeded() {
- if (storage_.is_null_)
- return;
- storage_.value_.~T();
- storage_.is_null_ = true;
+ template <
+ class U,
+ class... Args,
+ class = std::enable_if_t<std::is_constructible<value_type,
+ std::initializer_list<U>&,
+ Args...>::value>>
+ T& emplace(std::initializer_list<U> il, Args&&... args) {
+ FreeIfNeeded();
+ storage_.Init(il, std::forward<Args>(args)...);
+ return storage_.value_;
}
- internal::OptionalStorage<T> storage_;
+ private:
+ // Accessing template base class's protected member needs explicit
+ // declaration to do so.
+ using internal::OptionalBase<T>::FreeIfNeeded;
+ using internal::OptionalBase<T>::InitOrAssign;
+ using internal::OptionalBase<T>::storage_;
};
-template <class T>
-constexpr bool operator==(const Optional<T>& lhs, const Optional<T>& rhs) {
- return !!lhs != !!rhs ? false : lhs == nullopt || (*lhs == *rhs);
-}
-
-template <class T>
-constexpr bool operator!=(const Optional<T>& lhs, const Optional<T>& rhs) {
- return !(lhs == rhs);
-}
-
-template <class T>
-constexpr bool operator<(const Optional<T>& lhs, const Optional<T>& rhs) {
- return rhs == nullopt ? false : (lhs == nullopt ? true : *lhs < *rhs);
-}
-
-template <class T>
-constexpr bool operator<=(const Optional<T>& lhs, const Optional<T>& rhs) {
- return !(rhs < lhs);
-}
-
-template <class T>
-constexpr bool operator>(const Optional<T>& lhs, const Optional<T>& rhs) {
- return rhs < lhs;
-}
-
-template <class T>
-constexpr bool operator>=(const Optional<T>& lhs, const Optional<T>& rhs) {
- return !(lhs < rhs);
-}
-
-template <class T>
-constexpr bool operator==(const Optional<T>& opt, base::nullopt_t) {
+// The following defines the comparison operators. The definitions follow
+// http://en.cppreference.com/w/cpp/utility/optional/operator_cmp
+// with bool() casts replaced by has_value() to meet the Chromium
+// style guide.
+template <class T, class U>
+constexpr bool operator==(const Optional<T>& lhs, const Optional<U>& rhs) {
+ if (lhs.has_value() != rhs.has_value())
+ return false;
+ if (!lhs.has_value())
+ return true;
+ return *lhs == *rhs;
+}
+
+template <class T, class U>
+constexpr bool operator!=(const Optional<T>& lhs, const Optional<U>& rhs) {
+ if (lhs.has_value() != rhs.has_value())
+ return true;
+ if (!lhs.has_value())
+ return false;
+ return *lhs != *rhs;
+}
+
+template <class T, class U>
+constexpr bool operator<(const Optional<T>& lhs, const Optional<U>& rhs) {
+ if (!rhs.has_value())
+ return false;
+ if (!lhs.has_value())
+ return true;
+ return *lhs < *rhs;
+}
+
+template <class T, class U>
+constexpr bool operator<=(const Optional<T>& lhs, const Optional<U>& rhs) {
+ if (!lhs.has_value())
+ return true;
+ if (!rhs.has_value())
+ return false;
+ return *lhs <= *rhs;
+}
+
+template <class T, class U>
+constexpr bool operator>(const Optional<T>& lhs, const Optional<U>& rhs) {
+ if (!lhs.has_value())
+ return false;
+ if (!rhs.has_value())
+ return true;
+ return *lhs > *rhs;
+}
+
+template <class T, class U>
+constexpr bool operator>=(const Optional<T>& lhs, const Optional<U>& rhs) {
+ if (!rhs.has_value())
+ return true;
+ if (!lhs.has_value())
+ return false;
+ return *lhs >= *rhs;
+}
+
+template <class T>
+constexpr bool operator==(const Optional<T>& opt, nullopt_t) {
return !opt;
}
template <class T>
-constexpr bool operator==(base::nullopt_t, const Optional<T>& opt) {
+constexpr bool operator==(nullopt_t, const Optional<T>& opt) {
return !opt;
}
template <class T>
-constexpr bool operator!=(const Optional<T>& opt, base::nullopt_t) {
- return !!opt;
+constexpr bool operator!=(const Optional<T>& opt, nullopt_t) {
+ return opt.has_value();
}
template <class T>
-constexpr bool operator!=(base::nullopt_t, const Optional<T>& opt) {
- return !!opt;
+constexpr bool operator!=(nullopt_t, const Optional<T>& opt) {
+ return opt.has_value();
}
template <class T>
-constexpr bool operator<(const Optional<T>& opt, base::nullopt_t) {
+constexpr bool operator<(const Optional<T>& opt, nullopt_t) {
return false;
}
template <class T>
-constexpr bool operator<(base::nullopt_t, const Optional<T>& opt) {
- return !!opt;
+constexpr bool operator<(nullopt_t, const Optional<T>& opt) {
+ return opt.has_value();
}
template <class T>
-constexpr bool operator<=(const Optional<T>& opt, base::nullopt_t) {
+constexpr bool operator<=(const Optional<T>& opt, nullopt_t) {
return !opt;
}
template <class T>
-constexpr bool operator<=(base::nullopt_t, const Optional<T>& opt) {
+constexpr bool operator<=(nullopt_t, const Optional<T>& opt) {
return true;
}
template <class T>
-constexpr bool operator>(const Optional<T>& opt, base::nullopt_t) {
- return !!opt;
+constexpr bool operator>(const Optional<T>& opt, nullopt_t) {
+ return opt.has_value();
}
template <class T>
-constexpr bool operator>(base::nullopt_t, const Optional<T>& opt) {
+constexpr bool operator>(nullopt_t, const Optional<T>& opt) {
return false;
}
template <class T>
-constexpr bool operator>=(const Optional<T>& opt, base::nullopt_t) {
+constexpr bool operator>=(const Optional<T>& opt, nullopt_t) {
return true;
}
template <class T>
-constexpr bool operator>=(base::nullopt_t, const Optional<T>& opt) {
+constexpr bool operator>=(nullopt_t, const Optional<T>& opt) {
return !opt;
}
-template <class T>
-constexpr bool operator==(const Optional<T>& opt, const T& value) {
- return opt != nullopt ? *opt == value : false;
+template <class T, class U>
+constexpr bool operator==(const Optional<T>& opt, const U& value) {
+ return opt.has_value() ? *opt == value : false;
}
-template <class T>
-constexpr bool operator==(const T& value, const Optional<T>& opt) {
- return opt == value;
+template <class T, class U>
+constexpr bool operator==(const U& value, const Optional<T>& opt) {
+ return opt.has_value() ? value == *opt : false;
}
-template <class T>
-constexpr bool operator!=(const Optional<T>& opt, const T& value) {
- return !(opt == value);
+template <class T, class U>
+constexpr bool operator!=(const Optional<T>& opt, const U& value) {
+ return opt.has_value() ? *opt != value : true;
}
-template <class T>
-constexpr bool operator!=(const T& value, const Optional<T>& opt) {
- return !(opt == value);
+template <class T, class U>
+constexpr bool operator!=(const U& value, const Optional<T>& opt) {
+ return opt.has_value() ? value != *opt : true;
}
-template <class T>
-constexpr bool operator<(const Optional<T>& opt, const T& value) {
- return opt != nullopt ? *opt < value : true;
+template <class T, class U>
+constexpr bool operator<(const Optional<T>& opt, const U& value) {
+ return opt.has_value() ? *opt < value : true;
}
-template <class T>
-constexpr bool operator<(const T& value, const Optional<T>& opt) {
- return opt != nullopt ? value < *opt : false;
+template <class T, class U>
+constexpr bool operator<(const U& value, const Optional<T>& opt) {
+ return opt.has_value() ? value < *opt : false;
}
-template <class T>
-constexpr bool operator<=(const Optional<T>& opt, const T& value) {
- return !(opt > value);
+template <class T, class U>
+constexpr bool operator<=(const Optional<T>& opt, const U& value) {
+ return opt.has_value() ? *opt <= value : true;
}
-template <class T>
-constexpr bool operator<=(const T& value, const Optional<T>& opt) {
- return !(value > opt);
+template <class T, class U>
+constexpr bool operator<=(const U& value, const Optional<T>& opt) {
+ return opt.has_value() ? value <= *opt : false;
}
-template <class T>
-constexpr bool operator>(const Optional<T>& opt, const T& value) {
- return value < opt;
+template <class T, class U>
+constexpr bool operator>(const Optional<T>& opt, const U& value) {
+ return opt.has_value() ? *opt > value : false;
}
-template <class T>
-constexpr bool operator>(const T& value, const Optional<T>& opt) {
- return opt < value;
+template <class T, class U>
+constexpr bool operator>(const U& value, const Optional<T>& opt) {
+ return opt.has_value() ? value > *opt : true;
}
-template <class T>
-constexpr bool operator>=(const Optional<T>& opt, const T& value) {
- return !(opt < value);
+template <class T, class U>
+constexpr bool operator>=(const Optional<T>& opt, const U& value) {
+ return opt.has_value() ? *opt >= value : false;
}
-template <class T>
-constexpr bool operator>=(const T& value, const Optional<T>& opt) {
- return !(value < opt);
+template <class T, class U>
+constexpr bool operator>=(const U& value, const Optional<T>& opt) {
+ return opt.has_value() ? value >= *opt : true;
}
template <class T>
@@ -466,6 +611,12 @@ constexpr Optional<typename std::decay<T>::type> make_optional(T&& value) {
return Optional<typename std::decay<T>::type>(std::forward<T>(value));
}
+template <class T, class U, class... Args>
+constexpr Optional<T> make_optional(std::initializer_list<U> il,
+ Args&&... args) {
+ return Optional<T>(in_place, il, std::forward<Args>(args)...);
+}
+
template <class T>
void swap(Optional<T>& lhs, Optional<T>& rhs) {
lhs.swap(rhs);
diff --git a/chromium/base/optional_unittest.cc b/chromium/base/optional_unittest.cc
index b47a91a5d22..91e63e75d0d 100644
--- a/chromium/base/optional_unittest.cc
+++ b/chromium/base/optional_unittest.cc
@@ -4,10 +4,16 @@
#include "base/optional.h"
+#include <memory>
#include <set>
+#include <string>
+#include <vector>
+#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
+using ::testing::ElementsAre;
+
namespace base {
namespace {
@@ -73,9 +79,11 @@ class TestObject {
}
bool operator==(const TestObject& other) const {
- return foo_ == other.foo_ && bar_ == other.bar_;
+ return std::tie(foo_, bar_) == std::tie(other.foo_, other.bar_);
}
+ bool operator!=(const TestObject& other) const { return !(*this == other); }
+
int foo() const { return foo_; }
State state() const { return state_; }
int move_ctors_count() const { return move_ctors_count_; }
@@ -96,6 +104,25 @@ class NonTriviallyDestructible {
~NonTriviallyDestructible() {}
};
+class DeletedDefaultConstructor {
+ public:
+ DeletedDefaultConstructor() = delete;
+ DeletedDefaultConstructor(int foo) : foo_(foo) {}
+
+ int foo() const { return foo_; }
+
+ private:
+ int foo_;
+};
+
+class DeleteNewOperators {
+ public:
+ void* operator new(size_t) = delete;
+ void* operator new(size_t, void*) = delete;
+ void* operator new[](size_t) = delete;
+ void* operator new[](size_t, void*) = delete;
+};
+
} // anonymous namespace
static_assert(std::is_trivially_destructible<Optional<int>>::value,
@@ -124,8 +151,8 @@ TEST(OptionalTest, DefaultConstructor) {
TEST(OptionalTest, CopyConstructor) {
{
- Optional<float> first(0.1f);
- Optional<float> other(first);
+ constexpr Optional<float> first(0.1f);
+ constexpr Optional<float> other(first);
EXPECT_TRUE(other);
EXPECT_EQ(other.value(), 0.1f);
@@ -180,8 +207,8 @@ TEST(OptionalTest, ValueConstructor) {
TEST(OptionalTest, MoveConstructor) {
{
- Optional<float> first(0.1f);
- Optional<float> second(std::move(first));
+ constexpr Optional<float> first(0.1f);
+ constexpr Optional<float> second(std::move(first));
EXPECT_TRUE(second);
EXPECT_EQ(second.value(), 0.1f);
@@ -273,6 +300,22 @@ TEST(OptionalTest, ConstructorForwardArguments) {
}
}
+TEST(OptionalTest, ConstructorForwardInitListAndArguments) {
+ {
+ Optional<std::vector<int>> opt(in_place, {3, 1});
+ EXPECT_TRUE(opt);
+ EXPECT_THAT(*opt, ElementsAre(3, 1));
+ EXPECT_EQ(2u, opt->size());
+ }
+
+ {
+ Optional<std::vector<int>> opt(in_place, {3, 1}, std::allocator<int>());
+ EXPECT_TRUE(opt);
+ EXPECT_THAT(*opt, ElementsAre(3, 1));
+ EXPECT_EQ(2u, opt->size());
+ }
+}
+
TEST(OptionalTest, NulloptConstructor) {
constexpr Optional<int> a(base::nullopt);
EXPECT_FALSE(a);
@@ -576,6 +619,24 @@ TEST(OptionalTest, Emplace) {
EXPECT_TRUE(!!a);
EXPECT_TRUE(TestObject(1, 0.2) == a.value());
}
+
+ {
+ Optional<std::vector<int>> a;
+ auto& ref = a.emplace({2, 3});
+ static_assert(std::is_same<std::vector<int>&, decltype(ref)>::value, "");
+ EXPECT_TRUE(a);
+ EXPECT_THAT(*a, ElementsAre(2, 3));
+ EXPECT_EQ(&ref, &*a);
+ }
+
+ {
+ Optional<std::vector<int>> a;
+ auto& ref = a.emplace({4, 5}, std::allocator<int>());
+ static_assert(std::is_same<std::vector<int>&, decltype(ref)>::value, "");
+ EXPECT_TRUE(a);
+ EXPECT_THAT(*a, ElementsAre(4, 5));
+ EXPECT_EQ(&ref, &*a);
+ }
}
TEST(OptionalTest, Equals_TwoEmpty) {
@@ -606,6 +667,13 @@ TEST(OptionalTest, Equals_TwoDifferent) {
EXPECT_FALSE(a == b);
}
+TEST(OptionalTest, Equals_DifferentType) {
+ Optional<int> a(0);
+ Optional<double> b(0);
+
+ EXPECT_TRUE(a == b);
+}
+
TEST(OptionalTest, NotEquals_TwoEmpty) {
Optional<int> a;
Optional<int> b;
@@ -634,6 +702,13 @@ TEST(OptionalTest, NotEquals_TwoDifferent) {
EXPECT_TRUE(a != b);
}
+TEST(OptionalTest, NotEquals_DifferentType) {
+ Optional<int> a(0);
+ Optional<double> b(0.0);
+
+ EXPECT_FALSE(a != b);
+}
+
TEST(OptionalTest, Less_LeftEmpty) {
Optional<int> l;
Optional<int> r(1);
@@ -676,6 +751,13 @@ TEST(OptionalTest, Less_BothValues) {
}
}
+TEST(OptionalTest, Less_DifferentType) {
+ Optional<int> l(1);
+ Optional<double> r(2.0);
+
+ EXPECT_TRUE(l < r);
+}
+
TEST(OptionalTest, LessEq_LeftEmpty) {
Optional<int> l;
Optional<int> r(1);
@@ -718,6 +800,13 @@ TEST(OptionalTest, LessEq_BothValues) {
}
}
+TEST(OptionalTest, LessEq_DifferentType) {
+ Optional<int> l(1);
+ Optional<double> r(2.0);
+
+ EXPECT_TRUE(l <= r);
+}
+
TEST(OptionalTest, Greater_BothEmpty) {
Optional<int> l;
Optional<int> r;
@@ -760,6 +849,13 @@ TEST(OptionalTest, Greater_BothValue) {
}
}
+TEST(OptionalTest, Greater_DifferentType) {
+ Optional<int> l(1);
+ Optional<double> r(2.0);
+
+ EXPECT_FALSE(l > r);
+}
+
TEST(OptionalTest, GreaterEq_BothEmpty) {
Optional<int> l;
Optional<int> r;
@@ -802,6 +898,13 @@ TEST(OptionalTest, GreaterEq_BothValue) {
}
}
+TEST(OptionalTest, GreaterEq_DifferentType) {
+ Optional<int> l(1);
+ Optional<double> r(2.0);
+
+ EXPECT_FALSE(l >= r);
+}
+
TEST(OptionalTest, OptNullEq) {
{
Optional<int> opt;
@@ -950,6 +1053,11 @@ TEST(OptionalTest, ValueEq_NotEmpty) {
}
}
+TEST(OptionalTest, ValueEq_DifferentType) {
+ Optional<int> opt(0);
+ EXPECT_TRUE(opt == 0.0);
+}
+
TEST(OptionalTest, EqValue_Empty) {
Optional<int> opt;
EXPECT_FALSE(1 == opt);
@@ -966,6 +1074,11 @@ TEST(OptionalTest, EqValue_NotEmpty) {
}
}
+TEST(OptionalTest, EqValue_DifferentType) {
+ Optional<int> opt(0);
+ EXPECT_TRUE(0.0 == opt);
+}
+
TEST(OptionalTest, ValueNotEq_Empty) {
Optional<int> opt;
EXPECT_TRUE(opt != 1);
@@ -982,6 +1095,11 @@ TEST(OptionalTest, ValueNotEq_NotEmpty) {
}
}
+TEST(OptionalTest, ValueNotEq_DifferentType) {
+ Optional<int> opt(0);
+ EXPECT_FALSE(opt != 0.0);
+}
+
TEST(OptionalTest, NotEqValue_Empty) {
Optional<int> opt;
EXPECT_TRUE(1 != opt);
@@ -998,6 +1116,11 @@ TEST(OptionalTest, NotEqValue_NotEmpty) {
}
}
+TEST(OptionalTest, NotEqValue_DifferentType) {
+ Optional<int> opt(0);
+ EXPECT_FALSE(0.0 != opt);
+}
+
TEST(OptionalTest, ValueLess_Empty) {
Optional<int> opt;
EXPECT_TRUE(opt < 1);
@@ -1018,6 +1141,11 @@ TEST(OptionalTest, ValueLess_NotEmpty) {
}
}
+TEST(OptionalTest, ValueLess_DifferentType) {
+ Optional<int> opt(0);
+ EXPECT_TRUE(opt < 1.0);
+}
+
TEST(OptionalTest, LessValue_Empty) {
Optional<int> opt;
EXPECT_FALSE(1 < opt);
@@ -1038,6 +1166,11 @@ TEST(OptionalTest, LessValue_NotEmpty) {
}
}
+TEST(OptionalTest, LessValue_DifferentType) {
+ Optional<int> opt(0);
+ EXPECT_FALSE(0.0 < opt);
+}
+
TEST(OptionalTest, ValueLessEq_Empty) {
Optional<int> opt;
EXPECT_TRUE(opt <= 1);
@@ -1058,6 +1191,11 @@ TEST(OptionalTest, ValueLessEq_NotEmpty) {
}
}
+TEST(OptionalTest, ValueLessEq_DifferentType) {
+ Optional<int> opt(0);
+ EXPECT_TRUE(opt <= 0.0);
+}
+
TEST(OptionalTest, LessEqValue_Empty) {
Optional<int> opt;
EXPECT_FALSE(1 <= opt);
@@ -1078,6 +1216,11 @@ TEST(OptionalTest, LessEqValue_NotEmpty) {
}
}
+TEST(OptionalTest, LessEqValue_DifferentType) {
+ Optional<int> opt(0);
+ EXPECT_TRUE(0.0 <= opt);
+}
+
TEST(OptionalTest, ValueGreater_Empty) {
Optional<int> opt;
EXPECT_FALSE(opt > 1);
@@ -1098,6 +1241,11 @@ TEST(OptionalTest, ValueGreater_NotEmpty) {
}
}
+TEST(OptionalTest, ValueGreater_DifferentType) {
+ Optional<int> opt(0);
+ EXPECT_FALSE(opt > 0.0);
+}
+
TEST(OptionalTest, GreaterValue_Empty) {
Optional<int> opt;
EXPECT_TRUE(1 > opt);
@@ -1118,6 +1266,11 @@ TEST(OptionalTest, GreaterValue_NotEmpty) {
}
}
+TEST(OptionalTest, GreaterValue_DifferentType) {
+ Optional<int> opt(0);
+ EXPECT_FALSE(0.0 > opt);
+}
+
TEST(OptionalTest, ValueGreaterEq_Empty) {
Optional<int> opt;
EXPECT_FALSE(opt >= 1);
@@ -1138,6 +1291,11 @@ TEST(OptionalTest, ValueGreaterEq_NotEmpty) {
}
}
+TEST(OptionalTest, ValueGreaterEq_DifferentType) {
+ Optional<int> opt(0);
+ EXPECT_TRUE(opt >= 0.0);
+}
+
TEST(OptionalTest, GreaterEqValue_Empty) {
Optional<int> opt;
EXPECT_TRUE(1 >= opt);
@@ -1158,6 +1316,11 @@ TEST(OptionalTest, GreaterEqValue_NotEmpty) {
}
}
+TEST(OptionalTest, GreaterEqValue_DifferentType) {
+ Optional<int> opt(0);
+ EXPECT_TRUE(0.0 >= opt);
+}
+
TEST(OptionalTest, NotEquals) {
{
Optional<float> a(0.1f);
@@ -1172,6 +1335,12 @@ TEST(OptionalTest, NotEquals) {
}
{
+ Optional<int> a(1);
+ Optional<double> b(2);
+ EXPECT_NE(a, b);
+ }
+
+ {
Optional<TestObject> a(TestObject(3, 0.1));
Optional<TestObject> b(TestObject(4, 1.0));
EXPECT_TRUE(a != b);
@@ -1239,6 +1408,15 @@ TEST(OptionalTest, MakeOptional) {
EXPECT_EQ(TestObject::State::MOVE_CONSTRUCTED,
base::make_optional(std::move(value))->state());
}
+
+ {
+ auto str1 = make_optional<std::string>({'1', '2', '3'});
+ EXPECT_EQ("123", *str1);
+
+ auto str2 =
+ make_optional<std::string>({'a', 'b', 'c'}, std::allocator<char>());
+ EXPECT_EQ("abc", *str2);
+ }
}
TEST(OptionalTest, NonMemberSwap_bothNoValue) {
@@ -1376,4 +1554,21 @@ TEST(OptionalTest, AssignFromRValue) {
EXPECT_EQ(1, a->move_ctors_count());
}
+TEST(OptionalTest, DontCallDefaultCtor) {
+ Optional<DeletedDefaultConstructor> a;
+ EXPECT_FALSE(a.has_value());
+
+ a = base::make_optional<DeletedDefaultConstructor>(42);
+ EXPECT_TRUE(a.has_value());
+ EXPECT_EQ(42, a->foo());
+}
+
+TEST(OptionalTest, DontCallNewMemberFunction) {
+ Optional<DeleteNewOperators> a;
+ EXPECT_FALSE(a.has_value());
+
+ a = DeleteNewOperators();
+ EXPECT_TRUE(a.has_value());
+}
+
} // namespace base
diff --git a/chromium/base/os_compat_android.cc b/chromium/base/os_compat_android.cc
index 1eb6536bb1e..c1a2ac86ddb 100644
--- a/chromium/base/os_compat_android.cc
+++ b/chromium/base/os_compat_android.cc
@@ -10,6 +10,7 @@
#include <math.h>
#include <sys/stat.h>
#include <sys/syscall.h>
+#include <unistd.h>
#if !defined(__LP64__)
#include <time64.h>
diff --git a/chromium/base/os_compat_android.h b/chromium/base/os_compat_android.h
index bfdf2c8e753..e33b1f7ac38 100644
--- a/chromium/base/os_compat_android.h
+++ b/chromium/base/os_compat_android.h
@@ -18,15 +18,4 @@ extern "C" char* mkdtemp(char* path);
// Android has no timegm().
extern "C" time_t timegm(struct tm* const t);
-// The lockf() function is not available on Android; we translate to flock().
-#define F_LOCK LOCK_EX
-#define F_ULOCK LOCK_UN
-inline int lockf(int fd, int cmd, off_t ignored_len) {
- return flock(fd, cmd);
-}
-
-// In case __USE_FILE_OFFSET64 is not used, we need to call pwrite64() instead
-// of pwrite()
-#define pwrite(fd, buf, count, offset) pwrite64(fd, buf, count, offset)
-
#endif // BASE_OS_COMPAT_ANDROID_H_
diff --git a/chromium/base/pickle.h b/chromium/base/pickle.h
index 4fe98d420c6..eff20923a30 100644
--- a/chromium/base/pickle.h
+++ b/chromium/base/pickle.h
@@ -263,6 +263,10 @@ class BASE_EXPORT Pickle {
}
protected:
+ // Returns the size of the header, which may be the default, a value set by
+ // the user, or one computed from raw data that was passed in.
+ size_t header_size() const { return header_size_; }
+
char* mutable_payload() {
return reinterpret_cast<char*>(header_) + header_size_;
}
diff --git a/chromium/base/pickle_unittest.cc b/chromium/base/pickle_unittest.cc
index 6c302572e4d..45630470609 100644
--- a/chromium/base/pickle_unittest.cc
+++ b/chromium/base/pickle_unittest.cc
@@ -22,12 +22,12 @@ namespace {
const bool testbool1 = false;
const bool testbool2 = true;
-const int testint = 2093847192;
-const long testlong = 1093847192;
+const int testint = 2'093'847'192;
+const long testlong = 1'093'847'192;
const uint16_t testuint16 = 32123;
const uint32_t testuint32 = 1593847192;
-const int64_t testint64 = -0x7E8CA9253104BDFCLL;
-const uint64_t testuint64 = 0xCE8CA9253104BDF7ULL;
+const int64_t testint64 = -0x7E8CA925'3104BDFCLL;
+const uint64_t testuint64 = 0xCE8CA925'3104BDF7ULL;
const float testfloat = 3.1415926935f;
const double testdouble = 2.71828182845904523;
const std::string teststring("Hello world"); // note non-aligned string length
diff --git a/chromium/base/posix/file_descriptor_shuffle.h b/chromium/base/posix/file_descriptor_shuffle.h
index 78e3a7d4933..2afdc28832f 100644
--- a/chromium/base/posix/file_descriptor_shuffle.h
+++ b/chromium/base/posix/file_descriptor_shuffle.h
@@ -42,7 +42,7 @@ class InjectionDelegate {
virtual void Close(int fd) = 0;
protected:
- virtual ~InjectionDelegate() {}
+ virtual ~InjectionDelegate() = default;
};
// An implementation of the InjectionDelegate interface using the file
diff --git a/chromium/base/power_monitor/power_observer.h b/chromium/base/power_monitor/power_observer.h
index 6be70bba9d5..0142b2af327 100644
--- a/chromium/base/power_monitor/power_observer.h
+++ b/chromium/base/power_monitor/power_observer.h
@@ -23,7 +23,7 @@ class BASE_EXPORT PowerObserver {
virtual void OnResume() {}
protected:
- virtual ~PowerObserver() {}
+ virtual ~PowerObserver() = default;
};
} // namespace base
diff --git a/chromium/base/process/launch.h b/chromium/base/process/launch.h
index 2e06c4c386a..caa05c41226 100644
--- a/chromium/base/process/launch.h
+++ b/chromium/base/process/launch.h
@@ -60,8 +60,8 @@ struct BASE_EXPORT LaunchOptions {
// pre_exec_delegate below)
class BASE_EXPORT PreExecDelegate {
public:
- PreExecDelegate() {}
- virtual ~PreExecDelegate() {}
+ PreExecDelegate() = default;
+ virtual ~PreExecDelegate() = default;
// Since this is to be run between fork and exec, and fork may have happened
// while multiple threads were running, this function needs to be async
@@ -165,16 +165,6 @@ struct BASE_EXPORT LaunchOptions {
// propagate FDs into the child process.
FileHandleMappingVector fds_to_remap;
- // Each element is an RLIMIT_* constant that should be raised to its
- // rlim_max. This pointer is owned by the caller and must live through
- // the call to LaunchProcess().
- const std::vector<int>* maximize_rlimits = nullptr;
-
- // If true, start the process in a new process group, instead of
- // inheriting the parent's process group. The pgid of the child process
- // will be the same as its pid.
- bool new_process_group = false;
-
#if defined(OS_LINUX)
// If non-zero, start the process using clone(), using flags as provided.
// Unlike in clone, clone_flags may not contain a custom termination signal
@@ -210,7 +200,7 @@ struct BASE_EXPORT LaunchOptions {
LP_CLONE_FDIO_NAMESPACE | LP_CLONE_DEFAULT_JOB | LP_CLONE_FDIO_STDIO;
#endif // defined(OS_FUCHSIA)
-#if defined(OS_POSIX)
+#if defined(OS_POSIX) && !defined(OS_FUCHSIA)
// If not empty, launch the specified executable instead of
// cmdline.GetProgram(). This is useful when it is necessary to pass a custom
// argv[0].
@@ -223,7 +213,17 @@ struct BASE_EXPORT LaunchOptions {
// code running in this delegate essentially needs to be async-signal safe
// (see man 7 signal for a list of allowed functions).
PreExecDelegate* pre_exec_delegate = nullptr;
-#endif // defined(OS_POSIX)
+
+ // Each element is an RLIMIT_* constant that should be raised to its
+ // rlim_max. This pointer is owned by the caller and must live through
+ // the call to LaunchProcess().
+ const std::vector<int>* maximize_rlimits = nullptr;
+
+ // If true, start the process in a new process group, instead of
+ // inheriting the parent's process group. The pgid of the child process
+ // will be the same as its pid.
+ bool new_process_group = false;
+#endif // defined(OS_POSIX) && !defined(OS_FUCHSIA)
#if defined(OS_CHROMEOS)
// If non-negative, the specified file descriptor will be set as the launched
@@ -364,9 +364,7 @@ BASE_EXPORT LaunchOptions LaunchOptionsForTest();
//
// This function uses the libc clone wrapper (which updates libc's pid cache)
// internally, so callers may expect things like getpid() to work correctly
-// after in both the child and parent. An exception is when this code is run
-// under Valgrind. Valgrind does not support the libc clone wrapper, so the libc
-// pid cache may be incorrect after this function is called under Valgrind.
+// after in both the child and parent.
//
// As with fork(), callers should be extremely careful when calling this while
// multiple threads are running, since at the time the fork happened, the
diff --git a/chromium/base/process/launch_fuchsia.cc b/chromium/base/process/launch_fuchsia.cc
index b79210f842b..86ea818a944 100644
--- a/chromium/base/process/launch_fuchsia.cc
+++ b/chromium/base/process/launch_fuchsia.cc
@@ -151,16 +151,24 @@ Process LaunchProcess(const std::vector<std::string>& argv,
launchpad_add_handle(lp, id_and_handle.handle, id_and_handle.id);
}
- zx_handle_t proc;
+ zx_handle_t process_handle;
const char* errmsg;
- zx_status_t status = launchpad_go(lp, &proc, &errmsg);
+ zx_status_t status = launchpad_go(lp, &process_handle, &errmsg);
if (status != ZX_OK) {
LOG(ERROR) << "launchpad_go failed: " << errmsg
<< ", status=" << zx_status_get_string(status);
return Process();
}
- return Process(proc);
+ Process process(process_handle);
+ if (options.wait) {
+ status = zx_object_wait_one(process.Handle(), ZX_TASK_TERMINATED,
+ ZX_TIME_INFINITE, nullptr);
+ DCHECK(status == ZX_OK)
+ << "zx_object_wait_one: " << zx_status_get_string(status);
+ }
+
+ return process;
}
bool GetAppOutput(const CommandLine& cl, std::string* output) {
@@ -193,4 +201,8 @@ bool GetAppOutputWithExitCode(const CommandLine& cl,
return GetAppOutputInternal(cl, false, output, exit_code);
}
+void RaiseProcessToHighPriority() {
+ // Fuchsia doesn't provide an API to change process priority.
+}
+
} // namespace base
diff --git a/chromium/base/process/launch_posix.cc b/chromium/base/process/launch_posix.cc
index 148ad4f54c7..90c2d830dbc 100644
--- a/chromium/base/process/launch_posix.cc
+++ b/chromium/base/process/launch_posix.cc
@@ -38,8 +38,6 @@
#include "base/process/process_metrics.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/waitable_event.h"
-#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
-#include "base/third_party/valgrind/valgrind.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_restrictions.h"
#include "build/build_config.h"
@@ -287,14 +285,8 @@ void CloseSuperfluousFds(const base::InjectiveMultimap& saved_mapping) {
if (fd == dir_fd)
continue;
- // When running under Valgrind, Valgrind opens several FDs for its
- // own use and will complain if we try to close them. All of
- // these FDs are >= |max_fds|, so we can check against that here
- // before closing. See https://bugs.kde.org/show_bug.cgi?id=191758
- if (fd < static_cast<int>(max_fds)) {
- int ret = IGNORE_EINTR(close(fd));
- DPCHECK(ret == 0);
- }
+ int ret = IGNORE_EINTR(close(fd));
+ DPCHECK(ret == 0);
}
}
@@ -751,25 +743,6 @@ pid_t ForkWithFlags(unsigned long flags, pid_t* ptid, pid_t* ctid) {
RAW_LOG(FATAL, "Invalid usage of ForkWithFlags");
}
- // Valgrind's clone implementation does not support specifiying a child_stack
- // without CLONE_VM, so we cannot use libc's clone wrapper when running under
- // Valgrind. As a result, the libc pid cache may be incorrect under Valgrind.
- // See crbug.com/442817 for more details.
- if (RunningOnValgrind()) {
- // See kernel/fork.c in Linux. There is different ordering of sys_clone
- // parameters depending on CONFIG_CLONE_BACKWARDS* configuration options.
-#if defined(ARCH_CPU_X86_64)
- return syscall(__NR_clone, flags, nullptr, ptid, ctid, nullptr);
-#elif defined(ARCH_CPU_X86) || defined(ARCH_CPU_ARM_FAMILY) || \
- defined(ARCH_CPU_MIPS_FAMILY) || defined(ARCH_CPU_S390_FAMILY) || \
- defined(ARCH_CPU_PPC64_FAMILY)
- // CONFIG_CLONE_BACKWARDS defined.
- return syscall(__NR_clone, flags, nullptr, ptid, nullptr, ctid);
-#else
-#error "Unsupported architecture"
-#endif
- }
-
jmp_buf env;
if (setjmp(env) == 0) {
return CloneAndLongjmpInChild(flags, ptid, ctid, &env);
diff --git a/chromium/base/process/memory_linux.cc b/chromium/base/process/memory_linux.cc
index a985b7a8ceb..7f760ab7572 100644
--- a/chromium/base/process/memory_linux.cc
+++ b/chromium/base/process/memory_linux.cc
@@ -18,6 +18,7 @@
#include "build/build_config.h"
#if defined(USE_TCMALLOC)
+#include "third_party/tcmalloc/chromium/src/config.h"
#include "third_party/tcmalloc/chromium/src/gperftools/tcmalloc.h"
#endif
diff --git a/chromium/base/process/memory_win.cc b/chromium/base/process/memory_win.cc
index 88244febfb4..2ae826b7e24 100644
--- a/chromium/base/process/memory_win.cc
+++ b/chromium/base/process/memory_win.cc
@@ -4,10 +4,11 @@
#include "base/process/memory.h"
+#include <windows.h> // Must be in front of other Windows header files.
+
#include <new.h>
#include <psapi.h>
#include <stddef.h>
-#include <windows.h>
// malloc_unchecked is required to implement UncheckedMalloc properly.
// It's provided by allocator_shim_win.cc but since that's not always present,
diff --git a/chromium/base/process/process_handle.h b/chromium/base/process/process_handle.h
index ae54b72d999..f3f63439df2 100644
--- a/chromium/base/process/process_handle.h
+++ b/chromium/base/process/process_handle.h
@@ -13,7 +13,7 @@
#include "build/build_config.h"
#if defined(OS_WIN)
-#include <windows.h>
+#include "base/win/windows_types.h"
#endif
#if defined(OS_FUCHSIA)
diff --git a/chromium/base/process/process_info_unittest.cc b/chromium/base/process/process_info_unittest.cc
index 727c3038ff3..f54d957bb65 100644
--- a/chromium/base/process/process_info_unittest.cc
+++ b/chromium/base/process/process_info_unittest.cc
@@ -11,11 +11,12 @@
namespace base {
// See https://crbug.com/726484 for Fuchsia.
-#if !defined(OS_IOS) && !defined(OS_FUCHSIA)
+// Cannot read boot time on Android O, crbug.com/788870.
+#if !defined(OS_IOS) && !defined(OS_FUCHSIA) && !defined(OS_ANDROID)
TEST(ProcessInfoTest, CreationTime) {
Time creation_time = CurrentProcessInfo::CreationTime();
ASSERT_FALSE(creation_time.is_null());
}
-#endif // !defined(OS_IOS) && !defined(OS_FUCHSIA)
+#endif // !defined(OS_IOS) && !defined(OS_FUCHSIA) && !defined(OS_ANDROID)
} // namespace base
diff --git a/chromium/base/process/process_info_win.cc b/chromium/base/process/process_info_win.cc
index b9864b02a9d..a33216b6935 100644
--- a/chromium/base/process/process_info_win.cc
+++ b/chromium/base/process/process_info_win.cc
@@ -15,16 +15,11 @@ namespace base {
namespace {
-base::win::ScopedHandle GetCurrentProcessToken() {
+HANDLE GetCurrentProcessToken() {
HANDLE process_token;
- BOOL result =
- OpenProcessToken(::GetCurrentProcess(), TOKEN_QUERY, &process_token);
- // These checks are turned on in release builds to debug
- // https://bugs.chromium.org/p/chromium/issues/detail?id=748431.
- PCHECK(result);
- CHECK(process_token != NULL);
- CHECK(process_token != INVALID_HANDLE_VALUE);
- return base::win::ScopedHandle(process_token);
+ OpenProcessToken(GetCurrentProcess(), TOKEN_QUERY, &process_token);
+ DCHECK(process_token != NULL && process_token != INVALID_HANDLE_VALUE);
+ return process_token;
}
} // namespace
@@ -43,11 +38,11 @@ const Time CurrentProcessInfo::CreationTime() {
}
IntegrityLevel GetCurrentProcessIntegrityLevel() {
- base::win::ScopedHandle scoped_process_token(GetCurrentProcessToken());
+ HANDLE process_token(GetCurrentProcessToken());
DWORD token_info_length = 0;
- if (::GetTokenInformation(scoped_process_token.Get(), TokenIntegrityLevel,
- nullptr, 0, &token_info_length) ||
+ if (::GetTokenInformation(process_token, TokenIntegrityLevel, nullptr, 0,
+ &token_info_length) ||
::GetLastError() != ERROR_INSUFFICIENT_BUFFER) {
return INTEGRITY_UNKNOWN;
}
@@ -55,9 +50,8 @@ IntegrityLevel GetCurrentProcessIntegrityLevel() {
auto token_label_bytes = std::make_unique<char[]>(token_info_length);
TOKEN_MANDATORY_LABEL* token_label =
reinterpret_cast<TOKEN_MANDATORY_LABEL*>(token_label_bytes.get());
- if (!::GetTokenInformation(scoped_process_token.Get(), TokenIntegrityLevel,
- token_label, token_info_length,
- &token_info_length)) {
+ if (!::GetTokenInformation(process_token, TokenIntegrityLevel, token_label,
+ token_info_length, &token_info_length)) {
return INTEGRITY_UNKNOWN;
}
@@ -82,14 +76,14 @@ IntegrityLevel GetCurrentProcessIntegrityLevel() {
}
bool IsCurrentProcessElevated() {
- base::win::ScopedHandle scoped_process_token(GetCurrentProcessToken());
+ HANDLE process_token(GetCurrentProcessToken());
// Unlike TOKEN_ELEVATION_TYPE which returns TokenElevationTypeDefault when
// UAC is turned off, TOKEN_ELEVATION returns whether the process is elevated.
DWORD size;
TOKEN_ELEVATION elevation;
- if (!GetTokenInformation(scoped_process_token.Get(), TokenElevation,
- &elevation, sizeof(elevation), &size)) {
+ if (!GetTokenInformation(process_token, TokenElevation, &elevation,
+ sizeof(elevation), &size)) {
PLOG(ERROR) << "GetTokenInformation() failed";
return false;
}
diff --git a/chromium/base/process/process_iterator.h b/chromium/base/process/process_iterator.h
index 0d1f1a62445..54a4ec610cb 100644
--- a/chromium/base/process/process_iterator.h
+++ b/chromium/base/process/process_iterator.h
@@ -68,7 +68,7 @@ class ProcessFilter {
virtual bool Includes(const ProcessEntry& entry) const = 0;
protected:
- virtual ~ProcessFilter() {}
+ virtual ~ProcessFilter() = default;
};
// This class provides a way to iterate through a list of processes on the
diff --git a/chromium/base/process/process_metrics.h b/chromium/base/process/process_metrics.h
index 52a5912a288..29ee0ab31c9 100644
--- a/chromium/base/process/process_metrics.h
+++ b/chromium/base/process/process_metrics.h
@@ -33,13 +33,20 @@
#if defined(OS_WIN)
#include "base/win/scoped_handle.h"
+#include "base/win/windows_types.h"
#endif
namespace base {
#if defined(OS_WIN)
+// _WINDOWS_ will be defined if Windows.h was included - include Windows.h first
+// to get access to the full struct definition.
+#if defined(_WINDOWS_)
struct IoCounters : public IO_COUNTERS {
};
+#else
+struct IoCounters;
+#endif
#elif defined(OS_POSIX)
struct IoCounters {
uint64_t ReadOperationCount;
@@ -245,7 +252,7 @@ class BASE_EXPORT ProcessMetrics {
// otherwise.
bool GetIOCounters(IoCounters* io_counters) const;
-#if defined(OS_LINUX) || defined(OS_AIX)
+#if defined(OS_LINUX) || defined(OS_AIX) || defined(OS_ANDROID)
// Returns the number of file descriptors currently open by the process, or
// -1 on error.
int GetOpenFdCount() const;
@@ -253,7 +260,7 @@ class BASE_EXPORT ProcessMetrics {
// Returns the soft limit of file descriptors that can be opened by the
// process, or -1 on error.
int GetOpenFdSoftLimit() const;
-#endif // defined(OS_LINUX) || defined(OS_AIX)
+#endif // defined(OS_LINUX) || defined(OS_AIX) || defined(OS_ANDROID)
#if defined(OS_LINUX) || defined(OS_ANDROID)
// Bytes of swap as reported by /proc/[pid]/status.
diff --git a/chromium/base/process/process_metrics_linux.cc b/chromium/base/process/process_metrics_linux.cc
index 1fb76e5a2fa..d8c1e1c15fa 100644
--- a/chromium/base/process/process_metrics_linux.cc
+++ b/chromium/base/process/process_metrics_linux.cc
@@ -352,7 +352,6 @@ bool ProcessMetrics::GetPageFaultCounts(PageFaultCounts* counts) const {
}
#endif // defined(OS_LINUX) || defined(OS_ANDROID)
-#if defined(OS_LINUX) || defined(OS_AIX)
int ProcessMetrics::GetOpenFdCount() const {
// Use /proc/<pid>/fd to count the number of entries there.
FilePath fd_path = internal::GetProcPidDir(process_).Append("fd");
@@ -395,7 +394,6 @@ int ProcessMetrics::GetOpenFdSoftLimit() const {
}
return -1;
}
-#endif // defined(OS_LINUX) || defined(OS_AIX)
ProcessMetrics::ProcessMetrics(ProcessHandle process)
: process_(process),
diff --git a/chromium/base/process/process_metrics_unittest.cc b/chromium/base/process/process_metrics_unittest.cc
index fdf9c96a909..c622050a413 100644
--- a/chromium/base/process/process_metrics_unittest.cc
+++ b/chromium/base/process/process_metrics_unittest.cc
@@ -550,7 +550,8 @@ TEST(ProcessMetricsTest, DISABLED_GetNumberOfThreads) {
#if defined(OS_LINUX)
namespace {
-// Keep these in sync so the GetOpenFdCount test can refer to correct test main.
+// Keep these in sync so the GetChildOpenFdCount test can refer to correct test
+// main.
#define ChildMain ChildFdCount
#define ChildMainString "ChildFdCount"
@@ -596,7 +597,7 @@ MULTIPROCESS_TEST_MAIN(ChildMain) {
} // namespace
-TEST(ProcessMetricsTest, GetOpenFdCount) {
+TEST(ProcessMetricsTest, GetChildOpenFdCount) {
ScopedTempDir temp_dir;
ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
const FilePath temp_path = temp_dir.GetPath();
@@ -615,6 +616,20 @@ TEST(ProcessMetricsTest, GetOpenFdCount) {
#endif // defined(OS_LINUX)
#if defined(OS_ANDROID) || defined(OS_LINUX)
+
+TEST(ProcessMetricsTest, GetOpenFdCount) {
+ std::unique_ptr<base::ProcessMetrics> metrics(
+ base::ProcessMetrics::CreateProcessMetrics(
+ base::GetCurrentProcessHandle()));
+ int fd_count = metrics->GetOpenFdCount();
+ EXPECT_GT(fd_count, 0);
+ ScopedFILE file(fopen("/proc/self/statm", "r"));
+ EXPECT_TRUE(file);
+ int new_fd_count = metrics->GetOpenFdCount();
+ EXPECT_GT(new_fd_count, 0);
+ EXPECT_EQ(new_fd_count, fd_count + 1);
+}
+
TEST(ProcessMetricsTestLinux, GetPageFaultCounts) {
std::unique_ptr<base::ProcessMetrics> process_metrics(
base::ProcessMetrics::CreateProcessMetrics(
diff --git a/chromium/base/process/process_metrics_win.cc b/chromium/base/process/process_metrics_win.cc
index e5643ecb814..76f09f52f8d 100644
--- a/chromium/base/process/process_metrics_win.cc
+++ b/chromium/base/process/process_metrics_win.cc
@@ -2,9 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Must be included before process_metrics.h to get full IoCounters definition
+#include <windows.h>
+
#include "base/process/process_metrics.h"
-#include <windows.h>
#include <psapi.h>
#include <stddef.h>
#include <stdint.h>
diff --git a/chromium/base/process/process_posix.cc b/chromium/base/process/process_posix.cc
index 2481e26ba16..3e63cd069d1 100644
--- a/chromium/base/process/process_posix.cc
+++ b/chromium/base/process/process_posix.cc
@@ -15,7 +15,6 @@
#include "base/logging.h"
#include "base/posix/eintr_wrapper.h"
#include "base/process/kill.h"
-#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/threading/thread_restrictions.h"
#include "build/build_config.h"
@@ -315,13 +314,6 @@ bool Process::Terminate(int exit_code, bool wait) const {
bool result = kill(process_, SIGTERM) == 0;
if (result && wait) {
int tries = 60;
-
- if (RunningOnValgrind()) {
- // Wait for some extra time when running under Valgrind since the child
- // processes may take some time doing leak checking.
- tries *= 2;
- }
-
unsigned sleep_ms = 4;
// The process may not end immediately due to pending I/O
diff --git a/chromium/base/process/process_util_unittest.cc b/chromium/base/process/process_util_unittest.cc
index 43f2898801f..e0b285e9ce3 100644
--- a/chromium/base/process/process_util_unittest.cc
+++ b/chromium/base/process/process_util_unittest.cc
@@ -29,7 +29,6 @@
#include "base/synchronization/waitable_event.h"
#include "base/test/multiprocess_test.h"
#include "base/test/test_timeouts.h"
-#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread.h"
#include "build/build_config.h"
@@ -69,7 +68,7 @@
#include <zircon/syscalls.h>
#endif
-using base::FilePath;
+namespace base {
namespace {
@@ -99,7 +98,7 @@ const int kExpectedStillRunningExitCode = 0;
void WaitToDie(const char* filename) {
FILE* fp;
do {
- base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(10));
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(10));
fp = fopen(filename, "r");
} while (!fp);
fclose(fp);
@@ -116,17 +115,17 @@ void SignalChildren(const char* filename) {
// libraries closing the fds, child deadlocking). This is a simple
// case, so it's not worth the risk. Using wait loops is discouraged
// in most instances.
-base::TerminationStatus WaitForChildTermination(base::ProcessHandle handle,
- int* exit_code) {
+TerminationStatus WaitForChildTermination(ProcessHandle handle,
+ int* exit_code) {
// Now we wait until the result is something other than STILL_RUNNING.
- base::TerminationStatus status = base::TERMINATION_STATUS_STILL_RUNNING;
- const base::TimeDelta kInterval = base::TimeDelta::FromMilliseconds(20);
- base::TimeDelta waited;
+ TerminationStatus status = TERMINATION_STATUS_STILL_RUNNING;
+ const TimeDelta kInterval = TimeDelta::FromMilliseconds(20);
+ TimeDelta waited;
do {
- status = base::GetTerminationStatus(handle, exit_code);
- base::PlatformThread::Sleep(kInterval);
+ status = GetTerminationStatus(handle, exit_code);
+ PlatformThread::Sleep(kInterval);
waited += kInterval;
- } while (status == base::TERMINATION_STATUS_STILL_RUNNING &&
+ } while (status == TERMINATION_STATUS_STILL_RUNNING &&
waited < TestTimeouts::action_max_timeout());
return status;
@@ -136,7 +135,7 @@ base::TerminationStatus WaitForChildTermination(base::ProcessHandle handle,
const int kSuccess = 0;
-class ProcessUtilTest : public base::MultiProcessTest {
+class ProcessUtilTest : public MultiProcessTest {
public:
#if defined(OS_POSIX)
// Spawn a child process that counts how many file descriptors are open.
@@ -152,7 +151,7 @@ std::string ProcessUtilTest::GetSignalFilePath(const char* filename) {
return filename;
#else
FilePath tmp_dir;
- PathService::Get(base::DIR_CACHE, &tmp_dir);
+ PathService::Get(DIR_CACHE, &tmp_dir);
tmp_dir = tmp_dir.Append(filename);
return tmp_dir.value();
#endif
@@ -164,7 +163,7 @@ MULTIPROCESS_TEST_MAIN(SimpleChildProcess) {
// TODO(viettrungluu): This should be in a "MultiProcessTestTest".
TEST_F(ProcessUtilTest, SpawnChild) {
- base::Process process = SpawnChild("SimpleChildProcess");
+ Process process = SpawnChild("SimpleChildProcess");
ASSERT_TRUE(process.IsValid());
int exit_code;
EXPECT_TRUE(process.WaitForExitWithTimeout(TestTimeouts::action_max_timeout(),
@@ -180,7 +179,7 @@ TEST_F(ProcessUtilTest, KillSlowChild) {
const std::string signal_file =
ProcessUtilTest::GetSignalFilePath(kSignalFileSlow);
remove(signal_file.c_str());
- base::Process process = SpawnChild("SlowChildProcess");
+ Process process = SpawnChild("SlowChildProcess");
ASSERT_TRUE(process.IsValid());
SignalChildren(signal_file.c_str());
int exit_code;
@@ -194,19 +193,19 @@ TEST_F(ProcessUtilTest, DISABLED_GetTerminationStatusExit) {
const std::string signal_file =
ProcessUtilTest::GetSignalFilePath(kSignalFileSlow);
remove(signal_file.c_str());
- base::Process process = SpawnChild("SlowChildProcess");
+ Process process = SpawnChild("SlowChildProcess");
ASSERT_TRUE(process.IsValid());
int exit_code = 42;
- EXPECT_EQ(base::TERMINATION_STATUS_STILL_RUNNING,
- base::GetTerminationStatus(process.Handle(), &exit_code));
+ EXPECT_EQ(TERMINATION_STATUS_STILL_RUNNING,
+ GetTerminationStatus(process.Handle(), &exit_code));
EXPECT_EQ(kExpectedStillRunningExitCode, exit_code);
SignalChildren(signal_file.c_str());
exit_code = 42;
- base::TerminationStatus status =
+ TerminationStatus status =
WaitForChildTermination(process.Handle(), &exit_code);
- EXPECT_EQ(base::TERMINATION_STATUS_NORMAL_TERMINATION, status);
+ EXPECT_EQ(TERMINATION_STATUS_NORMAL_TERMINATION, status);
EXPECT_EQ(kSuccess, exit_code);
remove(signal_file.c_str());
}
@@ -215,13 +214,13 @@ TEST_F(ProcessUtilTest, DISABLED_GetTerminationStatusExit) {
// LaunchOptions::current_directory.
#if !defined(OS_ANDROID)
MULTIPROCESS_TEST_MAIN(CheckCwdProcess) {
- base::FilePath expected;
- CHECK(base::GetTempDir(&expected));
+ FilePath expected;
+ CHECK(GetTempDir(&expected));
expected = MakeAbsoluteFilePath(expected);
CHECK(!expected.empty());
- base::FilePath actual;
- CHECK(base::GetCurrentDirectory(&actual));
+ FilePath actual;
+ CHECK(GetCurrentDirectory(&actual));
actual = MakeAbsoluteFilePath(actual);
CHECK(!actual.empty());
@@ -233,13 +232,13 @@ MULTIPROCESS_TEST_MAIN(CheckCwdProcess) {
TEST_F(ProcessUtilTest, CurrentDirectory) {
// TODO(rickyz): Add support for passing arguments to multiprocess children,
// then create a special directory for this test.
- base::FilePath tmp_dir;
- ASSERT_TRUE(base::GetTempDir(&tmp_dir));
+ FilePath tmp_dir;
+ ASSERT_TRUE(GetTempDir(&tmp_dir));
- base::LaunchOptions options;
+ LaunchOptions options;
options.current_directory = tmp_dir;
- base::Process process(SpawnChildWithOptions("CheckCwdProcess", options));
+ Process process(SpawnChildWithOptions("CheckCwdProcess", options));
ASSERT_TRUE(process.IsValid());
int exit_code = 42;
@@ -251,11 +250,11 @@ TEST_F(ProcessUtilTest, CurrentDirectory) {
#if defined(OS_WIN)
// TODO(cpu): figure out how to test this in other platforms.
TEST_F(ProcessUtilTest, GetProcId) {
- base::ProcessId id1 = base::GetProcId(GetCurrentProcess());
+ ProcessId id1 = GetProcId(GetCurrentProcess());
EXPECT_NE(0ul, id1);
- base::Process process = SpawnChild("SimpleChildProcess");
+ Process process = SpawnChild("SimpleChildProcess");
ASSERT_TRUE(process.IsValid());
- base::ProcessId id2 = process.Pid();
+ ProcessId id2 = process.Pid();
EXPECT_NE(0ul, id2);
EXPECT_NE(id1, id2);
}
@@ -299,19 +298,19 @@ TEST_F(ProcessUtilTest, MAYBE_GetTerminationStatusCrash) {
const std::string signal_file =
ProcessUtilTest::GetSignalFilePath(kSignalFileCrash);
remove(signal_file.c_str());
- base::Process process = SpawnChild("CrashingChildProcess");
+ Process process = SpawnChild("CrashingChildProcess");
ASSERT_TRUE(process.IsValid());
int exit_code = 42;
- EXPECT_EQ(base::TERMINATION_STATUS_STILL_RUNNING,
- base::GetTerminationStatus(process.Handle(), &exit_code));
+ EXPECT_EQ(TERMINATION_STATUS_STILL_RUNNING,
+ GetTerminationStatus(process.Handle(), &exit_code));
EXPECT_EQ(kExpectedStillRunningExitCode, exit_code);
SignalChildren(signal_file.c_str());
exit_code = 42;
- base::TerminationStatus status =
+ TerminationStatus status =
WaitForChildTermination(process.Handle(), &exit_code);
- EXPECT_EQ(base::TERMINATION_STATUS_PROCESS_CRASHED, status);
+ EXPECT_EQ(TERMINATION_STATUS_PROCESS_CRASHED, status);
#if defined(OS_WIN)
EXPECT_EQ(static_cast<int>(0xc0000005), exit_code);
@@ -323,7 +322,7 @@ TEST_F(ProcessUtilTest, MAYBE_GetTerminationStatusCrash) {
#endif
// Reset signal handlers back to "normal".
- base::debug::EnableInProcessStackDumping();
+ debug::EnableInProcessStackDumping();
remove(signal_file.c_str());
}
#endif // !defined(OS_MACOSX) && !defined(OS_ANDROID)
@@ -354,22 +353,22 @@ TEST_F(ProcessUtilTest, GetTerminationStatusSigKill) {
const std::string signal_file =
ProcessUtilTest::GetSignalFilePath(kSignalFileKill);
remove(signal_file.c_str());
- base::Process process = SpawnChild("KilledChildProcess");
+ Process process = SpawnChild("KilledChildProcess");
ASSERT_TRUE(process.IsValid());
int exit_code = 42;
- EXPECT_EQ(base::TERMINATION_STATUS_STILL_RUNNING,
- base::GetTerminationStatus(process.Handle(), &exit_code));
+ EXPECT_EQ(TERMINATION_STATUS_STILL_RUNNING,
+ GetTerminationStatus(process.Handle(), &exit_code));
EXPECT_EQ(kExpectedStillRunningExitCode, exit_code);
SignalChildren(signal_file.c_str());
exit_code = 42;
- base::TerminationStatus status =
+ TerminationStatus status =
WaitForChildTermination(process.Handle(), &exit_code);
#if defined(OS_CHROMEOS)
- EXPECT_EQ(base::TERMINATION_STATUS_PROCESS_WAS_KILLED_BY_OOM, status);
+ EXPECT_EQ(TERMINATION_STATUS_PROCESS_WAS_KILLED_BY_OOM, status);
#else
- EXPECT_EQ(base::TERMINATION_STATUS_PROCESS_WAS_KILLED, status);
+ EXPECT_EQ(TERMINATION_STATUS_PROCESS_WAS_KILLED, status);
#endif
#if defined(OS_WIN)
@@ -388,19 +387,19 @@ TEST_F(ProcessUtilTest, GetTerminationStatusSigTerm) {
const std::string signal_file =
ProcessUtilTest::GetSignalFilePath(kSignalFileTerm);
remove(signal_file.c_str());
- base::Process process = SpawnChild("TerminatedChildProcess");
+ Process process = SpawnChild("TerminatedChildProcess");
ASSERT_TRUE(process.IsValid());
int exit_code = 42;
- EXPECT_EQ(base::TERMINATION_STATUS_STILL_RUNNING,
- base::GetTerminationStatus(process.Handle(), &exit_code));
+ EXPECT_EQ(TERMINATION_STATUS_STILL_RUNNING,
+ GetTerminationStatus(process.Handle(), &exit_code));
EXPECT_EQ(kExpectedStillRunningExitCode, exit_code);
SignalChildren(signal_file.c_str());
exit_code = 42;
- base::TerminationStatus status =
+ TerminationStatus status =
WaitForChildTermination(process.Handle(), &exit_code);
- EXPECT_EQ(base::TERMINATION_STATUS_PROCESS_WAS_KILLED, status);
+ EXPECT_EQ(TERMINATION_STATUS_PROCESS_WAS_KILLED, status);
int signaled = WIFSIGNALED(exit_code);
EXPECT_NE(0, signaled);
@@ -424,46 +423,46 @@ TEST_F(ProcessUtilTest, GetAppOutput) {
expected += "\r\n";
FilePath cmd(L"cmd.exe");
- base::CommandLine cmd_line(cmd);
+ CommandLine cmd_line(cmd);
cmd_line.AppendArg("/c");
cmd_line.AppendArg("echo " + message + "");
std::string output;
- ASSERT_TRUE(base::GetAppOutput(cmd_line, &output));
+ ASSERT_TRUE(GetAppOutput(cmd_line, &output));
EXPECT_EQ(expected, output);
// Let's make sure stderr is ignored.
- base::CommandLine other_cmd_line(cmd);
+ CommandLine other_cmd_line(cmd);
other_cmd_line.AppendArg("/c");
// http://msdn.microsoft.com/library/cc772622.aspx
cmd_line.AppendArg("echo " + message + " >&2");
output.clear();
- ASSERT_TRUE(base::GetAppOutput(other_cmd_line, &output));
+ ASSERT_TRUE(GetAppOutput(other_cmd_line, &output));
EXPECT_EQ("", output);
}
// TODO(estade): if possible, port this test.
TEST_F(ProcessUtilTest, LaunchAsUser) {
- base::UserTokenHandle token;
+ UserTokenHandle token;
ASSERT_TRUE(OpenProcessToken(GetCurrentProcess(), TOKEN_ALL_ACCESS, &token));
- base::LaunchOptions options;
+ LaunchOptions options;
options.as_user = token;
- EXPECT_TRUE(base::LaunchProcess(MakeCmdLine("SimpleChildProcess"),
- options).IsValid());
+ EXPECT_TRUE(
+ LaunchProcess(MakeCmdLine("SimpleChildProcess"), options).IsValid());
}
static const char kEventToTriggerHandleSwitch[] = "event-to-trigger-handle";
MULTIPROCESS_TEST_MAIN(TriggerEventChildProcess) {
std::string handle_value_string =
- base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
+ CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
kEventToTriggerHandleSwitch);
CHECK(!handle_value_string.empty());
uint64_t handle_value_uint64;
- CHECK(base::StringToUint64(handle_value_string, &handle_value_uint64));
+ CHECK(StringToUint64(handle_value_string, &handle_value_uint64));
// Give ownership of the handle to |event|.
- base::WaitableEvent event(base::win::ScopedHandle(
- reinterpret_cast<HANDLE>(handle_value_uint64)));
+ WaitableEvent event(
+ win::ScopedHandle(reinterpret_cast<HANDLE>(handle_value_uint64)));
event.Signal();
@@ -478,18 +477,18 @@ TEST_F(ProcessUtilTest, InheritSpecifiedHandles) {
security_attributes.bInheritHandle = true;
// Takes ownership of the event handle.
- base::WaitableEvent event(base::win::ScopedHandle(
- CreateEvent(&security_attributes, true, false, NULL)));
- base::LaunchOptions options;
+ WaitableEvent event(
+ win::ScopedHandle(CreateEvent(&security_attributes, true, false, NULL)));
+ LaunchOptions options;
options.handles_to_inherit.emplace_back(event.handle());
- base::CommandLine cmd_line = MakeCmdLine("TriggerEventChildProcess");
+ CommandLine cmd_line = MakeCmdLine("TriggerEventChildProcess");
cmd_line.AppendSwitchASCII(
kEventToTriggerHandleSwitch,
- base::Uint64ToString(reinterpret_cast<uint64_t>(event.handle())));
+ NumberToString(reinterpret_cast<uint64_t>(event.handle())));
// Launch the process and wait for it to trigger the event.
- ASSERT_TRUE(base::LaunchProcess(cmd_line, options).IsValid());
+ ASSERT_TRUE(LaunchProcess(cmd_line, options).IsValid());
EXPECT_TRUE(event.TimedWait(TestTimeouts::action_max_timeout()));
}
#endif // defined(OS_WIN)
@@ -618,9 +617,9 @@ int ProcessUtilTest::CountOpenFDsInChild() {
if (pipe(fds) < 0)
NOTREACHED();
- base::LaunchOptions options;
+ LaunchOptions options;
options.fds_to_remap.emplace_back(fds[1], kChildPipe);
- base::Process process =
+ Process process =
SpawnChildWithOptions("ProcessUtilsLeakFDChildProcess", options);
CHECK(process.IsValid());
int ret = IGNORE_EINTR(close(fds[1]));
@@ -634,9 +633,9 @@ int ProcessUtilTest::CountOpenFDsInChild() {
#if defined(THREAD_SANITIZER)
// Compiler-based ThreadSanitizer makes this test slow.
- base::TimeDelta timeout = base::TimeDelta::FromSeconds(3);
+ TimeDelta timeout = TimeDelta::FromSeconds(3);
#else
- base::TimeDelta timeout = base::TimeDelta::FromSeconds(1);
+ TimeDelta timeout = TimeDelta::FromSeconds(1);
#endif
int exit_code;
CHECK(process.WaitForExitWithTimeout(timeout, &exit_code));
@@ -704,10 +703,9 @@ TEST_F(ProcessUtilTest, FDRemappingIncludesStdio) {
ASSERT_EQ(STDOUT_FILENO, result);
// Launch the test process, which should inherit our pipe stdio.
- base::LaunchOptions options;
+ LaunchOptions options;
options.fds_to_remap.emplace_back(dev_null, dev_null);
- base::Process process =
- SpawnChildWithOptions("ProcessUtilsVerifyStdio", options);
+ Process process = SpawnChildWithOptions("ProcessUtilsVerifyStdio", options);
ASSERT_TRUE(process.IsValid());
// Restore stdio, so we can output stuff.
@@ -734,8 +732,8 @@ TEST_F(ProcessUtilTest, FDRemappingIncludesStdio) {
ASSERT_EQ(0, result);
int exit_code;
- ASSERT_TRUE(process.WaitForExitWithTimeout(base::TimeDelta::FromSeconds(5),
- &exit_code));
+ ASSERT_TRUE(
+ process.WaitForExitWithTimeout(TimeDelta::FromSeconds(5), &exit_code));
EXPECT_EQ(0, exit_code);
}
@@ -765,17 +763,18 @@ TEST_F(ProcessUtilTest, LaunchWithHandleTransfer) {
ASSERT_EQ(ZX_OK, result);
// Launch the test process, and pass it one end of the pipe.
- base::LaunchOptions options;
+ LaunchOptions options;
options.handles_to_transfer.push_back(
{PA_HND(PA_USER0, kStartupHandleId), handles[0]});
- base::Process process =
- SpawnChildWithOptions("ProcessUtilsVerifyHandle", options);
+ Process process = SpawnChildWithOptions("ProcessUtilsVerifyHandle", options);
ASSERT_TRUE(process.IsValid());
// Read from the pipe to verify that the child received it.
zx_signals_t signals = 0;
- result = zx_object_wait_one(handles[1], ZX_SOCKET_READABLE,
- zx_deadline_after(ZX_SEC(5)), &signals);
+ result = zx_object_wait_one(
+ handles[1], ZX_SOCKET_READABLE,
+ (base::TimeTicks::Now() + TestTimeouts::action_timeout()).ToZxTime(),
+ &signals);
EXPECT_EQ(ZX_OK, result);
EXPECT_TRUE(signals & ZX_SOCKET_READABLE);
@@ -789,7 +788,7 @@ TEST_F(ProcessUtilTest, LaunchWithHandleTransfer) {
CHECK_EQ(ZX_OK, zx_handle_close(handles[1]));
int exit_code;
- ASSERT_TRUE(process.WaitForExitWithTimeout(base::TimeDelta::FromSeconds(5),
+ ASSERT_TRUE(process.WaitForExitWithTimeout(TestTimeouts::action_timeout(),
&exit_code));
EXPECT_EQ(0, exit_code);
}
@@ -798,13 +797,13 @@ TEST_F(ProcessUtilTest, LaunchWithHandleTransfer) {
namespace {
std::string TestLaunchProcess(const std::vector<std::string>& args,
- const base::EnvironmentMap& env_changes,
+ const EnvironmentMap& env_changes,
const bool clear_environ,
const int clone_flags) {
int fds[2];
PCHECK(pipe(fds) == 0);
- base::LaunchOptions options;
+ LaunchOptions options;
options.wait = true;
options.environ = env_changes;
options.clear_environ = clear_environ;
@@ -814,7 +813,7 @@ std::string TestLaunchProcess(const std::vector<std::string>& args,
#else
CHECK_EQ(0, clone_flags);
#endif // defined(OS_LINUX)
- EXPECT_TRUE(base::LaunchProcess(args, options).IsValid());
+ EXPECT_TRUE(LaunchProcess(args, options).IsValid());
PCHECK(IGNORE_EINTR(close(fds[1])) == 0);
char buf[512];
@@ -837,7 +836,7 @@ const char kLargeString[] =
} // namespace
TEST_F(ProcessUtilTest, LaunchProcess) {
- base::EnvironmentMap env_changes;
+ EnvironmentMap env_changes;
std::vector<std::string> echo_base_test;
echo_base_test.emplace_back(kShellPath);
echo_base_test.emplace_back("-c");
@@ -884,11 +883,8 @@ TEST_F(ProcessUtilTest, LaunchProcess) {
#if defined(OS_LINUX)
// Test a non-trival value for clone_flags.
- // Don't test on Valgrind as it has limited support for clone().
- if (!RunningOnValgrind()) {
- EXPECT_EQ("wibble\n", TestLaunchProcess(echo_base_test, env_changes,
- no_clear_environ, CLONE_FS));
- }
+ EXPECT_EQ("wibble\n", TestLaunchProcess(echo_base_test, env_changes,
+ no_clear_environ, CLONE_FS));
EXPECT_EQ(
"BASE_TEST=wibble\n",
@@ -920,29 +916,27 @@ TEST_F(ProcessUtilTest, GetAppOutput) {
argv.emplace_back("-c");
argv.emplace_back("exit 0");
- EXPECT_TRUE(base::GetAppOutput(base::CommandLine(argv), &output));
+ EXPECT_TRUE(GetAppOutput(CommandLine(argv), &output));
EXPECT_STREQ("", output.c_str());
argv[2] = "exit 1";
- EXPECT_FALSE(base::GetAppOutput(base::CommandLine(argv), &output));
+ EXPECT_FALSE(GetAppOutput(CommandLine(argv), &output));
EXPECT_STREQ("", output.c_str());
argv[2] = "echo foobar42";
- EXPECT_TRUE(base::GetAppOutput(base::CommandLine(argv), &output));
+ EXPECT_TRUE(GetAppOutput(CommandLine(argv), &output));
EXPECT_STREQ("foobar42\n", output.c_str());
#else
- EXPECT_TRUE(base::GetAppOutput(base::CommandLine(FilePath("true")),
- &output));
+ EXPECT_TRUE(GetAppOutput(CommandLine(FilePath("true")), &output));
EXPECT_STREQ("", output.c_str());
- EXPECT_FALSE(base::GetAppOutput(base::CommandLine(FilePath("false")),
- &output));
+ EXPECT_FALSE(GetAppOutput(CommandLine(FilePath("false")), &output));
std::vector<std::string> argv;
argv.emplace_back("/bin/echo");
argv.emplace_back("-n");
argv.emplace_back("foobar42");
- EXPECT_TRUE(base::GetAppOutput(base::CommandLine(argv), &output));
+ EXPECT_TRUE(GetAppOutput(CommandLine(argv), &output));
EXPECT_STREQ("foobar42", output.c_str());
#endif // defined(OS_ANDROID)
}
@@ -955,8 +949,7 @@ TEST_F(ProcessUtilTest, GetAppOutputWithExitCode) {
argv.emplace_back(kShellPath); // argv[0]
argv.emplace_back("-c"); // argv[1]
argv.emplace_back("echo foo"); // argv[2];
- EXPECT_TRUE(base::GetAppOutputWithExitCode(base::CommandLine(argv), &output,
- &exit_code));
+ EXPECT_TRUE(GetAppOutputWithExitCode(CommandLine(argv), &output, &exit_code));
EXPECT_STREQ("foo\n", output.c_str());
EXPECT_EQ(exit_code, kSuccess);
@@ -964,8 +957,7 @@ TEST_F(ProcessUtilTest, GetAppOutputWithExitCode) {
// code.
output.clear();
argv[2] = "echo foo; exit 2";
- EXPECT_TRUE(base::GetAppOutputWithExitCode(base::CommandLine(argv), &output,
- &exit_code));
+ EXPECT_TRUE(GetAppOutputWithExitCode(CommandLine(argv), &output, &exit_code));
EXPECT_STREQ("foo\n", output.c_str());
EXPECT_EQ(exit_code, 2);
}
@@ -973,14 +965,13 @@ TEST_F(ProcessUtilTest, GetAppOutputWithExitCode) {
// There's no such thing as a parent process id on Fuchsia.
#if !defined(OS_FUCHSIA)
TEST_F(ProcessUtilTest, GetParentProcessId) {
- base::ProcessId ppid =
- base::GetParentProcessId(base::GetCurrentProcessHandle());
- EXPECT_EQ(ppid, static_cast<base::ProcessId>(getppid()));
+ ProcessId ppid = GetParentProcessId(GetCurrentProcessHandle());
+ EXPECT_EQ(ppid, static_cast<ProcessId>(getppid()));
}
#endif // !defined(OS_FUCHSIA)
// TODO(port): port those unit tests.
-bool IsProcessDead(base::ProcessHandle child) {
+bool IsProcessDead(ProcessHandle child) {
#if defined(OS_FUCHSIA)
// ProcessHandle is an zx_handle_t, not a pid on Fuchsia, so waitpid() doesn't
// make sense.
@@ -1001,12 +992,11 @@ bool IsProcessDead(base::ProcessHandle child) {
}
TEST_F(ProcessUtilTest, DelayedTermination) {
- base::Process child_process = SpawnChild("process_util_test_never_die");
+ Process child_process = SpawnChild("process_util_test_never_die");
ASSERT_TRUE(child_process.IsValid());
- base::EnsureProcessTerminated(child_process.Duplicate());
+ EnsureProcessTerminated(child_process.Duplicate());
int exit_code;
- child_process.WaitForExitWithTimeout(base::TimeDelta::FromSeconds(5),
- &exit_code);
+ child_process.WaitForExitWithTimeout(TimeDelta::FromSeconds(5), &exit_code);
// Check that process was really killed.
EXPECT_TRUE(IsProcessDead(child_process.Handle()));
@@ -1020,11 +1010,11 @@ MULTIPROCESS_TEST_MAIN(process_util_test_never_die) {
}
TEST_F(ProcessUtilTest, ImmediateTermination) {
- base::Process child_process = SpawnChild("process_util_test_die_immediately");
+ Process child_process = SpawnChild("process_util_test_die_immediately");
ASSERT_TRUE(child_process.IsValid());
// Give it time to die.
sleep(2);
- base::EnsureProcessTerminated(child_process.Duplicate());
+ EnsureProcessTerminated(child_process.Duplicate());
// Check that process was really killed.
EXPECT_TRUE(IsProcessDead(child_process.Handle()));
@@ -1034,45 +1024,45 @@ MULTIPROCESS_TEST_MAIN(process_util_test_die_immediately) {
return kSuccess;
}
-#if !defined(OS_ANDROID)
-class ReadFromPipeDelegate : public base::LaunchOptions::PreExecDelegate {
+#if !defined(OS_ANDROID) && !defined(OS_FUCHSIA)
+class WriteToPipeDelegate : public LaunchOptions::PreExecDelegate {
public:
- explicit ReadFromPipeDelegate(int fd) : fd_(fd) {}
- ~ReadFromPipeDelegate() override = default;
+ explicit WriteToPipeDelegate(int fd) : fd_(fd) {}
+ ~WriteToPipeDelegate() override = default;
void RunAsyncSafe() override {
- char c;
- RAW_CHECK(HANDLE_EINTR(read(fd_, &c, 1)) == 1);
+ RAW_CHECK(HANDLE_EINTR(write(fd_, &kPipeValue, 1)) == 1);
RAW_CHECK(IGNORE_EINTR(close(fd_)) == 0);
- RAW_CHECK(c == kPipeValue);
}
private:
int fd_;
- DISALLOW_COPY_AND_ASSIGN(ReadFromPipeDelegate);
+ DISALLOW_COPY_AND_ASSIGN(WriteToPipeDelegate);
};
TEST_F(ProcessUtilTest, PreExecHook) {
int pipe_fds[2];
ASSERT_EQ(0, pipe(pipe_fds));
- base::ScopedFD read_fd(pipe_fds[0]);
- base::ScopedFD write_fd(pipe_fds[1]);
+ ScopedFD read_fd(pipe_fds[0]);
+ ScopedFD write_fd(pipe_fds[1]);
- ReadFromPipeDelegate read_from_pipe_delegate(read_fd.get());
- base::LaunchOptions options;
- options.fds_to_remap.emplace_back(read_fd.get(), read_fd.get());
- options.pre_exec_delegate = &read_from_pipe_delegate;
- base::Process process(SpawnChildWithOptions("SimpleChildProcess", options));
+ WriteToPipeDelegate write_to_pipe_delegate(write_fd.get());
+ LaunchOptions options;
+ options.fds_to_remap.emplace_back(write_fd.get(), write_fd.get());
+ options.pre_exec_delegate = &write_to_pipe_delegate;
+ Process process(SpawnChildWithOptions("SimpleChildProcess", options));
ASSERT_TRUE(process.IsValid());
- read_fd.reset();
- ASSERT_EQ(1, HANDLE_EINTR(write(write_fd.get(), &kPipeValue, 1)));
+ write_fd.reset();
+ char c;
+ ASSERT_EQ(1, HANDLE_EINTR(read(read_fd.get(), &c, 1)));
+ EXPECT_EQ(c, kPipeValue);
int exit_code = 42;
EXPECT_TRUE(process.WaitForExit(&exit_code));
EXPECT_EQ(0, exit_code);
}
-#endif // !defined(OS_ANDROID)
+#endif // !defined(OS_ANDROID) && !defined(OS_FUCHSIA)
#endif // defined(OS_POSIX)
@@ -1087,17 +1077,16 @@ MULTIPROCESS_TEST_MAIN(CheckPidProcess) {
#if defined(CLONE_NEWUSER) && defined(CLONE_NEWPID)
TEST_F(ProcessUtilTest, CloneFlags) {
- if (RunningOnValgrind() ||
- !base::PathExists(FilePath("/proc/self/ns/user")) ||
- !base::PathExists(FilePath("/proc/self/ns/pid"))) {
+ if (!PathExists(FilePath("/proc/self/ns/user")) ||
+ !PathExists(FilePath("/proc/self/ns/pid"))) {
// User or PID namespaces are not supported.
return;
}
- base::LaunchOptions options;
+ LaunchOptions options;
options.clone_flags = CLONE_NEWUSER | CLONE_NEWPID;
- base::Process process(SpawnChildWithOptions("CheckPidProcess", options));
+ Process process(SpawnChildWithOptions("CheckPidProcess", options));
ASSERT_TRUE(process.IsValid());
int exit_code = 42;
@@ -1107,18 +1096,11 @@ TEST_F(ProcessUtilTest, CloneFlags) {
#endif // defined(CLONE_NEWUSER) && defined(CLONE_NEWPID)
TEST(ForkWithFlagsTest, UpdatesPidCache) {
- // The libc clone function, which allows ForkWithFlags to keep the pid cache
- // up to date, does not work on Valgrind.
- if (RunningOnValgrind()) {
- return;
- }
-
// Warm up the libc pid cache, if there is one.
ASSERT_EQ(syscall(__NR_getpid), getpid());
pid_t ctid = 0;
- const pid_t pid =
- base::ForkWithFlags(SIGCHLD | CLONE_CHILD_SETTID, nullptr, &ctid);
+ const pid_t pid = ForkWithFlags(SIGCHLD | CLONE_CHILD_SETTID, nullptr, &ctid);
if (pid == 0) {
// In child. Check both the raw getpid syscall and the libc getpid wrapper
// (which may rely on a pid cache).
@@ -1135,10 +1117,10 @@ TEST(ForkWithFlagsTest, UpdatesPidCache) {
}
TEST_F(ProcessUtilTest, InvalidCurrentDirectory) {
- base::LaunchOptions options;
- options.current_directory = base::FilePath("/dev/null");
+ LaunchOptions options;
+ options.current_directory = FilePath("/dev/null");
- base::Process process(SpawnChildWithOptions("SimpleChildProcess", options));
+ Process process(SpawnChildWithOptions("SimpleChildProcess", options));
ASSERT_TRUE(process.IsValid());
int exit_code = kSuccess;
@@ -1146,3 +1128,5 @@ TEST_F(ProcessUtilTest, InvalidCurrentDirectory) {
EXPECT_NE(kSuccess, exit_code);
}
#endif // defined(OS_LINUX)
+
+} // namespace base
diff --git a/chromium/base/process/process_win.cc b/chromium/base/process/process_win.cc
index 005a68e6e3b..10a67f3026d 100644
--- a/chromium/base/process/process_win.cc
+++ b/chromium/base/process/process_win.cc
@@ -10,6 +10,8 @@
#include "base/process/kill.h"
#include "base/threading/thread_restrictions.h"
+#include <windows.h>
+
namespace {
DWORD kBasicProcessAccess =
@@ -132,19 +134,22 @@ void Process::Close() {
}
bool Process::Terminate(int exit_code, bool wait) const {
+ constexpr DWORD kWaitMs = 60 * 1000;
+
// exit_code cannot be implemented.
DCHECK(IsValid());
bool result = (::TerminateProcess(Handle(), exit_code) != FALSE);
if (result) {
// The process may not end immediately due to pending I/O
- if (wait && ::WaitForSingleObject(Handle(), 60 * 1000) != WAIT_OBJECT_0)
+ if (wait && ::WaitForSingleObject(Handle(), kWaitMs) != WAIT_OBJECT_0)
DPLOG(ERROR) << "Error waiting for process exit";
Exited(exit_code);
} else {
// The process can't be terminated, perhaps because it has already
- // exited.
+ // exited or is in the process of exiting. A non-zero timeout is necessary
+ // here for the same reasons as above.
DPLOG(ERROR) << "Unable to terminate process";
- if (::WaitForSingleObject(Handle(), 0) == WAIT_OBJECT_0) {
+ if (::WaitForSingleObject(Handle(), kWaitMs) == WAIT_OBJECT_0) {
DWORD actual_exit;
Exited(::GetExitCodeProcess(Handle(), &actual_exit) ? actual_exit
: exit_code);
diff --git a/chromium/base/run_loop.cc b/chromium/base/run_loop.cc
index 61230c62fdc..3b7bfde0cf9 100644
--- a/chromium/base/run_loop.cc
+++ b/chromium/base/run_loop.cc
@@ -32,7 +32,12 @@ void ProxyToTaskRunner(scoped_refptr<SequencedTaskRunner> task_runner,
} // namespace
-RunLoop::Delegate::Delegate() {
+RunLoop::Delegate::Delegate()
+ : should_quit_when_idle_callback_(base::BindRepeating(
+ [](Delegate* self) {
+ return self->active_run_loops_.top()->quit_when_idle_received_;
+ },
+ Unretained(this))) {
// The Delegate can be created on another thread. It is only bound in
// RegisterDelegateForCurrentThread().
DETACH_FROM_THREAD(bound_thread_checker_);
@@ -47,23 +52,12 @@ RunLoop::Delegate::~Delegate() {
tls_delegate.Get().Set(nullptr);
}
-bool RunLoop::Delegate::Client::ShouldQuitWhenIdle() const {
- DCHECK_CALLED_ON_VALID_THREAD(outer_->bound_thread_checker_);
- DCHECK(outer_->bound_);
- return outer_->active_run_loops_.top()->quit_when_idle_received_;
+bool RunLoop::Delegate::ShouldQuitWhenIdle() {
+ return should_quit_when_idle_callback_.Run();
}
-bool RunLoop::Delegate::Client::IsNested() const {
- DCHECK_CALLED_ON_VALID_THREAD(outer_->bound_thread_checker_);
- DCHECK(outer_->bound_);
- return outer_->active_run_loops_.size() > 1;
-}
-
-RunLoop::Delegate::Client::Client(Delegate* outer) : outer_(outer) {}
-
// static
-RunLoop::Delegate::Client* RunLoop::RegisterDelegateForCurrentThread(
- Delegate* delegate) {
+void RunLoop::RegisterDelegateForCurrentThread(Delegate* delegate) {
// Bind |delegate| to this thread.
DCHECK(!delegate->bound_);
DCHECK_CALLED_ON_VALID_THREAD(delegate->bound_thread_checker_);
@@ -72,8 +66,30 @@ RunLoop::Delegate::Client* RunLoop::RegisterDelegateForCurrentThread(
DCHECK(!tls_delegate.Get().Get());
tls_delegate.Get().Set(delegate);
delegate->bound_ = true;
+}
+
+// static
+RunLoop::Delegate* RunLoop::OverrideDelegateForCurrentThreadForTesting(
+ Delegate* delegate,
+ Delegate::ShouldQuitWhenIdleCallback
+ overriding_should_quit_when_idle_callback) {
+ // Bind |delegate| to this thread.
+ DCHECK(!delegate->bound_);
+ DCHECK_CALLED_ON_VALID_THREAD(delegate->bound_thread_checker_);
+
+ // Overriding cannot be performed while running.
+ DCHECK(!IsRunningOnCurrentThread());
+
+ // Override the current Delegate (there must be one).
+ Delegate* overridden_delegate = tls_delegate.Get().Get();
+ DCHECK(overridden_delegate);
+ DCHECK(overridden_delegate->bound_);
+ overridden_delegate->should_quit_when_idle_callback_ =
+ std::move(overriding_should_quit_when_idle_callback);
+ tls_delegate.Get().Set(delegate);
+ delegate->bound_ = true;
- return &delegate->client_interface_;
+ return overridden_delegate;
}
RunLoop::RunLoop(Type type)
@@ -305,6 +321,11 @@ void RunLoop::AfterRun() {
RunLoop* previous_run_loop =
active_run_loops_.empty() ? nullptr : active_run_loops_.top();
+ if (previous_run_loop) {
+ for (auto& observer : delegate_->nesting_observers_)
+ observer.OnExitNestedRunLoop();
+ }
+
// Execute deferred Quit, if any:
if (previous_run_loop && previous_run_loop->quit_called_)
delegate_->Quit();
diff --git a/chromium/base/run_loop.h b/chromium/base/run_loop.h
index 7cf72dbf01d..b7c594e16c1 100644
--- a/chromium/base/run_loop.h
+++ b/chromium/base/run_loop.h
@@ -5,6 +5,7 @@
#ifndef BASE_RUN_LOOP_H_
#define BASE_RUN_LOOP_H_
+#include <utility>
#include <vector>
#include "base/base_export.h"
@@ -129,12 +130,13 @@ class BASE_EXPORT RunLoop {
// Safe to call before RegisterDelegateForCurrentThread().
static bool IsNestedOnCurrentThread();
- // A NestingObserver is notified when a nested RunLoop begins. The observers
- // are notified before the current thread's RunLoop::Delegate::Run() is
- // invoked and nested work begins.
+ // A NestingObserver is notified when a nested RunLoop begins and ends.
class BASE_EXPORT NestingObserver {
public:
+ // Notified before a nested loop starts running work on the current thread.
virtual void OnBeginNestedRunLoop() = 0;
+ // Notified after a nested loop is done running work on the current thread.
+ virtual void OnExitNestedRunLoop() {}
protected:
virtual ~NestingObserver() = default;
@@ -151,42 +153,20 @@ class BASE_EXPORT RunLoop {
static void DisallowNestingOnCurrentThread();
// A RunLoop::Delegate is a generic interface that allows RunLoop to be
- // separate from the uderlying implementation of the message loop for this
+ // separate from the underlying implementation of the message loop for this
// thread. It holds private state used by RunLoops on its associated thread.
// One and only one RunLoop::Delegate must be registered on a given thread
// via RunLoop::RegisterDelegateForCurrentThread() before RunLoop instances
// and RunLoop static methods can be used on it.
class BASE_EXPORT Delegate {
- protected:
- Delegate();
- ~Delegate();
-
- // The client interface provided back to the caller who registers this
- // Delegate via RegisterDelegateForCurrentThread.
- class BASE_EXPORT Client {
- public:
- // Returns true if the Delegate should return from the topmost Run() when
- // it becomes idle. The Delegate is responsible for probing this when it
- // becomes idle.
- bool ShouldQuitWhenIdle() const;
-
- // Returns true if this |outer_| is currently in nested runs. This is a
- // shortcut for RunLoop::IsNestedOnCurrentThread() for the owner of this
- // interface.
- bool IsNested() const;
-
- private:
- // Only a Delegate can instantiate a Delegate::Client.
- friend class Delegate;
- Client(Delegate* outer);
-
- Delegate* outer_;
- };
+ public:
+ // A Callback which returns true if the Delegate should return from the
+ // topmost Run() when it becomes idle. The Delegate is responsible for
+ // probing this when it becomes idle.
+ using ShouldQuitWhenIdleCallback = RepeatingCallback<bool(void)>;
- private:
- // While the state is owned by the Delegate subclass, only RunLoop can use
- // it.
- friend class RunLoop;
+ Delegate();
+ virtual ~Delegate();
// Used by RunLoop to inform its Delegate to Run/Quit. Implementations are
// expected to keep on running synchronously from the Run() call until the
@@ -194,12 +174,12 @@ class BASE_EXPORT RunLoop {
// return from the Run() call as soon as possible without executing
// remaining tasks/messages. Run() calls can nest in which case each Quit()
// call should result in the topmost active Run() call returning. The only
- // other trigger for Run() to return is Client::ShouldQuitWhenIdle() which
- // the Delegate should probe before sleeping when it becomes idle.
- // |application_tasks_allowed| is true if this is the first Run() call on
- // the stack or it was made from a nested RunLoop of
- // Type::kNestableTasksAllowed (otherwise this Run() level should only
- // process system tasks).
+ // other trigger for Run() to return is the
+ // |should_quit_when_idle_callback_| which the Delegate should probe before
+ // sleeping when it becomes idle. |application_tasks_allowed| is true if
+ // this is the first Run() call on the stack or it was made from a nested
+ // RunLoop of Type::kNestableTasksAllowed (otherwise this Run() level should
+ // only process system tasks).
virtual void Run(bool application_tasks_allowed) = 0;
virtual void Quit() = 0;
@@ -212,6 +192,16 @@ class BASE_EXPORT RunLoop {
// system messages.
virtual void EnsureWorkScheduled() = 0;
+ protected:
+ // Returns the result of this Delegate's |should_quit_when_idle_callback_|.
+ // "protected" so it can be invoked only by the Delegate itself.
+ bool ShouldQuitWhenIdle();
+
+ private:
+ // While the state is owned by the Delegate subclass, only RunLoop can use
+ // it.
+ friend class RunLoop;
+
// A vector-based stack is more memory efficient than the default
// deque-based stack as the active RunLoop stack isn't expected to ever
// have more than a few entries.
@@ -229,19 +219,28 @@ class BASE_EXPORT RunLoop {
// RegisterDelegateForCurrentThread().
bool bound_ = false;
+ ShouldQuitWhenIdleCallback should_quit_when_idle_callback_;
+
// Thread-affine per its use of TLS.
THREAD_CHECKER(bound_thread_checker_);
- Client client_interface_ = Client(this);
-
DISALLOW_COPY_AND_ASSIGN(Delegate);
};
// Registers |delegate| on the current thread. Must be called once and only
// once per thread before using RunLoop methods on it. |delegate| is from then
- // on forever bound to that thread (including its destruction). The returned
- // Delegate::Client is valid as long as |delegate| is kept alive.
- static Delegate::Client* RegisterDelegateForCurrentThread(Delegate* delegate);
+ // on forever bound to that thread (including its destruction).
+ static void RegisterDelegateForCurrentThread(Delegate* delegate);
+
+ // Akin to RegisterDelegateForCurrentThread but overrides an existing Delegate
+ // (there must be one). Returns the overridden Delegate, which the caller is
+ // now in charge of driving. |overriding_should_quit_when_idle_callback|
+ // will replace the overridden Delegate's
+ // |should_quit_when_idle_callback_|, giving full control to |delegate|.
+ static Delegate* OverrideDelegateForCurrentThreadForTesting(
+ Delegate* delegate,
+ Delegate::ShouldQuitWhenIdleCallback
+ overriding_should_quit_when_idle_callback);
// Quits the active RunLoop (when idle) -- there must be one. These were
// introduced as prefered temporary replacements to the long deprecated
@@ -306,9 +305,8 @@ class BASE_EXPORT RunLoop {
bool running_ = false;
// Used to record that QuitWhenIdle() was called on this RunLoop, meaning that
// the Delegate should quit Run() once it becomes idle (it's responsible for
- // probing this state via Client::ShouldQuitWhenIdle()). This state is stored
- // here rather than pushed to Delegate via, e.g., Delegate::QuitWhenIdle() to
- // support nested RunLoops.
+ // probing this state via ShouldQuitWhenIdle()). This state is stored here
+ // rather than pushed to Delegate to support nested RunLoops.
bool quit_when_idle_received_ = false;
// RunLoop is not thread-safe. Its state/methods, unless marked as such, may
diff --git a/chromium/base/run_loop_unittest.cc b/chromium/base/run_loop_unittest.cc
index beebda30e70..96060f4a660 100644
--- a/chromium/base/run_loop_unittest.cc
+++ b/chromium/base/run_loop_unittest.cc
@@ -87,7 +87,7 @@ class SimpleSingleThreadTaskRunner : public SingleThreadTaskRunner {
return origin_thread_checker_.CalledOnValidThread();
}
- bool ProcessTask() {
+ bool ProcessSingleTask() {
OnceClosure task;
{
AutoLock auto_lock(tasks_lock_);
@@ -97,11 +97,18 @@ class SimpleSingleThreadTaskRunner : public SingleThreadTaskRunner {
pending_tasks_.pop();
}
// It's important to Run() after pop() and outside the lock as |task| may
- // run a nested loop which will re-enter ProcessTask().
+ // run a nested loop which will re-enter ProcessSingleTask().
std::move(task).Run();
return true;
}
+ base::queue<OnceClosure> TakePendingTasks() {
+ AutoLock auto_lock(tasks_lock_);
+ base::queue<OnceClosure> pending_tasks;
+ std::swap(pending_tasks, pending_tasks_);
+ return pending_tasks;
+ }
+
private:
~SimpleSingleThreadTaskRunner() override = default;
@@ -116,49 +123,65 @@ class SimpleSingleThreadTaskRunner : public SingleThreadTaskRunner {
DISALLOW_COPY_AND_ASSIGN(SimpleSingleThreadTaskRunner);
};
+// The basis of all TestDelegates, allows safely injecting a OnceClosure to be
+// run in the next idle phase of this delegate's Run() implementation. This can
+// be used to have code run on a thread that is otherwise livelocked in an idle
+// phase (sometimes a simple PostTask() won't do it -- e.g. when processing
+// application tasks is disallowed).
+class InjectableTestDelegate : public RunLoop::Delegate {
+ public:
+ void InjectClosureOnDelegate(OnceClosure closure) {
+ AutoLock auto_lock(closure_lock_);
+ closure_ = std::move(closure);
+ }
+
+ bool RunInjectedClosure() {
+ AutoLock auto_lock(closure_lock_);
+ if (closure_.is_null())
+ return false;
+ std::move(closure_).Run();
+ return true;
+ }
+
+ private:
+ Lock closure_lock_;
+ OnceClosure closure_;
+};
+
// A simple test RunLoop::Delegate to exercise Runloop logic independent of any
-// other base constructs.
-class TestDelegate final : public RunLoop::Delegate {
+// other base constructs. BindToCurrentThread() must be called before this
+// TestBoundDelegate is operational.
+class TestBoundDelegate final : public InjectableTestDelegate {
public:
- TestDelegate() = default;
+ TestBoundDelegate() = default;
+ // Makes this TestBoundDelegate become the RunLoop::Delegate and
+ // ThreadTaskRunnerHandle for this thread.
void BindToCurrentThread() {
thread_task_runner_handle_ =
std::make_unique<ThreadTaskRunnerHandle>(simple_task_runner_);
- run_loop_client_ = RunLoop::RegisterDelegateForCurrentThread(this);
- }
-
- // Runs |closure| on the TestDelegate thread as part of Run(). Useful to
- // inject code in an otherwise livelocked Run() state.
- void RunClosureOnDelegate(OnceClosure closure) {
- AutoLock auto_lock(closure_lock_);
- closure_ = std::move(closure);
+ RunLoop::RegisterDelegateForCurrentThread(this);
}
private:
void Run(bool application_tasks_allowed) override {
if (nested_run_allowing_tasks_incoming_) {
- EXPECT_TRUE(run_loop_client_->IsNested());
+ EXPECT_TRUE(RunLoop::IsNestedOnCurrentThread());
EXPECT_TRUE(application_tasks_allowed);
- } else if (run_loop_client_->IsNested()) {
+ } else if (RunLoop::IsNestedOnCurrentThread()) {
EXPECT_FALSE(application_tasks_allowed);
}
nested_run_allowing_tasks_incoming_ = false;
while (!should_quit_) {
- if (application_tasks_allowed && simple_task_runner_->ProcessTask())
+ if (application_tasks_allowed && simple_task_runner_->ProcessSingleTask())
continue;
- if (run_loop_client_->ShouldQuitWhenIdle())
+ if (ShouldQuitWhenIdle())
break;
- {
- AutoLock auto_lock(closure_lock_);
- if (!closure_.is_null()) {
- std::move(closure_).Run();
- continue;
- }
- }
+ if (RunInjectedClosure())
+ continue;
PlatformThread::YieldCurrentThread();
}
@@ -177,14 +200,80 @@ class TestDelegate final : public RunLoop::Delegate {
scoped_refptr<SimpleSingleThreadTaskRunner> simple_task_runner_ =
MakeRefCounted<SimpleSingleThreadTaskRunner>();
+
std::unique_ptr<ThreadTaskRunnerHandle> thread_task_runner_handle_;
bool should_quit_ = false;
+};
- Lock closure_lock_;
- OnceClosure closure_;
+// A test RunLoop::Delegate meant to override an existing RunLoop::Delegate.
+// TakeOverCurrentThread() must be called before this TestOverridingDelegate is
+// operational.
+class TestOverridingDelegate final : public InjectableTestDelegate {
+ public:
+ TestOverridingDelegate() = default;
+
+ // Overrides the existing RunLoop::Delegate and ThreadTaskRunnerHandles on
+ // this thread with this TestOverridingDelegate's.
+ void TakeOverCurrentThread() {
+ overridden_task_runner_ = ThreadTaskRunnerHandle::Get();
+ ASSERT_TRUE(overridden_task_runner_);
+ thread_task_runner_handle_override_scope_ =
+ ThreadTaskRunnerHandle::OverrideForTesting(
+ simple_task_runner_,
+ ThreadTaskRunnerHandle::OverrideType::kTakeOverThread);
+
+ // TestOverridingDelegate::Run() is designed with the assumption that the
+ // overridden Delegate's Run() always returns control to it when it becomes
+ // idle.
+ overridden_delegate_ = RunLoop::OverrideDelegateForCurrentThreadForTesting(
+ this, base::BindRepeating([]() { return true; }));
+ ASSERT_TRUE(overridden_delegate_);
+ }
+
+ private:
+ void Run(bool application_tasks_allowed) override {
+ while (!should_quit_) {
+ auto pending_tasks = simple_task_runner_->TakePendingTasks();
+ if (!pending_tasks.empty()) {
+ while (!pending_tasks.empty()) {
+ overridden_task_runner_->PostTask(FROM_HERE,
+ std::move(pending_tasks.front()));
+ pending_tasks.pop();
+ }
+ overridden_delegate_->Run(application_tasks_allowed);
+ continue;
+ }
+
+ if (ShouldQuitWhenIdle())
+ break;
+
+ if (RunInjectedClosure())
+ continue;
+
+ PlatformThread::YieldCurrentThread();
+ }
+ should_quit_ = false;
+ }
+
+ void Quit() override {
+ should_quit_ = true;
+ overridden_delegate_->Quit();
+ }
+
+ void EnsureWorkScheduled() override {
+ overridden_delegate_->EnsureWorkScheduled();
+ }
- RunLoop::Delegate::Client* run_loop_client_ = nullptr;
+ scoped_refptr<SimpleSingleThreadTaskRunner> simple_task_runner_ =
+ MakeRefCounted<SimpleSingleThreadTaskRunner>();
+
+ ScopedClosureRunner thread_task_runner_handle_override_scope_;
+
+ scoped_refptr<SingleThreadTaskRunner> overridden_task_runner_;
+ RunLoop::Delegate* overridden_delegate_;
+
+ bool should_quit_ = false;
};
enum class RunLoopTestType {
@@ -195,6 +284,10 @@ enum class RunLoopTestType {
// Runs all RunLoopTests under a test RunLoop::Delegate to make sure the
// delegate interface fully works standalone.
kTestDelegate,
+
+ // Runs all RunLoopTests through a RunLoop::Delegate which overrides a
+ // kRealEnvironment's registered RunLoop::Delegate.
+ kOverridingTestDelegate,
};
// The task environment for the RunLoopTest of a given type. A separate class
@@ -203,20 +296,31 @@ class RunLoopTestEnvironment {
public:
RunLoopTestEnvironment(RunLoopTestType type) {
switch (type) {
- case RunLoopTestType::kRealEnvironment:
+ case RunLoopTestType::kRealEnvironment: {
task_environment_ = std::make_unique<test::ScopedTaskEnvironment>();
break;
- case RunLoopTestType::kTestDelegate:
- test_delegate_ = std::make_unique<TestDelegate>();
- test_delegate_->BindToCurrentThread();
+ }
+ case RunLoopTestType::kTestDelegate: {
+ auto test_delegate = std::make_unique<TestBoundDelegate>();
+ test_delegate->BindToCurrentThread();
+ test_delegate_ = std::move(test_delegate);
break;
+ }
+ case RunLoopTestType::kOverridingTestDelegate: {
+ task_environment_ = std::make_unique<test::ScopedTaskEnvironment>();
+ auto test_delegate = std::make_unique<TestOverridingDelegate>();
+ test_delegate->TakeOverCurrentThread();
+ test_delegate_ = std::move(test_delegate);
+ break;
+ }
}
}
private:
- // Instantiates one or the other based on the RunLoopTestType.
+ // Instantiates one or the other based on the RunLoopTestType (or both in the
+ // kOverridingTestDelegate case).
std::unique_ptr<test::ScopedTaskEnvironment> task_environment_;
- std::unique_ptr<TestDelegate> test_delegate_;
+ std::unique_ptr<InjectableTestDelegate> test_delegate_;
};
class RunLoopTest : public testing::TestWithParam<RunLoopTestType> {
@@ -466,21 +570,37 @@ TEST_P(RunLoopTest, IsNestedOnCurrentThread) {
run_loop_.Run();
}
+namespace {
+
class MockNestingObserver : public RunLoop::NestingObserver {
public:
MockNestingObserver() = default;
// RunLoop::NestingObserver:
MOCK_METHOD0(OnBeginNestedRunLoop, void());
+ MOCK_METHOD0(OnExitNestedRunLoop, void());
private:
DISALLOW_COPY_AND_ASSIGN(MockNestingObserver);
};
+class MockTask {
+ public:
+ MockTask() = default;
+ MOCK_METHOD0(Task, void());
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MockTask);
+};
+
+} // namespace
+
TEST_P(RunLoopTest, NestingObservers) {
EXPECT_TRUE(RunLoop::IsNestingAllowedOnCurrentThread());
testing::StrictMock<MockNestingObserver> nesting_observer;
+ testing::StrictMock<MockTask> mock_task_a;
+ testing::StrictMock<MockTask> mock_task_b;
RunLoop::AddNestingObserverOnCurrentThread(&nesting_observer);
@@ -495,14 +615,27 @@ TEST_P(RunLoopTest, NestingObservers) {
nested_run_loop.Run();
});
- // Generate a stack of nested RunLoops, an OnBeginNestedRunLoop() is
- // expected when beginning each nesting depth.
+ // Generate a stack of nested RunLoops. OnBeginNestedRunLoop() is expected
+ // when beginning each nesting depth and OnExitNestedRunLoop() is expected
+ // when exiting each nesting depth.
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, run_nested_loop);
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE,
+ base::BindOnce(&MockTask::Task, base::Unretained(&mock_task_a)));
ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, run_nested_loop);
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, run_loop_.QuitClosure());
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE,
+ base::BindOnce(&MockTask::Task, base::Unretained(&mock_task_b)));
- EXPECT_CALL(nesting_observer, OnBeginNestedRunLoop()).Times(2);
- run_loop_.Run();
+ {
+ testing::InSequence in_sequence;
+ EXPECT_CALL(nesting_observer, OnBeginNestedRunLoop());
+ EXPECT_CALL(mock_task_a, Task());
+ EXPECT_CALL(nesting_observer, OnBeginNestedRunLoop());
+ EXPECT_CALL(mock_task_b, Task());
+ EXPECT_CALL(nesting_observer, OnExitNestedRunLoop()).Times(2);
+ }
+ run_loop_.RunUntilIdle();
RunLoop::RemoveNestingObserverOnCurrentThread(&nesting_observer);
}
@@ -539,15 +672,19 @@ INSTANTIATE_TEST_CASE_P(Real,
INSTANTIATE_TEST_CASE_P(Mock,
RunLoopTest,
testing::Values(RunLoopTestType::kTestDelegate));
+INSTANTIATE_TEST_CASE_P(
+ OverridingMock,
+ RunLoopTest,
+ testing::Values(RunLoopTestType::kOverridingTestDelegate));
TEST(RunLoopDeathTest, MustRegisterBeforeInstantiating) {
- TestDelegate unbound_test_delegate_;
+ TestBoundDelegate unbound_test_delegate_;
// Exercise the DCHECK in RunLoop::RunLoop().
EXPECT_DCHECK_DEATH({ RunLoop(); });
}
TEST(RunLoopDelegateTest, NestableTasksDontRunInDefaultNestedLoops) {
- TestDelegate test_delegate;
+ TestBoundDelegate test_delegate;
test_delegate.BindToCurrentThread();
base::Thread other_thread("test");
@@ -599,8 +736,8 @@ TEST(RunLoopDelegateTest, NestableTasksDontRunInDefaultNestedLoops) {
other_thread.task_runner()->PostDelayedTask(
FROM_HERE,
BindOnce(
- [](TestDelegate* test_delegate, OnceClosure injected_closure) {
- test_delegate->RunClosureOnDelegate(std::move(injected_closure));
+ [](TestBoundDelegate* test_delegate, OnceClosure injected_closure) {
+ test_delegate->InjectClosureOnDelegate(std::move(injected_closure));
},
Unretained(&test_delegate), nested_run_loop.QuitWhenIdleClosure()),
TestTimeouts::tiny_timeout());
diff --git a/chromium/base/security_unittest.cc b/chromium/base/security_unittest.cc
index a41607c8571..8d292211b86 100644
--- a/chromium/base/security_unittest.cc
+++ b/chromium/base/security_unittest.cc
@@ -54,17 +54,6 @@ NOINLINE Type HideValueFromCompiler(volatile Type value) {
#define MALLOC_OVERFLOW_TEST(function) DISABLED_##function
#endif
-#if defined(OS_LINUX) && defined(__x86_64__)
-// Detect runtime TCMalloc bypasses.
-bool IsTcMallocBypassed() {
- // This should detect a TCMalloc bypass from Valgrind.
- char* g_slice = getenv("G_SLICE");
- if (g_slice && !strcmp(g_slice, "always-malloc"))
- return true;
- return false;
-}
-#endif
-
// There are platforms where these tests are known to fail. We would like to
// be able to easily check the status on the bots, but marking tests as
// FAILS_ is too clunky.
@@ -133,8 +122,6 @@ bool ArePointersToSameArea(void* ptr1, void* ptr2, size_t size) {
// Check if TCMalloc uses an underlying random memory allocator.
TEST(SecurityTest, MALLOC_OVERFLOW_TEST(RandomMemoryAllocations)) {
- if (IsTcMallocBypassed())
- return;
size_t kPageSize = 4096; // We support x86_64 only.
// Check that malloc() returns an address that is neither the kernel's
// un-hinted mmap area, nor the current brk() area. The first malloc() may
diff --git a/chromium/base/sequenced_task_runner.h b/chromium/base/sequenced_task_runner.h
index dde135050b2..53d21ad166e 100644
--- a/chromium/base/sequenced_task_runner.h
+++ b/chromium/base/sequenced_task_runner.h
@@ -141,7 +141,7 @@ class BASE_EXPORT SequencedTaskRunner : public TaskRunner {
}
protected:
- ~SequencedTaskRunner() override {}
+ ~SequencedTaskRunner() override = default;
private:
bool DeleteOrReleaseSoonInternal(const Location& from_here,
diff --git a/chromium/base/single_thread_task_runner.h b/chromium/base/single_thread_task_runner.h
index 12b057524ee..4d6938ed6c0 100644
--- a/chromium/base/single_thread_task_runner.h
+++ b/chromium/base/single_thread_task_runner.h
@@ -28,7 +28,7 @@ class BASE_EXPORT SingleThreadTaskRunner : public SequencedTaskRunner {
bool BelongsToCurrentThread() const { return RunsTasksInCurrentSequence(); }
protected:
- ~SingleThreadTaskRunner() override {}
+ ~SingleThreadTaskRunner() override = default;
};
} // namespace base
diff --git a/chromium/base/strings/pattern.cc b/chromium/base/strings/pattern.cc
index e10d4ac0d79..5a0a055745e 100644
--- a/chromium/base/strings/pattern.cc
+++ b/chromium/base/strings/pattern.cc
@@ -154,13 +154,13 @@ struct NextCharUTF16 {
} // namespace
-bool MatchPattern(const StringPiece& eval, const StringPiece& pattern) {
+bool MatchPattern(StringPiece eval, StringPiece pattern) {
return MatchPatternT(eval.data(), eval.data() + eval.size(),
pattern.data(), pattern.data() + pattern.size(),
0, NextCharUTF8());
}
-bool MatchPattern(const StringPiece16& eval, const StringPiece16& pattern) {
+bool MatchPattern(StringPiece16 eval, StringPiece16 pattern) {
return MatchPatternT(eval.data(), eval.data() + eval.size(),
pattern.data(), pattern.data() + pattern.size(),
0, NextCharUTF16());
diff --git a/chromium/base/strings/pattern.h b/chromium/base/strings/pattern.h
index b698207b9d6..15f96e25eb1 100644
--- a/chromium/base/strings/pattern.h
+++ b/chromium/base/strings/pattern.h
@@ -16,10 +16,8 @@ namespace base {
// The backslash character (\) is an escape character for * and ?
// We limit the patterns to having a max of 16 * or ? characters.
// ? matches 0 or 1 character, while * matches 0 or more characters.
-BASE_EXPORT bool MatchPattern(const StringPiece& string,
- const StringPiece& pattern);
-BASE_EXPORT bool MatchPattern(const StringPiece16& string,
- const StringPiece16& pattern);
+BASE_EXPORT bool MatchPattern(StringPiece string, StringPiece pattern);
+BASE_EXPORT bool MatchPattern(StringPiece16 string, StringPiece16 pattern);
} // namespace base
diff --git a/chromium/base/strings/strcat.cc b/chromium/base/strings/strcat.cc
new file mode 100644
index 00000000000..3d5b2cade51
--- /dev/null
+++ b/chromium/base/strings/strcat.cc
@@ -0,0 +1,81 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/strcat.h"
+
+namespace base {
+
+namespace {
+
+// Reserves an additional amount of size in the given string, growing by at
+// least 2x. Used by StrAppend().
+//
+// The "at least 2x" growing rule duplicates the exponential growth of
+// std::string. The problem is that most implementations of reserve() will grow
+// exactly to the requested amount instead of exponentially growing like would
+// happen when appending normally. If we didn't do this, an append after the
+// call to StrAppend() would definitely cause a reallocation, and loops with
+// StrAppend() calls would have O(n^2) complexity to execute. Instead, we want
+// StrAppend() to have the same semantics as std::string::append().
+//
+// If the string is empty, we assume that exponential growth is not necessary.
+template <typename String>
+void ReserveAdditional(String* str, typename String::size_type additional) {
+ str->reserve(std::max(str->size() + additional, str->size() * 2));
+}
+
+template <typename DestString, typename InputString>
+void StrAppendT(DestString* dest, span<const InputString> pieces) {
+ size_t additional_size = 0;
+ for (const auto& cur : pieces)
+ additional_size += cur.size();
+ ReserveAdditional(dest, additional_size);
+
+ for (const auto& cur : pieces)
+ dest->append(cur.data(), cur.size());
+}
+
+} // namespace
+
+std::string StrCat(span<const StringPiece> pieces) {
+ std::string result;
+ StrAppendT(&result, pieces);
+ return result;
+}
+
+string16 StrCat(span<const StringPiece16> pieces) {
+ string16 result;
+ StrAppendT(&result, pieces);
+ return result;
+}
+
+std::string StrCat(span<const std::string> pieces) {
+ std::string result;
+ StrAppendT(&result, pieces);
+ return result;
+}
+
+string16 StrCat(span<const string16> pieces) {
+ string16 result;
+ StrAppendT(&result, pieces);
+ return result;
+}
+
+void StrAppend(std::string* dest, span<const StringPiece> pieces) {
+ StrAppendT(dest, pieces);
+}
+
+void StrAppend(string16* dest, span<const StringPiece16> pieces) {
+ StrAppendT(dest, pieces);
+}
+
+void StrAppend(std::string* dest, span<const std::string> pieces) {
+ StrAppendT(dest, pieces);
+}
+
+void StrAppend(string16* dest, span<const string16> pieces) {
+ StrAppendT(dest, pieces);
+}
+
+} // namespace base
diff --git a/chromium/base/strings/strcat.h b/chromium/base/strings/strcat.h
new file mode 100644
index 00000000000..b249d4919e8
--- /dev/null
+++ b/chromium/base/strings/strcat.h
@@ -0,0 +1,93 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_STRCAT_H_
+#define BASE_STRINGS_STRCAT_H_
+
+#include <initializer_list>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/containers/span.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+// StrCat ----------------------------------------------------------------------
+//
+// StrCat is a function to perform concatenation on a sequence of strings.
+// It is preferable to a sequence of "a + b + c" because it is both faster and
+// generates less code.
+//
+// std::string result = base::StrCat({"foo ", result, "\nfoo ", bar});
+//
+// To join an array of strings with a separator, see base::JoinString in
+// base/strings/string_util.h.
+//
+// MORE INFO
+//
+// StrCat can see all arguments at once, so it can allocate one return buffer
+// of exactly the right size and copy once, as opposed to a sequence of
+// operator+ which generates a series of temporary strings, copying as it goes.
+// And by using StringPiece arguments, StrCat can avoid creating temporary
+// string objects for char* constants.
+//
+// ALTERNATIVES
+//
+// Internal Google / Abseil has a similar StrCat function. That version takes
+// an overloaded number of arguments instead of initializer list (overflowing
+// to initializer list for many arguments). We don't have any legacy
+// requirements and using only initializer_list is simpler and generates
+// roughly the same amount of code at the call sites.
+//
+// Abseil's StrCat also allows numbers by using an intermediate class that can
+// be implicitly constructed from either a string or various number types. This
+// class formats the numbers into a static buffer for increased performance,
+// and the call sites look nice.
+//
+// As-written Abseil's helper class for numbers generates slightly more code
+// than the raw StringPiece version. We can de-inline the helper class'
+// constructors which will cause the StringPiece constructors to be de-inlined
+// for this call and generate slightly less code. This is something we can
+// explore more in the future.
+
+BASE_EXPORT std::string StrCat(span<const StringPiece> pieces);
+BASE_EXPORT string16 StrCat(span<const StringPiece16> pieces);
+BASE_EXPORT std::string StrCat(span<const std::string> pieces);
+BASE_EXPORT string16 StrCat(span<const string16> pieces);
+
+// Initializer list forwards to the array version.
+inline std::string StrCat(std::initializer_list<StringPiece> pieces) {
+ return StrCat(make_span(pieces.begin(), pieces.size()));
+}
+inline string16 StrCat(std::initializer_list<StringPiece16> pieces) {
+ return StrCat(make_span(pieces.begin(), pieces.size()));
+}
+
+// StrAppend -------------------------------------------------------------------
+//
+// Appends a sequence of strings to a destination. Prefer:
+// StrAppend(&foo, ...);
+// over:
+// foo += StrCat(...);
+// because it avoids a temporary string allocation and copy.
+
+BASE_EXPORT void StrAppend(std::string* dest, span<const StringPiece> pieces);
+BASE_EXPORT void StrAppend(string16* dest, span<const StringPiece16> pieces);
+BASE_EXPORT void StrAppend(std::string* dest, span<const std::string> pieces);
+BASE_EXPORT void StrAppend(string16* dest, span<const string16> pieces);
+
+// Initializer list forwards to the array version.
+inline void StrAppend(std::string* dest,
+ std::initializer_list<StringPiece> pieces) {
+ return StrAppend(dest, make_span(pieces.begin(), pieces.size()));
+}
+inline void StrAppend(string16* dest,
+ std::initializer_list<StringPiece16> pieces) {
+ return StrAppend(dest, make_span(pieces.begin(), pieces.size()));
+}
+
+} // namespace base
+
+#endif // BASE_STRINGS_STRCAT_H_
diff --git a/chromium/base/strings/strcat_unittest.cc b/chromium/base/strings/strcat_unittest.cc
new file mode 100644
index 00000000000..cf2db5180f5
--- /dev/null
+++ b/chromium/base/strings/strcat_unittest.cc
@@ -0,0 +1,67 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/strcat.h"
+#include "base/strings/utf_string_conversions.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(StrCat, 8Bit) {
+ EXPECT_EQ("", StrCat({""}));
+ EXPECT_EQ("1", StrCat({"1"}));
+ EXPECT_EQ("122", StrCat({"1", "22"}));
+ EXPECT_EQ("122333", StrCat({"1", "22", "333"}));
+ EXPECT_EQ("1223334444", StrCat({"1", "22", "333", "4444"}));
+ EXPECT_EQ("122333444455555", StrCat({"1", "22", "333", "4444", "55555"}));
+}
+
+TEST(StrCat, 16Bit) {
+ string16 arg1 = ASCIIToUTF16("1");
+ string16 arg2 = ASCIIToUTF16("22");
+ string16 arg3 = ASCIIToUTF16("333");
+
+ EXPECT_EQ(ASCIIToUTF16(""), StrCat({string16()}));
+ EXPECT_EQ(ASCIIToUTF16("1"), StrCat({arg1}));
+ EXPECT_EQ(ASCIIToUTF16("122"), StrCat({arg1, arg2}));
+ EXPECT_EQ(ASCIIToUTF16("122333"), StrCat({arg1, arg2, arg3}));
+}
+
+TEST(StrAppend, 8Bit) {
+ std::string result;
+
+ result = "foo";
+ StrAppend(&result, {std::string()});
+ EXPECT_EQ("foo", result);
+
+ result = "foo";
+ StrAppend(&result, {"1"});
+ EXPECT_EQ("foo1", result);
+
+ result = "foo";
+ StrAppend(&result, {"1", "22", "333"});
+ EXPECT_EQ("foo122333", result);
+}
+
+TEST(StrAppend, 16Bit) {
+ string16 arg1 = ASCIIToUTF16("1");
+ string16 arg2 = ASCIIToUTF16("22");
+ string16 arg3 = ASCIIToUTF16("333");
+
+ string16 result;
+
+ result = ASCIIToUTF16("foo");
+ StrAppend(&result, {string16()});
+ EXPECT_EQ(ASCIIToUTF16("foo"), result);
+
+ result = ASCIIToUTF16("foo");
+ StrAppend(&result, {arg1});
+ EXPECT_EQ(ASCIIToUTF16("foo1"), result);
+
+ result = ASCIIToUTF16("foo");
+ StrAppend(&result, {arg1, arg2, arg3});
+ EXPECT_EQ(ASCIIToUTF16("foo122333"), result);
+}
+
+} // namespace base
diff --git a/chromium/base/strings/string_number_conversions.cc b/chromium/base/strings/string_number_conversions.cc
index f8a7cff3e5d..86fa2e34af7 100644
--- a/chromium/base/strings/string_number_conversions.cc
+++ b/chromium/base/strings/string_number_conversions.cc
@@ -284,23 +284,6 @@ typedef BaseHexIteratorRangeToInt64Traits<StringPiece::const_iterator>
typedef BaseHexIteratorRangeToUInt64Traits<StringPiece::const_iterator>
HexIteratorRangeToUInt64Traits;
-template <typename STR>
-bool HexStringToBytesT(const STR& input, std::vector<uint8_t>* output) {
- DCHECK_EQ(output->size(), 0u);
- size_t count = input.size();
- if (count == 0 || (count % 2) != 0)
- return false;
- for (uintptr_t i = 0; i < count / 2; ++i) {
- uint8_t msb = 0; // most significant 4 bits
- uint8_t lsb = 0; // least significant 4 bits
- if (!CharToDigit<16>(input[i * 2], &msb) ||
- !CharToDigit<16>(input[i * 2 + 1], &lsb))
- return false;
- output->push_back((msb << 4) | lsb);
- }
- return true;
-}
-
template <typename VALUE, int BASE>
class StringPieceToNumberTraits
: public BaseIteratorRangeToNumberTraits<StringPiece::const_iterator,
@@ -309,7 +292,7 @@ class StringPieceToNumberTraits
};
template <typename VALUE>
-bool StringToIntImpl(const StringPiece& input, VALUE* output) {
+bool StringToIntImpl(StringPiece input, VALUE* output) {
return IteratorRangeToNumber<StringPieceToNumberTraits<VALUE, 10> >::Invoke(
input.begin(), input.end(), output);
}
@@ -322,54 +305,60 @@ class StringPiece16ToNumberTraits
};
template <typename VALUE>
-bool String16ToIntImpl(const StringPiece16& input, VALUE* output) {
+bool String16ToIntImpl(StringPiece16 input, VALUE* output) {
return IteratorRangeToNumber<StringPiece16ToNumberTraits<VALUE, 10> >::Invoke(
input.begin(), input.end(), output);
}
} // namespace
-std::string NumberToString(int32_t value) {
- return IntToStringT<std::string, int32_t>::IntToString(value);
+std::string NumberToString(int value) {
+ return IntToStringT<std::string, int>::IntToString(value);
+}
+
+string16 NumberToString16(int value) {
+ return IntToStringT<string16, int>::IntToString(value);
}
-string16 NumberToString16(int32_t value) {
- return IntToStringT<string16, int32_t>::IntToString(value);
+std::string NumberToString(unsigned value) {
+ return IntToStringT<std::string, unsigned>::IntToString(value);
}
-std::string NumberToString(uint32_t value) {
- return IntToStringT<std::string, uint32_t>::IntToString(value);
+string16 NumberToString16(unsigned value) {
+ return IntToStringT<string16, unsigned>::IntToString(value);
}
-string16 NumberToString16(uint32_t value) {
- return IntToStringT<string16, uint32_t>::IntToString(value);
+std::string NumberToString(long value) {
+ return IntToStringT<std::string, long>::IntToString(value);
}
-std::string NumberToString(int64_t value) {
- return IntToStringT<std::string, int64_t>::IntToString(value);
+string16 NumberToString16(long value) {
+ return IntToStringT<string16, long>::IntToString(value);
}
-string16 NumberToString16(int64_t value) {
- return IntToStringT<string16, int64_t>::IntToString(value);
+std::string NumberToString(unsigned long value) {
+ return IntToStringT<std::string, unsigned long>::IntToString(value);
}
-std::string NumberToString(uint64_t value) {
- return IntToStringT<std::string, uint64_t>::IntToString(value);
+string16 NumberToString16(unsigned long value) {
+ return IntToStringT<string16, unsigned long>::IntToString(value);
}
-string16 NumberToString16(uint64_t value) {
- return IntToStringT<string16, uint64_t>::IntToString(value);
+std::string NumberToString(long long value) {
+ return IntToStringT<std::string, long long>::IntToString(value);
}
-#if defined(OS_MACOSX)
-std::string NumberToString(size_t value) {
- return IntToStringT<std::string, size_t>::IntToString(value);
+string16 NumberToString16(long long value) {
+ return IntToStringT<string16, long long>::IntToString(value);
}
-string16 NumberToString16(size_t value) {
- return IntToStringT<string16, size_t>::IntToString(value);
+std::string NumberToString(unsigned long long value) {
+ return IntToStringT<std::string, unsigned long long>::IntToString(value);
+}
+
+string16 NumberToString16(unsigned long long value) {
+ return IntToStringT<string16, unsigned long long>::IntToString(value);
}
-#endif
std::string NumberToString(double value) {
// According to g_fmt.cc, it is sufficient to declare a buffer of size 32.
@@ -388,43 +377,43 @@ base::string16 NumberToString16(double value) {
return base::string16(&buffer[0], &buffer[strlen(buffer)]);
}
-bool StringToInt(const StringPiece& input, int* output) {
+bool StringToInt(StringPiece input, int* output) {
return StringToIntImpl(input, output);
}
-bool StringToInt(const StringPiece16& input, int* output) {
+bool StringToInt(StringPiece16 input, int* output) {
return String16ToIntImpl(input, output);
}
-bool StringToUint(const StringPiece& input, unsigned* output) {
+bool StringToUint(StringPiece input, unsigned* output) {
return StringToIntImpl(input, output);
}
-bool StringToUint(const StringPiece16& input, unsigned* output) {
+bool StringToUint(StringPiece16 input, unsigned* output) {
return String16ToIntImpl(input, output);
}
-bool StringToInt64(const StringPiece& input, int64_t* output) {
+bool StringToInt64(StringPiece input, int64_t* output) {
return StringToIntImpl(input, output);
}
-bool StringToInt64(const StringPiece16& input, int64_t* output) {
+bool StringToInt64(StringPiece16 input, int64_t* output) {
return String16ToIntImpl(input, output);
}
-bool StringToUint64(const StringPiece& input, uint64_t* output) {
+bool StringToUint64(StringPiece input, uint64_t* output) {
return StringToIntImpl(input, output);
}
-bool StringToUint64(const StringPiece16& input, uint64_t* output) {
+bool StringToUint64(StringPiece16 input, uint64_t* output) {
return String16ToIntImpl(input, output);
}
-bool StringToSizeT(const StringPiece& input, size_t* output) {
+bool StringToSizeT(StringPiece input, size_t* output) {
return StringToIntImpl(input, output);
}
-bool StringToSizeT(const StringPiece16& input, size_t* output) {
+bool StringToSizeT(StringPiece16 input, size_t* output) {
return String16ToIntImpl(input, output);
}
@@ -472,28 +461,41 @@ std::string HexEncode(const void* bytes, size_t size) {
return ret;
}
-bool HexStringToInt(const StringPiece& input, int* output) {
+bool HexStringToInt(StringPiece input, int* output) {
return IteratorRangeToNumber<HexIteratorRangeToIntTraits>::Invoke(
input.begin(), input.end(), output);
}
-bool HexStringToUInt(const StringPiece& input, uint32_t* output) {
+bool HexStringToUInt(StringPiece input, uint32_t* output) {
return IteratorRangeToNumber<HexIteratorRangeToUIntTraits>::Invoke(
input.begin(), input.end(), output);
}
-bool HexStringToInt64(const StringPiece& input, int64_t* output) {
+bool HexStringToInt64(StringPiece input, int64_t* output) {
return IteratorRangeToNumber<HexIteratorRangeToInt64Traits>::Invoke(
input.begin(), input.end(), output);
}
-bool HexStringToUInt64(const StringPiece& input, uint64_t* output) {
+bool HexStringToUInt64(StringPiece input, uint64_t* output) {
return IteratorRangeToNumber<HexIteratorRangeToUInt64Traits>::Invoke(
input.begin(), input.end(), output);
}
-bool HexStringToBytes(const std::string& input, std::vector<uint8_t>* output) {
- return HexStringToBytesT(input, output);
+bool HexStringToBytes(StringPiece input, std::vector<uint8_t>* output) {
+ DCHECK_EQ(output->size(), 0u);
+ size_t count = input.size();
+ if (count == 0 || (count % 2) != 0)
+ return false;
+ for (uintptr_t i = 0; i < count / 2; ++i) {
+ uint8_t msb = 0; // most significant 4 bits
+ uint8_t lsb = 0; // least significant 4 bits
+ if (!CharToDigit<16>(input[i * 2], &msb) ||
+ !CharToDigit<16>(input[i * 2 + 1], &lsb)) {
+ return false;
+ }
+ output->push_back((msb << 4) | lsb);
+ }
+ return true;
}
} // namespace base
diff --git a/chromium/base/strings/string_number_conversions.h b/chromium/base/strings/string_number_conversions.h
index daf205eb5b8..057b60abc6e 100644
--- a/chromium/base/strings/string_number_conversions.h
+++ b/chromium/base/strings/string_number_conversions.h
@@ -41,27 +41,20 @@ namespace base {
// Number -> string conversions ------------------------------------------------
// Ignores locale! see warning above.
-BASE_EXPORT std::string NumberToString(int32_t value);
-BASE_EXPORT std::string NumberToString(uint32_t value);
-BASE_EXPORT std::string NumberToString(int64_t value);
-BASE_EXPORT std::string NumberToString(uint64_t value);
+BASE_EXPORT std::string NumberToString(int value);
+BASE_EXPORT string16 NumberToString16(int value);
+BASE_EXPORT std::string NumberToString(unsigned int value);
+BASE_EXPORT string16 NumberToString16(unsigned int value);
+BASE_EXPORT std::string NumberToString(long value);
+BASE_EXPORT string16 NumberToString16(long value);
+BASE_EXPORT std::string NumberToString(unsigned long value);
+BASE_EXPORT string16 NumberToString16(unsigned long value);
+BASE_EXPORT std::string NumberToString(long long value);
+BASE_EXPORT string16 NumberToString16(long long value);
+BASE_EXPORT std::string NumberToString(unsigned long long value);
+BASE_EXPORT string16 NumberToString16(unsigned long long value);
BASE_EXPORT std::string NumberToString(double value);
-
-BASE_EXPORT base::string16 NumberToString16(int32_t value);
-BASE_EXPORT base::string16 NumberToString16(uint32_t value);
-BASE_EXPORT base::string16 NumberToString16(int64_t value);
-BASE_EXPORT base::string16 NumberToString16(uint64_t value);
-BASE_EXPORT base::string16 NumberToString16(double value);
-
-// Compilers seem to disagree about whether size_t is a different name for
-// uint32_t/uint64_t, or whether it's a completely different type that requires
-// a conversion. Therefore, a size_t version must exist for some compilers (to
-// avoid ambiguous call errors), but must not exist for others (to avoid
-// multiple definition errors).
-#if defined(OS_MACOSX)
-BASE_EXPORT std::string NumberToString(size_t value);
-BASE_EXPORT base::string16 NumberToString16(size_t value);
-#endif
+BASE_EXPORT string16 NumberToString16(double value);
// Type-specific naming for backwards compatibility.
//
@@ -85,12 +78,6 @@ inline std::string Int64ToString(int64_t value) {
inline string16 Int64ToString16(int64_t value) {
return NumberToString16(value);
}
-inline std::string Uint64ToString(uint64_t value) {
- return NumberToString(value);
-}
-inline string16 Uint64ToString16(uint64_t value) {
- return NumberToString16(value);
-}
// String -> number conversions ------------------------------------------------
@@ -110,20 +97,20 @@ inline string16 Uint64ToString16(uint64_t value) {
// - Empty string. |*output| will be set to 0.
// WARNING: Will write to |output| even when returning false.
// Read the comments above carefully.
-BASE_EXPORT bool StringToInt(const StringPiece& input, int* output);
-BASE_EXPORT bool StringToInt(const StringPiece16& input, int* output);
+BASE_EXPORT bool StringToInt(StringPiece input, int* output);
+BASE_EXPORT bool StringToInt(StringPiece16 input, int* output);
-BASE_EXPORT bool StringToUint(const StringPiece& input, unsigned* output);
-BASE_EXPORT bool StringToUint(const StringPiece16& input, unsigned* output);
+BASE_EXPORT bool StringToUint(StringPiece input, unsigned* output);
+BASE_EXPORT bool StringToUint(StringPiece16 input, unsigned* output);
-BASE_EXPORT bool StringToInt64(const StringPiece& input, int64_t* output);
-BASE_EXPORT bool StringToInt64(const StringPiece16& input, int64_t* output);
+BASE_EXPORT bool StringToInt64(StringPiece input, int64_t* output);
+BASE_EXPORT bool StringToInt64(StringPiece16 input, int64_t* output);
-BASE_EXPORT bool StringToUint64(const StringPiece& input, uint64_t* output);
-BASE_EXPORT bool StringToUint64(const StringPiece16& input, uint64_t* output);
+BASE_EXPORT bool StringToUint64(StringPiece input, uint64_t* output);
+BASE_EXPORT bool StringToUint64(StringPiece16 input, uint64_t* output);
-BASE_EXPORT bool StringToSizeT(const StringPiece& input, size_t* output);
-BASE_EXPORT bool StringToSizeT(const StringPiece16& input, size_t* output);
+BASE_EXPORT bool StringToSizeT(StringPiece input, size_t* output);
+BASE_EXPORT bool StringToSizeT(StringPiece16 input, size_t* output);
// For floating-point conversions, only conversions of input strings in decimal
// form are defined to work. Behavior with strings representing floating-point
@@ -148,30 +135,30 @@ BASE_EXPORT std::string HexEncode(const void* bytes, size_t size);
// Best effort conversion, see StringToInt above for restrictions.
// Will only successful parse hex values that will fit into |output|, i.e.
// -0x80000000 < |input| < 0x7FFFFFFF.
-BASE_EXPORT bool HexStringToInt(const StringPiece& input, int* output);
+BASE_EXPORT bool HexStringToInt(StringPiece input, int* output);
// Best effort conversion, see StringToInt above for restrictions.
// Will only successful parse hex values that will fit into |output|, i.e.
// 0x00000000 < |input| < 0xFFFFFFFF.
// The string is not required to start with 0x.
-BASE_EXPORT bool HexStringToUInt(const StringPiece& input, uint32_t* output);
+BASE_EXPORT bool HexStringToUInt(StringPiece input, uint32_t* output);
// Best effort conversion, see StringToInt above for restrictions.
// Will only successful parse hex values that will fit into |output|, i.e.
// -0x8000000000000000 < |input| < 0x7FFFFFFFFFFFFFFF.
-BASE_EXPORT bool HexStringToInt64(const StringPiece& input, int64_t* output);
+BASE_EXPORT bool HexStringToInt64(StringPiece input, int64_t* output);
// Best effort conversion, see StringToInt above for restrictions.
// Will only successful parse hex values that will fit into |output|, i.e.
// 0x0000000000000000 < |input| < 0xFFFFFFFFFFFFFFFF.
// The string is not required to start with 0x.
-BASE_EXPORT bool HexStringToUInt64(const StringPiece& input, uint64_t* output);
+BASE_EXPORT bool HexStringToUInt64(StringPiece input, uint64_t* output);
// Similar to the previous functions, except that output is a vector of bytes.
// |*output| will contain as many bytes as were successfully parsed prior to the
// error. There is no overflow, but input.size() must be evenly divisible by 2.
// Leading 0x or +/- are not allowed.
-BASE_EXPORT bool HexStringToBytes(const std::string& input,
+BASE_EXPORT bool HexStringToBytes(StringPiece input,
std::vector<uint8_t>* output);
} // namespace base
diff --git a/chromium/base/strings/string_util.cc b/chromium/base/strings/string_util.cc
index 2112f2309f7..33398f692cd 100644
--- a/chromium/base/strings/string_util.cc
+++ b/chromium/base/strings/string_util.cc
@@ -252,7 +252,7 @@ bool ReplaceCharsT(const StringType& input,
StringType* output);
bool ReplaceChars(const string16& input,
- const StringPiece16& replace_chars,
+ StringPiece16 replace_chars,
const string16& replace_with,
string16* output) {
return ReplaceCharsT(input, replace_chars, StringPiece16(replace_with),
@@ -260,20 +260,20 @@ bool ReplaceChars(const string16& input,
}
bool ReplaceChars(const std::string& input,
- const StringPiece& replace_chars,
+ StringPiece replace_chars,
const std::string& replace_with,
std::string* output) {
return ReplaceCharsT(input, replace_chars, StringPiece(replace_with), output);
}
bool RemoveChars(const string16& input,
- const StringPiece16& remove_chars,
+ StringPiece16 remove_chars,
string16* output) {
return ReplaceCharsT(input, remove_chars, StringPiece16(), output);
}
bool RemoveChars(const std::string& input,
- const StringPiece& remove_chars,
+ StringPiece remove_chars,
std::string* output) {
return ReplaceCharsT(input, remove_chars, StringPiece(), output);
}
@@ -338,13 +338,13 @@ BasicStringPiece<Str> TrimStringPieceT(BasicStringPiece<Str> input,
}
StringPiece16 TrimString(StringPiece16 input,
- const StringPiece16& trim_chars,
+ StringPiece16 trim_chars,
TrimPositions positions) {
return TrimStringPieceT(input, trim_chars, positions);
}
StringPiece TrimString(StringPiece input,
- const StringPiece& trim_chars,
+ StringPiece trim_chars,
TrimPositions positions) {
return TrimStringPieceT(input, trim_chars, positions);
}
@@ -459,13 +459,11 @@ std::string CollapseWhitespaceASCII(const std::string& text,
return CollapseWhitespaceT(text, trim_sequences_with_line_breaks);
}
-bool ContainsOnlyChars(const StringPiece& input,
- const StringPiece& characters) {
+bool ContainsOnlyChars(StringPiece input, StringPiece characters) {
return input.find_first_not_of(characters) == StringPiece::npos;
}
-bool ContainsOnlyChars(const StringPiece16& input,
- const StringPiece16& characters) {
+bool ContainsOnlyChars(StringPiece16 input, StringPiece16 characters) {
return input.find_first_not_of(characters) == StringPiece16::npos;
}
@@ -499,11 +497,11 @@ inline bool DoIsStringASCII(const Char* characters, size_t length) {
return !(all_char_bits & non_ascii_bit_mask);
}
-bool IsStringASCII(const StringPiece& str) {
+bool IsStringASCII(StringPiece str) {
return DoIsStringASCII(str.data(), str.length());
}
-bool IsStringASCII(const StringPiece16& str) {
+bool IsStringASCII(StringPiece16 str) {
return DoIsStringASCII(str.data(), str.length());
}
@@ -517,7 +515,7 @@ bool IsStringASCII(const std::wstring& str) {
}
#endif
-bool IsStringUTF8(const StringPiece& str) {
+bool IsStringUTF8(StringPiece str) {
const char *src = str.data();
int32_t src_len = static_cast<int32_t>(str.length());
int32_t char_index = 0;
@@ -938,6 +936,11 @@ char16* WriteInto(string16* str, size_t length_with_null) {
return WriteIntoT(str, length_with_null);
}
+#if defined(_MSC_VER) && !defined(__clang__)
+// Work around VC++ code-gen bug. https://crbug.com/804884
+#pragma optimize("", off)
+#endif
+
// Generic version for all JoinString overloads. |list_type| must be a sequence
// (std::vector or std::initializer_list) of strings/StringPieces (std::string,
// string16, StringPiece or StringPiece16). |string_type| is either std::string
@@ -985,6 +988,11 @@ string16 JoinString(const std::vector<string16>& parts,
return JoinStringT(parts, separator);
}
+#if defined(_MSC_VER) && !defined(__clang__)
+// Work around VC++ code-gen bug. https://crbug.com/804884
+#pragma optimize("", on)
+#endif
+
std::string JoinString(const std::vector<StringPiece>& parts,
StringPiece separator) {
return JoinStringT(parts, separator);
@@ -1066,7 +1074,7 @@ string16 ReplaceStringPlaceholders(const string16& format_string,
return DoReplaceStringPlaceholders(format_string, subst, offsets);
}
-std::string ReplaceStringPlaceholders(const StringPiece& format_string,
+std::string ReplaceStringPlaceholders(StringPiece format_string,
const std::vector<std::string>& subst,
std::vector<size_t>* offsets) {
return DoReplaceStringPlaceholders(format_string, subst, offsets);
diff --git a/chromium/base/strings/string_util.h b/chromium/base/strings/string_util.h
index 55ceb44504d..a4cba6330bd 100644
--- a/chromium/base/strings/string_util.h
+++ b/chromium/base/strings/string_util.h
@@ -174,10 +174,10 @@ BASE_EXPORT extern const char kUtf8ByteOrderMark[];
// if any characters were removed. |remove_chars| must be null-terminated.
// NOTE: Safe to use the same variable for both |input| and |output|.
BASE_EXPORT bool RemoveChars(const string16& input,
- const StringPiece16& remove_chars,
+ StringPiece16 remove_chars,
string16* output);
BASE_EXPORT bool RemoveChars(const std::string& input,
- const StringPiece& remove_chars,
+ StringPiece remove_chars,
std::string* output);
// Replaces characters in |replace_chars| from anywhere in |input| with
@@ -186,11 +186,11 @@ BASE_EXPORT bool RemoveChars(const std::string& input,
// |replace_chars| must be null-terminated.
// NOTE: Safe to use the same variable for both |input| and |output|.
BASE_EXPORT bool ReplaceChars(const string16& input,
- const StringPiece16& replace_chars,
+ StringPiece16 replace_chars,
const string16& replace_with,
string16* output);
BASE_EXPORT bool ReplaceChars(const std::string& input,
- const StringPiece& replace_chars,
+ StringPiece replace_chars,
const std::string& replace_with,
std::string* output);
@@ -217,10 +217,10 @@ BASE_EXPORT bool TrimString(const std::string& input,
// StringPiece versions of the above. The returned pieces refer to the original
// buffer.
BASE_EXPORT StringPiece16 TrimString(StringPiece16 input,
- const StringPiece16& trim_chars,
+ StringPiece16 trim_chars,
TrimPositions positions);
BASE_EXPORT StringPiece TrimString(StringPiece input,
- const StringPiece& trim_chars,
+ StringPiece trim_chars,
TrimPositions positions);
// Truncates a string to the nearest UTF-8 character that will leave
@@ -264,10 +264,9 @@ BASE_EXPORT std::string CollapseWhitespaceASCII(
// Returns true if |input| is empty or contains only characters found in
// |characters|.
-BASE_EXPORT bool ContainsOnlyChars(const StringPiece& input,
- const StringPiece& characters);
-BASE_EXPORT bool ContainsOnlyChars(const StringPiece16& input,
- const StringPiece16& characters);
+BASE_EXPORT bool ContainsOnlyChars(StringPiece input, StringPiece characters);
+BASE_EXPORT bool ContainsOnlyChars(StringPiece16 input,
+ StringPiece16 characters);
// Returns true if the specified string matches the criteria. How can a wide
// string be 8-bit or UTF8? It contains only characters that are < 256 (in the
@@ -283,9 +282,9 @@ BASE_EXPORT bool ContainsOnlyChars(const StringPiece16& input,
//
// IsStringASCII assumes the input is likely all ASCII, and does not leave early
// if it is not the case.
-BASE_EXPORT bool IsStringUTF8(const StringPiece& str);
-BASE_EXPORT bool IsStringASCII(const StringPiece& str);
-BASE_EXPORT bool IsStringASCII(const StringPiece16& str);
+BASE_EXPORT bool IsStringUTF8(StringPiece str);
+BASE_EXPORT bool IsStringASCII(StringPiece str);
+BASE_EXPORT bool IsStringASCII(StringPiece16 str);
BASE_EXPORT bool IsStringASCII(const string16& str);
#if defined(WCHAR_T_IS_UTF32)
BASE_EXPORT bool IsStringASCII(const std::wstring& str);
@@ -437,6 +436,8 @@ BASE_EXPORT char16* WriteInto(string16* str, size_t length_with_null);
// strings. For example, instead of using SplitString, modifying the vector,
// then using JoinString, use SplitStringPiece followed by JoinString so that no
// copies of those strings are created until the final join operation.
+//
+// Use StrCat (in base/strings/strcat.h) if you don't need a separator.
BASE_EXPORT std::string JoinString(const std::vector<std::string>& parts,
StringPiece separator);
BASE_EXPORT string16 JoinString(const std::vector<string16>& parts,
@@ -463,7 +464,7 @@ BASE_EXPORT string16 ReplaceStringPlaceholders(
std::vector<size_t>* offsets);
BASE_EXPORT std::string ReplaceStringPlaceholders(
- const StringPiece& format_string,
+ StringPiece format_string,
const std::vector<std::string>& subst,
std::vector<size_t>* offsets);
diff --git a/chromium/base/strings/sys_string_conversions.h b/chromium/base/strings/sys_string_conversions.h
index b41a2288ca9..1ad0307816d 100644
--- a/chromium/base/strings/sys_string_conversions.h
+++ b/chromium/base/strings/sys_string_conversions.h
@@ -32,13 +32,13 @@ namespace base {
// Converts between wide and UTF-8 representations of a string. On error, the
// result is system-dependent.
BASE_EXPORT std::string SysWideToUTF8(const std::wstring& wide);
-BASE_EXPORT std::wstring SysUTF8ToWide(const StringPiece& utf8);
+BASE_EXPORT std::wstring SysUTF8ToWide(StringPiece utf8);
// Converts between wide and the system multi-byte representations of a string.
// DANGER: This will lose information and can change (on Windows, this can
// change between reboots).
BASE_EXPORT std::string SysWideToNativeMB(const std::wstring& wide);
-BASE_EXPORT std::wstring SysNativeMBToWide(const StringPiece& native_mb);
+BASE_EXPORT std::wstring SysNativeMBToWide(StringPiece native_mb);
// Windows-specific ------------------------------------------------------------
@@ -47,8 +47,7 @@ BASE_EXPORT std::wstring SysNativeMBToWide(const StringPiece& native_mb);
// Converts between 8-bit and wide strings, using the given code page. The
// code page identifier is one accepted by the Windows function
// MultiByteToWideChar().
-BASE_EXPORT std::wstring SysMultiByteToWide(const StringPiece& mb,
- uint32_t code_page);
+BASE_EXPORT std::wstring SysMultiByteToWide(StringPiece mb, uint32_t code_page);
BASE_EXPORT std::string SysWideToMultiByte(const std::wstring& wide,
uint32_t code_page);
diff --git a/chromium/base/strings/sys_string_conversions_mac.mm b/chromium/base/strings/sys_string_conversions_mac.mm
index 32fe89cc367..637d941a5e2 100644
--- a/chromium/base/strings/sys_string_conversions_mac.mm
+++ b/chromium/base/strings/sys_string_conversions_mac.mm
@@ -131,7 +131,7 @@ std::string SysWideToUTF8(const std::wstring& wide) {
}
// Do not assert in this function since it is used by the asssertion code!
-std::wstring SysUTF8ToWide(const StringPiece& utf8) {
+std::wstring SysUTF8ToWide(StringPiece utf8) {
return STLStringToSTLStringWithEncodingsT<StringPiece, std::wstring>(
utf8, kNarrowStringEncoding, kWideStringEncoding);
}
@@ -140,7 +140,7 @@ std::string SysWideToNativeMB(const std::wstring& wide) {
return SysWideToUTF8(wide);
}
-std::wstring SysNativeMBToWide(const StringPiece& native_mb) {
+std::wstring SysNativeMBToWide(StringPiece native_mb) {
return SysUTF8ToWide(native_mb);
}
diff --git a/chromium/base/strings/sys_string_conversions_posix.cc b/chromium/base/strings/sys_string_conversions_posix.cc
index cc039db79f8..6b1591c78ac 100644
--- a/chromium/base/strings/sys_string_conversions_posix.cc
+++ b/chromium/base/strings/sys_string_conversions_posix.cc
@@ -18,7 +18,7 @@ std::string SysWideToUTF8(const std::wstring& wide) {
// than our ICU, but this will do for now.
return WideToUTF8(wide);
}
-std::wstring SysUTF8ToWide(const StringPiece& utf8) {
+std::wstring SysUTF8ToWide(StringPiece utf8) {
// In theory this should be using the system-provided conversion rather
// than our ICU, but this will do for now.
std::wstring out;
@@ -34,7 +34,7 @@ std::string SysWideToNativeMB(const std::wstring& wide) {
return WideToUTF8(wide);
}
-std::wstring SysNativeMBToWide(const StringPiece& native_mb) {
+std::wstring SysNativeMBToWide(StringPiece native_mb) {
return SysUTF8ToWide(native_mb);
}
@@ -100,7 +100,7 @@ std::string SysWideToNativeMB(const std::wstring& wide) {
return out;
}
-std::wstring SysNativeMBToWide(const StringPiece& native_mb) {
+std::wstring SysNativeMBToWide(StringPiece native_mb) {
mbstate_t ps;
// Calculate the number of wide characters. We walk through the string
diff --git a/chromium/base/strings/sys_string_conversions_win.cc b/chromium/base/strings/sys_string_conversions_win.cc
index b70854421df..356064fb98f 100644
--- a/chromium/base/strings/sys_string_conversions_win.cc
+++ b/chromium/base/strings/sys_string_conversions_win.cc
@@ -17,7 +17,7 @@ std::string SysWideToUTF8(const std::wstring& wide) {
}
// Do not assert in this function since it is used by the asssertion code!
-std::wstring SysUTF8ToWide(const StringPiece& utf8) {
+std::wstring SysUTF8ToWide(StringPiece utf8) {
return SysMultiByteToWide(utf8, CP_UTF8);
}
@@ -25,12 +25,12 @@ std::string SysWideToNativeMB(const std::wstring& wide) {
return SysWideToMultiByte(wide, CP_ACP);
}
-std::wstring SysNativeMBToWide(const StringPiece& native_mb) {
+std::wstring SysNativeMBToWide(StringPiece native_mb) {
return SysMultiByteToWide(native_mb, CP_ACP);
}
// Do not assert in this function since it is used by the asssertion code!
-std::wstring SysMultiByteToWide(const StringPiece& mb, uint32_t code_page) {
+std::wstring SysMultiByteToWide(StringPiece mb, uint32_t code_page) {
if (mb.empty())
return std::wstring();
diff --git a/chromium/base/sync_socket.h b/chromium/base/sync_socket.h
index 53fbeb613fa..59d26bbd63b 100644
--- a/chromium/base/sync_socket.h
+++ b/chromium/base/sync_socket.h
@@ -110,7 +110,7 @@ class BASE_EXPORT CancelableSyncSocket : public SyncSocket {
public:
CancelableSyncSocket();
explicit CancelableSyncSocket(Handle handle);
- ~CancelableSyncSocket() override {}
+ ~CancelableSyncSocket() override = default;
// Initializes a pair of cancelable sockets. See documentation for
// SyncSocket::CreatePair for more details.
diff --git a/chromium/base/synchronization/condition_variable.h b/chromium/base/synchronization/condition_variable.h
index b5677511728..6d95c678683 100644
--- a/chromium/base/synchronization/condition_variable.h
+++ b/chromium/base/synchronization/condition_variable.h
@@ -76,7 +76,7 @@
#endif
#if defined(OS_WIN)
-#include <windows.h>
+#include "base/win/windows_types.h"
#endif
namespace base {
@@ -105,8 +105,8 @@ class BASE_EXPORT ConditionVariable {
private:
#if defined(OS_WIN)
- CONDITION_VARIABLE cv_;
- SRWLOCK* const srwlock_;
+ CHROME_CONDITION_VARIABLE cv_;
+ CHROME_SRWLOCK* const srwlock_;
#elif defined(OS_POSIX)
pthread_cond_t condition_;
pthread_mutex_t* user_mutex_;
diff --git a/chromium/base/synchronization/condition_variable_win.cc b/chromium/base/synchronization/condition_variable_win.cc
index 378785a426c..ddaef07a60c 100644
--- a/chromium/base/synchronization/condition_variable_win.cc
+++ b/chromium/base/synchronization/condition_variable_win.cc
@@ -9,6 +9,8 @@
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
+#include <windows.h>
+
namespace base {
ConditionVariable::ConditionVariable(Lock* user_lock)
@@ -18,7 +20,7 @@ ConditionVariable::ConditionVariable(Lock* user_lock)
#endif
{
DCHECK(user_lock);
- InitializeConditionVariable(&cv_);
+ InitializeConditionVariable(reinterpret_cast<PCONDITION_VARIABLE>(&cv_));
}
ConditionVariable::~ConditionVariable() = default;
@@ -36,7 +38,9 @@ void ConditionVariable::TimedWait(const TimeDelta& max_time) {
user_lock_->CheckHeldAndUnmark();
#endif
- if (!SleepConditionVariableSRW(&cv_, srwlock_, timeout, 0)) {
+ if (!SleepConditionVariableSRW(reinterpret_cast<PCONDITION_VARIABLE>(&cv_),
+ reinterpret_cast<PSRWLOCK>(srwlock_), timeout,
+ 0)) {
// On failure, we only expect the CV to timeout. Any other error value means
// that we've unexpectedly woken up.
// Note that WAIT_TIMEOUT != ERROR_TIMEOUT. WAIT_TIMEOUT is used with the
@@ -51,11 +55,11 @@ void ConditionVariable::TimedWait(const TimeDelta& max_time) {
}
void ConditionVariable::Broadcast() {
- WakeAllConditionVariable(&cv_);
+ WakeAllConditionVariable(reinterpret_cast<PCONDITION_VARIABLE>(&cv_));
}
void ConditionVariable::Signal() {
- WakeConditionVariable(&cv_);
+ WakeConditionVariable(reinterpret_cast<PCONDITION_VARIABLE>(&cv_));
}
} // namespace base
diff --git a/chromium/base/synchronization/lock_impl.h b/chromium/base/synchronization/lock_impl.h
index 880e70db36c..7ec081f48ac 100644
--- a/chromium/base/synchronization/lock_impl.h
+++ b/chromium/base/synchronization/lock_impl.h
@@ -11,7 +11,7 @@
#include "build/build_config.h"
#if defined(OS_WIN)
-#include <windows.h>
+#include "base/win/windows_types.h"
#elif defined(OS_POSIX)
#include <errno.h>
#include <pthread.h>
@@ -26,7 +26,7 @@ namespace internal {
class BASE_EXPORT LockImpl {
public:
#if defined(OS_WIN)
- using NativeHandle = SRWLOCK;
+ using NativeHandle = CHROME_SRWLOCK;
#elif defined(OS_POSIX)
using NativeHandle = pthread_mutex_t;
#endif
@@ -63,7 +63,7 @@ class BASE_EXPORT LockImpl {
#if defined(OS_WIN)
void LockImpl::Unlock() {
- ::ReleaseSRWLockExclusive(&native_handle_);
+ ::ReleaseSRWLockExclusive(reinterpret_cast<PSRWLOCK>(&native_handle_));
}
#elif defined(OS_POSIX)
void LockImpl::Unlock() {
diff --git a/chromium/base/synchronization/lock_impl_posix.cc b/chromium/base/synchronization/lock_impl_posix.cc
index 3bfd9c2e5d2..43c9d4948f4 100644
--- a/chromium/base/synchronization/lock_impl_posix.cc
+++ b/chromium/base/synchronization/lock_impl_posix.cc
@@ -8,6 +8,8 @@
#include "base/debug/activity_tracker.h"
#include "base/synchronization/lock.h"
+#include "base/synchronization/synchronization_flags.h"
+#include "build/build_config.h"
namespace base {
namespace internal {
@@ -76,11 +78,13 @@ void LockImpl::Lock() {
// static
bool LockImpl::PriorityInheritanceAvailable() {
-#if PRIORITY_INHERITANCE_LOCKS_POSSIBLE() && defined(OS_MACOSX)
+#if BUILDFLAG(ENABLE_MUTEX_PRIORITY_INHERITANCE)
+ return true;
+#elif PRIORITY_INHERITANCE_LOCKS_POSSIBLE() && defined(OS_MACOSX)
return true;
#else
// Security concerns prevent the use of priority inheritance mutexes on Linux.
- // * CVE-2010-0622 - wake_futex_pi unlocks incorrect, possible DoS.
+ // * CVE-2010-0622 - Linux < 2.6.33-rc7, wake_futex_pi possible DoS.
// https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2010-0622
// * CVE-2012-6647 - Linux < 3.5.1, futex_wait_requeue_pi possible DoS.
// https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2012-6647
@@ -92,7 +96,7 @@ bool LockImpl::PriorityInheritanceAvailable() {
// * glibc Bug 14652: https://sourceware.org/bugzilla/show_bug.cgi?id=14652
// Fixed in glibc 2.17.
// Priority inheritance mutexes may deadlock with condition variables
- // during recacquisition of the mutex after the condition variable is
+ // during reacquisition of the mutex after the condition variable is
// signalled.
return false;
#endif
diff --git a/chromium/base/synchronization/lock_impl_win.cc b/chromium/base/synchronization/lock_impl_win.cc
index 80a5316e6e2..e0c4e9d7fc6 100644
--- a/chromium/base/synchronization/lock_impl_win.cc
+++ b/chromium/base/synchronization/lock_impl_win.cc
@@ -6,6 +6,8 @@
#include "base/debug/activity_tracker.h"
+#include <windows.h>
+
namespace base {
namespace internal {
@@ -14,7 +16,8 @@ LockImpl::LockImpl() : native_handle_(SRWLOCK_INIT) {}
LockImpl::~LockImpl() = default;
bool LockImpl::Try() {
- return !!::TryAcquireSRWLockExclusive(&native_handle_);
+ return !!::TryAcquireSRWLockExclusive(
+ reinterpret_cast<PSRWLOCK>(&native_handle_));
}
void LockImpl::Lock() {
@@ -30,7 +33,7 @@ void LockImpl::Lock() {
return;
base::debug::ScopedLockAcquireActivity lock_activity(this);
- ::AcquireSRWLockExclusive(&native_handle_);
+ ::AcquireSRWLockExclusive(reinterpret_cast<PSRWLOCK>(&native_handle_));
}
} // namespace internal
diff --git a/chromium/base/synchronization/waitable_event.h b/chromium/base/synchronization/waitable_event.h
index 925d4b6b5fb..1d1136ce035 100644
--- a/chromium/base/synchronization/waitable_event.h
+++ b/chromium/base/synchronization/waitable_event.h
@@ -155,7 +155,7 @@ class BASE_EXPORT WaitableEvent {
virtual bool Compare(void* tag) = 0;
protected:
- virtual ~Waiter() {}
+ virtual ~Waiter() = default;
};
private:
diff --git a/chromium/base/synchronization/waitable_event_watcher.h b/chromium/base/synchronization/waitable_event_watcher.h
index 74c826c3d8c..51728e3bca7 100644
--- a/chromium/base/synchronization/waitable_event_watcher.h
+++ b/chromium/base/synchronization/waitable_event_watcher.h
@@ -7,6 +7,7 @@
#include "base/base_export.h"
#include "base/macros.h"
+#include "base/sequenced_task_runner.h"
#include "build/build_config.h"
#if defined(OS_WIN)
@@ -76,6 +77,7 @@ class BASE_EXPORT WaitableEventWatcher
{
public:
using EventCallback = OnceCallback<void(WaitableEvent*)>;
+
WaitableEventWatcher();
#if defined(OS_WIN)
@@ -86,7 +88,10 @@ class BASE_EXPORT WaitableEventWatcher
// When |event| is signaled, |callback| is called on the sequence that called
// StartWatching().
- bool StartWatching(WaitableEvent* event, EventCallback callback);
+ // |task_runner| is used for asynchronous executions of calling |callback|.
+ bool StartWatching(WaitableEvent* event,
+ EventCallback callback,
+ scoped_refptr<SequencedTaskRunner> task_runner);
// Cancel the current watch. Must be called from the same sequence which
// started the watch.
diff --git a/chromium/base/synchronization/waitable_event_watcher_mac.cc b/chromium/base/synchronization/waitable_event_watcher_mac.cc
index c4527475a09..772fd10992e 100644
--- a/chromium/base/synchronization/waitable_event_watcher_mac.cc
+++ b/chromium/base/synchronization/waitable_event_watcher_mac.cc
@@ -16,8 +16,11 @@ WaitableEventWatcher::~WaitableEventWatcher() {
StopWatching();
}
-bool WaitableEventWatcher::StartWatching(WaitableEvent* event,
- EventCallback callback) {
+bool WaitableEventWatcher::StartWatching(
+ WaitableEvent* event,
+ EventCallback callback,
+ scoped_refptr<SequencedTaskRunner> task_runner) {
+ DCHECK(task_runner->RunsTasksInCurrentSequence());
DCHECK(!source_ || dispatch_source_testcancel(source_));
// Keep a reference to the receive right, so that if the event is deleted
@@ -29,8 +32,6 @@ bool WaitableEventWatcher::StartWatching(WaitableEvent* event,
// Locals for capture by the block. Accessing anything through the |this| or
// |event| pointers is not safe, since either may have been deleted by the
// time the handler block is invoked.
- scoped_refptr<SequencedTaskRunner> task_runner =
- SequencedTaskRunnerHandle::Get();
WeakPtr<WaitableEventWatcher> weak_this = weak_ptr_factory_.GetWeakPtr();
const bool auto_reset =
event->policy_ == WaitableEvent::ResetPolicy::AUTOMATIC;
diff --git a/chromium/base/synchronization/waitable_event_watcher_posix.cc b/chromium/base/synchronization/waitable_event_watcher_posix.cc
index 21368a87104..2b296dafd75 100644
--- a/chromium/base/synchronization/waitable_event_watcher_posix.cc
+++ b/chromium/base/synchronization/waitable_event_watcher_posix.cc
@@ -122,10 +122,11 @@ WaitableEventWatcher::~WaitableEventWatcher() {
// The Handle is how the user cancels a wait. After deleting the Handle we
// insure that the delegate cannot be called.
// -----------------------------------------------------------------------------
-bool WaitableEventWatcher::StartWatching(WaitableEvent* event,
- EventCallback callback) {
+bool WaitableEventWatcher::StartWatching(
+ WaitableEvent* event,
+ EventCallback callback,
+ scoped_refptr<SequencedTaskRunner> task_runner) {
DCHECK(sequence_checker_.CalledOnValidSequence());
- DCHECK(SequencedTaskRunnerHandle::Get());
// A user may call StartWatching from within the callback function. In this
// case, we won't know that we have finished watching, expect that the Flag
@@ -148,14 +149,13 @@ bool WaitableEventWatcher::StartWatching(WaitableEvent* event,
kernel->signaled_ = false;
// No hairpinning - we can't call the delegate directly here. We have to
- // post a task to the SequencedTaskRunnerHandle as usual.
- SequencedTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- std::move(internal_callback));
+ // post a task to |task_runner| as usual.
+ task_runner->PostTask(FROM_HERE, std::move(internal_callback));
return true;
}
kernel_ = kernel;
- waiter_ = new AsyncWaiter(SequencedTaskRunnerHandle::Get(),
+ waiter_ = new AsyncWaiter(std::move(task_runner),
std::move(internal_callback), cancel_flag_.get());
event->Enqueue(waiter_);
diff --git a/chromium/base/synchronization/waitable_event_watcher_unittest.cc b/chromium/base/synchronization/waitable_event_watcher_unittest.cc
index 21657879df7..ec056effe5d 100644
--- a/chromium/base/synchronization/waitable_event_watcher_unittest.cc
+++ b/chromium/base/synchronization/waitable_event_watcher_unittest.cc
@@ -58,7 +58,8 @@ TEST_P(WaitableEventWatcherTest, BasicSignalManual) {
WaitableEvent::InitialState::NOT_SIGNALED);
WaitableEventWatcher watcher;
- watcher.StartWatching(&event, BindOnce(&QuitWhenSignaled));
+ watcher.StartWatching(&event, BindOnce(&QuitWhenSignaled),
+ SequencedTaskRunnerHandle::Get());
event.Signal();
@@ -74,7 +75,8 @@ TEST_P(WaitableEventWatcherTest, BasicSignalAutomatic) {
WaitableEvent::InitialState::NOT_SIGNALED);
WaitableEventWatcher watcher;
- watcher.StartWatching(&event, BindOnce(&QuitWhenSignaled));
+ watcher.StartWatching(&event, BindOnce(&QuitWhenSignaled),
+ SequencedTaskRunnerHandle::Get());
event.Signal();
@@ -93,7 +95,8 @@ TEST_P(WaitableEventWatcherTest, BasicCancel) {
WaitableEventWatcher watcher;
- watcher.StartWatching(&event, BindOnce(&QuitWhenSignaled));
+ watcher.StartWatching(&event, BindOnce(&QuitWhenSignaled),
+ SequencedTaskRunnerHandle::Get());
watcher.StopWatching();
}
@@ -111,12 +114,13 @@ TEST_P(WaitableEventWatcherTest, CancelAfterSet) {
DecrementCountContainer delegate(&counter);
WaitableEventWatcher::EventCallback callback = BindOnce(
&DecrementCountContainer::OnWaitableEventSignaled, Unretained(&delegate));
- watcher.StartWatching(&event, std::move(callback));
+ watcher.StartWatching(&event, std::move(callback),
+ SequencedTaskRunnerHandle::Get());
event.Signal();
// Let the background thread do its business
- base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(30));
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(30));
watcher.StopWatching();
@@ -133,11 +137,13 @@ TEST_P(WaitableEventWatcherTest, OutlivesMessageLoop) {
WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
{
- WaitableEventWatcher watcher;
+ std::unique_ptr<WaitableEventWatcher> watcher;
{
MessageLoop message_loop(GetParam());
+ watcher = std::make_unique<WaitableEventWatcher>();
- watcher.StartWatching(&event, BindOnce(&QuitWhenSignaled));
+ watcher->StartWatching(&event, BindOnce(&QuitWhenSignaled),
+ SequencedTaskRunnerHandle::Get());
}
}
}
@@ -149,7 +155,8 @@ TEST_P(WaitableEventWatcherTest, SignaledAtStartManual) {
WaitableEvent::InitialState::SIGNALED);
WaitableEventWatcher watcher;
- watcher.StartWatching(&event, BindOnce(&QuitWhenSignaled));
+ watcher.StartWatching(&event, BindOnce(&QuitWhenSignaled),
+ SequencedTaskRunnerHandle::Get());
RunLoop().Run();
@@ -163,7 +170,8 @@ TEST_P(WaitableEventWatcherTest, SignaledAtStartAutomatic) {
WaitableEvent::InitialState::SIGNALED);
WaitableEventWatcher watcher;
- watcher.StartWatching(&event, BindOnce(&QuitWhenSignaled));
+ watcher.StartWatching(&event, BindOnce(&QuitWhenSignaled),
+ SequencedTaskRunnerHandle::Get());
RunLoop().Run();
@@ -179,13 +187,16 @@ TEST_P(WaitableEventWatcherTest, StartWatchingInCallback) {
WaitableEventWatcher watcher;
watcher.StartWatching(
- &event, BindOnce(
- [](WaitableEventWatcher* watcher, WaitableEvent* event) {
- // |event| is manual, so the second watcher will run
- // immediately.
- watcher->StartWatching(event, BindOnce(&QuitWhenSignaled));
- },
- &watcher));
+ &event,
+ BindOnce(
+ [](WaitableEventWatcher* watcher, WaitableEvent* event) {
+ // |event| is manual, so the second watcher will run
+ // immediately.
+ watcher->StartWatching(event, BindOnce(&QuitWhenSignaled),
+ SequencedTaskRunnerHandle::Get());
+ },
+ &watcher),
+ SequencedTaskRunnerHandle::Get());
event.Signal();
@@ -210,11 +221,13 @@ TEST_P(WaitableEventWatcherTest, MultipleWatchersManual) {
WaitableEventWatcher watcher1;
watcher1.StartWatching(
- &event, BindOnce(callback, Unretained(&run_loop), Unretained(&counter1)));
+ &event, BindOnce(callback, Unretained(&run_loop), Unretained(&counter1)),
+ SequencedTaskRunnerHandle::Get());
WaitableEventWatcher watcher2;
watcher2.StartWatching(
- &event, BindOnce(callback, Unretained(&run_loop), Unretained(&counter2)));
+ &event, BindOnce(callback, Unretained(&run_loop), Unretained(&counter2)),
+ SequencedTaskRunnerHandle::Get());
event.Signal();
run_loop.Run();
@@ -248,12 +261,14 @@ TEST_P(WaitableEventWatcherTest, MultipleWatchersAutomatic) {
WaitableEventWatcher watcher1;
watcher1.StartWatching(
&event,
- BindOnce(callback, Unretained(&current_run_loop), Unretained(&counter1)));
+ BindOnce(callback, Unretained(&current_run_loop), Unretained(&counter1)),
+ SequencedTaskRunnerHandle::Get());
WaitableEventWatcher watcher2;
watcher2.StartWatching(
&event,
- BindOnce(callback, Unretained(&current_run_loop), Unretained(&counter2)));
+ BindOnce(callback, Unretained(&current_run_loop), Unretained(&counter2)),
+ SequencedTaskRunnerHandle::Get());
event.Signal();
{
@@ -302,7 +317,8 @@ TEST_P(WaitableEventWatcherDeletionTest, DeleteUnder) {
auto* event = new WaitableEvent(WaitableEvent::ResetPolicy::AUTOMATIC,
WaitableEvent::InitialState::NOT_SIGNALED);
- watcher.StartWatching(event, BindOnce(&QuitWhenSignaled));
+ watcher.StartWatching(event, BindOnce(&QuitWhenSignaled),
+ SequencedTaskRunnerHandle::Get());
if (delay_after_delete) {
// On Windows that sleep() improves the chance to catch some problems.
@@ -310,7 +326,7 @@ TEST_P(WaitableEventWatcherDeletionTest, DeleteUnder) {
// and gives some time to run to a created background thread.
// Unfortunately, that thread is under OS control and we can't
// manipulate it directly.
- base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(30));
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(30));
}
delete event;
@@ -333,7 +349,8 @@ TEST_P(WaitableEventWatcherDeletionTest, SignalAndDelete) {
WaitableEvent::ResetPolicy::AUTOMATIC,
WaitableEvent::InitialState::NOT_SIGNALED);
- watcher.StartWatching(event.get(), BindOnce(&QuitWhenSignaled));
+ watcher.StartWatching(event.get(), BindOnce(&QuitWhenSignaled),
+ SequencedTaskRunnerHandle::Get());
event->Signal();
event.reset();
@@ -343,7 +360,7 @@ TEST_P(WaitableEventWatcherDeletionTest, SignalAndDelete) {
// and gives some time to run to a created background thread.
// Unfortunately, that thread is under OS control and we can't
// manipulate it directly.
- base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(30));
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(30));
}
// Wait for the watcher callback.
@@ -385,7 +402,7 @@ TEST_P(WaitableEventWatcherDeletionTest, DeleteWatcherBeforeCallback) {
task_runner->PostTask(
FROM_HERE, BindOnce(IgnoreResult(&WaitableEventWatcher::StartWatching),
Unretained(watcher.get()), Unretained(&event),
- std::move(watcher_callback)));
+ std::move(watcher_callback), task_runner));
task_runner->PostTask(FROM_HERE,
BindOnce(&WaitableEvent::Signal, Unretained(&event)));
task_runner->DeleteSoon(FROM_HERE, std::move(watcher));
diff --git a/chromium/base/synchronization/waitable_event_watcher_win.cc b/chromium/base/synchronization/waitable_event_watcher_win.cc
index d28b6a76574..6003fd44e61 100644
--- a/chromium/base/synchronization/waitable_event_watcher_win.cc
+++ b/chromium/base/synchronization/waitable_event_watcher_win.cc
@@ -8,14 +8,18 @@
#include "base/synchronization/waitable_event.h"
#include "base/win/object_watcher.h"
+#include <windows.h>
+
namespace base {
WaitableEventWatcher::WaitableEventWatcher() = default;
WaitableEventWatcher::~WaitableEventWatcher() {}
-bool WaitableEventWatcher::StartWatching(WaitableEvent* event,
- EventCallback callback) {
+bool WaitableEventWatcher::StartWatching(
+ WaitableEvent* event,
+ EventCallback callback,
+ scoped_refptr<SequencedTaskRunner> task_runner) {
DCHECK(event);
callback_ = std::move(callback);
event_ = event;
diff --git a/chromium/base/sys_info_internal.h b/chromium/base/sys_info_internal.h
index a1792191f5b..2168e9fc1c7 100644
--- a/chromium/base/sys_info_internal.h
+++ b/chromium/base/sys_info_internal.h
@@ -17,7 +17,7 @@ class LazySysInfoValue {
LazySysInfoValue()
: value_(F()) { }
- ~LazySysInfoValue() { }
+ ~LazySysInfoValue() = default;
T value() { return value_; }
diff --git a/chromium/base/syslog_logging.cc b/chromium/base/syslog_logging.cc
index 54e6e96e692..03c2b5ea3e3 100644
--- a/chromium/base/syslog_logging.cc
+++ b/chromium/base/syslog_logging.cc
@@ -5,6 +5,7 @@
#include "base/syslog_logging.h"
#if defined(OS_WIN)
+#include <windows.h>
#include "base/bind.h"
#include "base/callback_helpers.h"
#include "base/debug/stack_trace.h"
diff --git a/chromium/base/system_monitor/system_monitor.h b/chromium/base/system_monitor/system_monitor.h
index 2fa5aec2f76..7f21e47bad8 100644
--- a/chromium/base/system_monitor/system_monitor.h
+++ b/chromium/base/system_monitor/system_monitor.h
@@ -40,7 +40,7 @@ class BASE_EXPORT SystemMonitor {
virtual void OnDevicesChanged(DeviceType device_type) {}
protected:
- virtual ~DevicesChangedObserver() {}
+ virtual ~DevicesChangedObserver() = default;
};
// Add a new observer.
diff --git a/chromium/base/task_scheduler/delayed_task_manager.cc b/chromium/base/task_scheduler/delayed_task_manager.cc
index 5cf1895fa44..e6f0e49891b 100644
--- a/chromium/base/task_scheduler/delayed_task_manager.cc
+++ b/chromium/base/task_scheduler/delayed_task_manager.cc
@@ -41,23 +41,23 @@ void DelayedTaskManager::Start(
const TimeTicks now = tick_clock_->NowTicks();
for (auto& task_and_callback : tasks_added_before_start) {
const TimeDelta delay =
- std::max(TimeDelta(), task_and_callback.first->delayed_run_time - now);
+ std::max(TimeDelta(), task_and_callback.first.delayed_run_time - now);
AddDelayedTaskNow(std::move(task_and_callback.first), delay,
std::move(task_and_callback.second));
}
}
void DelayedTaskManager::AddDelayedTask(
- std::unique_ptr<Task> task,
+ Task task,
PostTaskNowCallback post_task_now_callback) {
- DCHECK(task);
+ DCHECK(task.task);
- const TimeDelta delay = task->delay;
+ const TimeDelta delay = task.delay;
DCHECK(!delay.is_zero());
// Use CHECK instead of DCHECK to crash earlier. See http://crbug.com/711167
// for details.
- CHECK(task->task);
+ CHECK(task.task);
// If |started_| is set, the DelayedTaskManager is in a stable state and
// AddDelayedTaskNow() can be called without synchronization. Otherwise, it is
@@ -78,10 +78,10 @@ void DelayedTaskManager::AddDelayedTask(
}
void DelayedTaskManager::AddDelayedTaskNow(
- std::unique_ptr<Task> task,
+ Task task,
TimeDelta delay,
PostTaskNowCallback post_task_now_callback) {
- DCHECK(task);
+ DCHECK(task.task);
DCHECK(started_.IsSet());
// TODO(fdoray): Use |task->delayed_run_time| on the service thread
// MessageLoop rather than recomputing it from |delay|.
diff --git a/chromium/base/task_scheduler/delayed_task_manager.h b/chromium/base/task_scheduler/delayed_task_manager.h
index 86ab56aa697..2d6babbc5a7 100644
--- a/chromium/base/task_scheduler/delayed_task_manager.h
+++ b/chromium/base/task_scheduler/delayed_task_manager.h
@@ -33,7 +33,7 @@ struct Task;
class BASE_EXPORT DelayedTaskManager {
public:
// Posts |task| for execution immediately.
- using PostTaskNowCallback = OnceCallback<void(std::unique_ptr<Task> task)>;
+ using PostTaskNowCallback = OnceCallback<void(Task task)>;
// |tick_clock| can be specified for testing.
DelayedTaskManager(std::unique_ptr<TickClock> tick_clock =
@@ -48,13 +48,12 @@ class BASE_EXPORT DelayedTaskManager {
// Schedules a call to |post_task_now_callback| with |task| as argument when
// |task| is ripe for execution and Start() has been called.
- void AddDelayedTask(std::unique_ptr<Task> task,
- PostTaskNowCallback post_task_now_callback);
+ void AddDelayedTask(Task task, PostTaskNowCallback post_task_now_callback);
private:
// Schedules a call to |post_task_now_callback| with |task| as argument when
// |delay| expires. Start() must have been called before this.
- void AddDelayedTaskNow(std::unique_ptr<Task> task,
+ void AddDelayedTaskNow(Task task,
TimeDelta delay,
PostTaskNowCallback post_task_now_callback);
@@ -70,8 +69,7 @@ class BASE_EXPORT DelayedTaskManager {
SchedulerLock lock_;
scoped_refptr<TaskRunner> service_thread_task_runner_;
- std::vector<std::pair<std::unique_ptr<Task>, PostTaskNowCallback>>
- tasks_added_before_start_;
+ std::vector<std::pair<Task, PostTaskNowCallback>> tasks_added_before_start_;
DISALLOW_COPY_AND_ASSIGN(DelayedTaskManager);
};
diff --git a/chromium/base/task_scheduler/delayed_task_manager_unittest.cc b/chromium/base/task_scheduler/delayed_task_manager_unittest.cc
index b3dbf070315..8cfe04a2a2e 100644
--- a/chromium/base/task_scheduler/delayed_task_manager_unittest.cc
+++ b/chromium/base/task_scheduler/delayed_task_manager_unittest.cc
@@ -22,49 +22,36 @@ namespace {
constexpr TimeDelta kLongDelay = TimeDelta::FromHours(1);
-class MockTaskTarget {
+class MockTask {
public:
- MockTaskTarget() = default;
- ~MockTaskTarget() = default;
-
- // gMock currently doesn't support move-only types, so PostTaskNowCallback()
- // handles the move-only type and forwards to the mocked method.
- MOCK_METHOD1(DoPostTaskNowCallback, void(const Task*));
-
- void PostTaskNowCallback(std::unique_ptr<Task> task) {
- DoPostTaskNowCallback(task.get());
- }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MockTaskTarget);
+ MOCK_METHOD0(Run, void());
};
-class TaskSchedulerDelayedTaskManagerTest : public testing::Test {
- public:
- TaskSchedulerDelayedTaskManagerTest()
- : delayed_task_manager_(service_thread_task_runner_->GetMockTickClock()) {
- }
- ~TaskSchedulerDelayedTaskManagerTest() override = default;
+void RunTask(Task task) {
+ std::move(task.task).Run();
+}
+class TaskSchedulerDelayedTaskManagerTest : public testing::Test {
protected:
- std::unique_ptr<Task> CreateTask(TimeDelta delay) {
- auto task = std::make_unique<Task>(FROM_HERE, BindOnce(&DoNothing),
- TaskTraits(), delay);
-
+ TaskSchedulerDelayedTaskManagerTest()
+ : delayed_task_manager_(service_thread_task_runner_->GetMockTickClock()),
+ task_(FROM_HERE,
+ BindOnce(&MockTask::Run, Unretained(&mock_task_)),
+ TaskTraits(),
+ kLongDelay) {
// The constructor of Task computes |delayed_run_time| by adding |delay| to
// the real time. Recompute it by adding |delay| to the mock time.
- task->delayed_run_time =
- service_thread_task_runner_->GetMockTickClock()->NowTicks() + delay;
-
- return task;
+ task_.delayed_run_time =
+ service_thread_task_runner_->GetMockTickClock()->NowTicks() +
+ kLongDelay;
}
+ ~TaskSchedulerDelayedTaskManagerTest() override = default;
- testing::StrictMock<MockTaskTarget> task_target_;
const scoped_refptr<TestMockTimeTaskRunner> service_thread_task_runner_ =
MakeRefCounted<TestMockTimeTaskRunner>();
DelayedTaskManager delayed_task_manager_;
- std::unique_ptr<Task> task_ = CreateTask(kLongDelay);
- Task* const task_raw_ = task_.get();
+ testing::StrictMock<MockTask> mock_task_;
+ Task task_;
private:
DISALLOW_COPY_AND_ASSIGN(TaskSchedulerDelayedTaskManagerTest);
@@ -75,14 +62,11 @@ class TaskSchedulerDelayedTaskManagerTest : public testing::Test {
// Verify that a delayed task isn't forwarded before Start().
TEST_F(TaskSchedulerDelayedTaskManagerTest, DelayedTaskDoesNotRunBeforeStart) {
// Send |task| to the DelayedTaskManager.
- delayed_task_manager_.AddDelayedTask(
- std::move(task_), BindOnce(&MockTaskTarget::PostTaskNowCallback,
- Unretained(&task_target_)));
+ delayed_task_manager_.AddDelayedTask(std::move(task_), BindOnce(&RunTask));
// Fast-forward time until the task is ripe for execution. Since Start() has
- // not been called, the task should be forwarded to |task_target_|
- // (|task_target_| is a StrictMock without expectations, so the test will fail
- // if the task is forwarded to it).
+ // not been called, the task should not be forwarded to RunTask() (MockTask is
+ // a StrictMock without expectations so test will fail if RunTask() runs it).
service_thread_task_runner_->FastForwardBy(kLongDelay);
}
@@ -91,9 +75,7 @@ TEST_F(TaskSchedulerDelayedTaskManagerTest, DelayedTaskDoesNotRunBeforeStart) {
TEST_F(TaskSchedulerDelayedTaskManagerTest,
DelayedTaskPostedBeforeStartExpiresAfterStartRunsOnExpire) {
// Send |task| to the DelayedTaskManager.
- delayed_task_manager_.AddDelayedTask(
- std::move(task_), BindOnce(&MockTaskTarget::PostTaskNowCallback,
- Unretained(&task_target_)));
+ delayed_task_manager_.AddDelayedTask(std::move(task_), BindOnce(&RunTask));
delayed_task_manager_.Start(service_thread_task_runner_);
@@ -102,8 +84,8 @@ TEST_F(TaskSchedulerDelayedTaskManagerTest,
service_thread_task_runner_->RunUntilIdle();
// Fast-forward time until the task is ripe for execution. Expect the task to
- // be forwarded to |task_target_|.
- EXPECT_CALL(task_target_, DoPostTaskNowCallback(task_raw_));
+ // be forwarded to RunTask().
+ EXPECT_CALL(mock_task_, Run());
service_thread_task_runner_->FastForwardBy(kLongDelay);
}
@@ -112,9 +94,7 @@ TEST_F(TaskSchedulerDelayedTaskManagerTest,
TEST_F(TaskSchedulerDelayedTaskManagerTest,
DelayedTaskPostedBeforeStartExpiresBeforeStartRunsOnStart) {
// Send |task| to the DelayedTaskManager.
- delayed_task_manager_.AddDelayedTask(
- std::move(task_), BindOnce(&MockTaskTarget::PostTaskNowCallback,
- Unretained(&task_target_)));
+ delayed_task_manager_.AddDelayedTask(std::move(task_), BindOnce(&RunTask));
// Run tasks on the service thread. Don't expect any forwarding to
- // |task_target_| since the task isn't ripe for execution.
+ // RunTask() since the task isn't ripe for execution.
@@ -124,9 +104,8 @@ TEST_F(TaskSchedulerDelayedTaskManagerTest,
// task to be forwarded since Start() hasn't been called yet.
service_thread_task_runner_->FastForwardBy(kLongDelay);
- // Start the DelayedTaskManager. Expect the task to be forwarded to
- // |task_target_|.
- EXPECT_CALL(task_target_, DoPostTaskNowCallback(task_raw_));
+ // Start the DelayedTaskManager. Expect the task to be forwarded to RunTask().
+ EXPECT_CALL(mock_task_, Run());
delayed_task_manager_.Start(service_thread_task_runner_);
service_thread_task_runner_->RunUntilIdle();
}
@@ -137,12 +116,10 @@ TEST_F(TaskSchedulerDelayedTaskManagerTest, DelayedTaskDoesNotRunTooEarly) {
delayed_task_manager_.Start(service_thread_task_runner_);
// Send |task| to the DelayedTaskManager.
- delayed_task_manager_.AddDelayedTask(
- std::move(task_), BindOnce(&MockTaskTarget::PostTaskNowCallback,
- Unretained(&task_target_)));
+ delayed_task_manager_.AddDelayedTask(std::move(task_), BindOnce(&RunTask));
// Run tasks that are ripe for execution. Don't expect any forwarding to
- // |task_target_|.
+ // RunTask().
service_thread_task_runner_->RunUntilIdle();
}
@@ -152,12 +129,10 @@ TEST_F(TaskSchedulerDelayedTaskManagerTest, DelayedTaskRunsAfterDelay) {
delayed_task_manager_.Start(service_thread_task_runner_);
// Send |task| to the DelayedTaskManager.
- delayed_task_manager_.AddDelayedTask(
- std::move(task_), BindOnce(&MockTaskTarget::PostTaskNowCallback,
- Unretained(&task_target_)));
+ delayed_task_manager_.AddDelayedTask(std::move(task_), BindOnce(&RunTask));
- // Fast-forward time. Expect the task is forwarded to |task_target_|.
- EXPECT_CALL(task_target_, DoPostTaskNowCallback(task_raw_));
+ // Fast-forward time. Expect the task to be forwarded to RunTask().
+ EXPECT_CALL(mock_task_, Run());
service_thread_task_runner_->FastForwardBy(kLongDelay);
}
@@ -165,44 +140,40 @@ TEST_F(TaskSchedulerDelayedTaskManagerTest, DelayedTaskRunsAfterDelay) {
// they are ripe for execution.
TEST_F(TaskSchedulerDelayedTaskManagerTest, DelayedTasksRunAfterDelay) {
delayed_task_manager_.Start(service_thread_task_runner_);
- auto task_a = std::make_unique<Task>(FROM_HERE, BindOnce(&DoNothing),
- TaskTraits(), TimeDelta::FromHours(1));
- const Task* task_a_raw = task_a.get();
- auto task_b = std::make_unique<Task>(FROM_HERE, BindOnce(&DoNothing),
- TaskTraits(), TimeDelta::FromHours(2));
- const Task* task_b_raw = task_b.get();
+ testing::StrictMock<MockTask> mock_task_a;
+ Task task_a(FROM_HERE, BindOnce(&MockTask::Run, Unretained(&mock_task_a)),
+ TaskTraits(), TimeDelta::FromHours(1));
+
+ testing::StrictMock<MockTask> mock_task_b;
+ Task task_b(FROM_HERE, BindOnce(&MockTask::Run, Unretained(&mock_task_b)),
+ TaskTraits(), TimeDelta::FromHours(2));
- auto task_c = std::make_unique<Task>(FROM_HERE, BindOnce(&DoNothing),
- TaskTraits(), TimeDelta::FromHours(1));
- const Task* task_c_raw = task_c.get();
+ testing::StrictMock<MockTask> mock_task_c;
+ Task task_c(FROM_HERE, BindOnce(&MockTask::Run, Unretained(&mock_task_c)),
+ TaskTraits(), TimeDelta::FromHours(1));
// Send tasks to the DelayedTaskManager.
- delayed_task_manager_.AddDelayedTask(
- std::move(task_a), BindOnce(&MockTaskTarget::PostTaskNowCallback,
- Unretained(&task_target_)));
- delayed_task_manager_.AddDelayedTask(
- std::move(task_b), BindOnce(&MockTaskTarget::PostTaskNowCallback,
- Unretained(&task_target_)));
- delayed_task_manager_.AddDelayedTask(
- std::move(task_c), BindOnce(&MockTaskTarget::PostTaskNowCallback,
- Unretained(&task_target_)));
+ delayed_task_manager_.AddDelayedTask(std::move(task_a), BindOnce(&RunTask));
+ delayed_task_manager_.AddDelayedTask(std::move(task_b), BindOnce(&RunTask));
+ delayed_task_manager_.AddDelayedTask(std::move(task_c), BindOnce(&RunTask));
// Run tasks that are ripe for execution on the service thread. Don't expect
- // any call to |task_target_|.
+ // any call to RunTask().
service_thread_task_runner_->RunUntilIdle();
- // Fast-forward time. Expect |task_a_raw| and |task_c_raw| to be forwarded to
- // |task_target_|.
+ // Fast-forward time. Expect |task_a| and |task_c| to be forwarded to
+ // RunTask().
- EXPECT_CALL(task_target_, DoPostTaskNowCallback(task_a_raw));
- EXPECT_CALL(task_target_, DoPostTaskNowCallback(task_c_raw));
+ EXPECT_CALL(mock_task_a, Run());
+ EXPECT_CALL(mock_task_c, Run());
service_thread_task_runner_->FastForwardBy(TimeDelta::FromHours(1));
- testing::Mock::VerifyAndClear(&task_target_);
+ testing::Mock::VerifyAndClear(&mock_task_a);
+ testing::Mock::VerifyAndClear(&mock_task_c);
- // Fast-forward time. Expect |task_b_raw| to be forwarded to |task_target_|.
- EXPECT_CALL(task_target_, DoPostTaskNowCallback(task_b_raw));
+ // Fast-forward time. Expect |task_b| to be forwarded to RunTask().
+ EXPECT_CALL(mock_task_b, Run());
service_thread_task_runner_->FastForwardBy(TimeDelta::FromHours(1));
- testing::Mock::VerifyAndClear(&task_target_);
+ testing::Mock::VerifyAndClear(&mock_task_b);
}
} // namespace internal
diff --git a/chromium/base/task_scheduler/lazy_task_runner.h b/chromium/base/task_scheduler/lazy_task_runner.h
index 6304d792f66..2d609e6bd6f 100644
--- a/chromium/base/task_scheduler/lazy_task_runner.h
+++ b/chromium/base/task_scheduler/lazy_task_runner.h
@@ -10,7 +10,7 @@
#include "base/atomicops.h"
#include "base/callback.h"
#include "base/compiler_specific.h"
-#include "base/lazy_instance.h"
+#include "base/lazy_instance_helpers.h"
#include "base/sequenced_task_runner.h"
#include "base/single_thread_task_runner.h"
#include "base/task_scheduler/scheduler_lock.h"
diff --git a/chromium/base/task_scheduler/priority_queue_unittest.cc b/chromium/base/task_scheduler/priority_queue_unittest.cc
index 3d89d62359b..13756c8ee95 100644
--- a/chromium/base/task_scheduler/priority_queue_unittest.cc
+++ b/chromium/base/task_scheduler/priority_queue_unittest.cc
@@ -60,27 +60,26 @@ class ThreadBeginningTransaction : public SimpleThread {
TEST(TaskSchedulerPriorityQueueTest, PushPopPeek) {
// Create test sequences.
scoped_refptr<Sequence> sequence_a(new Sequence);
- sequence_a->PushTask(std::make_unique<Task>(
- FROM_HERE, Bind(&DoNothing), TaskTraits(TaskPriority::USER_VISIBLE),
- TimeDelta()));
+ sequence_a->PushTask(Task(FROM_HERE, Bind(&DoNothing),
+ TaskTraits(TaskPriority::USER_VISIBLE),
+ TimeDelta()));
SequenceSortKey sort_key_a = sequence_a->GetSortKey();
scoped_refptr<Sequence> sequence_b(new Sequence);
- sequence_b->PushTask(std::make_unique<Task>(
- FROM_HERE, Bind(&DoNothing), TaskTraits(TaskPriority::USER_BLOCKING),
- TimeDelta()));
+ sequence_b->PushTask(Task(FROM_HERE, Bind(&DoNothing),
+ TaskTraits(TaskPriority::USER_BLOCKING),
+ TimeDelta()));
SequenceSortKey sort_key_b = sequence_b->GetSortKey();
scoped_refptr<Sequence> sequence_c(new Sequence);
- sequence_c->PushTask(std::make_unique<Task>(
- FROM_HERE, Bind(&DoNothing), TaskTraits(TaskPriority::USER_BLOCKING),
- TimeDelta()));
+ sequence_c->PushTask(Task(FROM_HERE, Bind(&DoNothing),
+ TaskTraits(TaskPriority::USER_BLOCKING),
+ TimeDelta()));
SequenceSortKey sort_key_c = sequence_c->GetSortKey();
scoped_refptr<Sequence> sequence_d(new Sequence);
- sequence_d->PushTask(std::make_unique<Task>(
- FROM_HERE, Bind(&DoNothing), TaskTraits(TaskPriority::BACKGROUND),
- TimeDelta()));
+ sequence_d->PushTask(Task(FROM_HERE, Bind(&DoNothing),
+ TaskTraits(TaskPriority::BACKGROUND), TimeDelta()));
SequenceSortKey sort_key_d = sequence_d->GetSortKey();
// Create a PriorityQueue and a Transaction.
diff --git a/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager.cc b/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager.cc
index 0ebe5590c6c..a47760cb0c3 100644
--- a/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager.cc
+++ b/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager.cc
@@ -237,16 +237,15 @@ class SchedulerWorkerCOMDelegate : public SchedulerWorkerDelegate {
scoped_refptr<Sequence> GetWorkFromWindowsMessageQueue() {
MSG msg;
if (PeekMessage(&msg, nullptr, 0, 0, PM_REMOVE) != FALSE) {
- auto pump_message_task =
- std::make_unique<Task>(FROM_HERE,
- Bind(
- [](MSG msg) {
- TranslateMessage(&msg);
- DispatchMessage(&msg);
- },
- std::move(msg)),
- TaskTraits(MayBlock()), TimeDelta());
- if (task_tracker_->WillPostTask(pump_message_task.get())) {
+ Task pump_message_task(FROM_HERE,
+ Bind(
+ [](MSG msg) {
+ TranslateMessage(&msg);
+ DispatchMessage(&msg);
+ },
+ std::move(msg)),
+ TaskTraits(MayBlock()), TimeDelta());
+ if (task_tracker_->WillPostTask(pump_message_task)) {
bool was_empty =
message_pump_sequence_->PushTask(std::move(pump_message_task));
DCHECK(was_empty) << "GetWorkFromWindowsMessageQueue() does not expect "
@@ -294,14 +293,13 @@ class SchedulerSingleThreadTaskRunnerManager::SchedulerSingleThreadTaskRunner
if (!g_manager_is_alive)
return false;
- auto task =
- std::make_unique<Task>(from_here, std::move(closure), traits_, delay);
- task->single_thread_task_runner_ref = this;
+ Task task(from_here, std::move(closure), traits_, delay);
+ task.single_thread_task_runner_ref = this;
- if (!outer_->task_tracker_->WillPostTask(task.get()))
+ if (!outer_->task_tracker_->WillPostTask(task))
return false;
- if (task->delayed_run_time.is_null()) {
+ if (task.delayed_run_time.is_null()) {
PostTaskNow(std::move(task));
} else {
outer_->delayed_task_manager_->AddDelayedTask(
@@ -351,7 +349,7 @@ class SchedulerSingleThreadTaskRunnerManager::SchedulerSingleThreadTaskRunner
}
}
- void PostTaskNow(std::unique_ptr<Task> task) {
+ void PostTaskNow(Task task) {
scoped_refptr<Sequence> sequence = GetDelegate()->sequence();
// If |sequence| is null, then the thread is effectively gone (either
// shutdown or joined).
@@ -421,21 +419,19 @@ void SchedulerSingleThreadTaskRunnerManager::Start() {
scoped_refptr<SingleThreadTaskRunner>
SchedulerSingleThreadTaskRunnerManager::CreateSingleThreadTaskRunnerWithTraits(
- const std::string& name,
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode) {
- return CreateTaskRunnerWithTraitsImpl<SchedulerWorkerDelegate>(name, traits,
+ return CreateTaskRunnerWithTraitsImpl<SchedulerWorkerDelegate>(traits,
thread_mode);
}
#if defined(OS_WIN)
scoped_refptr<SingleThreadTaskRunner>
SchedulerSingleThreadTaskRunnerManager::CreateCOMSTATaskRunnerWithTraits(
- const std::string& name,
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode) {
return CreateTaskRunnerWithTraitsImpl<SchedulerWorkerCOMDelegate>(
- name, traits, thread_mode);
+ traits, thread_mode);
}
#endif // defined(OS_WIN)
@@ -443,7 +439,6 @@ template <typename DelegateType>
scoped_refptr<
SchedulerSingleThreadTaskRunnerManager::SchedulerSingleThreadTaskRunner>
SchedulerSingleThreadTaskRunnerManager::CreateTaskRunnerWithTraitsImpl(
- const std::string& name,
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode) {
DCHECK(thread_mode != SingleThreadTaskRunnerThreadMode::SHARED ||
@@ -469,12 +464,12 @@ SchedulerSingleThreadTaskRunnerManager::CreateTaskRunnerWithTraitsImpl(
if (!worker) {
const auto& environment_params =
kEnvironmentParams[GetEnvironmentIndexForTraits(traits)];
- std::string processed_name =
- thread_mode == SingleThreadTaskRunnerThreadMode::DEDICATED
- ? name + environment_params.name_suffix
- : "Shared" + name + environment_params.name_suffix;
+ std::string worker_name;
+ if (thread_mode == SingleThreadTaskRunnerThreadMode::SHARED)
+ worker_name += "Shared";
+ worker_name += environment_params.name_suffix;
worker = CreateAndRegisterSchedulerWorker<DelegateType>(
- processed_name, environment_params.priority_hint);
+ worker_name, environment_params.priority_hint);
new_worker = true;
}
started = started_;
diff --git a/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager.h b/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager.h
index 34eb81fd42c..1153a7b0c37 100644
--- a/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager.h
+++ b/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager.h
@@ -58,21 +58,19 @@ class BASE_EXPORT SchedulerSingleThreadTaskRunnerManager final {
void Start();
// Creates a SingleThreadTaskRunner which runs tasks with |traits| on a thread
- // named "TaskSchedulerSingleThread[Shared]" + |name| +
+ // named "TaskSchedulerSingleThread[Shared]" +
// kEnvironmentParams[GetEnvironmentIndexForTraits(traits)].name_suffix +
// index.
scoped_refptr<SingleThreadTaskRunner> CreateSingleThreadTaskRunnerWithTraits(
- const std::string& name,
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode);
#if defined(OS_WIN)
// Creates a SingleThreadTaskRunner which runs tasks with |traits| on a COM
- // STA thread named "TaskSchedulerSingleThreadCOMSTA[Shared]" + |name| +
+ // STA thread named "TaskSchedulerSingleThreadCOMSTA[Shared]" +
// kEnvironmentParams[GetEnvironmentIndexForTraits(traits)].name_suffix +
// index.
scoped_refptr<SingleThreadTaskRunner> CreateCOMSTATaskRunnerWithTraits(
- const std::string& name,
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode);
#endif // defined(OS_WIN)
@@ -84,7 +82,6 @@ class BASE_EXPORT SchedulerSingleThreadTaskRunnerManager final {
template <typename DelegateType>
scoped_refptr<SchedulerSingleThreadTaskRunner> CreateTaskRunnerWithTraitsImpl(
- const std::string& name,
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode);
diff --git a/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager_unittest.cc b/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager_unittest.cc
index 345a116ee12..a47173cee7c 100644
--- a/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager_unittest.cc
+++ b/chromium/base/task_scheduler/scheduler_single_thread_task_runner_manager_unittest.cc
@@ -65,7 +65,7 @@ class TaskSchedulerSingleThreadTaskRunnerManagerTest : public testing::Test {
std::unique_ptr<SchedulerSingleThreadTaskRunnerManager>
single_thread_task_runner_manager_;
- TaskTracker task_tracker_;
+ TaskTracker task_tracker_ = {"Test"};
private:
Thread service_thread_;
@@ -98,12 +98,12 @@ TEST_F(TaskSchedulerSingleThreadTaskRunnerManagerTest, DifferentThreadsUsed) {
scoped_refptr<SingleThreadTaskRunner> task_runner_1 =
single_thread_task_runner_manager_
->CreateSingleThreadTaskRunnerWithTraits(
- "A", {TaskShutdownBehavior::BLOCK_SHUTDOWN},
+ {TaskShutdownBehavior::BLOCK_SHUTDOWN},
SingleThreadTaskRunnerThreadMode::DEDICATED);
scoped_refptr<SingleThreadTaskRunner> task_runner_2 =
single_thread_task_runner_manager_
->CreateSingleThreadTaskRunnerWithTraits(
- "B", {TaskShutdownBehavior::BLOCK_SHUTDOWN},
+ {TaskShutdownBehavior::BLOCK_SHUTDOWN},
SingleThreadTaskRunnerThreadMode::DEDICATED);
PlatformThreadRef thread_ref_1;
@@ -124,12 +124,12 @@ TEST_F(TaskSchedulerSingleThreadTaskRunnerManagerTest, SameThreadUsed) {
scoped_refptr<SingleThreadTaskRunner> task_runner_1 =
single_thread_task_runner_manager_
->CreateSingleThreadTaskRunnerWithTraits(
- "A", {TaskShutdownBehavior::BLOCK_SHUTDOWN},
+ {TaskShutdownBehavior::BLOCK_SHUTDOWN},
SingleThreadTaskRunnerThreadMode::SHARED);
scoped_refptr<SingleThreadTaskRunner> task_runner_2 =
single_thread_task_runner_manager_
->CreateSingleThreadTaskRunnerWithTraits(
- "B", {TaskShutdownBehavior::BLOCK_SHUTDOWN},
+ {TaskShutdownBehavior::BLOCK_SHUTDOWN},
SingleThreadTaskRunnerThreadMode::SHARED);
PlatformThreadRef thread_ref_1;
@@ -151,12 +151,12 @@ TEST_F(TaskSchedulerSingleThreadTaskRunnerManagerTest,
scoped_refptr<SingleThreadTaskRunner> task_runner_1 =
single_thread_task_runner_manager_
->CreateSingleThreadTaskRunnerWithTraits(
- "A", {TaskShutdownBehavior::BLOCK_SHUTDOWN},
+ {TaskShutdownBehavior::BLOCK_SHUTDOWN},
SingleThreadTaskRunnerThreadMode::DEDICATED);
scoped_refptr<SingleThreadTaskRunner> task_runner_2 =
single_thread_task_runner_manager_
->CreateSingleThreadTaskRunnerWithTraits(
- "B", {TaskShutdownBehavior::BLOCK_SHUTDOWN},
+ {TaskShutdownBehavior::BLOCK_SHUTDOWN},
SingleThreadTaskRunnerThreadMode::DEDICATED);
EXPECT_FALSE(task_runner_1->RunsTasksInCurrentSequence());
@@ -189,8 +189,7 @@ TEST_F(TaskSchedulerSingleThreadTaskRunnerManagerTest,
SharedWithBaseSyncPrimitivesDCHECKs) {
EXPECT_DCHECK_DEATH({
single_thread_task_runner_manager_->CreateSingleThreadTaskRunnerWithTraits(
- "A", {WithBaseSyncPrimitives()},
- SingleThreadTaskRunnerThreadMode::SHARED);
+ {WithBaseSyncPrimitives()}, SingleThreadTaskRunnerThreadMode::SHARED);
});
}
@@ -216,12 +215,12 @@ TEST_P(TaskSchedulerSingleThreadTaskRunnerManagerCommonTest,
// events to determine when a task is run.
scoped_refptr<SingleThreadTaskRunner> task_runner_background =
single_thread_task_runner_manager_
- ->CreateSingleThreadTaskRunnerWithTraits(
- "Background", {TaskPriority::BACKGROUND}, GetParam());
+ ->CreateSingleThreadTaskRunnerWithTraits({TaskPriority::BACKGROUND},
+ GetParam());
scoped_refptr<SingleThreadTaskRunner> task_runner_normal =
single_thread_task_runner_manager_
- ->CreateSingleThreadTaskRunnerWithTraits(
- "Normal", {TaskPriority::USER_VISIBLE}, GetParam());
+ ->CreateSingleThreadTaskRunnerWithTraits({TaskPriority::USER_VISIBLE},
+ GetParam());
ThreadPriority thread_priority_background;
task_runner_background->PostTask(
@@ -246,10 +245,12 @@ TEST_P(TaskSchedulerSingleThreadTaskRunnerManagerCommonTest,
waitable_event_background.Wait();
waitable_event_normal.Wait();
- if (Lock::HandlesMultipleThreadPriorities())
+ if (Lock::HandlesMultipleThreadPriorities() &&
+ PlatformThread::CanIncreaseCurrentThreadPriority()) {
EXPECT_EQ(ThreadPriority::BACKGROUND, thread_priority_background);
- else
+ } else {
EXPECT_EQ(ThreadPriority::NORMAL, thread_priority_background);
+ }
EXPECT_EQ(ThreadPriority::NORMAL, thread_priority_normal);
}
@@ -258,8 +259,7 @@ TEST_P(TaskSchedulerSingleThreadTaskRunnerManagerCommonTest, ThreadNamesSet) {
TaskShutdownBehavior::BLOCK_SHUTDOWN};
scoped_refptr<SingleThreadTaskRunner> foo_task_runner =
single_thread_task_runner_manager_
- ->CreateSingleThreadTaskRunnerWithTraits("MyName", foo_traits,
- GetParam());
+ ->CreateSingleThreadTaskRunnerWithTraits(foo_traits, GetParam());
std::string foo_captured_name;
foo_task_runner->PostTask(FROM_HERE,
BindOnce(&CaptureThreadName, &foo_captured_name));
@@ -269,7 +269,7 @@ TEST_P(TaskSchedulerSingleThreadTaskRunnerManagerCommonTest, ThreadNamesSet) {
TaskShutdownBehavior::BLOCK_SHUTDOWN};
scoped_refptr<SingleThreadTaskRunner> user_blocking_task_runner =
single_thread_task_runner_manager_
- ->CreateSingleThreadTaskRunnerWithTraits("A", user_blocking_traits,
+ ->CreateSingleThreadTaskRunnerWithTraits(user_blocking_traits,
GetParam());
std::string user_blocking_captured_name;
@@ -278,7 +278,6 @@ TEST_P(TaskSchedulerSingleThreadTaskRunnerManagerCommonTest, ThreadNamesSet) {
task_tracker_.Shutdown();
- EXPECT_NE(std::string::npos, foo_captured_name.find("MyName"));
EXPECT_NE(std::string::npos,
foo_captured_name.find(
kEnvironmentParams[GetEnvironmentIndexForTraits(foo_traits)]
@@ -300,9 +299,9 @@ TEST_P(TaskSchedulerSingleThreadTaskRunnerManagerCommonTest, ThreadNamesSet) {
TEST_P(TaskSchedulerSingleThreadTaskRunnerManagerCommonTest,
PostTaskAfterShutdown) {
- auto task_runner = single_thread_task_runner_manager_
- ->CreateSingleThreadTaskRunnerWithTraits(
- "A", TaskTraits(), GetParam());
+ auto task_runner =
+ single_thread_task_runner_manager_
+ ->CreateSingleThreadTaskRunnerWithTraits(TaskTraits(), GetParam());
task_tracker_.Shutdown();
EXPECT_FALSE(task_runner->PostTask(FROM_HERE, BindOnce(&ShouldNotRun)));
}
@@ -314,9 +313,9 @@ TEST_P(TaskSchedulerSingleThreadTaskRunnerManagerCommonTest, PostDelayedTask) {
// Post a task with a short delay.
WaitableEvent task_ran(WaitableEvent::ResetPolicy::MANUAL,
WaitableEvent::InitialState::NOT_SIGNALED);
- auto task_runner = single_thread_task_runner_manager_
- ->CreateSingleThreadTaskRunnerWithTraits(
- "A", TaskTraits(), GetParam());
+ auto task_runner =
+ single_thread_task_runner_manager_
+ ->CreateSingleThreadTaskRunnerWithTraits(TaskTraits(), GetParam());
EXPECT_TRUE(task_runner->PostDelayedTask(
FROM_HERE, BindOnce(&WaitableEvent::Signal, Unretained(&task_ran)),
TestTimeouts::tiny_timeout()));
@@ -336,9 +335,9 @@ TEST_P(TaskSchedulerSingleThreadTaskRunnerManagerCommonTest, PostDelayedTask) {
// but doesn't crash.
TEST_P(TaskSchedulerSingleThreadTaskRunnerManagerCommonTest,
PostTaskAfterDestroy) {
- auto task_runner = single_thread_task_runner_manager_
- ->CreateSingleThreadTaskRunnerWithTraits(
- "A", TaskTraits(), GetParam());
+ auto task_runner =
+ single_thread_task_runner_manager_
+ ->CreateSingleThreadTaskRunnerWithTraits(TaskTraits(), GetParam());
EXPECT_TRUE(task_runner->PostTask(FROM_HERE, BindOnce(&DoNothing)));
task_tracker_.Shutdown();
TearDownSingleThreadTaskRunnerManager();
@@ -407,7 +406,7 @@ TEST_F(TaskSchedulerSingleThreadTaskRunnerManagerJoinTest, ConcurrentJoin) {
{
auto task_runner = single_thread_task_runner_manager_
->CreateSingleThreadTaskRunnerWithTraits(
- "A", {WithBaseSyncPrimitives()},
+ {WithBaseSyncPrimitives()},
SingleThreadTaskRunnerThreadMode::DEDICATED);
EXPECT_TRUE(task_runner->PostTask(
FROM_HERE,
@@ -437,7 +436,7 @@ TEST_F(TaskSchedulerSingleThreadTaskRunnerManagerJoinTest,
{
auto task_runner = single_thread_task_runner_manager_
->CreateSingleThreadTaskRunnerWithTraits(
- "A", {WithBaseSyncPrimitives()},
+ {WithBaseSyncPrimitives()},
SingleThreadTaskRunnerThreadMode::DEDICATED);
EXPECT_TRUE(task_runner->PostTask(
FROM_HERE,
@@ -462,7 +461,7 @@ TEST_P(TaskSchedulerSingleThreadTaskRunnerManagerCommonTest,
COMSTAInitialized) {
scoped_refptr<SingleThreadTaskRunner> com_task_runner =
single_thread_task_runner_manager_->CreateCOMSTATaskRunnerWithTraits(
- "A", {TaskShutdownBehavior::BLOCK_SHUTDOWN}, GetParam());
+ {TaskShutdownBehavior::BLOCK_SHUTDOWN}, GetParam());
com_task_runner->PostTask(FROM_HERE, BindOnce(&win::AssertComApartmentType,
win::ComApartmentType::STA));
@@ -473,11 +472,11 @@ TEST_P(TaskSchedulerSingleThreadTaskRunnerManagerCommonTest,
TEST_F(TaskSchedulerSingleThreadTaskRunnerManagerTest, COMSTASameThreadUsed) {
scoped_refptr<SingleThreadTaskRunner> task_runner_1 =
single_thread_task_runner_manager_->CreateCOMSTATaskRunnerWithTraits(
- "A", {TaskShutdownBehavior::BLOCK_SHUTDOWN},
+ {TaskShutdownBehavior::BLOCK_SHUTDOWN},
SingleThreadTaskRunnerThreadMode::SHARED);
scoped_refptr<SingleThreadTaskRunner> task_runner_2 =
single_thread_task_runner_manager_->CreateCOMSTATaskRunnerWithTraits(
- "B", {TaskShutdownBehavior::BLOCK_SHUTDOWN},
+ {TaskShutdownBehavior::BLOCK_SHUTDOWN},
SingleThreadTaskRunnerThreadMode::SHARED);
PlatformThreadRef thread_ref_1;
@@ -542,7 +541,7 @@ class TaskSchedulerSingleThreadTaskRunnerManagerTestWin
TEST_F(TaskSchedulerSingleThreadTaskRunnerManagerTestWin, PumpsMessages) {
scoped_refptr<SingleThreadTaskRunner> com_task_runner =
single_thread_task_runner_manager_->CreateCOMSTATaskRunnerWithTraits(
- "A", {TaskShutdownBehavior::BLOCK_SHUTDOWN},
+ {TaskShutdownBehavior::BLOCK_SHUTDOWN},
SingleThreadTaskRunnerThreadMode::DEDICATED);
HWND hwnd = nullptr;
// HWNDs process messages on the thread that created them, so we have to
@@ -595,7 +594,7 @@ TEST_F(TaskSchedulerSingleThreadTaskRunnerManagerStartTest,
WaitableEvent::InitialState::NOT_SIGNALED);
single_thread_task_runner_manager_
->CreateSingleThreadTaskRunnerWithTraits(
- "A", TaskTraits(), SingleThreadTaskRunnerThreadMode::DEDICATED)
+ TaskTraits(), SingleThreadTaskRunnerThreadMode::DEDICATED)
->PostTask(
FROM_HERE,
BindOnce(
diff --git a/chromium/base/task_scheduler/scheduler_worker_pool.cc b/chromium/base/task_scheduler/scheduler_worker_pool.cc
index 1063b40b01f..ee3c232cc40 100644
--- a/chromium/base/task_scheduler/scheduler_worker_pool.cc
+++ b/chromium/base/task_scheduler/scheduler_worker_pool.cc
@@ -62,7 +62,7 @@ class SchedulerParallelTaskRunner : public TaskRunner {
// Post the task as part of a one-off single-task Sequence.
return worker_pool_->PostTaskWithSequence(
- std::make_unique<Task>(from_here, std::move(closure), traits_, delay),
+ Task(from_here, std::move(closure), traits_, delay),
MakeRefCounted<Sequence>());
}
@@ -98,9 +98,8 @@ class SchedulerSequencedTaskRunner : public SequencedTaskRunner {
if (!g_active_pools_count)
return false;
- std::unique_ptr<Task> task =
- std::make_unique<Task>(from_here, std::move(closure), traits_, delay);
- task->sequenced_task_runner_ref = this;
+ Task task(from_here, std::move(closure), traits_, delay);
+ task.sequenced_task_runner_ref = this;
// Post the task as part of |sequence_|.
return worker_pool_->PostTaskWithSequence(std::move(task), sequence_);
@@ -141,29 +140,28 @@ SchedulerWorkerPool::CreateSequencedTaskRunnerWithTraits(
}
bool SchedulerWorkerPool::PostTaskWithSequence(
- std::unique_ptr<Task> task,
+ Task task,
scoped_refptr<Sequence> sequence) {
- DCHECK(task);
+ DCHECK(task.task);
DCHECK(sequence);
- if (!task_tracker_->WillPostTask(task.get()))
+ if (!task_tracker_->WillPostTask(task))
return false;
- if (task->delayed_run_time.is_null()) {
+ if (task.delayed_run_time.is_null()) {
PostTaskWithSequenceNow(std::move(task), std::move(sequence));
} else {
// Use CHECK instead of DCHECK to crash earlier. See http://crbug.com/711167
// for details.
- CHECK(task->task);
+ CHECK(task.task);
delayed_task_manager_->AddDelayedTask(
- std::move(task),
- BindOnce(
- [](scoped_refptr<Sequence> sequence,
- SchedulerWorkerPool* worker_pool, std::unique_ptr<Task> task) {
- worker_pool->PostTaskWithSequenceNow(std::move(task),
- std::move(sequence));
- },
- std::move(sequence), Unretained(this)));
+ std::move(task), BindOnce(
+ [](scoped_refptr<Sequence> sequence,
+ SchedulerWorkerPool* worker_pool, Task task) {
+ worker_pool->PostTaskWithSequenceNow(
+ std::move(task), std::move(sequence));
+ },
+ std::move(sequence), Unretained(this)));
}
return true;
@@ -194,14 +192,14 @@ void SchedulerWorkerPool::UnbindFromCurrentThread() {
}
void SchedulerWorkerPool::PostTaskWithSequenceNow(
- std::unique_ptr<Task> task,
+ Task task,
scoped_refptr<Sequence> sequence) {
- DCHECK(task);
+ DCHECK(task.task);
DCHECK(sequence);
// Confirm that |task| is ready to run (its delayed run time is either null or
// in the past).
- DCHECK_LE(task->delayed_run_time, TimeTicks::Now());
+ DCHECK_LE(task.delayed_run_time, TimeTicks::Now());
const bool sequence_was_empty = sequence->PushTask(std::move(task));
if (sequence_was_empty) {
diff --git a/chromium/base/task_scheduler/scheduler_worker_pool.h b/chromium/base/task_scheduler/scheduler_worker_pool.h
index 9aa237d40b2..1b80d04b86a 100644
--- a/chromium/base/task_scheduler/scheduler_worker_pool.h
+++ b/chromium/base/task_scheduler/scheduler_worker_pool.h
@@ -5,8 +5,6 @@
#ifndef BASE_TASK_SCHEDULER_SCHEDULER_WORKER_POOL_H_
#define BASE_TASK_SCHEDULER_SCHEDULER_WORKER_POOL_H_
-#include <memory>
-
#include "base/base_export.h"
#include "base/memory/ref_counted.h"
#include "base/sequenced_task_runner.h"
@@ -42,8 +40,7 @@ class BASE_EXPORT SchedulerWorkerPool : public CanScheduleSequenceObserver {
// Posts |task| to be executed by this SchedulerWorkerPool as part of
// |sequence|. |task| won't be executed before its delayed run time, if any.
// Returns true if |task| is posted.
- bool PostTaskWithSequence(std::unique_ptr<Task> task,
- scoped_refptr<Sequence> sequence);
+ bool PostTaskWithSequence(Task task, scoped_refptr<Sequence> sequence);
// Registers the worker pool in TLS.
void BindToCurrentThread();
@@ -66,8 +63,7 @@ class BASE_EXPORT SchedulerWorkerPool : public CanScheduleSequenceObserver {
// Posts |task| to be executed by this SchedulerWorkerPool as part of
// |sequence|. This must only be called after |task| has gone through
// PostTaskWithSequence() and after |task|'s delayed run time.
- void PostTaskWithSequenceNow(std::unique_ptr<Task> task,
- scoped_refptr<Sequence> sequence);
+ void PostTaskWithSequenceNow(Task task, scoped_refptr<Sequence> sequence);
TaskTracker* const task_tracker_;
DelayedTaskManager* const delayed_task_manager_;
diff --git a/chromium/base/task_scheduler/scheduler_worker_pool_impl.cc b/chromium/base/task_scheduler/scheduler_worker_pool_impl.cc
index 5e510b12aaf..fb10e392cde 100644
--- a/chromium/base/task_scheduler/scheduler_worker_pool_impl.cc
+++ b/chromium/base/task_scheduler/scheduler_worker_pool_impl.cc
@@ -16,6 +16,7 @@
#include "base/memory/ptr_util.h"
#include "base/metrics/histogram.h"
#include "base/sequence_token.h"
+#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/task_scheduler/scheduler_worker_pool_params.h"
#include "base/task_scheduler/task_tracker.h"
@@ -153,12 +154,13 @@ class SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl
};
SchedulerWorkerPoolImpl::SchedulerWorkerPoolImpl(
- const std::string& name,
+ StringPiece histogram_label,
+ StringPiece pool_label,
ThreadPriority priority_hint,
TaskTracker* task_tracker,
DelayedTaskManager* delayed_task_manager)
: SchedulerWorkerPool(task_tracker, delayed_task_manager),
- name_(name),
+ pool_label_(pool_label.as_string()),
priority_hint_(priority_hint),
lock_(shared_priority_queue_.container_lock()),
idle_workers_stack_cv_for_testing_(lock_.CreateConditionVariable()),
@@ -166,7 +168,9 @@ SchedulerWorkerPoolImpl::SchedulerWorkerPoolImpl(
WaitableEvent::InitialState::NOT_SIGNALED),
// Mimics the UMA_HISTOGRAM_LONG_TIMES macro.
detach_duration_histogram_(Histogram::FactoryTimeGet(
- kDetachDurationHistogramPrefix + name_ + kPoolNameSuffix,
+ JoinString({kDetachDurationHistogramPrefix, histogram_label,
+ kPoolNameSuffix},
+ ""),
TimeDelta::FromMilliseconds(1),
TimeDelta::FromHours(1),
50,
@@ -175,7 +179,9 @@ SchedulerWorkerPoolImpl::SchedulerWorkerPoolImpl(
// than 1000 tasks before detaching, there is no need to know the exact
// number of tasks that ran.
num_tasks_before_detach_histogram_(Histogram::FactoryGet(
- kNumTasksBeforeDetachHistogramPrefix + name_ + kPoolNameSuffix,
+ JoinString({kNumTasksBeforeDetachHistogramPrefix, histogram_label,
+ kPoolNameSuffix},
+ ""),
1,
1000,
50,
@@ -185,11 +191,16 @@ SchedulerWorkerPoolImpl::SchedulerWorkerPoolImpl(
// When it runs more than 100 tasks, there is no need to know the exact
// number of tasks that ran.
num_tasks_between_waits_histogram_(Histogram::FactoryGet(
- kNumTasksBetweenWaitsHistogramPrefix + name_ + kPoolNameSuffix,
+ JoinString({kNumTasksBetweenWaitsHistogramPrefix, histogram_label,
+ kPoolNameSuffix},
+ ""),
1,
100,
50,
- HistogramBase::kUmaTargetedHistogramFlag)) {}
+ HistogramBase::kUmaTargetedHistogramFlag)) {
+ DCHECK(!histogram_label.empty());
+ DCHECK(!pool_label_.empty());
+}
void SchedulerWorkerPoolImpl::Start(
const SchedulerWorkerPoolParams& params,
@@ -366,7 +377,7 @@ void SchedulerWorkerPoolImpl::SchedulerWorkerDelegateImpl::OnMainEntry(
DCHECK_EQ(num_tasks_since_last_wait_, 0U);
PlatformThread::SetName(
- StringPrintf("TaskScheduler%sWorker", outer_->name_.c_str()));
+ StringPrintf("TaskScheduler%sWorker", outer_->pool_label_.c_str()));
outer_->BindToCurrentThread();
SetBlockingObserverForCurrentThread(this);
diff --git a/chromium/base/task_scheduler/scheduler_worker_pool_impl.h b/chromium/base/task_scheduler/scheduler_worker_pool_impl.h
index 49a2378585f..7a3d3140227 100644
--- a/chromium/base/task_scheduler/scheduler_worker_pool_impl.h
+++ b/chromium/base/task_scheduler/scheduler_worker_pool_impl.h
@@ -16,6 +16,7 @@
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
+#include "base/strings/string_piece.h"
#include "base/synchronization/atomic_flag.h"
#include "base/synchronization/condition_variable.h"
#include "base/task_runner.h"
@@ -58,17 +59,18 @@ class BASE_EXPORT SchedulerWorkerPoolImpl : public SchedulerWorkerPool {
// Constructs a pool without workers.
//
- // |name| is used to label the pool's threads ("TaskScheduler" + |name| +
- // index) and histograms ("TaskScheduler." + histogram name + "." + |name| +
- // extra suffixes). |priority_hint| is the preferred thread priority; the
- // actual thread priority depends on shutdown state and platform capabilities.
+ // |histogram_label| is used to label the pool's histograms ("TaskScheduler."
+ // + histogram_name + "." + |histogram_label| + extra suffixes), it must not
+ // be empty. |pool_label| is used to label the pool's threads, it must not be
+ // empty. |priority_hint| is the preferred thread priority; the actual thread
+ // priority depends on shutdown state and platform capabilities.
// |task_tracker| keeps track of tasks. |delayed_task_manager| handles tasks
// posted with a delay.
- SchedulerWorkerPoolImpl(
- const std::string& name,
- ThreadPriority priority_hint,
- TaskTracker* task_tracker,
- DelayedTaskManager* delayed_task_manager);
+ SchedulerWorkerPoolImpl(StringPiece histogram_label,
+ StringPiece pool_label,
+ ThreadPriority priority_hint,
+ TaskTracker* task_tracker,
+ DelayedTaskManager* delayed_task_manager);
// Creates workers following the |params| specification, allowing existing and
// future tasks to run. Uses |service_thread_task_runner| to monitor for
@@ -204,7 +206,7 @@ class BASE_EXPORT SchedulerWorkerPoolImpl : public SchedulerWorkerPool {
void DecrementWorkerCapacityLockRequired();
void IncrementWorkerCapacityLockRequired();
- const std::string name_;
+ const std::string pool_label_;
const ThreadPriority priority_hint_;
// PriorityQueue from which all threads of this worker pool get work.
diff --git a/chromium/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc b/chromium/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc
index cc80d605c4c..43624a4cab9 100644
--- a/chromium/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc
+++ b/chromium/base/task_scheduler/scheduler_worker_pool_impl_unittest.cc
@@ -85,7 +85,7 @@ class TaskSchedulerWorkerPoolImplTestBase {
service_thread_.Start();
delayed_task_manager_.Start(service_thread_.task_runner());
worker_pool_ = std::make_unique<SchedulerWorkerPoolImpl>(
- "TestWorkerPool", ThreadPriority::NORMAL, &task_tracker_,
+ "TestWorkerPool", "A", ThreadPriority::NORMAL, &task_tracker_,
&delayed_task_manager_);
ASSERT_TRUE(worker_pool_);
}
@@ -107,7 +107,7 @@ class TaskSchedulerWorkerPoolImplTestBase {
std::unique_ptr<SchedulerWorkerPoolImpl> worker_pool_;
- TaskTracker task_tracker_;
+ TaskTracker task_tracker_ = {"Test"};
Thread service_thread_;
private:
@@ -779,13 +779,13 @@ TEST_F(TaskSchedulerWorkerPoolHistogramTest, NumTasksBeforeCleanup) {
}
TEST(TaskSchedulerWorkerPoolStandbyPolicyTest, InitOne) {
- TaskTracker task_tracker;
+ TaskTracker task_tracker("Test");
DelayedTaskManager delayed_task_manager;
scoped_refptr<TaskRunner> service_thread_task_runner =
MakeRefCounted<TestSimpleTaskRunner>();
delayed_task_manager.Start(service_thread_task_runner);
auto worker_pool = std::make_unique<SchedulerWorkerPoolImpl>(
- "OnePolicyWorkerPool", ThreadPriority::NORMAL, &task_tracker,
+ "OnePolicyWorkerPool", "A", ThreadPriority::NORMAL, &task_tracker,
&delayed_task_manager);
worker_pool->Start(SchedulerWorkerPoolParams(8U, TimeDelta::Max()),
service_thread_task_runner,
@@ -800,13 +800,13 @@ TEST(TaskSchedulerWorkerPoolStandbyPolicyTest, InitOne) {
TEST(TaskSchedulerWorkerPoolStandbyPolicyTest, VerifyStandbyThread) {
constexpr size_t worker_capacity = 3;
- TaskTracker task_tracker;
+ TaskTracker task_tracker("Test");
DelayedTaskManager delayed_task_manager;
scoped_refptr<TaskRunner> service_thread_task_runner =
MakeRefCounted<TestSimpleTaskRunner>();
delayed_task_manager.Start(service_thread_task_runner);
auto worker_pool = std::make_unique<SchedulerWorkerPoolImpl>(
- "StandbyThreadWorkerPool", ThreadPriority::NORMAL, &task_tracker,
+ "StandbyThreadWorkerPool", "A", ThreadPriority::NORMAL, &task_tracker,
&delayed_task_manager);
worker_pool->Start(
SchedulerWorkerPoolParams(worker_capacity, kReclaimTimeForCleanupTests),
@@ -968,19 +968,16 @@ class TaskSchedulerWorkerPoolBlockingTest
TestTimeouts::tiny_timeout();
}
- // Waits up to some amount of time until |worker_pool_|'s worker capacity
- // reaches |expected_worker_capacity|.
- void ExpectWorkerCapacityAfterDelay(size_t expected_worker_capacity) {
- constexpr int kMaxAttempts = 4;
- for (int i = 0;
- i < kMaxAttempts && worker_pool_->GetWorkerCapacityForTesting() !=
- expected_worker_capacity;
- ++i) {
+ // Waits indefinitely, until |worker_pool_|'s worker capacity increases to
+ // |expected_worker_capacity|.
+ void ExpectWorkerCapacityIncreasesTo(size_t expected_worker_capacity) {
+ size_t capacity = worker_pool_->GetWorkerCapacityForTesting();
+ while (capacity != expected_worker_capacity) {
PlatformThread::Sleep(GetWorkerCapacityChangeSleepTime());
+ size_t new_capacity = worker_pool_->GetWorkerCapacityForTesting();
+ ASSERT_GE(new_capacity, capacity);
+ capacity = new_capacity;
}
-
- EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
- expected_worker_capacity);
}
// Unblocks tasks posted by SaturateWithBlockingTasks().
@@ -1004,7 +1001,7 @@ TEST_P(TaskSchedulerWorkerPoolBlockingTest, ThreadBlockedUnblocked) {
SaturateWithBlockingTasks(GetParam());
if (GetParam().behaves_as == BlockingType::MAY_BLOCK)
- ExpectWorkerCapacityAfterDelay(2 * kNumWorkersInWorkerPool);
+ ExpectWorkerCapacityIncreasesTo(2 * kNumWorkersInWorkerPool);
// A range of possible number of workers is accepted because of
// crbug.com/757897.
EXPECT_GE(worker_pool_->NumberOfWorkersForTesting(),
@@ -1100,7 +1097,7 @@ TEST_P(TaskSchedulerWorkerPoolBlockingTest, PostBeforeBlocking) {
// tasks we just posted.
thread_can_block.Signal();
if (GetParam().behaves_as == BlockingType::MAY_BLOCK)
- ExpectWorkerCapacityAfterDelay(2 * kNumWorkersInWorkerPool);
+ ExpectWorkerCapacityIncreasesTo(2 * kNumWorkersInWorkerPool);
// Should not block forever.
extra_thread_running.Wait();
@@ -1119,7 +1116,7 @@ TEST_P(TaskSchedulerWorkerPoolBlockingTest, WorkersIdleWhenOverCapacity) {
SaturateWithBlockingTasks(GetParam());
if (GetParam().behaves_as == BlockingType::MAY_BLOCK)
- ExpectWorkerCapacityAfterDelay(2 * kNumWorkersInWorkerPool);
+ ExpectWorkerCapacityIncreasesTo(2 * kNumWorkersInWorkerPool);
EXPECT_EQ(worker_pool_->GetWorkerCapacityForTesting(),
2 * kNumWorkersInWorkerPool);
// A range of possible number of workers is accepted because of
@@ -1303,7 +1300,7 @@ TEST_F(TaskSchedulerWorkerPoolBlockingTest,
Unretained(&did_instantiate_will_block), Unretained(&can_return)));
// After a short delay, worker capacity should be incremented.
- ExpectWorkerCapacityAfterDelay(kNumWorkersInWorkerPool + 1);
+ ExpectWorkerCapacityIncreasesTo(kNumWorkersInWorkerPool + 1);
// Wait until the task instantiates a WILL_BLOCK ScopedBlockingCall.
can_instantiate_will_block.Signal();
@@ -1325,12 +1322,12 @@ TEST_F(TaskSchedulerWorkerPoolBlockingTest,
TEST(TaskSchedulerWorkerPoolOverWorkerCapacityTest, VerifyCleanup) {
constexpr size_t kWorkerCapacity = 3;
- TaskTracker task_tracker;
+ TaskTracker task_tracker("Test");
DelayedTaskManager delayed_task_manager;
scoped_refptr<TaskRunner> service_thread_task_runner =
MakeRefCounted<TestSimpleTaskRunner>();
delayed_task_manager.Start(service_thread_task_runner);
- SchedulerWorkerPoolImpl worker_pool("OverWorkerCapacityTestWorkerPool",
+ SchedulerWorkerPoolImpl worker_pool("OverWorkerCapacityTestWorkerPool", "A",
ThreadPriority::NORMAL, &task_tracker,
&delayed_task_manager);
worker_pool.Start(
diff --git a/chromium/base/task_scheduler/scheduler_worker_pool_unittest.cc b/chromium/base/task_scheduler/scheduler_worker_pool_unittest.cc
index 818af0dde78..053abf7bb94 100644
--- a/chromium/base/task_scheduler/scheduler_worker_pool_unittest.cc
+++ b/chromium/base/task_scheduler/scheduler_worker_pool_unittest.cc
@@ -111,7 +111,7 @@ class TaskSchedulerWorkerPoolTest
switch (GetParam().pool_type) {
case PoolType::GENERIC:
worker_pool_ = std::make_unique<SchedulerWorkerPoolImpl>(
- "TestWorkerPool", ThreadPriority::NORMAL, &task_tracker_,
+ "TestWorkerPool", "A", ThreadPriority::NORMAL, &task_tracker_,
&delayed_task_manager_);
break;
#if defined(OS_WIN)
@@ -150,7 +150,7 @@ class TaskSchedulerWorkerPoolTest
std::unique_ptr<SchedulerWorkerPool> worker_pool_;
- TaskTracker task_tracker_;
+ TaskTracker task_tracker_ = {"Test"};
Thread service_thread_;
DelayedTaskManager delayed_task_manager_;
diff --git a/chromium/base/task_scheduler/scheduler_worker_stack_unittest.cc b/chromium/base/task_scheduler/scheduler_worker_stack_unittest.cc
index ededad0f060..204320297af 100644
--- a/chromium/base/task_scheduler/scheduler_worker_stack_unittest.cc
+++ b/chromium/base/task_scheduler/scheduler_worker_stack_unittest.cc
@@ -62,7 +62,7 @@ class TaskSchedulerWorkerStackTest : public testing::Test {
scoped_refptr<SchedulerWorker> worker_c_;
private:
- TaskTracker task_tracker_;
+ TaskTracker task_tracker_ = {"Test"};
};
} // namespace
diff --git a/chromium/base/task_scheduler/scheduler_worker_unittest.cc b/chromium/base/task_scheduler/scheduler_worker_unittest.cc
index 21b482dcfb8..a890621ca46 100644
--- a/chromium/base/task_scheduler/scheduler_worker_unittest.cc
+++ b/chromium/base/task_scheduler/scheduler_worker_unittest.cc
@@ -176,12 +176,11 @@ class TaskSchedulerWorkerTest : public testing::TestWithParam<size_t> {
// Create a Sequence with TasksPerSequence() Tasks.
scoped_refptr<Sequence> sequence(new Sequence);
for (size_t i = 0; i < outer_->TasksPerSequence(); ++i) {
- std::unique_ptr<Task> task(
- new Task(FROM_HERE,
- BindOnce(&TaskSchedulerWorkerTest::RunTaskCallback,
- Unretained(outer_)),
- TaskTraits(), TimeDelta()));
- EXPECT_TRUE(outer_->task_tracker_.WillPostTask(task.get()));
+ Task task(FROM_HERE,
+ BindOnce(&TaskSchedulerWorkerTest::RunTaskCallback,
+ Unretained(outer_)),
+ TaskTraits(), TimeDelta());
+ EXPECT_TRUE(outer_->task_tracker_.WillPostTask(task));
sequence->PushTask(std::move(task));
}
@@ -256,7 +255,7 @@ class TaskSchedulerWorkerTest : public testing::TestWithParam<size_t> {
EXPECT_LE(num_run_tasks_, created_sequences_.size());
}
- TaskTracker task_tracker_;
+ TaskTracker task_tracker_ = {"Test"};
// Synchronizes access to all members below.
mutable SchedulerLock lock_;
@@ -446,7 +445,7 @@ class ControllableCleanupDelegate : public SchedulerWorkerDefaultDelegate {
controls_->work_requested_ = true;
scoped_refptr<Sequence> sequence(new Sequence);
- std::unique_ptr<Task> task(new Task(
+ Task task(
FROM_HERE,
BindOnce(
[](WaitableEvent* work_processed, WaitableEvent* work_running) {
@@ -456,8 +455,8 @@ class ControllableCleanupDelegate : public SchedulerWorkerDefaultDelegate {
Unretained(&controls_->work_processed_),
Unretained(&controls_->work_running_)),
{WithBaseSyncPrimitives(), TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN},
- TimeDelta()));
- EXPECT_TRUE(task_tracker_->WillPostTask(task.get()));
+ TimeDelta());
+ EXPECT_TRUE(task_tracker_->WillPostTask(task));
sequence->PushTask(std::move(task));
sequence =
task_tracker_->WillScheduleSequence(std::move(sequence), nullptr);
@@ -513,7 +512,7 @@ class MockedControllableCleanupDelegate : public ControllableCleanupDelegate {
// Verify that calling SchedulerWorker::Cleanup() from GetWork() causes
// the SchedulerWorker's thread to exit.
TEST(TaskSchedulerWorkerTest, WorkerCleanupFromGetWork) {
- TaskTracker task_tracker;
+ TaskTracker task_tracker("Test");
// Will be owned by SchedulerWorker.
MockedControllableCleanupDelegate* delegate =
new StrictMock<MockedControllableCleanupDelegate>(&task_tracker);
@@ -531,7 +530,7 @@ TEST(TaskSchedulerWorkerTest, WorkerCleanupFromGetWork) {
}
TEST(TaskSchedulerWorkerTest, WorkerCleanupDuringWork) {
- TaskTracker task_tracker;
+ TaskTracker task_tracker("Test");
// Will be owned by SchedulerWorker.
// No mock here as that's reasonably covered by other tests and the delegate
// may destroy on a different thread. Mocks aren't designed with that in mind.
@@ -555,7 +554,7 @@ TEST(TaskSchedulerWorkerTest, WorkerCleanupDuringWork) {
}
TEST(TaskSchedulerWorkerTest, WorkerCleanupDuringWait) {
- TaskTracker task_tracker;
+ TaskTracker task_tracker("Test");
// Will be owned by SchedulerWorker.
// No mock here as that's reasonably covered by other tests and the delegate
// may destroy on a different thread. Mocks aren't designed with that in mind.
@@ -576,7 +575,7 @@ TEST(TaskSchedulerWorkerTest, WorkerCleanupDuringWait) {
}
TEST(TaskSchedulerWorkerTest, WorkerCleanupDuringShutdown) {
- TaskTracker task_tracker;
+ TaskTracker task_tracker("Test");
// Will be owned by SchedulerWorker.
// No mock here as that's reasonably covered by other tests and the delegate
// may destroy on a different thread. Mocks aren't designed with that in mind.
@@ -602,7 +601,7 @@ TEST(TaskSchedulerWorkerTest, WorkerCleanupDuringShutdown) {
// Verify that Start() is a no-op after Cleanup().
TEST(TaskSchedulerWorkerTest, CleanupBeforeStart) {
- TaskTracker task_tracker;
+ TaskTracker task_tracker("Test");
// Will be owned by SchedulerWorker.
// No mock here as that's reasonably covered by other tests and the delegate
// may destroy on a different thread. Mocks aren't designed with that in mind.
@@ -649,7 +648,7 @@ class CallJoinFromDifferentThread : public SimpleThread {
} // namespace
TEST(TaskSchedulerWorkerTest, WorkerCleanupDuringJoin) {
- TaskTracker task_tracker;
+ TaskTracker task_tracker("Test");
// Will be owned by SchedulerWorker.
// No mock here as that's reasonably covered by other tests and the
// delegate may destroy on a different thread. Mocks aren't designed with that
@@ -730,7 +729,7 @@ class ExpectThreadPriorityDelegate : public SchedulerWorkerDefaultDelegate {
} // namespace
TEST(TaskSchedulerWorkerTest, BumpPriorityOfAliveThreadDuringShutdown) {
- TaskTracker task_tracker;
+ TaskTracker task_tracker("Test");
std::unique_ptr<ExpectThreadPriorityDelegate> delegate(
new ExpectThreadPriorityDelegate);
@@ -794,7 +793,7 @@ class CoInitializeDelegate : public SchedulerWorkerDefaultDelegate {
} // namespace
TEST(TaskSchedulerWorkerTest, BackwardCompatibilityEnabled) {
- TaskTracker task_tracker;
+ TaskTracker task_tracker("Test");
auto delegate = std::make_unique<CoInitializeDelegate>();
CoInitializeDelegate* const delegate_raw = delegate.get();
@@ -821,7 +820,7 @@ TEST(TaskSchedulerWorkerTest, BackwardCompatibilityEnabled) {
}
TEST(TaskSchedulerWorkerTest, BackwardCompatibilityDisabled) {
- TaskTracker task_tracker;
+ TaskTracker task_tracker("Test");
auto delegate = std::make_unique<CoInitializeDelegate>();
CoInitializeDelegate* const delegate_raw = delegate.get();
diff --git a/chromium/base/task_scheduler/sequence.cc b/chromium/base/task_scheduler/sequence.cc
index 4db5478fe4a..4737f8eab10 100644
--- a/chromium/base/task_scheduler/sequence.cc
+++ b/chromium/base/task_scheduler/sequence.cc
@@ -14,30 +14,27 @@ namespace internal {
Sequence::Sequence() = default;
-bool Sequence::PushTask(std::unique_ptr<Task> task) {
- DCHECK(task);
-
+bool Sequence::PushTask(Task task) {
// Use CHECK instead of DCHECK to crash earlier. See http://crbug.com/711167
// for details.
- CHECK(task->task);
- DCHECK(task->sequenced_time.is_null());
- task->sequenced_time = base::TimeTicks::Now();
+ CHECK(task.task);
+ DCHECK(task.sequenced_time.is_null());
+ task.sequenced_time = base::TimeTicks::Now();
AutoSchedulerLock auto_lock(lock_);
- ++num_tasks_per_priority_[static_cast<int>(task->traits.priority())];
+ ++num_tasks_per_priority_[static_cast<int>(task.traits.priority())];
queue_.push(std::move(task));
// Return true if the sequence was empty before the push.
return queue_.size() == 1;
}
-std::unique_ptr<Task> Sequence::TakeTask() {
+Optional<Task> Sequence::TakeTask() {
AutoSchedulerLock auto_lock(lock_);
DCHECK(!queue_.empty());
- DCHECK(queue_.front());
+ DCHECK(queue_.front().task);
- const int priority_index =
- static_cast<int>(queue_.front()->traits.priority());
+ const int priority_index = static_cast<int>(queue_.front().traits.priority());
DCHECK_GT(num_tasks_per_priority_[priority_index], 0U);
--num_tasks_per_priority_[priority_index];
@@ -47,14 +44,14 @@ std::unique_ptr<Task> Sequence::TakeTask() {
TaskTraits Sequence::PeekTaskTraits() const {
AutoSchedulerLock auto_lock(lock_);
DCHECK(!queue_.empty());
- DCHECK(queue_.front());
- return queue_.front()->traits;
+ DCHECK(queue_.front().task);
+ return queue_.front().traits;
}
bool Sequence::Pop() {
AutoSchedulerLock auto_lock(lock_);
DCHECK(!queue_.empty());
- DCHECK(!queue_.front());
+ DCHECK(!queue_.front().task);
queue_.pop();
return queue_.empty();
}
@@ -78,7 +75,7 @@ SequenceSortKey Sequence::GetSortKey() const {
}
// Save the sequenced time of the next task in the sequence.
- next_task_sequenced_time = queue_.front()->sequenced_time;
+ next_task_sequenced_time = queue_.front().sequenced_time;
}
return SequenceSortKey(priority, next_task_sequenced_time);
diff --git a/chromium/base/task_scheduler/sequence.h b/chromium/base/task_scheduler/sequence.h
index feec70a5fe2..ec5e8c15092 100644
--- a/chromium/base/task_scheduler/sequence.h
+++ b/chromium/base/task_scheduler/sequence.h
@@ -7,12 +7,11 @@
#include <stddef.h>
-#include <memory>
-
#include "base/base_export.h"
#include "base/containers/queue.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
+#include "base/optional.h"
#include "base/sequence_token.h"
#include "base/task_scheduler/scheduler_lock.h"
#include "base/task_scheduler/sequence_sort_key.h"
@@ -47,13 +46,18 @@ class BASE_EXPORT Sequence : public RefCountedThreadSafe<Sequence> {
// Adds |task| in a new slot at the end of the Sequence. Returns true if the
// Sequence was empty before this operation.
- bool PushTask(std::unique_ptr<Task> task);
+ bool PushTask(Task task);
// Transfers ownership of the Task in the front slot of the Sequence to the
// caller. The front slot of the Sequence will be nullptr and remain until
// Pop(). Cannot be called on an empty Sequence or a Sequence whose front slot
// is already nullptr.
- std::unique_ptr<Task> TakeTask();
+ //
+ // Because this method cannot be called on an empty Sequence, the returned
+ // Optional<Task> is never nullptr. An Optional is used in preparation for the
+ // merge between TaskScheduler and TaskQueueManager (in Blink).
+ // https://crbug.com/783309
+ Optional<Task> TakeTask();
// Returns the TaskTraits of the Task in front of the Sequence. Cannot be
// called on an empty Sequence or on a Sequence whose front slot is empty.
@@ -85,7 +89,7 @@ class BASE_EXPORT Sequence : public RefCountedThreadSafe<Sequence> {
mutable SchedulerLock lock_;
// Queue of tasks to execute.
- base::queue<std::unique_ptr<Task>> queue_;
+ base::queue<Task> queue_;
// Number of tasks contained in the Sequence for each priority.
size_t num_tasks_per_priority_[static_cast<int>(TaskPriority::HIGHEST) + 1] =
diff --git a/chromium/base/task_scheduler/sequence_unittest.cc b/chromium/base/task_scheduler/sequence_unittest.cc
index bd4959a0991..aa634a0c46e 100644
--- a/chromium/base/task_scheduler/sequence_unittest.cc
+++ b/chromium/base/task_scheduler/sequence_unittest.cc
@@ -8,10 +8,10 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
-#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "base/test/gtest_util.h"
#include "base/time/time.h"
+#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
@@ -19,201 +19,154 @@ namespace internal {
namespace {
-class TaskSchedulerSequenceTest : public testing::Test {
+class MockTask {
public:
- TaskSchedulerSequenceTest()
- : task_a_owned_(new Task(FROM_HERE,
- BindOnce(&DoNothing),
- {TaskPriority::BACKGROUND},
- TimeDelta())),
- task_b_owned_(new Task(FROM_HERE,
- BindOnce(&DoNothing),
- {TaskPriority::USER_VISIBLE},
- TimeDelta())),
- task_c_owned_(new Task(FROM_HERE,
- BindOnce(&DoNothing),
- {TaskPriority::USER_BLOCKING},
- TimeDelta())),
- task_d_owned_(new Task(FROM_HERE,
- BindOnce(&DoNothing),
- {TaskPriority::USER_BLOCKING},
- TimeDelta())),
- task_e_owned_(new Task(FROM_HERE,
- BindOnce(&DoNothing),
- {TaskPriority::BACKGROUND},
- TimeDelta())),
- task_a_(task_a_owned_.get()),
- task_b_(task_b_owned_.get()),
- task_c_(task_c_owned_.get()),
- task_d_(task_d_owned_.get()),
- task_e_(task_e_owned_.get()) {}
-
- protected:
- // Tasks to be handed off to a Sequence for testing.
- std::unique_ptr<Task> task_a_owned_;
- std::unique_ptr<Task> task_b_owned_;
- std::unique_ptr<Task> task_c_owned_;
- std::unique_ptr<Task> task_d_owned_;
- std::unique_ptr<Task> task_e_owned_;
-
- // Raw pointers to those same tasks for verification. This is needed because
- // the unique_ptrs above no longer point to the tasks once they have been
- // moved into a Sequence.
- const Task* task_a_;
- const Task* task_b_;
- const Task* task_c_;
- const Task* task_d_;
- const Task* task_e_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(TaskSchedulerSequenceTest);
+ MOCK_METHOD0(Run, void());
};
-} // namespace
+Task CreateTask(MockTask* mock_task) {
+ return Task(FROM_HERE, BindOnce(&MockTask::Run, Unretained(mock_task)),
+ {TaskPriority::BACKGROUND}, TimeDelta());
+}
+
+void ExpectMockTask(MockTask* mock_task, Task* task) {
+ EXPECT_CALL(*mock_task, Run());
+ std::move(task->task).Run();
+ testing::Mock::VerifyAndClear(mock_task);
+}
-TEST_F(TaskSchedulerSequenceTest, PushTakeRemove) {
- scoped_refptr<Sequence> sequence(new Sequence);
+} // namespace
- // Push task A in the sequence. Its sequenced time should be updated and it
- // should be in front of the sequence.
- EXPECT_TRUE(sequence->PushTask(std::move(task_a_owned_)));
- EXPECT_FALSE(task_a_->sequenced_time.is_null());
- EXPECT_EQ(task_a_->traits.priority(), sequence->PeekTaskTraits().priority());
+TEST(TaskSchedulerSequenceTest, PushTakeRemove) {
+ testing::StrictMock<MockTask> mock_task_a;
+ testing::StrictMock<MockTask> mock_task_b;
+ testing::StrictMock<MockTask> mock_task_c;
+ testing::StrictMock<MockTask> mock_task_d;
+ testing::StrictMock<MockTask> mock_task_e;
- // Push task B, C and D in the sequence. Their sequenced time should be
- // updated and task A should always remain in front of the sequence.
- EXPECT_FALSE(sequence->PushTask(std::move(task_b_owned_)));
- EXPECT_FALSE(task_b_->sequenced_time.is_null());
- EXPECT_EQ(task_a_->traits.priority(), sequence->PeekTaskTraits().priority());
+ scoped_refptr<Sequence> sequence = MakeRefCounted<Sequence>();
- EXPECT_FALSE(sequence->PushTask(std::move(task_c_owned_)));
- EXPECT_FALSE(task_c_->sequenced_time.is_null());
- EXPECT_EQ(task_a_->traits.priority(), sequence->PeekTaskTraits().priority());
+ // Push task A in the sequence. PushTask() should return true since it's the
+ // first task.
+ EXPECT_TRUE(sequence->PushTask(CreateTask(&mock_task_a)));
- EXPECT_FALSE(sequence->PushTask(std::move(task_d_owned_)));
- EXPECT_FALSE(task_d_->sequenced_time.is_null());
- EXPECT_EQ(task_a_->traits.priority(), sequence->PeekTaskTraits().priority());
+ // Push task B, C and D in the sequence. PushTask() should return false since
+ // there is already a task in the sequence.
+ EXPECT_FALSE(sequence->PushTask(CreateTask(&mock_task_b)));
+ EXPECT_FALSE(sequence->PushTask(CreateTask(&mock_task_c)));
+ EXPECT_FALSE(sequence->PushTask(CreateTask(&mock_task_d)));
- // Get the task in front of the sequence. It should be task A.
- EXPECT_EQ(task_a_, sequence->TakeTask().get());
+ // Take the task in front of the sequence. It should be task A.
+ Optional<Task> task = sequence->TakeTask();
+ ExpectMockTask(&mock_task_a, &task.value());
+ EXPECT_FALSE(task->sequenced_time.is_null());
// Remove the empty slot. Task B should now be in front.
EXPECT_FALSE(sequence->Pop());
- EXPECT_EQ(task_b_, sequence->TakeTask().get());
+ task = sequence->TakeTask();
+ ExpectMockTask(&mock_task_b, &task.value());
+ EXPECT_FALSE(task->sequenced_time.is_null());
// Remove the empty slot. Task C should now be in front.
EXPECT_FALSE(sequence->Pop());
- EXPECT_EQ(task_c_, sequence->TakeTask().get());
+ task = sequence->TakeTask();
+ ExpectMockTask(&mock_task_c, &task.value());
+ EXPECT_FALSE(task->sequenced_time.is_null());
- // Remove the empty slot. Task D should now be in front.
+ // Remove the empty slot.
EXPECT_FALSE(sequence->Pop());
- EXPECT_EQ(task_d_, sequence->TakeTask().get());
- // Push task E in the sequence. Its sequenced time should be updated.
- EXPECT_FALSE(sequence->PushTask(std::move(task_e_owned_)));
- EXPECT_FALSE(task_e_->sequenced_time.is_null());
+ // Push task E in the sequence.
+ EXPECT_FALSE(sequence->PushTask(CreateTask(&mock_task_e)));
+
+ // Task D should be in front.
+ task = sequence->TakeTask();
+ ExpectMockTask(&mock_task_d, &task.value());
+ EXPECT_FALSE(task->sequenced_time.is_null());
// Remove the empty slot. Task E should now be in front.
EXPECT_FALSE(sequence->Pop());
- EXPECT_EQ(task_e_, sequence->TakeTask().get());
+ task = sequence->TakeTask();
+ ExpectMockTask(&mock_task_e, &task.value());
+ EXPECT_FALSE(task->sequenced_time.is_null());
// Remove the empty slot. The sequence should now be empty.
EXPECT_TRUE(sequence->Pop());
}
-TEST_F(TaskSchedulerSequenceTest, GetSortKey) {
- scoped_refptr<Sequence> sequence(new Sequence);
-
- // Push task A in the sequence. The highest priority is from task A
- // (BACKGROUND). Task A is in front of the sequence.
- sequence->PushTask(std::move(task_a_owned_));
- EXPECT_EQ(SequenceSortKey(TaskPriority::BACKGROUND, task_a_->sequenced_time),
- sequence->GetSortKey());
-
- // Push task B in the sequence. The highest priority is from task B
- // (USER_VISIBLE). Task A is still in front of the sequence.
- sequence->PushTask(std::move(task_b_owned_));
- EXPECT_EQ(
- SequenceSortKey(TaskPriority::USER_VISIBLE, task_a_->sequenced_time),
- sequence->GetSortKey());
-
- // Push task C in the sequence. The highest priority is from task C
- // (USER_BLOCKING). Task A is still in front of the sequence.
- sequence->PushTask(std::move(task_c_owned_));
- EXPECT_EQ(
- SequenceSortKey(TaskPriority::USER_BLOCKING, task_a_->sequenced_time),
- sequence->GetSortKey());
-
- // Push task D in the sequence. The highest priority is from tasks C/D
- // (USER_BLOCKING). Task A is still in front of the sequence.
- sequence->PushTask(std::move(task_d_owned_));
- EXPECT_EQ(
- SequenceSortKey(TaskPriority::USER_BLOCKING, task_a_->sequenced_time),
- sequence->GetSortKey());
-
- // Pop task A. The highest priority is still USER_BLOCKING. The task in front
- // of the sequence is now task B.
- sequence->TakeTask();
- sequence->Pop();
- EXPECT_EQ(
- SequenceSortKey(TaskPriority::USER_BLOCKING, task_b_->sequenced_time),
- sequence->GetSortKey());
-
- // Pop task B. The highest priority is still USER_BLOCKING. The task in front
- // of the sequence is now task C.
- sequence->TakeTask();
- sequence->Pop();
- EXPECT_EQ(
- SequenceSortKey(TaskPriority::USER_BLOCKING, task_c_->sequenced_time),
- sequence->GetSortKey());
-
- // Pop task C. The highest priority is still USER_BLOCKING. The task in front
- // of the sequence is now task D.
- sequence->TakeTask();
- sequence->Pop();
- EXPECT_EQ(
- SequenceSortKey(TaskPriority::USER_BLOCKING, task_d_->sequenced_time),
- sequence->GetSortKey());
-
- // Push task E in the sequence. The highest priority is still USER_BLOCKING.
- // The task in front of the sequence is still task D.
- sequence->PushTask(std::move(task_e_owned_));
- EXPECT_EQ(
- SequenceSortKey(TaskPriority::USER_BLOCKING, task_d_->sequenced_time),
- sequence->GetSortKey());
-
- // Pop task D. The highest priority is now from task E (BACKGROUND). The
- // task in front of the sequence is now task E.
- sequence->TakeTask();
- sequence->Pop();
- EXPECT_EQ(SequenceSortKey(TaskPriority::BACKGROUND, task_e_->sequenced_time),
- sequence->GetSortKey());
+// Verifies the sort key of a sequence that contains one BACKGROUND task.
+TEST(TaskSchedulerSequenceTest, GetSortKeyBackground) {
+ // Create a sequence with a BACKGROUND task.
+ Task background_task(FROM_HERE, BindOnce(&DoNothing),
+ {TaskPriority::BACKGROUND}, TimeDelta());
+ scoped_refptr<Sequence> background_sequence = MakeRefCounted<Sequence>();
+ background_sequence->PushTask(std::move(background_task));
+
+ // Get the sort key.
+ const SequenceSortKey background_sort_key = background_sequence->GetSortKey();
+
+ // Take the task from the sequence, so that its sequenced time is available
+ // for the check below.
+ auto take_background_task = background_sequence->TakeTask();
+
+ // Verify the sort key.
+ EXPECT_EQ(TaskPriority::BACKGROUND, background_sort_key.priority());
+ EXPECT_EQ(take_background_task->sequenced_time,
+ background_sort_key.next_task_sequenced_time());
+
+ // Pop for correctness.
+ background_sequence->Pop();
+}
+
+// Same as TaskSchedulerSequenceTest.GetSortKeyBackground, but with a
+// USER_VISIBLE task.
+TEST(TaskSchedulerSequenceTest, GetSortKeyForeground) {
+ // Create a sequence with a USER_VISIBLE task.
+ Task foreground_task(FROM_HERE, BindOnce(&DoNothing),
+ {TaskPriority::USER_VISIBLE}, TimeDelta());
+ scoped_refptr<Sequence> foreground_sequence = MakeRefCounted<Sequence>();
+ foreground_sequence->PushTask(std::move(foreground_task));
+
+ // Get the sort key.
+ const SequenceSortKey foreground_sort_key = foreground_sequence->GetSortKey();
+
+ // Take the task from the sequence, so that its sequenced time is available
+ // for the check below.
+ auto take_foreground_task = foreground_sequence->TakeTask();
+
+ // Verify the sort key.
+ EXPECT_EQ(TaskPriority::USER_VISIBLE, foreground_sort_key.priority());
+ EXPECT_EQ(take_foreground_task->sequenced_time,
+ foreground_sort_key.next_task_sequenced_time());
+
+ // Pop for correctness.
+ foreground_sequence->Pop();
}
// Verify that a DCHECK fires if Pop() is called on a sequence whose front slot
// isn't empty.
-TEST_F(TaskSchedulerSequenceTest, PopNonEmptyFrontSlot) {
- scoped_refptr<Sequence> sequence(new Sequence);
- sequence->PushTask(std::make_unique<Task>(FROM_HERE, Bind(&DoNothing),
- TaskTraits(), TimeDelta()));
+TEST(TaskSchedulerSequenceTest, PopNonEmptyFrontSlot) {
+ scoped_refptr<Sequence> sequence = MakeRefCounted<Sequence>();
+ sequence->PushTask(
+ Task(FROM_HERE, Bind(&DoNothing), TaskTraits(), TimeDelta()));
EXPECT_DCHECK_DEATH({ sequence->Pop(); });
}
// Verify that a DCHECK fires if TakeTask() is called on a sequence whose front
// slot is empty.
-TEST_F(TaskSchedulerSequenceTest, TakeEmptyFrontSlot) {
- scoped_refptr<Sequence> sequence(new Sequence);
- sequence->PushTask(std::make_unique<Task>(FROM_HERE, Bind(&DoNothing),
- TaskTraits(), TimeDelta()));
+TEST(TaskSchedulerSequenceTest, TakeEmptyFrontSlot) {
+ scoped_refptr<Sequence> sequence = MakeRefCounted<Sequence>();
+ sequence->PushTask(
+ Task(FROM_HERE, Bind(&DoNothing), TaskTraits(), TimeDelta()));
EXPECT_TRUE(sequence->TakeTask());
EXPECT_DCHECK_DEATH({ sequence->TakeTask(); });
}
// Verify that a DCHECK fires if TakeTask() is called on an empty sequence.
-TEST_F(TaskSchedulerSequenceTest, TakeEmptySequence) {
- scoped_refptr<Sequence> sequence(new Sequence);
+TEST(TaskSchedulerSequenceTest, TakeEmptySequence) {
+ scoped_refptr<Sequence> sequence = MakeRefCounted<Sequence>();
EXPECT_DCHECK_DEATH({ sequence->TakeTask(); });
}
diff --git a/chromium/base/task_scheduler/switches.cc b/chromium/base/task_scheduler/switches.cc
deleted file mode 100644
index 4dd3bbc2b4a..00000000000
--- a/chromium/base/task_scheduler/switches.cc
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/task_scheduler/switches.h"
-
-namespace switches {
-
-const char kDisableBrowserTaskScheduler[] = "disable-browser-task-scheduler";
-const char kEnableBrowserTaskScheduler[] = "enable-browser-task-scheduler";
-
-} // namespace switches
diff --git a/chromium/base/task_scheduler/switches.h b/chromium/base/task_scheduler/switches.h
deleted file mode 100644
index 967c7260c71..00000000000
--- a/chromium/base/task_scheduler/switches.h
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2016 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TASK_SCHEDULER_SWITCHES_H_
-#define BASE_TASK_SCHEDULER_SWITCHES_H_
-
-namespace switches {
-
-extern const char kDisableBrowserTaskScheduler[];
-extern const char kEnableBrowserTaskScheduler[];
-
-} // namespace switches
-
-#endif // BASE_TASK_SCHEDULER_SWITCHES_H_
diff --git a/chromium/base/task_scheduler/task.cc b/chromium/base/task_scheduler/task.cc
index e4d35a35b7b..cf21aacb718 100644
--- a/chromium/base/task_scheduler/task.cc
+++ b/chromium/base/task_scheduler/task.cc
@@ -45,5 +45,7 @@ Task::Task(Task&& other) noexcept
Task::~Task() = default;
+Task& Task::operator=(Task&& other) = default;
+
} // namespace internal
} // namespace base
diff --git a/chromium/base/task_scheduler/task.h b/chromium/base/task_scheduler/task.h
index 5e74072248c..3e937a8a413 100644
--- a/chromium/base/task_scheduler/task.h
+++ b/chromium/base/task_scheduler/task.h
@@ -35,15 +35,16 @@ struct BASE_EXPORT Task : public PendingTask {
// Task is move-only to avoid mistakes that cause reference counts to be
// accidentally bumped.
Task(Task&& other) noexcept;
- Task(const Task&) = delete;
~Task();
+ Task& operator=(Task&& other);
+
// The TaskTraits of this task.
- const TaskTraits traits;
+ TaskTraits traits;
// The delay that must expire before the task runs.
- const TimeDelta delay;
+ TimeDelta delay;
// The time at which the task was inserted in its sequence. For an undelayed
// task, this happens at post time. For a delayed task, this happens some
@@ -62,6 +63,9 @@ struct BASE_EXPORT Task : public PendingTask {
// support TaskRunnerHandles.
scoped_refptr<SequencedTaskRunner> sequenced_task_runner_ref;
scoped_refptr<SingleThreadTaskRunner> single_thread_task_runner_ref;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(Task);
};
} // namespace internal
diff --git a/chromium/base/task_scheduler/task_scheduler.h b/chromium/base/task_scheduler/task_scheduler.h
index 56393ab2e4f..6d67f25e8ca 100644
--- a/chromium/base/task_scheduler/task_scheduler.h
+++ b/chromium/base/task_scheduler/task_scheduler.h
@@ -172,9 +172,9 @@ class BASE_EXPORT TaskScheduler {
#if !defined(OS_NACL)
// Creates and starts a task scheduler using default params. |name| is used to
- // label threads and histograms. It should identify the component that calls
- // this. Start() is called by this method; it is invalid to call it again
- // afterwards. CHECKs on failure. For tests, prefer
+ // label histograms, it must not be empty. It should identify the component
+ // that calls this. Start() is called by this method; it is invalid to call it
+ // again afterwards. CHECKs on failure. For tests, prefer
// base::test::ScopedTaskEnvironment (ensures isolation).
static void CreateAndStartWithDefaultParams(StringPiece name);
@@ -183,12 +183,12 @@ class BASE_EXPORT TaskScheduler {
void StartWithDefaultParams();
#endif // !defined(OS_NACL)
- // Creates a ready to start task scheduler. |name| is used to label threads
- // and histograms. It should identify the component that creates the
- // TaskScheduler. The task scheduler doesn't create threads until Start() is
- // called. Tasks can be posted at any time but will not run until after
- // Start() is called. For tests, prefer base::test::ScopedTaskEnvironment
- // (ensures isolation).
+ // Creates a ready to start task scheduler. |name| is used to label
+ // histograms, it must not be empty. It should identify the component that
+ // creates the TaskScheduler. The task scheduler doesn't create threads until
+ // Start() is called. Tasks can be posted at any time but will not run until
+ // after Start() is called. For tests, prefer
+ // base::test::ScopedTaskEnvironment (ensures isolation).
static void Create(StringPiece name);
// Registers |task_scheduler| to handle tasks posted through the post_task.h
diff --git a/chromium/base/task_scheduler/task_scheduler_impl.cc b/chromium/base/task_scheduler/task_scheduler_impl.cc
index 942c844c953..cf514bc00eb 100644
--- a/chromium/base/task_scheduler/task_scheduler_impl.cc
+++ b/chromium/base/task_scheduler/task_scheduler_impl.cc
@@ -4,9 +4,11 @@
#include "base/task_scheduler/task_scheduler_impl.h"
+#include <string>
#include <utility>
#include "base/metrics/field_trial_params.h"
+#include "base/strings/string_util.h"
#include "base/task_scheduler/delayed_task_manager.h"
#include "base/task_scheduler/environment_config.h"
#include "base/task_scheduler/scheduler_worker_pool_params.h"
@@ -18,14 +20,19 @@
namespace base {
namespace internal {
+TaskSchedulerImpl::TaskSchedulerImpl(StringPiece histogram_label)
+ : TaskSchedulerImpl(histogram_label,
+ std::make_unique<TaskTrackerImpl>(histogram_label)) {}
+
TaskSchedulerImpl::TaskSchedulerImpl(
- StringPiece name,
+ StringPiece histogram_label,
std::unique_ptr<TaskTrackerImpl> task_tracker)
- : name_(name),
- service_thread_("TaskSchedulerServiceThread"),
+ : service_thread_("TaskSchedulerServiceThread"),
task_tracker_(std::move(task_tracker)),
single_thread_task_runner_manager_(task_tracker_.get(),
&delayed_task_manager_) {
+ DCHECK(!histogram_label.empty());
+
static_assert(arraysize(worker_pools_) == ENVIRONMENT_COUNT,
"The size of |worker_pools_| must match ENVIRONMENT_COUNT.");
static_assert(
@@ -35,7 +42,10 @@ TaskSchedulerImpl::TaskSchedulerImpl(
for (int environment_type = 0; environment_type < ENVIRONMENT_COUNT;
++environment_type) {
worker_pools_[environment_type] = std::make_unique<SchedulerWorkerPoolImpl>(
- name_ + kEnvironmentParams[environment_type].name_suffix,
+ JoinString(
+ {histogram_label, kEnvironmentParams[environment_type].name_suffix},
+ "."),
+ kEnvironmentParams[environment_type].name_suffix,
kEnvironmentParams[environment_type].priority_hint, task_tracker_.get(),
&delayed_task_manager_);
}
@@ -118,7 +128,7 @@ void TaskSchedulerImpl::PostDelayedTaskWithTraits(const Location& from_here,
const TaskTraits new_traits = SetUserBlockingPriorityIfNeeded(traits);
GetWorkerPoolForTraits(new_traits)
->PostTaskWithSequence(
- std::make_unique<Task>(from_here, std::move(task), new_traits, delay),
+ Task(from_here, std::move(task), new_traits, delay),
MakeRefCounted<Sequence>());
}
@@ -143,7 +153,7 @@ TaskSchedulerImpl::CreateSingleThreadTaskRunnerWithTraits(
SingleThreadTaskRunnerThreadMode thread_mode) {
return single_thread_task_runner_manager_
.CreateSingleThreadTaskRunnerWithTraits(
- name_, SetUserBlockingPriorityIfNeeded(traits), thread_mode);
+ SetUserBlockingPriorityIfNeeded(traits), thread_mode);
}
#if defined(OS_WIN)
@@ -152,7 +162,7 @@ TaskSchedulerImpl::CreateCOMSTATaskRunnerWithTraits(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode) {
return single_thread_task_runner_manager_.CreateCOMSTATaskRunnerWithTraits(
- name_, SetUserBlockingPriorityIfNeeded(traits), thread_mode);
+ SetUserBlockingPriorityIfNeeded(traits), thread_mode);
}
#endif // defined(OS_WIN)
diff --git a/chromium/base/task_scheduler/task_scheduler_impl.h b/chromium/base/task_scheduler/task_scheduler_impl.h
index 2714989aa44..7f99f5aea15 100644
--- a/chromium/base/task_scheduler/task_scheduler_impl.h
+++ b/chromium/base/task_scheduler/task_scheduler_impl.h
@@ -50,12 +50,14 @@ class BASE_EXPORT TaskSchedulerImpl : public TaskScheduler {
TaskTracker;
#endif
- // |name| is used to label threads and histograms. |task_tracker| can be used
- // for tests that need more execution control. By default, the production
- // TaskTracker is used.
- explicit TaskSchedulerImpl(StringPiece name,
- std::unique_ptr<TaskTrackerImpl> task_tracker =
- std::make_unique<TaskTrackerImpl>());
+ // Creates a TaskSchedulerImpl with a production TaskTracker.
+ // |histogram_label| is used to label histograms; it must not be empty.
+ explicit TaskSchedulerImpl(StringPiece histogram_label);
+
+ // For testing only. Creates a TaskSchedulerImpl with a custom TaskTracker.
+ TaskSchedulerImpl(StringPiece histogram_label,
+ std::unique_ptr<TaskTrackerImpl> task_tracker);
+
~TaskSchedulerImpl() override;
// TaskScheduler:
@@ -92,7 +94,6 @@ class BASE_EXPORT TaskSchedulerImpl : public TaskScheduler {
// |all_tasks_user_blocking_| is set.
TaskTraits SetUserBlockingPriorityIfNeeded(const TaskTraits& traits) const;
- const std::string name_;
Thread service_thread_;
const std::unique_ptr<TaskTrackerImpl> task_tracker_;
DelayedTaskManager delayed_task_manager_;
diff --git a/chromium/base/task_scheduler/task_tracker.cc b/chromium/base/task_scheduler/task_tracker.cc
index c5be7d24f25..12e3ec1b366 100644
--- a/chromium/base/task_scheduler/task_tracker.cc
+++ b/chromium/base/task_scheduler/task_tracker.cc
@@ -73,16 +73,22 @@ const char kQueueFunctionName[] = "base::PostTask";
// its implementation details.
const char kRunFunctionName[] = "TaskSchedulerRunTask";
-HistogramBase* GetTaskLatencyHistogram(const char* suffix) {
+HistogramBase* GetTaskLatencyHistogram(StringPiece histogram_label,
+ StringPiece task_type_suffix) {
+ DCHECK(!histogram_label.empty());
+ DCHECK(!task_type_suffix.empty());
// Mimics the UMA_HISTOGRAM_TIMES macro except we don't specify bounds with
// TimeDeltas as FactoryTimeGet assumes millisecond granularity. The minimums
// and maximums were chosen to place the 1ms mark at around the 70% range
// coverage for buckets giving us good info for tasks that have a latency
// below 1ms (most of them) and enough info to assess how bad the latency is
// for tasks that exceed this threshold.
- return Histogram::FactoryGet(
- std::string("TaskScheduler.TaskLatencyMicroseconds.") + suffix, 1, 20000,
- 50, HistogramBase::kUmaTargetedHistogramFlag);
+ std::string histogram_name =
+ JoinString({"TaskScheduler.TaskLatencyMicroseconds", histogram_label,
+ task_type_suffix},
+ ".");
+ return Histogram::FactoryGet(histogram_name, 1, 20000, 50,
+ HistogramBase::kUmaTargetedHistogramFlag);
}
// Upper bound for the
@@ -214,19 +220,23 @@ struct TaskTracker::PreemptedBackgroundSequence {
CanScheduleSequenceObserver* observer = nullptr;
};
-TaskTracker::TaskTracker(int max_num_scheduled_background_sequences)
+TaskTracker::TaskTracker(StringPiece histogram_label,
+ int max_num_scheduled_background_sequences)
: state_(new State),
flush_cv_(flush_lock_.CreateConditionVariable()),
shutdown_lock_(&flush_lock_),
max_num_scheduled_background_sequences_(
max_num_scheduled_background_sequences),
task_latency_histograms_{
- {GetTaskLatencyHistogram("BackgroundTaskPriority"),
- GetTaskLatencyHistogram("BackgroundTaskPriority.MayBlock")},
- {GetTaskLatencyHistogram("UserVisibleTaskPriority"),
- GetTaskLatencyHistogram("UserVisibleTaskPriority.MayBlock")},
- {GetTaskLatencyHistogram("UserBlockingTaskPriority"),
- GetTaskLatencyHistogram("UserBlockingTaskPriority.MayBlock")}} {
+ {GetTaskLatencyHistogram(histogram_label, "BackgroundTaskPriority"),
+ GetTaskLatencyHistogram(histogram_label,
+ "BackgroundTaskPriority_MayBlock")},
+ {GetTaskLatencyHistogram(histogram_label, "UserVisibleTaskPriority"),
+ GetTaskLatencyHistogram(histogram_label,
+ "UserVisibleTaskPriority_MayBlock")},
+ {GetTaskLatencyHistogram(histogram_label, "UserBlockingTaskPriority"),
+ GetTaskLatencyHistogram(histogram_label,
+ "UserBlockingTaskPriority_MayBlock")}} {
// Confirm that all |task_latency_histograms_| have been initialized above.
DCHECK(*(&task_latency_histograms_[static_cast<int>(TaskPriority::HIGHEST) +
1][0] -
@@ -252,17 +262,17 @@ void TaskTracker::Flush() {
}
}
-bool TaskTracker::WillPostTask(const Task* task) {
- DCHECK(task);
+bool TaskTracker::WillPostTask(const Task& task) {
+ DCHECK(task.task);
- if (!BeforePostTask(task->traits.shutdown_behavior()))
+ if (!BeforePostTask(task.traits.shutdown_behavior()))
return false;
- if (task->delayed_run_time.is_null())
+ if (task.delayed_run_time.is_null())
subtle::NoBarrier_AtomicIncrement(&num_incomplete_undelayed_tasks_, 1);
debug::TaskAnnotator task_annotator;
- task_annotator.DidQueueTask(kQueueFunctionName, *task);
+ task_annotator.DidQueueTask(kQueueFunctionName, task);
return true;
}
@@ -299,7 +309,8 @@ scoped_refptr<Sequence> TaskTracker::RunNextTask(
DCHECK(sequence);
// Run the next task in |sequence|.
- std::unique_ptr<Task> task = sequence->TakeTask();
+ Optional<Task> task = sequence->TakeTask();
+ // TODO(fdoray): Support TakeTask() returning null. https://crbug.com/783309
DCHECK(task);
const TaskShutdownBehavior shutdown_behavior =
@@ -308,7 +319,7 @@ scoped_refptr<Sequence> TaskTracker::RunNextTask(
const bool can_run_task = BeforeRunTask(shutdown_behavior);
const bool is_delayed = !task->delayed_run_time.is_null();
- RunOrSkipTask(std::move(task), sequence.get(), can_run_task);
+ RunOrSkipTask(std::move(task.value()), sequence.get(), can_run_task);
if (can_run_task)
AfterRunTask(shutdown_behavior);
@@ -353,19 +364,19 @@ void TaskTracker::SetHasShutdownStartedForTesting() {
state_->StartShutdown();
}
-void TaskTracker::RunOrSkipTask(std::unique_ptr<Task> task,
+void TaskTracker::RunOrSkipTask(Task task,
Sequence* sequence,
bool can_run_task) {
- RecordTaskLatencyHistogram(task.get());
+ RecordTaskLatencyHistogram(task);
const bool previous_singleton_allowed =
ThreadRestrictions::SetSingletonAllowed(
- task->traits.shutdown_behavior() !=
+ task.traits.shutdown_behavior() !=
TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN);
const bool previous_io_allowed =
- ThreadRestrictions::SetIOAllowed(task->traits.may_block());
+ ThreadRestrictions::SetIOAllowed(task.traits.may_block());
const bool previous_wait_allowed = ThreadRestrictions::SetWaitAllowed(
- task->traits.with_base_sync_primitives());
+ task.traits.with_base_sync_primitives());
{
const SequenceToken& sequence_token = sequence->token();
@@ -373,7 +384,7 @@ void TaskTracker::RunOrSkipTask(std::unique_ptr<Task> task,
ScopedSetSequenceTokenForCurrentThread
scoped_set_sequence_token_for_current_thread(sequence_token);
ScopedSetTaskPriorityForCurrentThread
- scoped_set_task_priority_for_current_thread(task->traits.priority());
+ scoped_set_task_priority_for_current_thread(task.traits.priority());
ScopedSetSequenceLocalStorageMapForCurrentThread
scoped_set_sequence_local_storage_map_for_current_thread(
sequence->sequence_local_storage());
@@ -381,35 +392,37 @@ void TaskTracker::RunOrSkipTask(std::unique_ptr<Task> task,
// Set up TaskRunnerHandle as expected for the scope of the task.
std::unique_ptr<SequencedTaskRunnerHandle> sequenced_task_runner_handle;
std::unique_ptr<ThreadTaskRunnerHandle> single_thread_task_runner_handle;
- DCHECK(!task->sequenced_task_runner_ref ||
- !task->single_thread_task_runner_ref);
- if (task->sequenced_task_runner_ref) {
+ DCHECK(!task.sequenced_task_runner_ref ||
+ !task.single_thread_task_runner_ref);
+ if (task.sequenced_task_runner_ref) {
sequenced_task_runner_handle.reset(
- new SequencedTaskRunnerHandle(task->sequenced_task_runner_ref));
- } else if (task->single_thread_task_runner_ref) {
+ new SequencedTaskRunnerHandle(task.sequenced_task_runner_ref));
+ } else if (task.single_thread_task_runner_ref) {
single_thread_task_runner_handle.reset(
- new ThreadTaskRunnerHandle(task->single_thread_task_runner_ref));
+ new ThreadTaskRunnerHandle(task.single_thread_task_runner_ref));
}
if (can_run_task) {
- TRACE_TASK_EXECUTION(kRunFunctionName, *task);
+ TRACE_TASK_EXECUTION(kRunFunctionName, task);
const char* const execution_mode =
- task->single_thread_task_runner_ref
+ task.single_thread_task_runner_ref
? kSingleThreadExecutionMode
- : (task->sequenced_task_runner_ref ? kSequencedExecutionMode
- : kParallelExecutionMode);
+ : (task.sequenced_task_runner_ref ? kSequencedExecutionMode
+ : kParallelExecutionMode);
// TODO(gab): In a better world this would be tacked on as an extra arg
// to the trace event generated above. This is not possible however until
// http://crbug.com/652692 is resolved.
TRACE_EVENT1("task_scheduler", "TaskTracker::RunTask", "task_info",
std::make_unique<TaskTracingInfo>(
- task->traits, execution_mode, sequence_token));
+ task.traits, execution_mode, sequence_token));
- debug::TaskAnnotator().RunTask(kQueueFunctionName, task.get());
+ debug::TaskAnnotator().RunTask(kQueueFunctionName, &task);
}
- task.reset();
+ // Make sure the arguments bound to the callback are deleted within the
+ // scope in which the callback runs.
+ task.task = OnceClosure();
}
ThreadRestrictions::SetWaitAllowed(previous_wait_allowed);
@@ -474,8 +487,8 @@ bool TaskTracker::IsPostingBlockShutdownTaskAfterShutdownAllowed() {
}
#endif
-int TaskTracker::GetNumIncompleteUndelayedTasksForTesting() const {
- return subtle::NoBarrier_Load(&num_incomplete_undelayed_tasks_);
+bool TaskTracker::HasIncompleteUndelayedTasksForTesting() const {
+ return subtle::Acquire_Load(&num_incomplete_undelayed_tasks_) != 0;
}
bool TaskTracker::BeforePostTask(TaskShutdownBehavior shutdown_behavior) {
@@ -661,11 +674,11 @@ scoped_refptr<Sequence> TaskTracker::ManageBackgroundSequencesAfterRunningTask(
return nullptr;
}
-void TaskTracker::RecordTaskLatencyHistogram(Task* task) {
- const TimeDelta task_latency = TimeTicks::Now() - task->sequenced_time;
- task_latency_histograms_[static_cast<int>(task->traits.priority())]
- [task->traits.may_block() ||
- task->traits.with_base_sync_primitives()
+void TaskTracker::RecordTaskLatencyHistogram(const Task& task) {
+ const TimeDelta task_latency = TimeTicks::Now() - task.sequenced_time;
+ task_latency_histograms_[static_cast<int>(task.traits.priority())]
+ [task.traits.may_block() ||
+ task.traits.with_base_sync_primitives()
? 1
: 0]
->Add(task_latency.InMicroseconds());
diff --git a/chromium/base/task_scheduler/task_tracker.h b/chromium/base/task_scheduler/task_tracker.h
index f249b73c445..df6157cf563 100644
--- a/chromium/base/task_scheduler/task_tracker.h
+++ b/chromium/base/task_scheduler/task_tracker.h
@@ -15,6 +15,7 @@
#include "base/logging.h"
#include "base/macros.h"
#include "base/metrics/histogram_base.h"
+#include "base/strings/string_piece.h"
#include "base/synchronization/waitable_event.h"
#include "base/task_scheduler/can_schedule_sequence_observer.h"
#include "base/task_scheduler/scheduler_lock.h"
@@ -84,9 +85,11 @@ namespace internal {
// TaskPriority::USER_BLOCKING.
class BASE_EXPORT TaskTracker {
public:
+ // |histogram_label| is used as a suffix for histograms, it must not be empty.
// |max_num_scheduled_background_sequences| is the maximum number of
- // background sequences that be scheduled concurrently.
- TaskTracker(int max_num_scheduled_background_sequences =
+ // background sequences that can be scheduled concurrently (defaults to max()).
+ TaskTracker(StringPiece histogram_label,
+ int max_num_scheduled_background_sequences =
std::numeric_limits<int>::max());
virtual ~TaskTracker();
@@ -108,7 +111,7 @@ class BASE_EXPORT TaskTracker {
// Informs this TaskTracker that |task| is about to be posted. Returns true if
// this operation is allowed (|task| should be posted if-and-only-if it is).
- bool WillPostTask(const Task* task);
+ bool WillPostTask(const Task& task);
// Informs this TaskTracker that |sequence| is about to be scheduled. If this
// returns |sequence|, it is expected that RunNextTask() will soon be called
@@ -155,9 +158,7 @@ class BASE_EXPORT TaskTracker {
// have run. |sequence| is the sequence from which |task| was extracted. An
// override is expected to call its parent's implementation but is free to
// perform extra work before and after doing so.
- virtual void RunOrSkipTask(std::unique_ptr<Task> task,
- Sequence* sequence,
- bool can_run_task);
+ virtual void RunOrSkipTask(Task task, Sequence* sequence, bool can_run_task);
#if DCHECK_IS_ON()
// Returns true if this context should be exempt from blocking shutdown
@@ -166,9 +167,10 @@ class BASE_EXPORT TaskTracker {
virtual bool IsPostingBlockShutdownTaskAfterShutdownAllowed();
#endif
- // Returns the number of undelayed tasks that haven't completed their
- // execution (still queued or in progress).
- int GetNumIncompleteUndelayedTasksForTesting() const;
+ // Returns true if there are undelayed tasks that haven't completed their
+ // execution (still queued or in progress). If it returns false: the side-
+ // effects of all completed tasks are guaranteed to be visible to the caller.
+ bool HasIncompleteUndelayedTasksForTesting() const;
private:
class State;
@@ -219,7 +221,7 @@ class BASE_EXPORT TaskTracker {
// Records the TaskScheduler.TaskLatency.[task priority].[may block] histogram
// for |task|.
- void RecordTaskLatencyHistogram(Task* task);
+ void RecordTaskLatencyHistogram(const Task& task);
// Number of tasks blocking shutdown and boolean indicating whether shutdown
// has started.
diff --git a/chromium/base/task_scheduler/task_tracker_posix.cc b/chromium/base/task_scheduler/task_tracker_posix.cc
index d3317615fd7..8289d909dc4 100644
--- a/chromium/base/task_scheduler/task_tracker_posix.cc
+++ b/chromium/base/task_scheduler/task_tracker_posix.cc
@@ -11,10 +11,10 @@
namespace base {
namespace internal {
-TaskTrackerPosix::TaskTrackerPosix() = default;
+TaskTrackerPosix::TaskTrackerPosix(StringPiece name) : TaskTracker(name) {}
TaskTrackerPosix::~TaskTrackerPosix() = default;
-void TaskTrackerPosix::RunOrSkipTask(std::unique_ptr<Task> task,
+void TaskTrackerPosix::RunOrSkipTask(Task task,
Sequence* sequence,
bool can_run_task) {
DCHECK(watch_file_descriptor_message_loop_);
diff --git a/chromium/base/task_scheduler/task_tracker_posix.h b/chromium/base/task_scheduler/task_tracker_posix.h
index 2992b9fc4dc..4689f7a13e5 100644
--- a/chromium/base/task_scheduler/task_tracker_posix.h
+++ b/chromium/base/task_scheduler/task_tracker_posix.h
@@ -27,7 +27,7 @@ struct Task;
// TaskTracker can run tasks.
class BASE_EXPORT TaskTrackerPosix : public TaskTracker {
public:
- TaskTrackerPosix();
+ TaskTrackerPosix(StringPiece name);
~TaskTrackerPosix() override;
// Sets the MessageLoopForIO with which to setup FileDescriptorWatcher in the
@@ -52,9 +52,7 @@ class BASE_EXPORT TaskTrackerPosix : public TaskTracker {
protected:
// TaskTracker:
- void RunOrSkipTask(std::unique_ptr<Task> task,
- Sequence* sequence,
- bool can_run_task) override;
+ void RunOrSkipTask(Task task, Sequence* sequence, bool can_run_task) override;
private:
#if DCHECK_IS_ON()
diff --git a/chromium/base/task_scheduler/task_tracker_posix_unittest.cc b/chromium/base/task_scheduler/task_tracker_posix_unittest.cc
index 9ca9a913d97..6eb1642504f 100644
--- a/chromium/base/task_scheduler/task_tracker_posix_unittest.cc
+++ b/chromium/base/task_scheduler/task_tracker_posix_unittest.cc
@@ -42,7 +42,7 @@ class TaskSchedulerTaskTrackerPosixTest : public testing::Test {
protected:
Thread service_thread_;
- TaskTrackerPosix tracker_;
+ TaskTrackerPosix tracker_ = {"Test"};
private:
DISALLOW_COPY_AND_ASSIGN(TaskSchedulerTaskTrackerPosixTest);
@@ -53,12 +53,11 @@ class TaskSchedulerTaskTrackerPosixTest : public testing::Test {
// Verify that TaskTrackerPosix runs a Task it receives.
TEST_F(TaskSchedulerTaskTrackerPosixTest, RunTask) {
bool did_run = false;
- auto task = std::make_unique<Task>(
- FROM_HERE,
- Bind([](bool* did_run) { *did_run = true; }, Unretained(&did_run)),
- TaskTraits(), TimeDelta());
+ Task task(FROM_HERE,
+ Bind([](bool* did_run) { *did_run = true; }, Unretained(&did_run)),
+ TaskTraits(), TimeDelta());
- EXPECT_TRUE(tracker_.WillPostTask(task.get()));
+ EXPECT_TRUE(tracker_.WillPostTask(task));
auto sequence = test::CreateSequenceWithTask(std::move(task));
EXPECT_EQ(sequence, tracker_.WillScheduleSequence(sequence, nullptr));
@@ -74,15 +73,14 @@ TEST_F(TaskSchedulerTaskTrackerPosixTest, RunTask) {
TEST_F(TaskSchedulerTaskTrackerPosixTest, FileDescriptorWatcher) {
int fds[2];
ASSERT_EQ(0, pipe(fds));
- auto task = std::make_unique<Task>(
- FROM_HERE,
- Bind(IgnoreResult(&FileDescriptorWatcher::WatchReadable), fds[0],
- Bind(&DoNothing)),
- TaskTraits(), TimeDelta());
+ Task task(FROM_HERE,
+ Bind(IgnoreResult(&FileDescriptorWatcher::WatchReadable), fds[0],
+ Bind(&DoNothing)),
+ TaskTraits(), TimeDelta());
// FileDescriptorWatcher::WatchReadable needs a SequencedTaskRunnerHandle.
- task->sequenced_task_runner_ref = MakeRefCounted<NullTaskRunner>();
+ task.sequenced_task_runner_ref = MakeRefCounted<NullTaskRunner>();
- EXPECT_TRUE(tracker_.WillPostTask(task.get()));
+ EXPECT_TRUE(tracker_.WillPostTask(task));
auto sequence = test::CreateSequenceWithTask(std::move(task));
EXPECT_EQ(sequence, tracker_.WillScheduleSequence(sequence, nullptr));
diff --git a/chromium/base/task_scheduler/task_tracker_unittest.cc b/chromium/base/task_scheduler/task_tracker_unittest.cc
index 685fb3573e8..df78b7410e5 100644
--- a/chromium/base/task_scheduler/task_tracker_unittest.cc
+++ b/chromium/base/task_scheduler/task_tracker_unittest.cc
@@ -91,6 +91,7 @@ class ThreadPostingAndRunningTask : public SimpleThread {
bool expect_post_succeeds)
: SimpleThread("ThreadPostingAndRunningTask"),
tracker_(tracker),
+ owned_task_(FROM_HERE, OnceClosure(), TaskTraits(), TimeDelta()),
task_(task),
action_(action),
expect_post_succeeds_(expect_post_succeeds) {
@@ -102,28 +103,28 @@ class ThreadPostingAndRunningTask : public SimpleThread {
}
ThreadPostingAndRunningTask(TaskTracker* tracker,
- std::unique_ptr<Task> task,
+ Task task,
Action action,
bool expect_post_succeeds)
: SimpleThread("ThreadPostingAndRunningTask"),
tracker_(tracker),
- task_(task.get()),
owned_task_(std::move(task)),
+ task_(&owned_task_),
action_(action),
expect_post_succeeds_(expect_post_succeeds) {
- EXPECT_TRUE(task_);
+ EXPECT_TRUE(owned_task_.task);
}
private:
void Run() override {
bool post_succeeded = true;
if (action_ == Action::WILL_POST || action_ == Action::WILL_POST_AND_RUN) {
- post_succeeded = tracker_->WillPostTask(task_);
+ post_succeeded = tracker_->WillPostTask(*task_);
EXPECT_EQ(expect_post_succeeds_, post_succeeded);
}
if (post_succeeded &&
(action_ == Action::RUN || action_ == Action::WILL_POST_AND_RUN)) {
- EXPECT_TRUE(owned_task_);
+ EXPECT_TRUE(owned_task_.task);
testing::StrictMock<MockCanScheduleSequenceObserver>
never_notified_observer;
@@ -139,8 +140,8 @@ class ThreadPostingAndRunningTask : public SimpleThread {
}
TaskTracker* const tracker_;
- Task* const task_;
- std::unique_ptr<Task> owned_task_;
+ Task owned_task_;
+ Task* task_;
const Action action_;
const bool expect_post_succeeds_;
@@ -166,14 +167,14 @@ class TaskSchedulerTaskTrackerTest
TaskSchedulerTaskTrackerTest() = default;
// Creates a task with |shutdown_behavior|.
- std::unique_ptr<Task> CreateTask(TaskShutdownBehavior shutdown_behavior) {
- return std::make_unique<Task>(
+ Task CreateTask(TaskShutdownBehavior shutdown_behavior) {
+ return Task(
FROM_HERE,
Bind(&TaskSchedulerTaskTrackerTest::RunTaskCallback, Unretained(this)),
TaskTraits(shutdown_behavior), TimeDelta());
}
- void DispatchAndRunTaskWithTracker(std::unique_ptr<Task> task) {
+ void DispatchAndRunTaskWithTracker(Task task) {
auto sequence = tracker_.WillScheduleSequence(
test::CreateSequenceWithTask(std::move(task)),
&never_notified_observer_);
@@ -231,7 +232,7 @@ class TaskSchedulerTaskTrackerTest
return num_tasks_executed_;
}
- TaskTracker tracker_;
+ TaskTracker tracker_ = {"Test"};
testing::StrictMock<MockCanScheduleSequenceObserver> never_notified_observer_;
private:
@@ -278,10 +279,10 @@ class TaskSchedulerTaskTrackerTest
} // namespace
TEST_P(TaskSchedulerTaskTrackerTest, WillPostAndRunBeforeShutdown) {
- std::unique_ptr<Task> task(CreateTask(GetParam()));
+ Task task(CreateTask(GetParam()));
// Inform |task_tracker_| that |task| will be posted.
- EXPECT_TRUE(tracker_.WillPostTask(task.get()));
+ EXPECT_TRUE(tracker_.WillPostTask(task));
// Run the task.
EXPECT_EQ(0U, NumTasksExecuted());
@@ -300,7 +301,7 @@ TEST_P(TaskSchedulerTaskTrackerTest, WillPostAndRunLongTaskBeforeShutdown) {
WaitableEvent::InitialState::NOT_SIGNALED);
WaitableEvent task_barrier(WaitableEvent::ResetPolicy::AUTOMATIC,
WaitableEvent::InitialState::NOT_SIGNALED);
- auto blocked_task = std::make_unique<Task>(
+ Task blocked_task(
FROM_HERE,
Bind(
[](WaitableEvent* task_running, WaitableEvent* task_barrier) {
@@ -311,7 +312,7 @@ TEST_P(TaskSchedulerTaskTrackerTest, WillPostAndRunLongTaskBeforeShutdown) {
TaskTraits(WithBaseSyncPrimitives(), GetParam()), TimeDelta());
// Inform |task_tracker_| that |blocked_task| will be posted.
- EXPECT_TRUE(tracker_.WillPostTask(blocked_task.get()));
+ EXPECT_TRUE(tracker_.WillPostTask(blocked_task));
// Create a thread to run the task. Wait until the task starts running.
ThreadPostingAndRunningTask thread_running_task(
@@ -342,14 +343,13 @@ TEST_P(TaskSchedulerTaskTrackerTest, WillPostAndRunLongTaskBeforeShutdown) {
TEST_P(TaskSchedulerTaskTrackerTest, WillPostBeforeShutdownRunDuringShutdown) {
// Inform |task_tracker_| that a task will be posted.
- std::unique_ptr<Task> task(CreateTask(GetParam()));
- EXPECT_TRUE(tracker_.WillPostTask(task.get()));
+ Task task(CreateTask(GetParam()));
+ EXPECT_TRUE(tracker_.WillPostTask(task));
// Inform |task_tracker_| that a BLOCK_SHUTDOWN task will be posted just to
// block shutdown.
- std::unique_ptr<Task> block_shutdown_task(
- CreateTask(TaskShutdownBehavior::BLOCK_SHUTDOWN));
- EXPECT_TRUE(tracker_.WillPostTask(block_shutdown_task.get()));
+ Task block_shutdown_task(CreateTask(TaskShutdownBehavior::BLOCK_SHUTDOWN));
+ EXPECT_TRUE(tracker_.WillPostTask(block_shutdown_task));
// Call Shutdown() asynchronously.
CallShutdownAsync();
@@ -372,8 +372,8 @@ TEST_P(TaskSchedulerTaskTrackerTest, WillPostBeforeShutdownRunDuringShutdown) {
TEST_P(TaskSchedulerTaskTrackerTest, WillPostBeforeShutdownRunAfterShutdown) {
// Inform |task_tracker_| that a task will be posted.
- std::unique_ptr<Task> task(CreateTask(GetParam()));
- EXPECT_TRUE(tracker_.WillPostTask(task.get()));
+ Task task(CreateTask(GetParam()));
+ EXPECT_TRUE(tracker_.WillPostTask(task));
// Call Shutdown() asynchronously.
CallShutdownAsync();
@@ -402,9 +402,8 @@ TEST_P(TaskSchedulerTaskTrackerTest, WillPostBeforeShutdownRunAfterShutdown) {
TEST_P(TaskSchedulerTaskTrackerTest, WillPostAndRunDuringShutdown) {
// Inform |task_tracker_| that a BLOCK_SHUTDOWN task will be posted just to
// block shutdown.
- std::unique_ptr<Task> block_shutdown_task(
- CreateTask(TaskShutdownBehavior::BLOCK_SHUTDOWN));
- EXPECT_TRUE(tracker_.WillPostTask(block_shutdown_task.get()));
+ Task block_shutdown_task(CreateTask(TaskShutdownBehavior::BLOCK_SHUTDOWN));
+ EXPECT_TRUE(tracker_.WillPostTask(block_shutdown_task));
// Call Shutdown() asynchronously.
CallShutdownAsync();
@@ -412,8 +411,8 @@ TEST_P(TaskSchedulerTaskTrackerTest, WillPostAndRunDuringShutdown) {
if (GetParam() == TaskShutdownBehavior::BLOCK_SHUTDOWN) {
// Inform |task_tracker_| that a BLOCK_SHUTDOWN task will be posted.
- std::unique_ptr<Task> task(CreateTask(GetParam()));
- EXPECT_TRUE(tracker_.WillPostTask(task.get()));
+ Task task(CreateTask(GetParam()));
+ EXPECT_TRUE(tracker_.WillPostTask(task));
// Run the BLOCK_SHUTDOWN task.
EXPECT_EQ(0U, NumTasksExecuted());
@@ -421,8 +420,8 @@ TEST_P(TaskSchedulerTaskTrackerTest, WillPostAndRunDuringShutdown) {
EXPECT_EQ(1U, NumTasksExecuted());
} else {
// It shouldn't be allowed to post a non BLOCK_SHUTDOWN task.
- std::unique_ptr<Task> task(CreateTask(GetParam()));
- EXPECT_FALSE(tracker_.WillPostTask(task.get()));
+ Task task(CreateTask(GetParam()));
+ EXPECT_FALSE(tracker_.WillPostTask(task));
// Don't try to run the task, because it wasn't allowed to be posted.
}
@@ -438,10 +437,10 @@ TEST_P(TaskSchedulerTaskTrackerTest, WillPostAndRunDuringShutdown) {
TEST_P(TaskSchedulerTaskTrackerTest, WillPostAfterShutdown) {
tracker_.Shutdown();
- std::unique_ptr<Task> task(CreateTask(GetParam()));
+ Task task(CreateTask(GetParam()));
// |task_tracker_| shouldn't allow a task to be posted after shutdown.
- EXPECT_FALSE(tracker_.WillPostTask(task.get()));
+ EXPECT_FALSE(tracker_.WillPostTask(task));
}
// Verify that BLOCK_SHUTDOWN and SKIP_ON_SHUTDOWN tasks can
@@ -450,10 +449,9 @@ TEST_P(TaskSchedulerTaskTrackerTest, SingletonAllowed) {
const bool can_use_singletons =
(GetParam() != TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN);
- std::unique_ptr<Task> task(
- new Task(FROM_HERE, BindOnce(&ThreadRestrictions::AssertSingletonAllowed),
- TaskTraits(GetParam()), TimeDelta()));
- EXPECT_TRUE(tracker_.WillPostTask(task.get()));
+ Task task(FROM_HERE, BindOnce(&ThreadRestrictions::AssertSingletonAllowed),
+ TaskTraits(GetParam()), TimeDelta());
+ EXPECT_TRUE(tracker_.WillPostTask(task));
// Set the singleton allowed bit to the opposite of what it is expected to be
// when |tracker| runs |task| to verify that |tracker| actually sets the
@@ -473,31 +471,29 @@ TEST_P(TaskSchedulerTaskTrackerTest, IOAllowed) {
// Unset the IO allowed bit. Expect TaskTracker to set it before running a
// task with the MayBlock() trait.
ThreadRestrictions::SetIOAllowed(false);
- auto task_with_may_block =
- std::make_unique<Task>(FROM_HERE, Bind([]() {
- // Shouldn't fail.
- AssertBlockingAllowed();
- }),
- TaskTraits(MayBlock(), GetParam()), TimeDelta());
- EXPECT_TRUE(tracker_.WillPostTask(task_with_may_block.get()));
+ Task task_with_may_block(FROM_HERE, Bind([]() {
+ // Shouldn't fail.
+ AssertBlockingAllowed();
+ }),
+ TaskTraits(MayBlock(), GetParam()), TimeDelta());
+ EXPECT_TRUE(tracker_.WillPostTask(task_with_may_block));
DispatchAndRunTaskWithTracker(std::move(task_with_may_block));
// Set the IO allowed bit. Expect TaskTracker to unset it before running a
// task without the MayBlock() trait.
ThreadRestrictions::SetIOAllowed(true);
- auto task_without_may_block = std::make_unique<Task>(
+ Task task_without_may_block(
FROM_HERE,
Bind([]() { EXPECT_DCHECK_DEATH({ AssertBlockingAllowed(); }); }),
TaskTraits(GetParam()), TimeDelta());
- EXPECT_TRUE(tracker_.WillPostTask(task_without_may_block.get()));
+ EXPECT_TRUE(tracker_.WillPostTask(task_without_may_block));
DispatchAndRunTaskWithTracker(std::move(task_without_may_block));
}
-static void RunTaskRunnerHandleVerificationTask(
- TaskTracker* tracker,
- std::unique_ptr<Task> verify_task) {
+static void RunTaskRunnerHandleVerificationTask(TaskTracker* tracker,
+ Task verify_task) {
// Pretend |verify_task| is posted to respect TaskTracker's contract.
- EXPECT_TRUE(tracker->WillPostTask(verify_task.get()));
+ EXPECT_TRUE(tracker->WillPostTask(verify_task));
// Confirm that the test conditions are right (no TaskRunnerHandles set
// already).
@@ -524,9 +520,8 @@ static void VerifyNoTaskRunnerHandle() {
TEST_P(TaskSchedulerTaskTrackerTest, TaskRunnerHandleIsNotSetOnParallel) {
// Create a task that will verify that TaskRunnerHandles are not set in its
// scope per no TaskRunner ref being set to it.
- std::unique_ptr<Task> verify_task(
- new Task(FROM_HERE, BindOnce(&VerifyNoTaskRunnerHandle),
- TaskTraits(GetParam()), TimeDelta()));
+ Task verify_task(FROM_HERE, BindOnce(&VerifyNoTaskRunnerHandle),
+ TaskTraits(GetParam()), TimeDelta());
RunTaskRunnerHandleVerificationTask(&tracker_, std::move(verify_task));
}
@@ -545,12 +540,11 @@ TEST_P(TaskSchedulerTaskTrackerTest,
// Create a task that will verify that SequencedTaskRunnerHandle is properly
// set to |test_task_runner| in its scope per |sequenced_task_runner_ref|
// being set to it.
- std::unique_ptr<Task> verify_task(
- new Task(FROM_HERE,
- BindOnce(&VerifySequencedTaskRunnerHandle,
- Unretained(test_task_runner.get())),
- TaskTraits(GetParam()), TimeDelta()));
- verify_task->sequenced_task_runner_ref = test_task_runner;
+ Task verify_task(FROM_HERE,
+ BindOnce(&VerifySequencedTaskRunnerHandle,
+ Unretained(test_task_runner.get())),
+ TaskTraits(GetParam()), TimeDelta());
+ verify_task.sequenced_task_runner_ref = test_task_runner;
RunTaskRunnerHandleVerificationTask(&tracker_, std::move(verify_task));
}
@@ -571,12 +565,11 @@ TEST_P(TaskSchedulerTaskTrackerTest,
// Create a task that will verify that ThreadTaskRunnerHandle is properly set
// to |test_task_runner| in its scope per |single_thread_task_runner_ref|
// being set on it.
- std::unique_ptr<Task> verify_task(
- new Task(FROM_HERE,
- BindOnce(&VerifyThreadTaskRunnerHandle,
- Unretained(test_task_runner.get())),
- TaskTraits(GetParam()), TimeDelta()));
- verify_task->single_thread_task_runner_ref = test_task_runner;
+ Task verify_task(FROM_HERE,
+ BindOnce(&VerifyThreadTaskRunnerHandle,
+ Unretained(test_task_runner.get())),
+ TaskTraits(GetParam()), TimeDelta());
+ verify_task.single_thread_task_runner_ref = test_task_runner;
RunTaskRunnerHandleVerificationTask(&tracker_, std::move(verify_task));
}
@@ -584,15 +577,15 @@ TEST_P(TaskSchedulerTaskTrackerTest,
TEST_P(TaskSchedulerTaskTrackerTest, FlushPendingDelayedTask) {
const Task delayed_task(FROM_HERE, BindOnce(&DoNothing),
TaskTraits(GetParam()), TimeDelta::FromDays(1));
- tracker_.WillPostTask(&delayed_task);
+ tracker_.WillPostTask(delayed_task);
// Flush() should return even if the delayed task didn't run.
tracker_.Flush();
}
TEST_P(TaskSchedulerTaskTrackerTest, FlushPendingUndelayedTask) {
- auto undelayed_task = std::make_unique<Task>(
- FROM_HERE, Bind(&DoNothing), TaskTraits(GetParam()), TimeDelta());
- tracker_.WillPostTask(undelayed_task.get());
+ Task undelayed_task(FROM_HERE, Bind(&DoNothing), TaskTraits(GetParam()),
+ TimeDelta());
+ tracker_.WillPostTask(undelayed_task);
// Flush() shouldn't return before the undelayed task runs.
CallFlushAsync();
@@ -605,9 +598,9 @@ TEST_P(TaskSchedulerTaskTrackerTest, FlushPendingUndelayedTask) {
}
TEST_P(TaskSchedulerTaskTrackerTest, PostTaskDuringFlush) {
- auto undelayed_task = std::make_unique<Task>(
- FROM_HERE, Bind(&DoNothing), TaskTraits(GetParam()), TimeDelta());
- tracker_.WillPostTask(undelayed_task.get());
+ Task undelayed_task(FROM_HERE, Bind(&DoNothing), TaskTraits(GetParam()),
+ TimeDelta());
+ tracker_.WillPostTask(undelayed_task);
// Flush() shouldn't return before the undelayed task runs.
CallFlushAsync();
@@ -615,9 +608,9 @@ TEST_P(TaskSchedulerTaskTrackerTest, PostTaskDuringFlush) {
VERIFY_ASYNC_FLUSH_IN_PROGRESS();
// Simulate posting another undelayed task.
- auto other_undelayed_task = std::make_unique<Task>(
- FROM_HERE, Bind(&DoNothing), TaskTraits(GetParam()), TimeDelta());
- tracker_.WillPostTask(other_undelayed_task.get());
+ Task other_undelayed_task(FROM_HERE, Bind(&DoNothing), TaskTraits(GetParam()),
+ TimeDelta());
+ tracker_.WillPostTask(other_undelayed_task);
// Run the first undelayed task.
DispatchAndRunTaskWithTracker(std::move(undelayed_task));
@@ -633,13 +626,12 @@ TEST_P(TaskSchedulerTaskTrackerTest, PostTaskDuringFlush) {
TEST_P(TaskSchedulerTaskTrackerTest, RunDelayedTaskDuringFlush) {
// Simulate posting a delayed and an undelayed task.
- auto delayed_task =
- std::make_unique<Task>(FROM_HERE, Bind(&DoNothing),
- TaskTraits(GetParam()), TimeDelta::FromDays(1));
- tracker_.WillPostTask(delayed_task.get());
- auto undelayed_task = std::make_unique<Task>(
- FROM_HERE, Bind(&DoNothing), TaskTraits(GetParam()), TimeDelta());
- tracker_.WillPostTask(undelayed_task.get());
+ Task delayed_task(FROM_HERE, Bind(&DoNothing), TaskTraits(GetParam()),
+ TimeDelta::FromDays(1));
+ tracker_.WillPostTask(delayed_task);
+ Task undelayed_task(FROM_HERE, Bind(&DoNothing), TaskTraits(GetParam()),
+ TimeDelta());
+ tracker_.WillPostTask(undelayed_task);
// Flush() shouldn't return before the undelayed task runs.
CallFlushAsync();
@@ -666,9 +658,9 @@ TEST_P(TaskSchedulerTaskTrackerTest, FlushAfterShutdown) {
return;
// Simulate posting a task.
- auto undelayed_task = std::make_unique<Task>(
- FROM_HERE, Bind(&DoNothing), TaskTraits(GetParam()), TimeDelta());
- tracker_.WillPostTask(undelayed_task.get());
+ Task undelayed_task(FROM_HERE, Bind(&DoNothing), TaskTraits(GetParam()),
+ TimeDelta());
+ tracker_.WillPostTask(undelayed_task);
// Shutdown() should return immediately since there are no pending
// BLOCK_SHUTDOWN tasks.
@@ -684,9 +676,9 @@ TEST_P(TaskSchedulerTaskTrackerTest, ShutdownDuringFlush) {
return;
// Simulate posting a task.
- auto undelayed_task = std::make_unique<Task>(
- FROM_HERE, Bind(&DoNothing), TaskTraits(GetParam()), TimeDelta());
- tracker_.WillPostTask(undelayed_task.get());
+ Task undelayed_task(FROM_HERE, Bind(&DoNothing), TaskTraits(GetParam()),
+ TimeDelta());
+ tracker_.WillPostTask(undelayed_task);
// Flush() shouldn't return before the undelayed task runs or
// shutdown completes.
@@ -729,10 +721,9 @@ TEST_F(TaskSchedulerTaskTrackerTest, CurrentSequenceToken) {
scoped_refptr<Sequence> sequence = MakeRefCounted<Sequence>();
const SequenceToken sequence_token = sequence->token();
- auto task = std::make_unique<Task>(FROM_HERE,
- Bind(&ExpectSequenceToken, sequence_token),
- TaskTraits(), TimeDelta());
- tracker_.WillPostTask(task.get());
+ Task task(FROM_HERE, Bind(&ExpectSequenceToken, sequence_token), TaskTraits(),
+ TimeDelta());
+ tracker_.WillPostTask(task);
sequence->PushTask(std::move(task));
@@ -778,25 +769,32 @@ TEST_F(TaskSchedulerTaskTrackerTest, LoadWillPostAndRunBeforeShutdown) {
TEST_F(TaskSchedulerTaskTrackerTest,
LoadWillPostBeforeShutdownAndRunDuringShutdown) {
// Post tasks asynchronously.
- std::vector<std::unique_ptr<Task>> tasks;
- std::vector<std::unique_ptr<ThreadPostingAndRunningTask>> post_threads;
+ std::vector<Task> tasks_continue_on_shutdown;
+ std::vector<Task> tasks_skip_on_shutdown;
+ std::vector<Task> tasks_block_shutdown;
+ for (size_t i = 0; i < kLoadTestNumIterations; ++i) {
+ tasks_continue_on_shutdown.push_back(
+ CreateTask(TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN));
+ tasks_skip_on_shutdown.push_back(
+ CreateTask(TaskShutdownBehavior::SKIP_ON_SHUTDOWN));
+ tasks_block_shutdown.push_back(
+ CreateTask(TaskShutdownBehavior::BLOCK_SHUTDOWN));
+ }
+ std::vector<std::unique_ptr<ThreadPostingAndRunningTask>> post_threads;
for (size_t i = 0; i < kLoadTestNumIterations; ++i) {
- tasks.push_back(CreateTask(TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN));
post_threads.push_back(std::make_unique<ThreadPostingAndRunningTask>(
- &tracker_, tasks.back().get(),
+ &tracker_, &tasks_continue_on_shutdown[i],
ThreadPostingAndRunningTask::Action::WILL_POST, true));
post_threads.back()->Start();
- tasks.push_back(CreateTask(TaskShutdownBehavior::SKIP_ON_SHUTDOWN));
post_threads.push_back(std::make_unique<ThreadPostingAndRunningTask>(
- &tracker_, tasks.back().get(),
+ &tracker_, &tasks_skip_on_shutdown[i],
ThreadPostingAndRunningTask::Action::WILL_POST, true));
post_threads.back()->Start();
- tasks.push_back(CreateTask(TaskShutdownBehavior::BLOCK_SHUTDOWN));
post_threads.push_back(std::make_unique<ThreadPostingAndRunningTask>(
- &tracker_, tasks.back().get(),
+ &tracker_, &tasks_block_shutdown[i],
ThreadPostingAndRunningTask::Action::WILL_POST, true));
post_threads.back()->Start();
}
@@ -809,11 +807,20 @@ TEST_F(TaskSchedulerTaskTrackerTest,
// Run tasks asynchronously.
std::vector<std::unique_ptr<ThreadPostingAndRunningTask>> run_threads;
+ for (size_t i = 0; i < kLoadTestNumIterations; ++i) {
+ run_threads.push_back(std::make_unique<ThreadPostingAndRunningTask>(
+ &tracker_, std::move(tasks_continue_on_shutdown[i]),
+ ThreadPostingAndRunningTask::Action::RUN, false));
+ run_threads.back()->Start();
+
+ run_threads.push_back(std::make_unique<ThreadPostingAndRunningTask>(
+ &tracker_, std::move(tasks_skip_on_shutdown[i]),
+ ThreadPostingAndRunningTask::Action::RUN, false));
+ run_threads.back()->Start();
- for (auto& task : tasks) {
run_threads.push_back(std::make_unique<ThreadPostingAndRunningTask>(
- &tracker_, std::move(task), ThreadPostingAndRunningTask::Action::RUN,
- false));
+ &tracker_, std::move(tasks_block_shutdown[i]),
+ ThreadPostingAndRunningTask::Action::RUN, false));
run_threads.back()->Start();
}
@@ -829,9 +836,8 @@ TEST_F(TaskSchedulerTaskTrackerTest,
TEST_F(TaskSchedulerTaskTrackerTest, LoadWillPostAndRunDuringShutdown) {
// Inform |task_tracker_| that a BLOCK_SHUTDOWN task will be posted just to
// block shutdown.
- std::unique_ptr<Task> block_shutdown_task(
- CreateTask(TaskShutdownBehavior::BLOCK_SHUTDOWN));
- EXPECT_TRUE(tracker_.WillPostTask(block_shutdown_task.get()));
+ Task block_shutdown_task(CreateTask(TaskShutdownBehavior::BLOCK_SHUTDOWN));
+ EXPECT_TRUE(tracker_.WillPostTask(block_shutdown_task));
// Call Shutdown() asynchronously.
CallShutdownAsync();
@@ -874,12 +880,10 @@ TEST_F(TaskSchedulerTaskTrackerTest, LoadWillPostAndRunDuringShutdown) {
// Verify that RunNextTask() returns the sequence from which it ran a task when
// it can be rescheduled.
TEST_F(TaskSchedulerTaskTrackerTest, RunNextTaskReturnsSequenceToReschedule) {
- auto task_1 = std::make_unique<Task>(FROM_HERE, BindOnce(&DoNothing),
- TaskTraits(), TimeDelta());
- EXPECT_TRUE(tracker_.WillPostTask(task_1.get()));
- auto task_2 = std::make_unique<Task>(FROM_HERE, BindOnce(&DoNothing),
- TaskTraits(), TimeDelta());
- EXPECT_TRUE(tracker_.WillPostTask(task_2.get()));
+ Task task_1(FROM_HERE, BindOnce(&DoNothing), TaskTraits(), TimeDelta());
+ EXPECT_TRUE(tracker_.WillPostTask(task_1));
+ Task task_2(FROM_HERE, BindOnce(&DoNothing), TaskTraits(), TimeDelta());
+ EXPECT_TRUE(tracker_.WillPostTask(task_2));
scoped_refptr<Sequence> sequence =
test::CreateSequenceWithTask(std::move(task_1));
@@ -897,17 +901,16 @@ TEST_F(TaskSchedulerTaskTrackerTest, RunNextTaskReturnsSequenceToReschedule) {
TEST_F(TaskSchedulerTaskTrackerTest,
WillScheduleBackgroundSequenceWithMaxBackgroundSequences) {
constexpr int kMaxNumDispatchedBackgroundSequences = 2;
- TaskTracker tracker(kMaxNumDispatchedBackgroundSequences);
+ TaskTracker tracker("Test", kMaxNumDispatchedBackgroundSequences);
// Simulate posting |kMaxNumDispatchedBackgroundSequences| background tasks
// and scheduling the associated sequences. This should succeed.
std::vector<scoped_refptr<Sequence>> scheduled_sequences;
testing::StrictMock<MockCanScheduleSequenceObserver> never_notified_observer;
for (int i = 0; i < kMaxNumDispatchedBackgroundSequences; ++i) {
- auto task = std::make_unique<Task>(FROM_HERE, BindOnce(&DoNothing),
- TaskTraits(TaskPriority::BACKGROUND),
- TimeDelta());
- EXPECT_TRUE(tracker.WillPostTask(task.get()));
+ Task task(FROM_HERE, BindOnce(&DoNothing),
+ TaskTraits(TaskPriority::BACKGROUND), TimeDelta());
+ EXPECT_TRUE(tracker.WillPostTask(task));
scoped_refptr<Sequence> sequence =
test::CreateSequenceWithTask(std::move(task));
EXPECT_EQ(sequence,
@@ -925,12 +928,12 @@ TEST_F(TaskSchedulerTaskTrackerTest,
std::vector<scoped_refptr<Sequence>> extra_sequences;
for (int i = 0; i < kMaxNumDispatchedBackgroundSequences; ++i) {
extra_tasks_did_run.push_back(std::make_unique<bool>());
- auto extra_task = std::make_unique<Task>(
+ Task extra_task(
FROM_HERE,
BindOnce([](bool* extra_task_did_run) { *extra_task_did_run = true; },
Unretained(extra_tasks_did_run.back().get())),
TaskTraits(TaskPriority::BACKGROUND), TimeDelta());
- EXPECT_TRUE(tracker.WillPostTask(extra_task.get()));
+ EXPECT_TRUE(tracker.WillPostTask(extra_task));
extra_sequences.push_back(
test::CreateSequenceWithTask(std::move(extra_task)));
extra_observers.push_back(
@@ -977,16 +980,15 @@ void SetBool(bool* arg) {
TEST_F(TaskSchedulerTaskTrackerTest,
RunNextBackgroundTaskWithEarlierPendingBackgroundTask) {
constexpr int kMaxNumDispatchedBackgroundSequences = 1;
- TaskTracker tracker(kMaxNumDispatchedBackgroundSequences);
+ TaskTracker tracker("Test", kMaxNumDispatchedBackgroundSequences);
testing::StrictMock<MockCanScheduleSequenceObserver> never_notified_observer;
// Simulate posting a background task and scheduling the associated sequence.
// This should succeed.
bool task_a_1_did_run = false;
- auto task_a_1 = std::make_unique<Task>(
- FROM_HERE, BindOnce(&SetBool, Unretained(&task_a_1_did_run)),
- TaskTraits(TaskPriority::BACKGROUND), TimeDelta());
- EXPECT_TRUE(tracker.WillPostTask(task_a_1.get()));
+ Task task_a_1(FROM_HERE, BindOnce(&SetBool, Unretained(&task_a_1_did_run)),
+ TaskTraits(TaskPriority::BACKGROUND), TimeDelta());
+ EXPECT_TRUE(tracker.WillPostTask(task_a_1));
scoped_refptr<Sequence> sequence_a =
test::CreateSequenceWithTask(std::move(task_a_1));
EXPECT_EQ(sequence_a,
@@ -996,28 +998,24 @@ TEST_F(TaskSchedulerTaskTrackerTest,
// sequence. This should fail because the maximum number of background
// sequences that can be scheduled concurrently is already reached.
bool task_b_1_did_run = false;
- auto task_b_1 = std::make_unique<Task>(
- FROM_HERE, BindOnce(&SetBool, Unretained(&task_b_1_did_run)),
- TaskTraits(TaskPriority::BACKGROUND), TimeDelta());
- Task* const task_b_1_raw = task_b_1.get();
- EXPECT_TRUE(tracker.WillPostTask(task_b_1_raw));
+ Task task_b_1(FROM_HERE, BindOnce(&SetBool, Unretained(&task_b_1_did_run)),
+ TaskTraits(TaskPriority::BACKGROUND), TimeDelta());
+ EXPECT_TRUE(tracker.WillPostTask(task_b_1));
scoped_refptr<Sequence> sequence_b =
test::CreateSequenceWithTask(std::move(task_b_1));
testing::StrictMock<MockCanScheduleSequenceObserver> task_b_1_observer;
EXPECT_FALSE(tracker.WillScheduleSequence(sequence_b, &task_b_1_observer));
+  // Wait to be sure that the sequenced time of |task_a_2| is after the
+  // sequenced time of |task_b_1|.
+ PlatformThread::Sleep(TestTimeouts::tiny_timeout());
+
// Post an extra background task in |sequence_a|.
bool task_a_2_did_run = false;
- auto task_a_2 = std::make_unique<Task>(
- FROM_HERE, BindOnce(&SetBool, Unretained(&task_a_2_did_run)),
- TaskTraits(TaskPriority::BACKGROUND), TimeDelta());
- Task* const task_a_2_raw = task_a_2.get();
- EXPECT_TRUE(tracker.WillPostTask(task_a_2_raw));
+ Task task_a_2(FROM_HERE, BindOnce(&SetBool, Unretained(&task_a_2_did_run)),
+ TaskTraits(TaskPriority::BACKGROUND), TimeDelta());
+ EXPECT_TRUE(tracker.WillPostTask(task_a_2));
sequence_a->PushTask(std::move(task_a_2));
- // Make sure that the sequenced time of |task_a_2| is after the sequenced time
- // of |task_b_1|.
- task_a_2_raw->sequenced_time =
- task_b_1_raw->sequenced_time + TimeDelta::FromSeconds(1);
// Run the first task in |sequence_a|. RunNextTask() should return nullptr
// since |sequence_a| can't be rescheduled immediately. |task_b_1_observer|
@@ -1052,17 +1050,17 @@ class WaitAllowedTestThread : public SimpleThread {
private:
void Run() override {
- TaskTracker tracker;
+ TaskTracker tracker("Test");
// Waiting is allowed by default. Expect TaskTracker to disallow it before
// running a task without the WithBaseSyncPrimitives() trait.
internal::AssertBaseSyncPrimitivesAllowed();
- auto task_without_sync_primitives = std::make_unique<Task>(
+ Task task_without_sync_primitives(
FROM_HERE, Bind([]() {
EXPECT_DCHECK_DEATH({ internal::AssertBaseSyncPrimitivesAllowed(); });
}),
TaskTraits(), TimeDelta());
- EXPECT_TRUE(tracker.WillPostTask(task_without_sync_primitives.get()));
+ EXPECT_TRUE(tracker.WillPostTask(task_without_sync_primitives));
testing::StrictMock<MockCanScheduleSequenceObserver>
never_notified_observer;
auto sequence_without_sync_primitives = tracker.WillScheduleSequence(
@@ -1075,13 +1073,13 @@ class WaitAllowedTestThread : public SimpleThread {
// Disallow waiting. Expect TaskTracker to allow it before running a task
// with the WithBaseSyncPrimitives() trait.
ThreadRestrictions::DisallowWaiting();
- auto task_with_sync_primitives = std::make_unique<Task>(
+ Task task_with_sync_primitives(
FROM_HERE, Bind([]() {
// Shouldn't fail.
internal::AssertBaseSyncPrimitivesAllowed();
}),
TaskTraits(WithBaseSyncPrimitives()), TimeDelta());
- EXPECT_TRUE(tracker.WillPostTask(task_with_sync_primitives.get()));
+ EXPECT_TRUE(tracker.WillPostTask(task_with_sync_primitives));
auto sequence_with_sync_primitives = tracker.WillScheduleSequence(
test::CreateSequenceWithTask(std::move(task_with_sync_primitives)),
&never_notified_observer);
@@ -1111,40 +1109,43 @@ TEST(TaskSchedulerTaskTrackerWaitAllowedTest, WaitAllowed) {
TEST(TaskSchedulerTaskTrackerHistogramTest, TaskLatency) {
auto statistics_recorder = StatisticsRecorder::CreateTemporaryForTesting();
- TaskTracker tracker;
+ TaskTracker tracker("Test");
testing::StrictMock<MockCanScheduleSequenceObserver> never_notified_observer;
struct {
const TaskTraits traits;
const char* const expected_histogram;
- } tests[] = {
- {{TaskPriority::BACKGROUND},
- "TaskScheduler.TaskLatencyMicroseconds.BackgroundTaskPriority"},
- {{MayBlock(), TaskPriority::BACKGROUND},
- "TaskScheduler.TaskLatencyMicroseconds.BackgroundTaskPriority.MayBlock"},
- {{WithBaseSyncPrimitives(), TaskPriority::BACKGROUND},
- "TaskScheduler.TaskLatencyMicroseconds.BackgroundTaskPriority.MayBlock"},
- {{TaskPriority::USER_VISIBLE},
- "TaskScheduler.TaskLatencyMicroseconds.UserVisibleTaskPriority"},
- {{MayBlock(), TaskPriority::USER_VISIBLE},
- "TaskScheduler.TaskLatencyMicroseconds.UserVisibleTaskPriority."
- "MayBlock"},
- {{WithBaseSyncPrimitives(), TaskPriority::USER_VISIBLE},
- "TaskScheduler.TaskLatencyMicroseconds.UserVisibleTaskPriority."
- "MayBlock"},
- {{TaskPriority::USER_BLOCKING},
- "TaskScheduler.TaskLatencyMicroseconds.UserBlockingTaskPriority"},
- {{MayBlock(), TaskPriority::USER_BLOCKING},
- "TaskScheduler.TaskLatencyMicroseconds.UserBlockingTaskPriority."
- "MayBlock"},
- {{WithBaseSyncPrimitives(), TaskPriority::USER_BLOCKING},
- "TaskScheduler.TaskLatencyMicroseconds.UserBlockingTaskPriority."
- "MayBlock"}};
+ } tests[] = {{{TaskPriority::BACKGROUND},
+ "TaskScheduler.TaskLatencyMicroseconds.Test."
+ "BackgroundTaskPriority"},
+ {{MayBlock(), TaskPriority::BACKGROUND},
+ "TaskScheduler.TaskLatencyMicroseconds.Test."
+ "BackgroundTaskPriority_MayBlock"},
+ {{WithBaseSyncPrimitives(), TaskPriority::BACKGROUND},
+ "TaskScheduler.TaskLatencyMicroseconds.Test."
+ "BackgroundTaskPriority_MayBlock"},
+ {{TaskPriority::USER_VISIBLE},
+ "TaskScheduler.TaskLatencyMicroseconds.Test."
+ "UserVisibleTaskPriority"},
+ {{MayBlock(), TaskPriority::USER_VISIBLE},
+ "TaskScheduler.TaskLatencyMicroseconds.Test."
+ "UserVisibleTaskPriority_MayBlock"},
+ {{WithBaseSyncPrimitives(), TaskPriority::USER_VISIBLE},
+ "TaskScheduler.TaskLatencyMicroseconds.Test."
+ "UserVisibleTaskPriority_MayBlock"},
+ {{TaskPriority::USER_BLOCKING},
+ "TaskScheduler.TaskLatencyMicroseconds.Test."
+ "UserBlockingTaskPriority"},
+ {{MayBlock(), TaskPriority::USER_BLOCKING},
+ "TaskScheduler.TaskLatencyMicroseconds.Test."
+ "UserBlockingTaskPriority_MayBlock"},
+ {{WithBaseSyncPrimitives(), TaskPriority::USER_BLOCKING},
+ "TaskScheduler.TaskLatencyMicroseconds.Test."
+ "UserBlockingTaskPriority_MayBlock"}};
for (const auto& test : tests) {
- auto task = std::make_unique<Task>(FROM_HERE, Bind(&DoNothing), test.traits,
- TimeDelta());
- ASSERT_TRUE(tracker.WillPostTask(task.get()));
+ Task task(FROM_HERE, Bind(&DoNothing), test.traits, TimeDelta());
+ ASSERT_TRUE(tracker.WillPostTask(task));
HistogramTester tester;
diff --git a/chromium/base/task_scheduler/test_utils.cc b/chromium/base/task_scheduler/test_utils.cc
index 963fdf20c66..e8d4c99612c 100644
--- a/chromium/base/task_scheduler/test_utils.cc
+++ b/chromium/base/task_scheduler/test_utils.cc
@@ -13,7 +13,7 @@ namespace base {
namespace internal {
namespace test {
-scoped_refptr<Sequence> CreateSequenceWithTask(std::unique_ptr<Task> task) {
+scoped_refptr<Sequence> CreateSequenceWithTask(Task task) {
scoped_refptr<Sequence> sequence = MakeRefCounted<Sequence>();
sequence->PushTask(std::move(task));
return sequence;
diff --git a/chromium/base/task_scheduler/test_utils.h b/chromium/base/task_scheduler/test_utils.h
index 8e163c2fe3d..d7903d3768e 100644
--- a/chromium/base/task_scheduler/test_utils.h
+++ b/chromium/base/task_scheduler/test_utils.h
@@ -5,8 +5,6 @@
#ifndef BASE_TASK_SCHEDULER_TEST_UTILS_H_
#define BASE_TASK_SCHEDULER_TEST_UTILS_H_
-#include <memory>
-
#include "base/memory/ref_counted.h"
#include "base/task_runner.h"
@@ -25,7 +23,7 @@ namespace test {
enum class ExecutionMode { PARALLEL, SEQUENCED, SINGLE_THREADED };
// Creates a Sequence and pushes |task| to it. Returns that sequence.
-scoped_refptr<Sequence> CreateSequenceWithTask(std::unique_ptr<Task> task);
+scoped_refptr<Sequence> CreateSequenceWithTask(Task task);
// Creates a TaskRunner that posts tasks to |worker_pool| with the
// |execution_mode| execution mode and the WithBaseSyncPrimitives() trait.
diff --git a/chromium/base/test/BUILD.gn b/chromium/base/test/BUILD.gn
index 5871b9f308e..e260022971a 100644
--- a/chromium/base/test/BUILD.gn
+++ b/chromium/base/test/BUILD.gn
@@ -28,6 +28,9 @@ static_library("test_support") {
"../trace_event/trace_config_memory_test_util.h",
"android/java_handler_thread_helpers.cc",
"android/java_handler_thread_helpers.h",
+ "android/url_utils.cc",
+ "android/url_utils.h",
+ "bind_test_util.h",
"copy_only_int.h",
"fuzzed_data_provider.cc",
"fuzzed_data_provider.h",
@@ -57,13 +60,10 @@ static_library("test_support") {
"mock_log.cc",
"mock_log.h",
"move_only_int.h",
- "multiprocess_test.cc",
"multiprocess_test.h",
"multiprocess_test_android.cc",
"null_task_runner.cc",
"null_task_runner.h",
- "opaque_ref_counted.cc",
- "opaque_ref_counted.h",
"perf_log.cc",
"perf_log.h",
"perf_test_suite.cc",
@@ -151,6 +151,7 @@ static_library("test_support") {
"launcher/test_launcher_tracer.h",
"launcher/test_results_tracker.cc",
"launcher/unit_test_launcher.cc",
+ "multiprocess_test.cc",
]
}
@@ -342,6 +343,7 @@ if (is_android) {
sources = [
"android/java/src/org/chromium/base/MainReturnCodeResult.java",
"android/java/src/org/chromium/base/MultiprocessTestClientLauncher.java",
+ "android/javatests/src/org/chromium/base/test/util/UrlUtils.java",
]
jni_package = "base"
}
diff --git a/chromium/base/third_party/dmg_fp/dtoa_wrapper.cc b/chromium/base/third_party/dmg_fp/dtoa_wrapper.cc
index 5141e238c22..fb1ac8f368a 100644
--- a/chromium/base/third_party/dmg_fp/dtoa_wrapper.cc
+++ b/chromium/base/third_party/dmg_fp/dtoa_wrapper.cc
@@ -45,4 +45,5 @@ inline static void FREE_DTOA_LOCK(size_t n) {
#include "base/third_party/dmg_fp/dtoa.cc"
+#undef Bias // Avoid windows jumbo build breakage.
#undef Long // To avoid breaking jni code in jumbo builds
diff --git a/chromium/base/third_party/icu/README.chromium b/chromium/base/third_party/icu/README.chromium
index f755f27700b..297e89a2edd 100644
--- a/chromium/base/third_party/icu/README.chromium
+++ b/chromium/base/third_party/icu/README.chromium
@@ -2,7 +2,7 @@ Name: ICU
URL: http://site.icu-project.org/
Version: 60
License: Unicode
-License File: LICENSE
+License File: NOT_SHIPPED
This file has the relevant components from ICU copied to handle basic UTF8/16/32
conversions. Components are copied from umachine.h, utf.h, utf8.h, and utf16.h
@@ -12,3 +12,6 @@ The main change is that U_/U8_/U16_ prefixes have been replaced with
CBU_/CBU8_/CBU16_ (for "Chrome Base") to avoid confusion with the "real" ICU
macros should ICU be in use on the system. For the same reason, the functions
and types have been put in the "base_icu" namespace.
+
+Note that this license file is marked as NOT_SHIPPED, since a more complete
+ICU license is included from //third_party/icu/README.chromium
diff --git a/chromium/base/threading/platform_thread.h b/chromium/base/threading/platform_thread.h
index 8cdf9365aaa..7637e002dbf 100644
--- a/chromium/base/threading/platform_thread.h
+++ b/chromium/base/threading/platform_thread.h
@@ -17,7 +17,7 @@
#include "build/build_config.h"
#if defined(OS_WIN)
-#include <windows.h>
+#include "base/win/windows_types.h"
#elif defined(OS_MACOSX)
#include <mach/mach_types.h>
#elif defined(OS_FUCHSIA)
@@ -55,13 +55,9 @@ class PlatformThreadRef {
#elif defined(OS_POSIX)
typedef pthread_t RefType;
#endif
- PlatformThreadRef()
- : id_(0) {
- }
+ constexpr PlatformThreadRef() : id_(0) {}
- explicit PlatformThreadRef(RefType id)
- : id_(id) {
- }
+ explicit constexpr PlatformThreadRef(RefType id) : id_(id) {}
bool operator==(PlatformThreadRef other) const {
return id_ == other.id_;
@@ -85,9 +81,9 @@ class PlatformThreadHandle {
typedef pthread_t Handle;
#endif
- PlatformThreadHandle() : handle_(0) {}
+ constexpr PlatformThreadHandle() : handle_(0) {}
- explicit PlatformThreadHandle(Handle handle) : handle_(handle) {}
+ explicit constexpr PlatformThreadHandle(Handle handle) : handle_(handle) {}
bool is_equal(const PlatformThreadHandle& other) const {
return handle_ == other.handle_;
@@ -130,7 +126,7 @@ class BASE_EXPORT PlatformThread {
virtual void ThreadMain() = 0;
protected:
- virtual ~Delegate() {}
+ virtual ~Delegate() = default;
};
// Gets the current thread id, which may be useful for logging purposes.
diff --git a/chromium/base/threading/platform_thread_win.cc b/chromium/base/threading/platform_thread_win.cc
index eb2edcee441..77603cc233d 100644
--- a/chromium/base/threading/platform_thread_win.cc
+++ b/chromium/base/threading/platform_thread_win.cc
@@ -16,6 +16,8 @@
#include "base/threading/thread_restrictions.h"
#include "base/win/scoped_handle.h"
+#include <windows.h>
+
namespace base {
namespace {
diff --git a/chromium/base/threading/sequenced_worker_pool.cc b/chromium/base/threading/sequenced_worker_pool.cc
index 4a2fdfda6b9..7ea030a2f7a 100644
--- a/chromium/base/threading/sequenced_worker_pool.cc
+++ b/chromium/base/threading/sequenced_worker_pool.cc
@@ -605,7 +605,7 @@ void SequencedWorkerPool::Worker::Run() {
SequencedWorkerPool::Worker*
SequencedWorkerPool::Worker::GetForCurrentThread() {
// Don't construct lazy instance on check.
- if (lazy_tls_ptr_ == nullptr)
+ if (!lazy_tls_ptr_.IsCreated())
return nullptr;
return lazy_tls_ptr_.Get().Get();
diff --git a/chromium/base/threading/sequenced_worker_pool.h b/chromium/base/threading/sequenced_worker_pool.h
index c74b648e0f0..724c833542d 100644
--- a/chromium/base/threading/sequenced_worker_pool.h
+++ b/chromium/base/threading/sequenced_worker_pool.h
@@ -121,7 +121,7 @@ class BASE_EXPORT SequencedWorkerPool : public TaskRunner {
class BASE_EXPORT SequenceToken {
public:
SequenceToken() : id_(0) {}
- ~SequenceToken() {}
+ ~SequenceToken() = default;
bool Equals(const SequenceToken& other) const {
return id_ == other.id_;
@@ -147,7 +147,7 @@ class BASE_EXPORT SequencedWorkerPool : public TaskRunner {
// Allows tests to perform certain actions.
class TestingObserver {
public:
- virtual ~TestingObserver() {}
+ virtual ~TestingObserver() = default;
virtual void OnHasWork() = 0;
virtual void WillWaitForShutdown() = 0;
virtual void OnDestruct() = 0;
diff --git a/chromium/base/threading/thread_collision_warner.h b/chromium/base/threading/thread_collision_warner.h
index 4699a910dd2..b6993f64d9a 100644
--- a/chromium/base/threading/thread_collision_warner.h
+++ b/chromium/base/threading/thread_collision_warner.h
@@ -133,12 +133,12 @@ namespace base {
// used. During the unit tests is used another class that doesn't "DCHECK"
// in case of collision (check thread_collision_warner_unittests.cc)
struct BASE_EXPORT AsserterBase {
- virtual ~AsserterBase() {}
+ virtual ~AsserterBase() = default;
virtual void warn() = 0;
};
struct BASE_EXPORT DCheckAsserter : public AsserterBase {
- ~DCheckAsserter() override {}
+ ~DCheckAsserter() override = default;
void warn() override;
};
@@ -166,7 +166,7 @@ class BASE_EXPORT ThreadCollisionWarner {
warner_->EnterSelf();
}
- ~Check() {}
+ ~Check() = default;
private:
ThreadCollisionWarner* warner_;
diff --git a/chromium/base/threading/thread_local_storage.h b/chromium/base/threading/thread_local_storage.h
index b6f34c32872..09d426bc0a0 100644
--- a/chromium/base/threading/thread_local_storage.h
+++ b/chromium/base/threading/thread_local_storage.h
@@ -13,7 +13,7 @@
#include "build/build_config.h"
#if defined(OS_WIN)
-#include <windows.h>
+#include "base/win/windows_types.h"
#elif defined(OS_POSIX)
#include <pthread.h>
#endif
diff --git a/chromium/base/threading/thread_restrictions.h b/chromium/base/threading/thread_restrictions.h
index 0056f10fdca..abfde3aaab1 100644
--- a/chromium/base/threading/thread_restrictions.h
+++ b/chromium/base/threading/thread_restrictions.h
@@ -13,6 +13,7 @@
class BrowserProcessImpl;
class HistogramSynchronizer;
class NativeBackendKWallet;
+class KeyStorageLinux;
namespace android_webview {
class AwFormDatabaseService;
@@ -51,7 +52,7 @@ class TextInputClientMac;
} // namespace content
namespace cronet {
class CronetPrefsManager;
-class CronetURLRequestContextAdapter;
+class CronetURLRequestContext;
} // namespace cronet
namespace dbus {
class Bus;
@@ -69,6 +70,9 @@ class LevelDBMojoProxy;
namespace media {
class BlockingUrlProtocol;
}
+namespace midi {
+class TaskService; // https://crbug.com/796830
+}
namespace mojo {
class SyncCallRestrictions;
namespace edk {
@@ -199,7 +203,7 @@ class BASE_EXPORT ScopedAllowBlocking {
// in unit tests to avoid the friend requirement.
FRIEND_TEST_ALL_PREFIXES(ThreadRestrictionsTest, ScopedAllowBlocking);
friend class cronet::CronetPrefsManager;
- friend class cronet::CronetURLRequestContextAdapter;
+ friend class cronet::CronetURLRequestContext;
friend class resource_coordinator::TabManagerDelegate; // crbug.com/778703
friend class ScopedAllowBlockingForTesting;
@@ -288,6 +292,8 @@ class BASE_EXPORT ScopedAllowBaseSyncPrimitivesOutsideBlockingScope {
FRIEND_TEST_ALL_PREFIXES(
ThreadRestrictionsTest,
ScopedAllowBaseSyncPrimitivesOutsideBlockingScopeResetsState);
+ friend class ::KeyStorageLinux;
+ friend class midi::TaskService; // https://crbug.com/796830
ScopedAllowBaseSyncPrimitivesOutsideBlockingScope()
EMPTY_BODY_IF_DCHECK_IS_OFF;
diff --git a/chromium/base/threading/thread_task_runner_handle.cc b/chromium/base/threading/thread_task_runner_handle.cc
index 1d9756fb235..1e27675de24 100644
--- a/chromium/base/threading/thread_task_runner_handle.cc
+++ b/chromium/base/threading/thread_task_runner_handle.cc
@@ -39,7 +39,8 @@ bool ThreadTaskRunnerHandle::IsSet() {
// static
ScopedClosureRunner ThreadTaskRunnerHandle::OverrideForTesting(
- scoped_refptr<SingleThreadTaskRunner> overriding_task_runner) {
+ scoped_refptr<SingleThreadTaskRunner> overriding_task_runner,
+ ThreadTaskRunnerHandle::OverrideType type) {
// OverrideForTesting() is not compatible with a SequencedTaskRunnerHandle
// being set (but SequencedTaskRunnerHandle::IsSet() includes
// ThreadTaskRunnerHandle::IsSet() so that's discounted as the only valid
@@ -67,7 +68,9 @@ ScopedClosureRunner ThreadTaskRunnerHandle::OverrideForTesting(
ttrh->task_runner_.swap(overriding_task_runner);
auto no_running_during_override =
- std::make_unique<RunLoop::ScopedDisallowRunningForTesting>();
+ type == OverrideType::kTakeOverThread
+ ? nullptr
+ : std::make_unique<RunLoop::ScopedDisallowRunningForTesting>();
return ScopedClosureRunner(base::Bind(
[](scoped_refptr<SingleThreadTaskRunner> task_runner_to_restore,
diff --git a/chromium/base/threading/thread_task_runner_handle.h b/chromium/base/threading/thread_task_runner_handle.h
index 480a03c9e12..2e41eb0ef3a 100644
--- a/chromium/base/threading/thread_task_runner_handle.h
+++ b/chromium/base/threading/thread_task_runner_handle.h
@@ -7,6 +7,7 @@
#include "base/base_export.h"
#include "base/callback_helpers.h"
+#include "base/compiler_specific.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/single_thread_task_runner.h"
@@ -34,9 +35,19 @@ class BASE_EXPORT ThreadTaskRunnerHandle {
// ScopedClosureRunners expire in LIFO (stack) order. Note: nesting
// ThreadTaskRunnerHandles isn't generally desired but it's useful in unit
// tests where multiple task runners can share the main thread for simplicity
- // and determinism.
+ // and determinism (in which case RunLoop::Run() is banned for the scope of
+ // the override as it would execute tasks from the wrong task runner). It's
+ // also useful in unit test frameworks in which a task runner takes over the
+ // main thread; in that case it's fine to allow running through
+ // |type = kTakeOverThread| iff RunLoop::Run() will result in running tasks
+ // posted to the overriding ThreadTaskRunnerHandle.
+ enum class OverrideType {
+ kDefault,
+ kTakeOverThread,
+ };
static ScopedClosureRunner OverrideForTesting(
- scoped_refptr<SingleThreadTaskRunner> overriding_task_runner);
+ scoped_refptr<SingleThreadTaskRunner> overriding_task_runner,
+ OverrideType type = OverrideType::kDefault) WARN_UNUSED_RESULT;
// Binds |task_runner| to the current thread. |task_runner| must belong
// to the current thread for this to succeed.
diff --git a/chromium/base/time/time.cc b/chromium/base/time/time.cc
index 52be6aa8adb..2f888d1ed46 100644
--- a/chromium/base/time/time.cc
+++ b/chromium/base/time/time.cc
@@ -135,6 +135,15 @@ std::ostream& operator<<(std::ostream& os, TimeDelta time_delta) {
// Time -----------------------------------------------------------------------
// static
+Time Time::FromDeltaSinceWindowsEpoch(TimeDelta delta) {
+ return Time(delta.InMicroseconds());
+}
+
+TimeDelta Time::ToDeltaSinceWindowsEpoch() const {
+ return TimeDelta::FromMicroseconds(us_);
+}
+
+// static
Time Time::FromTimeT(time_t tt) {
if (tt == 0)
return Time(); // Preserve 0 so we can tell it doesn't exist.
diff --git a/chromium/base/time/time.h b/chromium/base/time/time.h
index c1e25134222..fd9afa244dd 100644
--- a/chromium/base/time/time.h
+++ b/chromium/base/time/time.h
@@ -15,7 +15,7 @@
//
// TimeTicks and ThreadTicks represent an abstract time that is most of the time
// incrementing, for use in measuring time durations. Internally, they are
-// represented in microseconds. They can not be converted to a human-readable
+// represented in microseconds. They cannot be converted to a human-readable
// time, but are guaranteed not to decrease (unlike the Time class). Note that
// TimeTicks may "stand still" (e.g., if the computer is suspended), and
// ThreadTicks will "stand still" whenever the thread has been de-scheduled by
@@ -33,11 +33,11 @@
//
// So many choices! Which time class should you use? Examples:
//
-// Time: Interpreting the wall-clock time provided by a remote
-// system. Detecting whether cached resources have
-// expired. Providing the user with a display of the current date
-// and time. Determining the amount of time between events across
-// re-boots of the machine.
+// Time: Interpreting the wall-clock time provided by a remote system.
+// Detecting whether cached resources have expired. Providing the
+// user with a display of the current date and time. Determining
+// the amount of time between events across re-boots of the
+// machine.
//
// TimeTicks: Tracking the amount of time a task runs. Executing delayed
// tasks at the right time. Computing presentation timestamps.
@@ -79,10 +79,8 @@
#endif
#if defined(OS_WIN)
-// For FILETIME in FromFileTime, until it moves to a new converter class.
-// See TODO(iyengar) below.
-#include <windows.h>
#include "base/gtest_prod_util.h"
+#include "base/win/windows_types.h"
#endif
namespace base {
@@ -359,7 +357,8 @@ class TimeBase {
// use this and do arithmetic on it, as it is more error prone than using the
// provided operators.
//
- // DEPRECATED - Do not use in new code. http://crbug.com/634507
+ // DEPRECATED - Do not use in new code. For serializing Time values, prefer
+ // Time::ToDeltaSinceWindowsEpoch().InMicroseconds(). http://crbug.com/634507
int64_t ToInternalValue() const { return us_; }
// The amount of time since the origin (or "zero") point. This is a syntactic
@@ -416,12 +415,13 @@ class TimeBase {
return us_ >= other.us_;
}
- // Converts an integer value representing TimeClass to a class. This is used
- // when deserializing a |TimeClass| structure, using a value known to be
+ // Converts an integer value representing TimeClass to a class. This may be
+ // used when deserializing a |TimeClass| structure, using a value known to be
// compatible. It is not provided as a constructor because the integer type
// may be unclear from the perspective of a caller.
//
- // DEPRECATED - Do not use in new code. http://crbug.com/634507
+ // DEPRECATED - Do not use in new code. For deserializing Time values, prefer
+ // Time::FromDeltaSinceWindowsEpoch(). http://crbug.com/634507
static TimeClass FromInternalValue(int64_t us) { return TimeClass(us); }
protected:
@@ -507,8 +507,7 @@ class BASE_EXPORT Time : public time_internal::TimeBase<Time> {
};
// Contains the NULL time. Use Time::Now() to get the current time.
- Time() : TimeBase(0) {
- }
+ constexpr Time() : TimeBase(0) {}
// Returns the time for epoch in Unix-like system (Jan 1, 1970).
static Time UnixEpoch();
@@ -524,6 +523,20 @@ class BASE_EXPORT Time : public time_internal::TimeBase<Time> {
// For timing sensitive unittests, this function should be used.
static Time NowFromSystemTime();
+ // Converts to/from TimeDeltas relative to the Windows epoch (1601-01-01
+ // 00:00:00 UTC). Prefer these methods for opaque serialization and
+ // deserialization of time values, e.g.
+ //
+ // // Serialization:
+ // base::Time last_updated = ...;
+ // SaveToDatabase(last_updated.ToDeltaSinceWindowsEpoch().InMicroseconds());
+ //
+ // // Deserialization:
+ // base::Time last_updated = base::Time::FromDeltaSinceWindowsEpoch(
+ // base::TimeDelta::FromMicroseconds(LoadFromDatabase()));
+ static Time FromDeltaSinceWindowsEpoch(TimeDelta delta);
+ TimeDelta ToDeltaSinceWindowsEpoch() const;
+
// Converts to/from time_t in UTC and a Time class.
static Time FromTimeT(time_t tt);
time_t ToTimeT() const;
diff --git a/chromium/base/time/time_unittest.cc b/chromium/base/time/time_unittest.cc
index 28eff7aa6bb..4f73535a3f7 100644
--- a/chromium/base/time/time_unittest.cc
+++ b/chromium/base/time/time_unittest.cc
@@ -19,6 +19,8 @@
#if defined(OS_IOS)
#include "base/ios/ios_util.h"
+#elif defined(OS_WIN)
+#include <windows.h>
#endif
namespace base {
@@ -115,6 +117,26 @@ class TimeTest : public testing::Test {
Time comparison_time_pdt_;
};
+// Test conversion to/from TimeDeltas elapsed since the Windows epoch.
+// Conversions should be idempotent and non-lossy.
+TEST_F(TimeTest, DeltaSinceWindowsEpoch) {
+ const TimeDelta delta = TimeDelta::FromMicroseconds(123);
+ EXPECT_EQ(delta,
+ Time::FromDeltaSinceWindowsEpoch(delta).ToDeltaSinceWindowsEpoch());
+
+ const Time now = Time::Now();
+ const Time actual =
+ Time::FromDeltaSinceWindowsEpoch(now.ToDeltaSinceWindowsEpoch());
+ EXPECT_EQ(now, actual);
+
+ // Null times should remain null after a round-trip conversion. This is an
+ // important invariant for the common use case of serialization +
+ // deserialization.
+ const Time should_be_null =
+ Time::FromDeltaSinceWindowsEpoch(Time().ToDeltaSinceWindowsEpoch());
+ EXPECT_TRUE(should_be_null.is_null());
+}
+
// Test conversion to/from time_t.
TEST_F(TimeTest, TimeT) {
EXPECT_EQ(10, Time().FromTimeT(10).ToTimeT());
diff --git a/chromium/base/time/time_win_unittest.cc b/chromium/base/time/time_win_unittest.cc
index d8ee7e38388..62143f5629b 100644
--- a/chromium/base/time/time_win_unittest.cc
+++ b/chromium/base/time/time_win_unittest.cc
@@ -277,9 +277,9 @@ TEST(TimeTicks, FromQPCValue) {
// Test that the conversions using FromQPCValue() match those computed here
// using simple floating-point arithmetic. The floating-point math provides
- // enough precision to confirm the implementation is correct to the
- // microsecond for all |test_cases| (though it would be insufficient to
- // confirm many "very large" tick values which are not being tested here).
+ // enough precision for all reasonable values to confirm that the
+ // implementation is correct to the microsecond, and for "very large" values
+ // it confirms that the answer is very close to correct.
for (int64_t ticks : test_cases) {
const double expected_microseconds_since_origin =
(static_cast<double>(ticks) * Time::kMicrosecondsPerSecond) /
@@ -287,9 +287,18 @@ TEST(TimeTicks, FromQPCValue) {
const TimeTicks converted_value = TimeTicks::FromQPCValue(ticks);
const double converted_microseconds_since_origin =
static_cast<double>((converted_value - TimeTicks()).InMicroseconds());
+ // When we test with very large numbers we end up in a range where adjacent
+ // double values are far apart - 512.0 apart in one test failure. In that
+ // situation it makes no sense for our epsilon to be 1.0 - it should be
+ // the difference between adjacent doubles.
+ double epsilon = nextafter(expected_microseconds_since_origin, INFINITY) -
+ expected_microseconds_since_origin;
+ // Epsilon must be at least 1.0 because converted_microseconds_since_origin
+ // comes from an integral value and the rounding is not perfect.
+ if (epsilon < 1.0)
+ epsilon = 1.0;
EXPECT_NEAR(expected_microseconds_since_origin,
- converted_microseconds_since_origin,
- 1.0)
+ converted_microseconds_since_origin, epsilon)
<< "ticks=" << ticks << ", to be converted via logic path: "
<< (ticks < Time::kQPCOverflowThreshold ? "FAST" : "SAFE");
}
diff --git a/chromium/base/tools_sanity_unittest.cc b/chromium/base/tools_sanity_unittest.cc
index 5c41bd74f29..a01d07bd82e 100644
--- a/chromium/base/tools_sanity_unittest.cc
+++ b/chromium/base/tools_sanity_unittest.cc
@@ -41,14 +41,15 @@ if (debug::IsBinaryInstrumented()) { EXPECT_DEATH(action, \
#define HARMFUL_ACCESS(action,error_regexp) EXPECT_DEATH(action,error_regexp)
#endif // !OS_IOS && !SYZYASAN
#else
-#define HARMFUL_ACCESS(action,error_regexp) \
-do { if (RunningOnValgrind()) { action; } } while (0)
+#define HARMFUL_ACCESS(action, error_regexp)
+#define HARMFUL_ACCESS_IS_NOOP
#endif
void DoReadUninitializedValue(char *ptr) {
// Comparison with 64 is to prevent clang from optimizing away the
// jump -- valgrind only catches jumps and conditional moves, but clang uses
- // the borrow flag if the condition is just `*ptr == '\0'`.
+ // the borrow flag if the condition is just `*ptr == '\0'`. We no longer
+ // support valgrind, but this constant should be fine to keep as-is.
if (*ptr == 64) {
VLOG(1) << "Uninit condition is true";
} else {
@@ -65,6 +66,7 @@ void ReadUninitializedValue(char *ptr) {
#endif
}
+#ifndef HARMFUL_ACCESS_IS_NOOP
void ReadValueOutOfArrayBoundsLeft(char *ptr) {
char c = ptr[-2];
VLOG(1) << "Reading a byte out of bounds: " << c;
@@ -75,15 +77,14 @@ void ReadValueOutOfArrayBoundsRight(char *ptr, size_t size) {
VLOG(1) << "Reading a byte out of bounds: " << c;
}
-// This is harmless if you run it under Valgrind thanks to redzones.
void WriteValueOutOfArrayBoundsLeft(char *ptr) {
ptr[-1] = kMagicValue;
}
-// This is harmless if you run it under Valgrind thanks to redzones.
void WriteValueOutOfArrayBoundsRight(char *ptr, size_t size) {
ptr[size] = kMagicValue;
}
+#endif // HARMFUL_ACCESS_IS_NOOP
void MakeSomeErrors(char *ptr, size_t size) {
ReadUninitializedValue(ptr);
@@ -149,44 +150,37 @@ TEST(ToolsSanityTest, MAYBE_AccessesToMallocMemory) {
HARMFUL_ACCESS(foo[5] = 0, "heap-use-after-free");
}
+#if defined(ADDRESS_SANITIZER) || defined(SYZYASAN)
+
static int* allocateArray() {
// Clang warns about the mismatched new[]/delete if they occur in the same
// function.
return new int[10];
}
+// This test may corrupt memory if not compiled with AddressSanitizer.
TEST(ToolsSanityTest, MAYBE_ArrayDeletedWithoutBraces) {
-#if !defined(ADDRESS_SANITIZER) && !defined(SYZYASAN)
- // This test may corrupt memory if not run under Valgrind or compiled with
- // AddressSanitizer.
- if (!RunningOnValgrind())
- return;
-#endif
-
// Without the |volatile|, clang optimizes away the next two lines.
int* volatile foo = allocateArray();
delete foo;
}
+#endif
+#if defined(ADDRESS_SANITIZER)
static int* allocateScalar() {
// Clang warns about the mismatched new/delete[] if they occur in the same
// function.
return new int;
}
+// This test may corrupt memory if not compiled with AddressSanitizer.
TEST(ToolsSanityTest, MAYBE_SingleElementDeletedWithBraces) {
-#if !defined(ADDRESS_SANITIZER)
- // This test may corrupt memory if not run under Valgrind or compiled with
- // AddressSanitizer.
- if (!RunningOnValgrind())
- return;
-#endif
-
// Without the |volatile|, clang optimizes away the next two lines.
int* volatile foo = allocateScalar();
(void) foo;
delete [] foo;
}
+#endif
#if defined(ADDRESS_SANITIZER) || defined(SYZYASAN)
@@ -221,6 +215,7 @@ TEST(ToolsSanityTest, DISABLED_AddressSanitizerGlobalOOBCrashTest) {
*access = 43;
}
+#ifndef HARMFUL_ACCESS_IS_NOOP
TEST(ToolsSanityTest, AsanHeapOverflow) {
HARMFUL_ACCESS(debug::AsanHeapOverflow() ,"to the right");
}
@@ -233,7 +228,7 @@ TEST(ToolsSanityTest, AsanHeapUseAfterFree) {
HARMFUL_ACCESS(debug::AsanHeapUseAfterFree(), "heap-use-after-free");
}
-#if defined(SYZYASAN)
+#if defined(SYZYASAN) && defined(COMPILER_MSVC)
TEST(ToolsSanityTest, AsanCorruptHeapBlock) {
HARMFUL_ACCESS(debug::AsanCorruptHeapBlock(), "");
}
@@ -243,7 +238,8 @@ TEST(ToolsSanityTest, AsanCorruptHeap) {
// particular string to look for in the stack trace.
EXPECT_DEATH(debug::AsanCorruptHeap(), "");
}
-#endif // SYZYASAN
+#endif // SYZYASAN && COMPILER_MSVC
+#endif // !HARMFUL_ACCESS_IS_NOOP
#endif // ADDRESS_SANITIZER || SYZYASAN
@@ -423,4 +419,12 @@ TEST(ToolsSanityTest, BadUnrelatedCast) {
#endif // CFI_ERROR_MSG
+#undef CFI_ERROR_MSG
+#undef MAYBE_AccessesToNewMemory
+#undef MAYBE_AccessesToMallocMemory
+#undef MAYBE_ArrayDeletedWithoutBraces
+#undef MAYBE_SingleElementDeletedWithBraces
+#undef HARMFUL_ACCESS
+#undef HARMFUL_ACCESS_IS_NOOP
+
} // namespace base
diff --git a/chromium/base/trace_event/memory_dump_manager.cc b/chromium/base/trace_event/memory_dump_manager.cc
index 1e1b84bf8a6..5468528efb2 100644
--- a/chromium/base/trace_event/memory_dump_manager.cc
+++ b/chromium/base/trace_event/memory_dump_manager.cc
@@ -60,8 +60,7 @@ void DoGlobalDumpWithoutCallback(
MemoryDumpManager::RequestGlobalDumpFunction global_dump_fn,
MemoryDumpType dump_type,
MemoryDumpLevelOfDetail level_of_detail) {
- GlobalMemoryDumpRequestArgs args{dump_type, level_of_detail};
- global_dump_fn.Run(args);
+ global_dump_fn.Run(dump_type, level_of_detail);
}
// Proxy class which wraps a ConvertableToTraceFormat owned by the
@@ -449,8 +448,8 @@ void MemoryDumpManager::UnregisterDumpProviderInternal(
if (take_mdp_ownership_and_delete_async) {
// The MDP will be deleted whenever the MDPInfo struct will, that is either:
// - At the end of this function, if no dump is in progress.
- // - Either in SetupNextMemoryDump() or InvokeOnMemoryDump() when MDPInfo is
- // removed from |pending_dump_providers|.
+ // - In ContinueAsyncProcessDump() when MDPInfo is removed from
+ // |pending_dump_providers|.
// - When the provider is removed from other clients (MemoryPeakDetector).
DCHECK(!(*mdp_iter)->owned_dump_provider);
(*mdp_iter)->owned_dump_provider = std::move(owned_mdp);
@@ -565,192 +564,164 @@ void MemoryDumpManager::CreateProcessDump(
// Start the process dump. This involves task runner hops as specified by the
// MemoryDumpProvider(s) in RegisterDumpProvider()).
- SetupNextMemoryDump(std::move(pmd_async_state));
-}
-
-// PostTask InvokeOnMemoryDump() to the dump provider's sequenced task runner. A
-// PostTask is always required for a generic SequencedTaskRunner to ensure that
-// no other task is running on it concurrently. SetupNextMemoryDump() and
-// InvokeOnMemoryDump() are called alternatively which linearizes the dump
-// provider's OnMemoryDump invocations.
-// At most one of either SetupNextMemoryDump() or InvokeOnMemoryDump() can be
-// active at any time for a given PMD, regardless of status of the |lock_|.
-// |lock_| is used in these functions purely to ensure consistency w.r.t.
-// (un)registrations of |dump_providers_|.
-void MemoryDumpManager::SetupNextMemoryDump(
- std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
+ ContinueAsyncProcessDump(pmd_async_state.release());
+}
+
+// Invokes OnMemoryDump() on all MDPs that are next in the pending list and run
+// on the current sequenced task runner. If the next MDP does not run in current
+// sequenced task runner, then switches to that task runner and continues. All
+// OnMemoryDump() invocations are linearized. |lock_| is used in these functions
+// purely to ensure consistency w.r.t. (un)registrations of |dump_providers_|.
+void MemoryDumpManager::ContinueAsyncProcessDump(
+ ProcessMemoryDumpAsyncState* owned_pmd_async_state) {
HEAP_PROFILER_SCOPED_IGNORE;
// Initalizes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs
// in the PostTask below don't end up registering their own dump providers
// (for discounting trace memory overhead) while holding the |lock_|.
TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();
- if (pmd_async_state->pending_dump_providers.empty())
- return FinishAsyncProcessDump(std::move(pmd_async_state));
+ // In theory |owned_pmd_async_state| should be a unique_ptr. The only reason
+ // why it isn't is because of the corner case logic of |did_post_task|
+ // above, which needs to take back the ownership of the |pmd_async_state| when
+ // the PostTask() fails.
+ // Unfortunately, PostTask() destroys the unique_ptr arguments upon failure
+  // to prevent accidental leaks. Using a unique_ptr would prevent us from being able to
+ // skip the hop and move on. Hence the manual naked -> unique ptr juggling.
+ auto pmd_async_state = WrapUnique(owned_pmd_async_state);
+ owned_pmd_async_state = nullptr;
- // Read MemoryDumpProviderInfo thread safety considerations in
- // memory_dump_manager.h when accessing |mdpinfo| fields.
- MemoryDumpProviderInfo* mdpinfo =
- pmd_async_state->pending_dump_providers.back().get();
+ while (!pmd_async_state->pending_dump_providers.empty()) {
+ // Read MemoryDumpProviderInfo thread safety considerations in
+ // memory_dump_manager.h when accessing |mdpinfo| fields.
+ MemoryDumpProviderInfo* mdpinfo =
+ pmd_async_state->pending_dump_providers.back().get();
- // If we are in background tracing, we should invoke only the whitelisted
- // providers. Ignore other providers and continue.
- if (pmd_async_state->req_args.level_of_detail ==
- MemoryDumpLevelOfDetail::BACKGROUND) {
- // TODO(ssid): This is a temporary hack to fix crashes
- // https://crbug.com/797784. We could still cause stack overflow in a
- // detailed mode dump or when there are lot of providers whitelisted.
- while (!mdpinfo->whitelisted_for_background_mode) {
+ if (!IsDumpProviderAllowedToDump(pmd_async_state->req_args, *mdpinfo)) {
pmd_async_state->pending_dump_providers.pop_back();
- if (pmd_async_state->pending_dump_providers.empty())
- return FinishAsyncProcessDump(std::move(pmd_async_state));
- mdpinfo = pmd_async_state->pending_dump_providers.back().get();
+ continue;
}
- }
- // If we are in summary mode, we only need to invoke the providers
- // whitelisted for summary mode.
- if (pmd_async_state->req_args.dump_type == MemoryDumpType::SUMMARY_ONLY) {
- // TODO(ssid): This is a temporary hack to fix crashes
- // https://crbug.com/797784. We could still cause stack overflow in a
- // detailed mode dump or when there are lot of providers whitelisted. It is
- // assumed here that a provider whitelisted for summary mode is also
- // whitelisted for background mode and skip the check.
- while (!mdpinfo->whitelisted_for_summary_mode) {
+ // If the dump provider did not specify a task runner affinity, dump on
+ // |dump_thread_|.
+ scoped_refptr<SequencedTaskRunner> task_runner = mdpinfo->task_runner;
+ if (!task_runner) {
+ DCHECK(mdpinfo->options.dumps_on_single_thread_task_runner);
+ task_runner = pmd_async_state->dump_thread_task_runner;
+ DCHECK(task_runner);
+ }
+
+ // If |RunsTasksInCurrentSequence()| is true then no PostTask is
+ // required since we are on the right SequencedTaskRunner.
+ if (task_runner->RunsTasksInCurrentSequence()) {
+ InvokeOnMemoryDump(mdpinfo, pmd_async_state->process_memory_dump.get());
pmd_async_state->pending_dump_providers.pop_back();
- if (pmd_async_state->pending_dump_providers.empty())
- return FinishAsyncProcessDump(std::move(pmd_async_state));
- mdpinfo = pmd_async_state->pending_dump_providers.back().get();
+ continue;
}
- }
- // If the dump provider did not specify a task runner affinity, dump on
- // |dump_thread_|.
- scoped_refptr<SequencedTaskRunner> task_runner = mdpinfo->task_runner;
- if (!task_runner) {
- DCHECK(mdpinfo->options.dumps_on_single_thread_task_runner);
- task_runner = pmd_async_state->dump_thread_task_runner;
- DCHECK(task_runner);
- }
+ bool did_post_task = task_runner->PostTask(
+ FROM_HERE,
+ BindOnce(&MemoryDumpManager::ContinueAsyncProcessDump, Unretained(this),
+ Unretained(pmd_async_state.get())));
- if (mdpinfo->options.dumps_on_single_thread_task_runner &&
- task_runner->RunsTasksInCurrentSequence()) {
- // If |dumps_on_single_thread_task_runner| is true then no PostTask is
- // required if we are on the right thread.
- return InvokeOnMemoryDump(pmd_async_state.release());
- }
+ if (did_post_task) {
+      // Ownership is transferred to the posted task.
+ ignore_result(pmd_async_state.release());
+ return;
+ }
- bool did_post_task = task_runner->PostTask(
- FROM_HERE, BindOnce(&MemoryDumpManager::InvokeOnMemoryDump,
- Unretained(this), Unretained(pmd_async_state.get())));
+ // PostTask usually fails only if the process or thread is shut down. So,
+ // the dump provider is disabled here. But, don't disable unbound dump
+ // providers, since the |dump_thread_| is controlled by MDM.
+ if (mdpinfo->task_runner) {
+ // A locked access is required to R/W |disabled| (for the
+ // UnregisterAndDeleteDumpProviderSoon() case).
+ AutoLock lock(lock_);
+ mdpinfo->disabled = true;
+ }
- if (did_post_task) {
- // Ownership is tranferred to InvokeOnMemoryDump().
- ignore_result(pmd_async_state.release());
- return;
+ // PostTask failed. Ignore the dump provider and continue.
+ pmd_async_state->pending_dump_providers.pop_back();
}
- // PostTask usually fails only if the process or thread is shut down. So, the
- // dump provider is disabled here. But, don't disable unbound dump providers.
- // The utility thread is normally shutdown when disabling the trace and
- // getting here in this case is expected.
- if (mdpinfo->task_runner) {
- DLOG(ERROR) << "Disabling MemoryDumpProvider \"" << mdpinfo->name
- << "\". Failed to post task on the task runner provided.";
+ FinishAsyncProcessDump(std::move(pmd_async_state));
+}
- // A locked access is required to R/W |disabled| (for the
- // UnregisterAndDeleteDumpProviderSoon() case).
- AutoLock lock(lock_);
- mdpinfo->disabled = true;
+bool MemoryDumpManager::IsDumpProviderAllowedToDump(
+ const MemoryDumpRequestArgs& req_args,
+ const MemoryDumpProviderInfo& mdpinfo) const {
+ // If we are in background tracing, we should invoke only the whitelisted
+ // providers. Ignore other providers and continue.
+ if (req_args.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND &&
+ !mdpinfo.whitelisted_for_background_mode) {
+ return false;
}
- // PostTask failed. Ignore the dump provider and continue.
- pmd_async_state->pending_dump_providers.pop_back();
- SetupNextMemoryDump(std::move(pmd_async_state));
+ // If we are in summary mode, we only need to invoke the providers
+ // whitelisted for summary mode.
+ if (req_args.dump_type == MemoryDumpType::SUMMARY_ONLY &&
+ !mdpinfo.whitelisted_for_summary_mode) {
+ return false;
+ }
+
+ return true;
}
// This function is called on the right task runner for current MDP. It is
// either the task runner specified by MDP or |dump_thread_task_runner| if the
// MDP did not specify task runner. Invokes the dump provider's OnMemoryDump()
// (unless disabled).
-void MemoryDumpManager::InvokeOnMemoryDump(
- ProcessMemoryDumpAsyncState* owned_pmd_async_state) {
+void MemoryDumpManager::InvokeOnMemoryDump(MemoryDumpProviderInfo* mdpinfo,
+ ProcessMemoryDump* pmd) {
HEAP_PROFILER_SCOPED_IGNORE;
- // In theory |owned_pmd_async_state| should be a scoped_ptr. The only reason
- // why it isn't is because of the corner case logic of |did_post_task|
- // above, which needs to take back the ownership of the |pmd_async_state| when
- // the PostTask() fails.
- // Unfortunately, PostTask() destroys the scoped_ptr arguments upon failure
- // to prevent accidental leaks. Using a scoped_ptr would prevent us to to
- // skip the hop and move on. Hence the manual naked -> scoped ptr juggling.
- auto pmd_async_state = WrapUnique(owned_pmd_async_state);
- owned_pmd_async_state = nullptr;
-
- // Read MemoryDumpProviderInfo thread safety considerations in
- // memory_dump_manager.h when accessing |mdpinfo| fields.
- MemoryDumpProviderInfo* mdpinfo =
- pmd_async_state->pending_dump_providers.back().get();
-
DCHECK(!mdpinfo->task_runner ||
mdpinfo->task_runner->RunsTasksInCurrentSequence());
- // Limit the scope of the TRACE_EVENT1 below to not include the
- // SetupNextMemoryDump(). Don't replace with a BEGIN/END pair or change the
- // event name, as the slow-reports pipeline relies on this event.
- {
- TRACE_EVENT1(kTraceCategory, "MemoryDumpManager::InvokeOnMemoryDump",
- "dump_provider.name", mdpinfo->name);
+ TRACE_EVENT1(kTraceCategory, "MemoryDumpManager::InvokeOnMemoryDump",
+ "dump_provider.name", mdpinfo->name);
- // Do not add any other TRACE_EVENT macro (or function that might have them)
- // below this point. Under some rare circunstances, they can re-initialize
- // and invalide the current ThreadLocalEventBuffer MDP, making the
- // |should_dump| check below susceptible to TOCTTOU bugs (crbug.com/763365).
+ // Do not add any other TRACE_EVENT macro (or function that might have them)
+  // below this point. Under some rare circumstances, they can re-initialize
+  // and invalidate the current ThreadLocalEventBuffer MDP, making the
+ // |should_dump| check below susceptible to TOCTTOU bugs
+ // (https://crbug.com/763365).
- bool should_dump;
- bool is_thread_bound;
- {
- // A locked access is required to R/W |disabled| (for the
- // UnregisterAndDeleteDumpProviderSoon() case).
- AutoLock lock(lock_);
+ bool is_thread_bound;
+ {
+ // A locked access is required to R/W |disabled| (for the
+ // UnregisterAndDeleteDumpProviderSoon() case).
+ AutoLock lock(lock_);
- // Unregister the dump provider if it failed too many times consecutively.
- if (!mdpinfo->disabled &&
- mdpinfo->consecutive_failures >= kMaxConsecutiveFailuresCount) {
- mdpinfo->disabled = true;
- LOG(ERROR) << "Disabling MemoryDumpProvider \"" << mdpinfo->name
- << "\". Dump failed multiple times consecutively.";
- }
- should_dump = !mdpinfo->disabled;
- is_thread_bound = mdpinfo->task_runner != nullptr;
- } // AutoLock lock(lock_);
-
- if (should_dump) {
- // Invoke the dump provider.
-
- // A stack allocated string with dump provider name is useful to debug
- // crashes while invoking dump after a |dump_provider| is not unregistered
- // in safe way.
- // TODO(ssid): Remove this after fixing crbug.com/643438.
- char provider_name_for_debugging[16];
- strncpy(provider_name_for_debugging, mdpinfo->name,
- sizeof(provider_name_for_debugging) - 1);
- provider_name_for_debugging[sizeof(provider_name_for_debugging) - 1] =
- '\0';
- base::debug::Alias(provider_name_for_debugging);
-
- ProcessMemoryDump* pmd = pmd_async_state->process_memory_dump.get();
- ANNOTATE_BENIGN_RACE(&mdpinfo->disabled, "best-effort race detection");
- CHECK(!is_thread_bound ||
- !*(static_cast<volatile bool*>(&mdpinfo->disabled)));
- bool dump_successful =
- mdpinfo->dump_provider->OnMemoryDump(pmd->dump_args(), pmd);
- mdpinfo->consecutive_failures =
- dump_successful ? 0 : mdpinfo->consecutive_failures + 1;
+ // Unregister the dump provider if it failed too many times consecutively.
+ if (!mdpinfo->disabled &&
+ mdpinfo->consecutive_failures >= kMaxConsecutiveFailuresCount) {
+ mdpinfo->disabled = true;
+ DLOG(ERROR) << "Disabling MemoryDumpProvider \"" << mdpinfo->name
+ << "\". Dump failed multiple times consecutively.";
}
- }
+ if (mdpinfo->disabled)
+ return;
- pmd_async_state->pending_dump_providers.pop_back();
- SetupNextMemoryDump(std::move(pmd_async_state));
+ is_thread_bound = mdpinfo->task_runner != nullptr;
+ } // AutoLock lock(lock_);
+
+ // Invoke the dump provider.
+
+ // A stack allocated string with dump provider name is useful to debug
+ // crashes while invoking dump after a |dump_provider| is not unregistered
+ // in safe way.
+ char provider_name_for_debugging[16];
+ strncpy(provider_name_for_debugging, mdpinfo->name,
+ sizeof(provider_name_for_debugging) - 1);
+ provider_name_for_debugging[sizeof(provider_name_for_debugging) - 1] = '\0';
+ base::debug::Alias(provider_name_for_debugging);
+
+ ANNOTATE_BENIGN_RACE(&mdpinfo->disabled, "best-effort race detection");
+ CHECK(!is_thread_bound ||
+ !*(static_cast<volatile bool*>(&mdpinfo->disabled)));
+ bool dump_successful =
+ mdpinfo->dump_provider->OnMemoryDump(pmd->dump_args(), pmd);
+ mdpinfo->consecutive_failures =
+ dump_successful ? 0 : mdpinfo->consecutive_failures + 1;
}
void MemoryDumpManager::FinishAsyncProcessDump(
diff --git a/chromium/base/trace_event/memory_dump_manager.h b/chromium/base/trace_event/memory_dump_manager.h
index 76fd239aeb9..593bfe05948 100644
--- a/chromium/base/trace_event/memory_dump_manager.h
+++ b/chromium/base/trace_event/memory_dump_manager.h
@@ -50,7 +50,7 @@ enum HeapProfilingMode {
class BASE_EXPORT MemoryDumpManager {
public:
using RequestGlobalDumpFunction =
- RepeatingCallback<void(const GlobalMemoryDumpRequestArgs& args)>;
+ RepeatingCallback<void(MemoryDumpType, MemoryDumpLevelOfDetail)>;
static const char* const kTraceCategory;
@@ -180,6 +180,8 @@ class BASE_EXPORT MemoryDumpManager {
friend std::default_delete<MemoryDumpManager>; // For the testing instance.
friend struct DefaultSingletonTraits<MemoryDumpManager>;
friend class MemoryDumpManagerTest;
+ FRIEND_TEST_ALL_PREFIXES(MemoryDumpManagerTest,
+ NoStackOverflowWithTooManyMDPs);
// Holds the state of a process memory dump that needs to be carried over
// across task runners in order to fulfill an asynchronous CreateProcessDump()
@@ -240,22 +242,27 @@ class BASE_EXPORT MemoryDumpManager {
static void SetInstanceForTesting(MemoryDumpManager* instance);
static uint32_t GetDumpsSumKb(const std::string&, const ProcessMemoryDump*);
- void FinishAsyncProcessDump(
- std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state);
-
// Lazily initializes dump_thread_ and returns its TaskRunner.
scoped_refptr<base::SequencedTaskRunner> GetOrCreateBgTaskRunnerLocked();
- // Calls InvokeOnMemoryDump() for the next MDP on the task runner specified by
- // the MDP while registration. On failure to do so, skips and continues to
- // next MDP.
- void SetupNextMemoryDump(
- std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state);
+ // Calls InvokeOnMemoryDump() for the each MDP that belongs to the current
+ // task runner and switches to the task runner of the next MDP. Handles
+ // failures in MDP and thread hops, and always calls FinishAsyncProcessDump()
+ // at the end.
+ void ContinueAsyncProcessDump(
+ ProcessMemoryDumpAsyncState* owned_pmd_async_state);
- // Invokes OnMemoryDump() of the next MDP and calls SetupNextMemoryDump() at
- // the end to continue the ProcessMemoryDump. Should be called on the MDP task
+ // Returns true if the given dump type and mode allows the given MDP to dump.
+ bool IsDumpProviderAllowedToDump(const MemoryDumpRequestArgs& req_args,
+ const MemoryDumpProviderInfo& mdpinfo) const;
+
+ // Invokes OnMemoryDump() of the given MDP. Should be called on the MDP task
// runner.
- void InvokeOnMemoryDump(ProcessMemoryDumpAsyncState* owned_pmd_async_state);
+ void InvokeOnMemoryDump(MemoryDumpProviderInfo* mdpinfo,
+ ProcessMemoryDump* pmd);
+
+ void FinishAsyncProcessDump(
+ std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state);
// Helper for RegierDumpProvider* functions.
void RegisterDumpProviderInternal(
diff --git a/chromium/base/trace_event/memory_dump_manager_test_utils.h b/chromium/base/trace_event/memory_dump_manager_test_utils.h
index 032ef844afb..413017f6c0a 100644
--- a/chromium/base/trace_event/memory_dump_manager_test_utils.h
+++ b/chromium/base/trace_event/memory_dump_manager_test_utils.h
@@ -13,9 +13,10 @@ namespace base {
namespace trace_event {
void RequestGlobalDumpForInProcessTesting(
- const GlobalMemoryDumpRequestArgs& args) {
- MemoryDumpRequestArgs local_args = {0 /* dump_guid */, args.dump_type,
- args.level_of_detail};
+ base::trace_event::MemoryDumpType dump_type,
+ base::trace_event::MemoryDumpLevelOfDetail level_of_detail) {
+ MemoryDumpRequestArgs local_args = {0 /* dump_guid */, dump_type,
+ level_of_detail};
MemoryDumpManager::GetInstance()->CreateProcessDump(
local_args, ProcessMemoryDumpCallback());
};
diff --git a/chromium/base/trace_event/memory_dump_manager_unittest.cc b/chromium/base/trace_event/memory_dump_manager_unittest.cc
index 22cee6d65ce..a7361de46c4 100644
--- a/chromium/base/trace_event/memory_dump_manager_unittest.cc
+++ b/chromium/base/trace_event/memory_dump_manager_unittest.cc
@@ -527,16 +527,14 @@ TEST_F(MemoryDumpManagerTest, PostTaskForSequencedTaskRunner) {
task_runner1->set_enabled(false);
EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
MemoryDumpLevelOfDetail::DETAILED));
- // Tasks should be individually posted even if |mdps[1]| and |mdps[2]| belong
- // to same task runner.
EXPECT_EQ(1u, task_runner1->no_of_post_tasks());
- EXPECT_EQ(2u, task_runner2->no_of_post_tasks());
+ EXPECT_EQ(1u, task_runner2->no_of_post_tasks());
task_runner1->set_enabled(true);
EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
MemoryDumpLevelOfDetail::DETAILED));
EXPECT_EQ(2u, task_runner1->no_of_post_tasks());
- EXPECT_EQ(4u, task_runner2->no_of_post_tasks());
+ EXPECT_EQ(2u, task_runner2->no_of_post_tasks());
DisableTracing();
}
@@ -1026,5 +1024,67 @@ TEST_F(MemoryDumpManagerTest, EnableHeapProfilingIfNeededUnsupported) {
#endif // BUILDFLAG(USE_ALLOCATOR_SHIM) && !defined(OS_NACL)
}
+// Mock MDP class that tests if the number of OnMemoryDump() calls are expected.
+// It is implemented without gmocks since EXPECT_CALL implementation is slow
+// when there are 1000s of instances, as required in
+// NoStackOverflowWithTooManyMDPs test.
+class SimpleMockMemoryDumpProvider : public MemoryDumpProvider {
+ public:
+ SimpleMockMemoryDumpProvider(int expected_num_dump_calls)
+ : expected_num_dump_calls_(expected_num_dump_calls), num_dump_calls_(0) {}
+
+ ~SimpleMockMemoryDumpProvider() override {
+ EXPECT_EQ(expected_num_dump_calls_, num_dump_calls_);
+ }
+
+ bool OnMemoryDump(const MemoryDumpArgs& args,
+ ProcessMemoryDump* pmd) override {
+ ++num_dump_calls_;
+ return true;
+ }
+
+ private:
+ int expected_num_dump_calls_;
+ int num_dump_calls_;
+};
+
+TEST_F(MemoryDumpManagerTest, NoStackOverflowWithTooManyMDPs) {
+ InitializeMemoryDumpManagerForInProcessTesting(false /* is_coordinator */);
+ SetDumpProviderWhitelistForTesting(kTestMDPWhitelist);
+ SetDumpProviderSummaryWhitelistForTesting(kTestMDPWhitelistForSummary);
+
+ int kMDPCount = 1000;
+ std::vector<std::unique_ptr<SimpleMockMemoryDumpProvider>> mdps;
+ for (int i = 0; i < kMDPCount; ++i) {
+ mdps.push_back(std::make_unique<SimpleMockMemoryDumpProvider>(1));
+ RegisterDumpProvider(mdps.back().get(), nullptr);
+ }
+ for (int i = 0; i < kMDPCount; ++i) {
+ mdps.push_back(std::make_unique<SimpleMockMemoryDumpProvider>(2));
+ RegisterDumpProvider(mdps.back().get(), nullptr, kDefaultOptions,
+ kBackgroundButNotSummaryWhitelistedMDPName);
+ }
+ for (int i = 0; i < kMDPCount; ++i) {
+ mdps.push_back(std::make_unique<SimpleMockMemoryDumpProvider>(3));
+ RegisterDumpProvider(mdps.back().get(), nullptr, kDefaultOptions,
+ kWhitelistedMDPName);
+ }
+ std::unique_ptr<Thread> stopped_thread(new Thread("test thread"));
+ stopped_thread->Start();
+ for (int i = 0; i < kMDPCount; ++i) {
+ mdps.push_back(std::make_unique<SimpleMockMemoryDumpProvider>(0));
+ RegisterDumpProvider(mdps.back().get(), stopped_thread->task_runner(),
+ kDefaultOptions, kWhitelistedMDPName);
+ }
+ stopped_thread->Stop();
+
+ EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::DETAILED));
+ EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+ MemoryDumpLevelOfDetail::BACKGROUND));
+ EXPECT_TRUE(RequestProcessDumpAndWait(MemoryDumpType::SUMMARY_ONLY,
+ MemoryDumpLevelOfDetail::BACKGROUND));
+}
+
} // namespace trace_event
} // namespace base
diff --git a/chromium/base/trace_event/memory_dump_provider.h b/chromium/base/trace_event/memory_dump_provider.h
index 1f656b12689..b458bfb1ea5 100644
--- a/chromium/base/trace_event/memory_dump_provider.h
+++ b/chromium/base/trace_event/memory_dump_provider.h
@@ -40,7 +40,7 @@ class BASE_EXPORT MemoryDumpProvider {
bool supports_heap_profiling;
};
- virtual ~MemoryDumpProvider() {}
+ virtual ~MemoryDumpProvider() = default;
// Called by the MemoryDumpManager when generating memory dumps.
// The |args| specify if the embedder should generate light/heavy dumps on
@@ -70,7 +70,7 @@ class BASE_EXPORT MemoryDumpProvider {
virtual void SuspendFastMemoryPolling() {}
protected:
- MemoryDumpProvider() {}
+ MemoryDumpProvider() = default;
DISALLOW_COPY_AND_ASSIGN(MemoryDumpProvider);
};
diff --git a/chromium/base/trace_event/memory_dump_request_args.h b/chromium/base/trace_event/memory_dump_request_args.h
index adf8d26e600..a3e9d0c9c7e 100644
--- a/chromium/base/trace_event/memory_dump_request_args.h
+++ b/chromium/base/trace_event/memory_dump_request_args.h
@@ -74,15 +74,6 @@ struct BASE_EXPORT MemoryDumpRequestArgs {
MemoryDumpLevelOfDetail level_of_detail;
};
-// Initial request arguments for a global memory dump. (see
-// MemoryDumpManager::RequestGlobalMemoryDump()). Keep this consistent with
-// memory_instrumentation.mojo and memory_instrumentation_struct_traits.{h,cc}
-// TODO(hjd): Move this to memory_instrumentation, crbug.com/776726
-struct BASE_EXPORT GlobalMemoryDumpRequestArgs {
- MemoryDumpType dump_type;
- MemoryDumpLevelOfDetail level_of_detail;
-};
-
// Args for ProcessMemoryDump and passed to OnMemoryDump calls for memory dump
// providers. Dump providers are expected to read the args for creating dumps.
struct MemoryDumpArgs {
diff --git a/chromium/base/trace_event/memory_infra_background_whitelist.cc b/chromium/base/trace_event/memory_infra_background_whitelist.cc
index 60f08b52ccc..f2cf4cda9af 100644
--- a/chromium/base/trace_event/memory_infra_background_whitelist.cc
+++ b/chromium/base/trace_event/memory_infra_background_whitelist.cc
@@ -24,7 +24,6 @@ const char* const kDumpProviderWhitelist[] = {
"ClientDiscardableSharedMemoryManager",
"DOMStorage",
"DiscardableSharedMemoryManager",
- "DnsConfigServicePosix::HostsReader",
"gpu::BufferManager",
"gpu::RenderbufferManager",
"gpu::TextureManager",
@@ -101,7 +100,6 @@ const char* const kAllocatorDumpNameWhitelist[] = {
"mojo/shared_buffer",
"mojo/unknown",
"mojo/watcher",
- "net/dns_config_service_posix_hosts_reader",
"net/http_network_session_0x?",
"net/http_network_session_0x?/quic_stream_factory",
"net/http_network_session_0x?/socket_pool",
@@ -179,6 +177,8 @@ const char* const kAllocatorDumpNameWhitelist[] = {
"skia/sk_resource_cache",
"sqlite",
"ui/resource_manager_0x?",
+ "v8/isolate_0x?/contexts/detached_context",
+ "v8/isolate_0x?/contexts/native_context",
"v8/isolate_0x?/heap_spaces",
"v8/isolate_0x?/heap_spaces/code_space",
"v8/isolate_0x?/heap_spaces/large_object_space",
@@ -276,13 +276,11 @@ bool IsMemoryAllocatorDumpNameWhitelisted(const std::string& name) {
return true;
}
- // As are shared memory dumps. Note: we skip the first character after the
- // slash and last character in the string as they are expected to be brackets.
- if (base::StartsWith(name, "shared_memory/(", CompareCase::SENSITIVE)) {
- for (size_t i = strlen("shared_memory/") + 1; i < name.size() - 1; i++)
+ if (base::StartsWith(name, "shared_memory/", CompareCase::SENSITIVE)) {
+ for (size_t i = strlen("shared_memory/"); i < name.size(); i++)
if (!base::IsHexDigit(name[i]))
return false;
- return name.back() == ')';
+ return true;
}
// Remove special characters, numbers (including hexadecimal which are marked
diff --git a/chromium/base/trace_event/process_memory_dump.cc b/chromium/base/trace_event/process_memory_dump.cc
index 3d068afedbd..8bd89737040 100644
--- a/chromium/base/trace_event/process_memory_dump.cc
+++ b/chromium/base/trace_event/process_memory_dump.cc
@@ -29,6 +29,8 @@
#endif
#if defined(OS_WIN)
+#include <windows.h> // Must be in front of other Windows header files
+
#include <Psapi.h>
#endif
diff --git a/chromium/base/trace_event/process_memory_dump_unittest.cc b/chromium/base/trace_event/process_memory_dump_unittest.cc
index 0148588200e..e2b0e6f9c1a 100644
--- a/chromium/base/trace_event/process_memory_dump_unittest.cc
+++ b/chromium/base/trace_event/process_memory_dump_unittest.cc
@@ -18,6 +18,7 @@
#include "testing/gtest/include/gtest/gtest.h"
#if defined(OS_WIN)
+#include <windows.h>
#include "winbase.h"
#elif defined(OS_POSIX)
#include <sys/mman.h>
diff --git a/chromium/base/trace_event/trace_buffer.h b/chromium/base/trace_event/trace_buffer.h
index 4885a3c7c09..3d6465fdc35 100644
--- a/chromium/base/trace_event/trace_buffer.h
+++ b/chromium/base/trace_event/trace_buffer.h
@@ -57,7 +57,7 @@ class BASE_EXPORT TraceBufferChunk {
// TraceBuffer holds the events as they are collected.
class BASE_EXPORT TraceBuffer {
public:
- virtual ~TraceBuffer() {}
+ virtual ~TraceBuffer() = default;
virtual std::unique_ptr<TraceBufferChunk> GetChunk(size_t* index) = 0;
virtual void ReturnChunk(size_t index,
diff --git a/chromium/base/trace_event/trace_config.h b/chromium/base/trace_event/trace_config.h
index 2e7f6c0c826..54604892936 100644
--- a/chromium/base/trace_event/trace_config.h
+++ b/chromium/base/trace_event/trace_config.h
@@ -26,6 +26,8 @@ namespace trace_event {
class ConvertableToTraceFormat;
// Options determines how the trace buffer stores data.
+// A Java counterpart will be generated for this enum.
+// GENERATED_JAVA_ENUM_PACKAGE: org.chromium.base
enum TraceRecordMode {
// Record until the trace buffer is full.
RECORD_UNTIL_FULL,
diff --git a/chromium/base/trace_event/trace_config_unittest.cc b/chromium/base/trace_event/trace_config_unittest.cc
index 30f82f95473..3cb6d61b7d2 100644
--- a/chromium/base/trace_event/trace_config_unittest.cc
+++ b/chromium/base/trace_event/trace_config_unittest.cc
@@ -653,8 +653,8 @@ TEST(TraceConfigTest, EmptyMemoryDumpConfigTest) {
tc.ToString());
EXPECT_EQ(0u, tc.memory_dump_config().triggers.size());
EXPECT_EQ(
- TraceConfig::MemoryDumpConfig::HeapProfiler ::
- kDefaultBreakdownThresholdBytes,
+ static_cast<uint32_t>(TraceConfig::MemoryDumpConfig::HeapProfiler::
+ kDefaultBreakdownThresholdBytes),
tc.memory_dump_config().heap_profiler_options.breakdown_threshold_bytes);
}
@@ -664,8 +664,8 @@ TEST(TraceConfigTest, LegacyStringToMemoryDumpConfig) {
EXPECT_NE(std::string::npos, tc.ToString().find("memory_dump_config"));
EXPECT_EQ(0u, tc.memory_dump_config().triggers.size());
EXPECT_EQ(
- TraceConfig::MemoryDumpConfig::HeapProfiler ::
- kDefaultBreakdownThresholdBytes,
+ static_cast<uint32_t>(TraceConfig::MemoryDumpConfig::HeapProfiler::
+ kDefaultBreakdownThresholdBytes),
tc.memory_dump_config().heap_profiler_options.breakdown_threshold_bytes);
}
diff --git a/chromium/base/trace_event/trace_event_etw_export_win.cc b/chromium/base/trace_event/trace_event_etw_export_win.cc
index 74cfb7402b9..ff8d2ffdeab 100644
--- a/chromium/base/trace_event/trace_event_etw_export_win.cc
+++ b/chromium/base/trace_event/trace_event_etw_export_win.cc
@@ -15,6 +15,8 @@
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_impl.h"
+#include <windows.h>
+
// The GetProcAddress technique is borrowed from
// https://github.com/google/UIforETW/tree/master/ETWProviders
//
diff --git a/chromium/base/trace_event/trace_event_impl.h b/chromium/base/trace_event/trace_event_impl.h
index d682ebd3aea..58137f00c34 100644
--- a/chromium/base/trace_event/trace_event_impl.h
+++ b/chromium/base/trace_event/trace_event_impl.h
@@ -40,8 +40,8 @@ typedef base::Callback<bool(const char* category_group_name,
// class must implement this interface.
class BASE_EXPORT ConvertableToTraceFormat {
public:
- ConvertableToTraceFormat() {}
- virtual ~ConvertableToTraceFormat() {}
+ ConvertableToTraceFormat() = default;
+ virtual ~ConvertableToTraceFormat() = default;
// Append the class info to the provided |out| string. The appended
// data must be a valid JSON object. Strings must be properly quoted, and
diff --git a/chromium/base/trace_event/trace_event_unittest.cc b/chromium/base/trace_event/trace_event_unittest.cc
index 625168f6417..5dc53780f2a 100644
--- a/chromium/base/trace_event/trace_event_unittest.cc
+++ b/chromium/base/trace_event/trace_event_unittest.cc
@@ -2338,7 +2338,7 @@ TEST_F(TraceEventTestFixture, PrimitiveArgs) {
dict->GetDictionary("args", &args_dict);
ASSERT_TRUE(args_dict);
EXPECT_TRUE(args_dict->Get("float_one", &value));
- EXPECT_TRUE(value->IsType(Value::Type::DOUBLE));
+ EXPECT_TRUE(value->is_double());
EXPECT_TRUE(value->GetAsDouble(&double_value));
EXPECT_EQ(1, double_value);
@@ -2348,7 +2348,7 @@ TEST_F(TraceEventTestFixture, PrimitiveArgs) {
dict->GetDictionary("args", &args_dict);
ASSERT_TRUE(args_dict);
EXPECT_TRUE(args_dict->Get("float_half", &value));
- EXPECT_TRUE(value->IsType(Value::Type::DOUBLE));
+ EXPECT_TRUE(value->is_double());
EXPECT_TRUE(value->GetAsDouble(&double_value));
EXPECT_EQ(0.5, double_value);
@@ -2358,7 +2358,7 @@ TEST_F(TraceEventTestFixture, PrimitiveArgs) {
dict->GetDictionary("args", &args_dict);
ASSERT_TRUE(args_dict);
EXPECT_TRUE(args_dict->Get("float_neghalf", &value));
- EXPECT_TRUE(value->IsType(Value::Type::DOUBLE));
+ EXPECT_TRUE(value->is_double());
EXPECT_TRUE(value->GetAsDouble(&double_value));
EXPECT_EQ(-0.5, double_value);
diff --git a/chromium/base/trace_event/trace_log.cc b/chromium/base/trace_event/trace_log.cc
index 92fccb93601..dc746c627ef 100644
--- a/chromium/base/trace_event/trace_log.cc
+++ b/chromium/base/trace_event/trace_log.cc
@@ -363,6 +363,15 @@ TraceLog::TraceLog()
SetProcessID(static_cast<int>(GetCurrentProcId()));
#endif
+// Linux renderer processes and Android O processes are not allowed to read
+// "proc/stat" file, crbug.com/788870.
+#if defined(OS_WIN) || (defined(OS_MACOSX) && !defined(OS_IOS))
+ process_creation_time_ = CurrentProcessInfo::CreationTime();
+#else
+ // Use approximate time when creation time is not available.
+ process_creation_time_ = Time::Now();
+#endif
+
logged_events_.reset(CreateTraceBuffer());
MemoryDumpManager::GetInstance()->RegisterDumpProvider(this, "TraceLog",
@@ -962,12 +971,19 @@ void TraceLog::FlushCurrentThread(int generation, bool discard_events) {
// This will flush the thread local buffer.
delete thread_local_event_buffer_.Get();
- AutoLock lock(lock_);
- if (!CheckGeneration(generation) || !flush_task_runner_ ||
- !thread_message_loops_.empty())
- return;
-
- flush_task_runner_->PostTask(
+ // Scheduler uses TRACE_EVENT macros when posting a task, which can lead
+ // to acquiring a tracing lock. Given that posting a task requires grabbing
+ // a scheduler lock, we need to post this task outside tracing lock to avoid
+ // deadlocks.
+ scoped_refptr<SingleThreadTaskRunner> cached_flush_task_runner;
+ {
+ AutoLock lock(lock_);
+ cached_flush_task_runner = flush_task_runner_;
+ if (!CheckGeneration(generation) || !flush_task_runner_ ||
+ !thread_message_loops_.empty())
+ return;
+ }
+ cached_flush_task_runner->PostTask(
FROM_HERE, BindOnce(&TraceLog::FinishFlush, Unretained(this), generation,
discard_events));
}
@@ -1476,17 +1492,10 @@ void TraceLog::AddMetadataEventsWhileLocked() {
current_thread_id, "process_name", "name", process_name_);
}
-// See https://crbug.com/726484 for Fuchsia.
-#if !defined(OS_NACL) && !defined(OS_IOS) && !defined(OS_FUCHSIA)
- Time process_creation_time = CurrentProcessInfo::CreationTime();
- if (!process_creation_time.is_null()) {
- TimeDelta process_uptime = Time::Now() - process_creation_time;
- InitializeMetadataEvent(
- AddEventToThreadSharedChunkWhileLocked(nullptr, false),
- current_thread_id, "process_uptime_seconds", "uptime",
- process_uptime.InSeconds());
- }
-#endif // !defined(OS_NACL) && !defined(OS_IOS) && !defined(OS_FUCHSIA)
+ TimeDelta process_uptime = Time::Now() - process_creation_time_;
+ InitializeMetadataEvent(
+ AddEventToThreadSharedChunkWhileLocked(nullptr, false), current_thread_id,
+ "process_uptime_seconds", "uptime", process_uptime.InSeconds());
if (!process_labels_.empty()) {
std::vector<base::StringPiece> labels;
@@ -1542,7 +1551,7 @@ TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle,
DCHECK(handle.chunk_seq);
DCHECK(handle.chunk_index <= TraceBufferChunk::kMaxChunkIndex);
- DCHECK(handle.event_index < TraceBufferChunk::kTraceBufferChunkSize);
+ DCHECK(handle.event_index <= TraceBufferChunk::kTraceBufferChunkSize - 1);
if (thread_local_event_buffer_.Get()) {
TraceEvent* trace_event =
diff --git a/chromium/base/trace_event/trace_log.h b/chromium/base/trace_event/trace_log.h
index 284298ccf6e..9abaf452ce8 100644
--- a/chromium/base/trace_event/trace_log.h
+++ b/chromium/base/trace_event/trace_log.h
@@ -466,6 +466,7 @@ class BASE_EXPORT TraceLog : public MemoryDumpProvider {
int process_sort_index_;
std::unordered_map<int, int> thread_sort_indices_;
std::unordered_map<int, std::string> thread_names_;
+ base::Time process_creation_time_;
// The following two maps are used only when ECHO_TO_CONSOLE.
std::unordered_map<int, base::stack<TimeTicks>> thread_event_start_times_;
diff --git a/chromium/base/tuple_unittest.cc b/chromium/base/tuple_unittest.cc
index 321c549bfcc..4b38797e0e7 100644
--- a/chromium/base/tuple_unittest.cc
+++ b/chromium/base/tuple_unittest.cc
@@ -96,7 +96,7 @@ TEST(TupleTest, Copying) {
// Creating the tuple should copy the class to store internally in the tuple.
std::tuple<CopyLogger, CopyLogger*, bool*> tuple(logger, &logger, &res);
- std::get<1>(tuple) = &std::get<0>(tuple);
+ std::get<CopyLogger*>(tuple) = &std::get<CopyLogger>(tuple);
EXPECT_EQ(2, CopyLogger::TimesConstructed);
EXPECT_EQ(1, CopyLogger::TimesCopied);
diff --git a/chromium/base/unguessable_token.cc b/chromium/base/unguessable_token.cc
index cd9830e6866..489bf62721e 100644
--- a/chromium/base/unguessable_token.cc
+++ b/chromium/base/unguessable_token.cc
@@ -14,7 +14,7 @@ UnguessableToken::UnguessableToken(uint64_t high, uint64_t low)
: high_(high), low_(low) {}
std::string UnguessableToken::ToString() const {
- return base::StringPrintf("(%08" PRIX64 "%08" PRIX64 ")", high_, low_);
+ return base::StringPrintf("%08" PRIX64 "%08" PRIX64, high_, low_);
}
// static
@@ -35,7 +35,7 @@ UnguessableToken UnguessableToken::Deserialize(uint64_t high, uint64_t low) {
}
std::ostream& operator<<(std::ostream& out, const UnguessableToken& token) {
- return out << token.ToString();
+ return out << "(" << token.ToString() << ")";
}
} // namespace base
diff --git a/chromium/base/unguessable_token.h b/chromium/base/unguessable_token.h
index 122541d1811..6858e22a4cf 100644
--- a/chromium/base/unguessable_token.h
+++ b/chromium/base/unguessable_token.h
@@ -19,11 +19,17 @@ namespace base {
struct UnguessableTokenHash;
// A UnguessableToken is an 128-bit token generated from a cryptographically
-// strong random source.
+// strong random source. It can be used as part of a larger aggregate type,
+// or as an ID in and of itself.
//
-// UnguessableToken should be used when a sensitive ID needs to be unguessable,
-// and is shared across processes. It can be used as part of a larger aggregate
-// type, or as an ID in and of itself.
+// UnguessableToken can be used to implement "Capability-Based Security".
+// In other words, UnguessableToken can be used when the resource associated
+// with the ID needs to be protected against manipulation by other untrusted
+// agents in the system, and there is no other convenient way to verify the
+// authority of the agent to do so (because the resource is part of a table
+// shared across processes, for instance). In such a scheme, knowledge of the
+// token value in and of itself is sufficient proof of authority to carry out
+// an operation against the associated resource.
//
// Use Create() for creating new UnguessableTokens.
//
@@ -61,6 +67,7 @@ class BASE_EXPORT UnguessableToken {
bool is_empty() const { return high_ == 0 && low_ == 0; }
+ // Hex representation of the unguessable token.
std::string ToString() const;
explicit operator bool() const { return !is_empty(); }
diff --git a/chromium/base/unguessable_token_unittest.cc b/chromium/base/unguessable_token_unittest.cc
index 0158257185c..287fca31a7f 100644
--- a/chromium/base/unguessable_token_unittest.cc
+++ b/chromium/base/unguessable_token_unittest.cc
@@ -80,13 +80,14 @@ TEST(UnguessableTokenTest, VerifyValueSerialization) {
TEST(UnguessableTokenTest, VerifyToString) {
UnguessableToken token = UnguessableToken::Deserialize(0x123, 0xABC);
- std::string expected = "(0000012300000ABC)";
+ std::string expected = "0000012300000ABC";
EXPECT_EQ(expected, token.ToString());
+ std::string expected_stream = "(0000012300000ABC)";
std::stringstream stream;
stream << token;
- EXPECT_EQ(expected, stream.str());
+ EXPECT_EQ(expected_stream, stream.str());
}
TEST(UnguessableTokenTest, VerifySmallerThanOperator) {
diff --git a/chromium/base/values.cc b/chromium/base/values.cc
index 76e973221b1..33aeca3aba4 100644
--- a/chromium/base/values.cc
+++ b/chromium/base/values.cc
@@ -356,7 +356,7 @@ const Value* Value::FindPathOfType(std::initializer_list<StringPiece> path,
const Value* Value::FindPathOfType(span<const StringPiece> path,
Type type) const {
const Value* result = FindPath(path);
- if (!result || !result->IsType(type))
+ if (!result || result->type() != type)
return nullptr;
return result;
}
@@ -877,7 +877,7 @@ bool DictionaryValue::GetBinary(StringPiece path,
const Value** out_value) const {
const Value* value;
bool result = Get(path, &value);
- if (!result || !value->IsType(Type::BINARY))
+ if (!result || !value->is_blob())
return false;
if (out_value)
@@ -895,7 +895,7 @@ bool DictionaryValue::GetDictionary(StringPiece path,
const DictionaryValue** out_value) const {
const Value* value;
bool result = Get(path, &value);
- if (!result || !value->IsType(Type::DICTIONARY))
+ if (!result || !value->is_dict())
return false;
if (out_value)
@@ -915,7 +915,7 @@ bool DictionaryValue::GetList(StringPiece path,
const ListValue** out_value) const {
const Value* value;
bool result = Get(path, &value);
- if (!result || !value->IsType(Type::LIST))
+ if (!result || !value->is_list())
return false;
if (out_value)
@@ -1000,7 +1000,7 @@ bool DictionaryValue::GetDictionaryWithoutPathExpansion(
const DictionaryValue** out_value) const {
const Value* value;
bool result = GetWithoutPathExpansion(key, &value);
- if (!result || !value->IsType(Type::DICTIONARY))
+ if (!result || !value->is_dict())
return false;
if (out_value)
@@ -1024,7 +1024,7 @@ bool DictionaryValue::GetListWithoutPathExpansion(
const ListValue** out_value) const {
const Value* value;
bool result = GetWithoutPathExpansion(key, &value);
- if (!result || !value->IsType(Type::LIST))
+ if (!result || !value->is_list())
return false;
if (out_value)
@@ -1106,7 +1106,7 @@ void DictionaryValue::MergeDictionary(const DictionaryValue* dictionary) {
for (DictionaryValue::Iterator it(*dictionary); !it.IsAtEnd(); it.Advance()) {
const Value* merge_value = &it.value();
// Check whether we have to merge dictionaries.
- if (merge_value->IsType(Value::Type::DICTIONARY)) {
+ if (merge_value->is_dict()) {
DictionaryValue* sub_dict;
if (GetDictionaryWithoutPathExpansion(it.key(), &sub_dict)) {
sub_dict->MergeDictionary(
@@ -1235,7 +1235,7 @@ bool ListValue::GetDictionary(size_t index,
const DictionaryValue** out_value) const {
const Value* value;
bool result = Get(index, &value);
- if (!result || !value->IsType(Type::DICTIONARY))
+ if (!result || !value->is_dict())
return false;
if (out_value)
@@ -1253,7 +1253,7 @@ bool ListValue::GetDictionary(size_t index, DictionaryValue** out_value) {
bool ListValue::GetList(size_t index, const ListValue** out_value) const {
const Value* value;
bool result = Get(index, &value);
- if (!result || !value->IsType(Type::LIST))
+ if (!result || !value->is_list())
return false;
if (out_value)
diff --git a/chromium/base/values.h b/chromium/base/values.h
index 4caa440b438..caa664d52cb 100644
--- a/chromium/base/values.h
+++ b/chromium/base/values.h
@@ -150,7 +150,6 @@ class BASE_EXPORT Value {
Type type() const { return type_; }
// Returns true if the current object represents a given type.
- bool IsType(Type type) const { return type == type_; }
bool is_none() const { return type() == Type::NONE; }
bool is_bool() const { return type() == Type::BOOLEAN; }
bool is_int() const { return type() == Type::INTEGER; }
diff --git a/chromium/base/values_unittest.cc b/chromium/base/values_unittest.cc
index 4dc6bf0e0c0..32b8e96a704 100644
--- a/chromium/base/values_unittest.cc
+++ b/chromium/base/values_unittest.cc
@@ -811,10 +811,10 @@ TEST(ValuesTest, StringValue) {
// Test overloaded StringValue constructor.
std::unique_ptr<Value> narrow_value(new Value("narrow"));
ASSERT_TRUE(narrow_value.get());
- ASSERT_TRUE(narrow_value->IsType(Value::Type::STRING));
+ ASSERT_TRUE(narrow_value->is_string());
std::unique_ptr<Value> utf16_value(new Value(ASCIIToUTF16("utf16")));
ASSERT_TRUE(utf16_value.get());
- ASSERT_TRUE(utf16_value->IsType(Value::Type::STRING));
+ ASSERT_TRUE(utf16_value->is_string());
// Test overloaded GetAsString.
std::string narrow = "http://google.com";
@@ -1026,7 +1026,7 @@ TEST(ValuesTest, DictionaryRemovePath) {
std::unique_ptr<Value> removed_item;
EXPECT_TRUE(dict.RemovePath("a.long.way.down", &removed_item));
ASSERT_TRUE(removed_item);
- EXPECT_TRUE(removed_item->IsType(base::Value::Type::INTEGER));
+ EXPECT_TRUE(removed_item->is_int());
EXPECT_FALSE(dict.HasKey("a.long.way.down"));
EXPECT_FALSE(dict.HasKey("a.long.way"));
EXPECT_TRUE(dict.Get("a.long.key.path", nullptr));
@@ -1039,7 +1039,7 @@ TEST(ValuesTest, DictionaryRemovePath) {
removed_item.reset();
EXPECT_TRUE(dict.RemovePath("a.long.key.path", &removed_item));
ASSERT_TRUE(removed_item);
- EXPECT_TRUE(removed_item->IsType(base::Value::Type::BOOLEAN));
+ EXPECT_TRUE(removed_item->is_bool());
EXPECT_TRUE(dict.empty());
}
@@ -1078,13 +1078,13 @@ TEST(ValuesTest, DeepCopy) {
ASSERT_TRUE(copy_dict->Get("null", &copy_null));
ASSERT_TRUE(copy_null);
ASSERT_NE(copy_null, null_weak);
- ASSERT_TRUE(copy_null->IsType(Value::Type::NONE));
+ ASSERT_TRUE(copy_null->is_none());
Value* copy_bool = nullptr;
ASSERT_TRUE(copy_dict->Get("bool", &copy_bool));
ASSERT_TRUE(copy_bool);
ASSERT_NE(copy_bool, bool_weak);
- ASSERT_TRUE(copy_bool->IsType(Value::Type::BOOLEAN));
+ ASSERT_TRUE(copy_bool->is_bool());
bool copy_bool_value = false;
ASSERT_TRUE(copy_bool->GetAsBoolean(&copy_bool_value));
ASSERT_TRUE(copy_bool_value);
@@ -1093,7 +1093,7 @@ TEST(ValuesTest, DeepCopy) {
ASSERT_TRUE(copy_dict->Get("int", &copy_int));
ASSERT_TRUE(copy_int);
ASSERT_NE(copy_int, int_weak);
- ASSERT_TRUE(copy_int->IsType(Value::Type::INTEGER));
+ ASSERT_TRUE(copy_int->is_int());
int copy_int_value = 0;
ASSERT_TRUE(copy_int->GetAsInteger(&copy_int_value));
ASSERT_EQ(42, copy_int_value);
@@ -1102,7 +1102,7 @@ TEST(ValuesTest, DeepCopy) {
ASSERT_TRUE(copy_dict->Get("double", &copy_double));
ASSERT_TRUE(copy_double);
ASSERT_NE(copy_double, double_weak);
- ASSERT_TRUE(copy_double->IsType(Value::Type::DOUBLE));
+ ASSERT_TRUE(copy_double->is_double());
double copy_double_value = 0;
ASSERT_TRUE(copy_double->GetAsDouble(&copy_double_value));
ASSERT_EQ(3.14, copy_double_value);
@@ -1111,7 +1111,7 @@ TEST(ValuesTest, DeepCopy) {
ASSERT_TRUE(copy_dict->Get("string", &copy_string));
ASSERT_TRUE(copy_string);
ASSERT_NE(copy_string, string_weak);
- ASSERT_TRUE(copy_string->IsType(Value::Type::STRING));
+ ASSERT_TRUE(copy_string->is_string());
std::string copy_string_value;
string16 copy_string16_value;
ASSERT_TRUE(copy_string->GetAsString(&copy_string_value));
@@ -1123,7 +1123,7 @@ TEST(ValuesTest, DeepCopy) {
ASSERT_TRUE(copy_dict->Get("string16", &copy_string16));
ASSERT_TRUE(copy_string16);
ASSERT_NE(copy_string16, string16_weak);
- ASSERT_TRUE(copy_string16->IsType(Value::Type::STRING));
+ ASSERT_TRUE(copy_string16->is_string());
ASSERT_TRUE(copy_string16->GetAsString(&copy_string_value));
ASSERT_TRUE(copy_string16->GetAsString(&copy_string16_value));
ASSERT_EQ(std::string("hello16"), copy_string_value);
@@ -1133,7 +1133,7 @@ TEST(ValuesTest, DeepCopy) {
ASSERT_TRUE(copy_dict->Get("binary", &copy_binary));
ASSERT_TRUE(copy_binary);
ASSERT_NE(copy_binary, binary_weak);
- ASSERT_TRUE(copy_binary->IsType(Value::Type::BINARY));
+ ASSERT_TRUE(copy_binary->is_blob());
ASSERT_NE(binary_weak->GetBlob().data(), copy_binary->GetBlob().data());
ASSERT_EQ(binary_weak->GetBlob(), copy_binary->GetBlob());
@@ -1141,7 +1141,7 @@ TEST(ValuesTest, DeepCopy) {
ASSERT_TRUE(copy_dict->Get("list", &copy_value));
ASSERT_TRUE(copy_value);
ASSERT_NE(copy_value, list_weak);
- ASSERT_TRUE(copy_value->IsType(Value::Type::LIST));
+ ASSERT_TRUE(copy_value->is_list());
ListValue* copy_list = nullptr;
ASSERT_TRUE(copy_value->GetAsList(&copy_list));
ASSERT_TRUE(copy_list);
@@ -1167,7 +1167,7 @@ TEST(ValuesTest, DeepCopy) {
ASSERT_TRUE(copy_dict->Get("dictionary", &copy_value));
ASSERT_TRUE(copy_value);
ASSERT_NE(copy_value, dict_weak);
- ASSERT_TRUE(copy_value->IsType(Value::Type::DICTIONARY));
+ ASSERT_TRUE(copy_value->is_dict());
DictionaryValue* copy_nested_dictionary = nullptr;
ASSERT_TRUE(copy_value->GetAsDictionary(&copy_nested_dictionary));
ASSERT_TRUE(copy_nested_dictionary);
diff --git a/chromium/base/version_unittest.cc b/chromium/base/version_unittest.cc
index 4ca784fc117..285ca9cc43e 100644
--- a/chromium/base/version_unittest.cc
+++ b/chromium/base/version_unittest.cc
@@ -91,24 +91,31 @@ TEST(VersionTest, Compare) {
const char* rhs;
int expected;
} cases[] = {
- {"1.0", "1.0", 0},
- {"1.0", "0.0", 1},
- {"1.0", "2.0", -1},
- {"1.0", "1.1", -1},
- {"1.1", "1.0", 1},
- {"1.0", "1.0.1", -1},
- {"1.1", "1.0.1", 1},
- {"1.1", "1.0.1", 1},
- {"1.0.0", "1.0", 0},
- {"1.0.3", "1.0.20", -1},
- {"11.0.10", "15.007.20011", -1},
- {"11.0.10", "15.5.28.130162", -1},
+ {"1.0", "1.0", 0},
+ {"1.0", "0.0", 1},
+ {"1.0", "2.0", -1},
+ {"1.0", "1.1", -1},
+ {"1.1", "1.0", 1},
+ {"1.0", "1.0.1", -1},
+ {"1.1", "1.0.1", 1},
+ {"1.1", "1.0.1", 1},
+ {"1.0.0", "1.0", 0},
+ {"1.0.3", "1.0.20", -1},
+ {"11.0.10", "15.007.20011", -1},
+ {"11.0.10", "15.5.28.130162", -1},
+ {"15.5.28.130162", "15.5.28.130162", 0},
};
for (size_t i = 0; i < arraysize(cases); ++i) {
base::Version lhs(cases[i].lhs);
base::Version rhs(cases[i].rhs);
EXPECT_EQ(lhs.CompareTo(rhs), cases[i].expected) <<
cases[i].lhs << " ? " << cases[i].rhs;
+ // CompareToWildcardString() should have same behavior as CompareTo() when
+ // no wildcards are present.
+ EXPECT_EQ(lhs.CompareToWildcardString(cases[i].rhs), cases[i].expected)
+ << cases[i].lhs << " ? " << cases[i].rhs;
+ EXPECT_EQ(rhs.CompareToWildcardString(cases[i].lhs), -cases[i].expected)
+ << cases[i].lhs << " ? " << cases[i].rhs;
// Test comparison operators
switch (cases[i].expected) {
diff --git a/chromium/base/win/OWNERS b/chromium/base/win/OWNERS
index 082fae6a95e..4593b2c3623 100644
--- a/chromium/base/win/OWNERS
+++ b/chromium/base/win/OWNERS
@@ -1,3 +1,4 @@
+brucedawson@chromium.org
grt@chromium.org
jschuh@chromium.org
robliao@chromium.org
diff --git a/chromium/base/win/current_module.h b/chromium/base/win/current_module.h
index bbc41346c13..ee141db2115 100644
--- a/chromium/base/win/current_module.h
+++ b/chromium/base/win/current_module.h
@@ -5,6 +5,8 @@
#ifndef BASE_WIN_CURRENT_MODULE_H_
#define BASE_WIN_CURRENT_MODULE_H_
+#include <windows.h>
+
// http://blogs.msdn.com/oldnewthing/archive/2004/10/25/247180.aspx
extern "C" IMAGE_DOS_HEADER __ImageBase;
diff --git a/chromium/base/win/object_watcher.cc b/chromium/base/win/object_watcher.cc
index 426f52e291a..4c1c2356bac 100644
--- a/chromium/base/win/object_watcher.cc
+++ b/chromium/base/win/object_watcher.cc
@@ -8,6 +8,8 @@
#include "base/logging.h"
#include "base/threading/sequenced_task_runner_handle.h"
+#include <windows.h>
+
namespace base {
namespace win {
diff --git a/chromium/base/win/object_watcher.h b/chromium/base/win/object_watcher.h
index 6fddc588dec..b7ed76d041c 100644
--- a/chromium/base/win/object_watcher.h
+++ b/chromium/base/win/object_watcher.h
@@ -5,7 +5,7 @@
#ifndef BASE_WIN_OBJECT_WATCHER_H_
#define BASE_WIN_OBJECT_WATCHER_H_
-#include <windows.h>
+#include "base/win/windows_types.h"
#include "base/base_export.h"
#include "base/callback.h"
diff --git a/chromium/base/win/registry.h b/chromium/base/win/registry.h
index 9acbea2995e..53327ec5a1e 100644
--- a/chromium/base/win/registry.h
+++ b/chromium/base/win/registry.h
@@ -5,10 +5,10 @@
#ifndef BASE_WIN_REGISTRY_H_
#define BASE_WIN_REGISTRY_H_
-#include <windows.h>
#include <stdint.h>
#include <string>
#include <vector>
+#include "base/win/windows_types.h"
#include "base/base_export.h"
#include "base/macros.h"
diff --git a/chromium/base/win/scoped_handle.cc b/chromium/base/win/scoped_handle.cc
index d8c92124dfa..6eb32712909 100644
--- a/chromium/base/win/scoped_handle.cc
+++ b/chromium/base/win/scoped_handle.cc
@@ -2,6 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <windows.h>
+
#include "base/win/scoped_handle.h"
#include <stddef.h>
diff --git a/chromium/base/win/scoped_handle.h b/chromium/base/win/scoped_handle.h
index c1e4597ab80..0d65a9534db 100644
--- a/chromium/base/win/scoped_handle.h
+++ b/chromium/base/win/scoped_handle.h
@@ -5,7 +5,7 @@
#ifndef BASE_WIN_SCOPED_HANDLE_H_
#define BASE_WIN_SCOPED_HANDLE_H_
-#include <windows.h>
+#include "base/win/windows_types.h"
#include "base/base_export.h"
#include "base/gtest_prod_util.h"
diff --git a/chromium/base/win/shortcut_unittest.cc b/chromium/base/win/shortcut_unittest.cc
index f16791572dc..3c1c26f38c0 100644
--- a/chromium/base/win/shortcut_unittest.cc
+++ b/chromium/base/win/shortcut_unittest.cc
@@ -99,7 +99,8 @@ TEST_F(ShortcutTest, CreateAndResolveShortcutProperties) {
ShortcutProperties properties_read_1;
ASSERT_TRUE(ResolveShortcutProperties(
file_1, ShortcutProperties::PROPERTIES_ALL, &properties_read_1));
- EXPECT_EQ(ShortcutProperties::PROPERTIES_ALL, properties_read_1.options);
+ EXPECT_EQ(static_cast<unsigned>(ShortcutProperties::PROPERTIES_ALL),
+ properties_read_1.options);
ValidatePathsAreEqual(link_properties_.target, properties_read_1.target);
ValidatePathsAreEqual(link_properties_.working_dir,
properties_read_1.working_dir);
@@ -122,7 +123,8 @@ TEST_F(ShortcutTest, CreateAndResolveShortcutProperties) {
ShortcutProperties properties_read_2;
ASSERT_TRUE(ResolveShortcutProperties(
file_2, ShortcutProperties::PROPERTIES_ALL, &properties_read_2));
- EXPECT_EQ(ShortcutProperties::PROPERTIES_ALL, properties_read_2.options);
+ EXPECT_EQ(static_cast<unsigned>(ShortcutProperties::PROPERTIES_ALL),
+ properties_read_2.options);
ValidatePathsAreEqual(only_target_properties.target,
properties_read_2.target);
ValidatePathsAreEqual(FilePath(), properties_read_2.working_dir);
diff --git a/chromium/base/win/win_includes_unittest.cc b/chromium/base/win/win_includes_unittest.cc
new file mode 100644
index 00000000000..73b7b556502
--- /dev/null
+++ b/chromium/base/win/win_includes_unittest.cc
@@ -0,0 +1,32 @@
+// Copyright (c) 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file ensures that these header files don't include Windows.h and can
+// compile without including Windows.h. This helps to improve compile times.
+
+#include "base/files/file_util.h"
+#include "base/files/platform_file.h"
+#include "base/process/process_handle.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread_local_storage.h"
+#include "base/win/registry.h"
+#include "base/win/scoped_handle.h"
+#include "base/win/win_util.h"
+
+#ifdef _WINDOWS_
+#error Windows.h was included inappropriately.
+#endif
+
+// Make sure windows.h can be included after windows_types.h
+#include "base/win/windows_types.h"
+
+#include <windows.h>
+
+// Check that type sizes match.
+static_assert(sizeof(CHROME_CONDITION_VARIABLE) == sizeof(CONDITION_VARIABLE),
+ "Definition mismatch.");
+static_assert(sizeof(CHROME_SRWLOCK) == sizeof(SRWLOCK),
+ "Definition mismatch.");
diff --git a/chromium/base/win/win_util.cc b/chromium/base/win/win_util.cc
index fccafc901fe..0d45d41faa2 100644
--- a/chromium/base/win/win_util.cc
+++ b/chromium/base/win/win_util.cc
@@ -2,13 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// Must be included before process_metrics.h to get full IoCounters definition
+#include <windows.h>
+
#include "base/win/win_util.h"
#include <aclapi.h>
#include <cfgmgr32.h>
+#include <initguid.h>
#include <powrprof.h>
#include <shobjidl.h> // Must be before propkey.
-#include <initguid.h>
+
#include <inspectable.h>
#include <mdmregistration.h>
#include <objbase.h>
@@ -23,7 +27,7 @@
#include <signal.h>
#include <stddef.h>
#include <stdlib.h>
-#include <tchar.h> // Must be before tpcshrd.h or for any use of _T macro
+#include <tchar.h> // Must be before tpcshrd.h or for any use of _T macro
#include <tpcshrd.h>
#include <uiviewsettingsinterop.h>
#include <windows.ui.viewmanagement.h>
@@ -454,6 +458,21 @@ bool IsTabletDevice(std::string* reason, HWND hwnd) {
if (IsWindows10TabletMode(hwnd))
return true;
+ return IsDeviceUsedAsATablet(reason);
+}
+
+// This method is used to set the right interactions media queries,
+// see https://drafts.csswg.org/mediaqueries-4/#mf-interaction. It doesn't
+// check the Windows 10 tablet mode because it doesn't reflect the actual
+// input configuration of the device and can be manually triggered by the user
+// independently from the hardware state.
+bool IsDeviceUsedAsATablet(std::string* reason) {
+ if (GetVersion() < VERSION_WIN8) {
+ if (reason)
+ *reason = "Tablet device detection not supported below Windows 8\n";
+ return false;
+ }
+
if (GetSystemMetrics(SM_MAXIMUMTOUCHES) == 0) {
if (reason) {
*reason += "Device does not support touch.\n";
@@ -481,22 +500,16 @@ bool IsTabletDevice(std::string* reason, HWND hwnd) {
GetModuleHandle(L"user32.dll"), "GetAutoRotationState"));
if (get_auto_rotation_state_func) {
- AR_STATE rotation_state;
- ZeroMemory(&rotation_state, sizeof(AR_STATE));
- if (get_auto_rotation_state_func(&rotation_state)) {
- if ((rotation_state & AR_NOT_SUPPORTED) || (rotation_state & AR_LAPTOP) ||
- (rotation_state & AR_NOSENSOR))
- return false;
- }
+ AR_STATE rotation_state = AR_ENABLED;
+ if (get_auto_rotation_state_func(&rotation_state) &&
+ (rotation_state & (AR_NOT_SUPPORTED | AR_LAPTOP | AR_NOSENSOR)) != 0)
+ return false;
}
// PlatformRoleSlate was added in Windows 8+.
POWER_PLATFORM_ROLE role = GetPlatformRole();
- bool mobile_power_profile = (role == PlatformRoleMobile);
- bool slate_power_profile = (role == PlatformRoleSlate);
-
bool is_tablet = false;
- if (mobile_power_profile || slate_power_profile) {
+ if (role == PlatformRoleMobile || role == PlatformRoleSlate) {
is_tablet = !GetSystemMetrics(SM_CONVERTIBLESLATEMODE);
if (!is_tablet) {
if (reason) {
diff --git a/chromium/base/win/win_util.h b/chromium/base/win/win_util.h
index 570dad4ce8f..80da69e24ad 100644
--- a/chromium/base/win/win_util.h
+++ b/chromium/base/win/win_util.h
@@ -22,8 +22,8 @@
#ifndef BASE_WIN_WIN_UTIL_H_
#define BASE_WIN_WIN_UTIL_H_
-#include <windows.h>
#include <stdint.h>
+#include "base/win/windows_types.h"
#include <string>
#include <vector>
@@ -35,6 +35,9 @@ struct IPropertyStore;
struct _tagpropertykey;
typedef _tagpropertykey PROPERTYKEY;
+// _WINDOWS_ will be defined if Windows.h was included - include Windows.h first
+// to get access to the full struct definition.
+#if defined(_WINDOWS_)
// This is the same as NONCLIENTMETRICS except that the
// unused member |iPaddedBorderWidth| has been removed.
struct NONCLIENTMETRICS_XP {
@@ -54,6 +57,9 @@ struct NONCLIENTMETRICS_XP {
LOGFONTW lfStatusFont;
LOGFONTW lfMessageFont;
};
+#else
+struct NONCLIENTMETRICS_XP;
+#endif
namespace base {
namespace win {
@@ -138,17 +144,28 @@ BASE_EXPORT void SetAbortBehaviorForCrashReporting();
BASE_EXPORT bool IsWindows10TabletMode(HWND hwnd);
// A tablet is a device that is touch enabled and also is being used
-// "like a tablet". This is used by the following:-
-// 1. Metrics:- To gain insight into how users use Chrome.
-// 2. Physical keyboard presence :- If a device is in tablet mode, it means
+// "like a tablet". This is used by the following:
+// 1. Metrics: To gain insight into how users use Chrome.
+// 2. Physical keyboard presence: If a device is in tablet mode, it means
// that there is no physical keyboard attached.
-// 3. To set the right interactions media queries,
-// see https://drafts.csswg.org/mediaqueries-4/#mf-interaction
// This function optionally sets the |reason| parameter to determine as to why
// or why not a device was deemed to be a tablet.
-// Returns true if the device is in tablet mode.
+// Returns true if the user has set Windows 10 in tablet mode.
BASE_EXPORT bool IsTabletDevice(std::string* reason, HWND hwnd);
+// Return true if the device is physically used as a tablet independently of
+// Windows tablet mode. It checks if the device:
+// - Is running Windows 8 or newer,
+// - Has a touch digitizer,
+// - Is not docked,
+// - Has a supported rotation sensor,
+// - Is not in laptop mode,
+// - prefers the mobile or slate power management profile (per OEM choice), and
+// - Is in slate mode.
+// This function optionally sets the |reason| parameter to determine as to why
+// or why not a device was deemed to be a tablet.
+BASE_EXPORT bool IsDeviceUsedAsATablet(std::string* reason);
+
// A slate is a touch device that may have a keyboard attached. This function
// returns true if a keyboard is attached and optionally will set the |reason|
// parameter to the detection method that was used to detect the keyboard.
diff --git a/chromium/base/win/windows_full.h b/chromium/base/win/windows_full.h
new file mode 100644
index 00000000000..8b9e43ae733
--- /dev/null
+++ b/chromium/base/win/windows_full.h
@@ -0,0 +1,13 @@
+// Copyright (c) 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This header is needed so that mojo typemap files can specify their dependence
+// on Windows.h. This can be removed once https://crbug.com/798763 is resolved.
+
+#ifndef BASE_WIN_WINDOWS_FULL_H
+#define BASE_WIN_WINDOWS_FULL_H
+
+#include <windows.h>
+
+#endif // BASE_WIN_WINDOWS_FULL_H
diff --git a/chromium/base/win/windows_types.h b/chromium/base/win/windows_types.h
new file mode 100644
index 00000000000..091e47fff25
--- /dev/null
+++ b/chromium/base/win/windows_types.h
@@ -0,0 +1,250 @@
+// Copyright (c) 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This file contains defines and typedefs that allow popular Windows types to
+// be used without the overhead of including windows.h.
+
+#ifndef BASE_WIN_WINDOWS_TYPES_H
+#define BASE_WIN_WINDOWS_TYPES_H
+
+// Needed for function prototypes.
+#include <concurrencysal.h>
+#include <sal.h>
+#include <specstrings.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// typedef and define the most commonly used Windows integer types.
+
+typedef unsigned long DWORD;
+typedef long LONG;
+typedef __int64 LONGLONG;
+typedef unsigned __int64 ULONGLONG;
+
+#define VOID void
+typedef char CHAR;
+typedef short SHORT;
+typedef long LONG;
+typedef int INT;
+typedef unsigned int UINT;
+typedef unsigned int* PUINT;
+typedef void* LPVOID;
+typedef void* PVOID;
+typedef void* HANDLE;
+typedef int BOOL;
+typedef unsigned char BYTE;
+typedef BYTE BOOLEAN;
+typedef DWORD ULONG;
+typedef unsigned short WORD;
+typedef WORD UWORD;
+typedef WORD ATOM;
+
+#if defined(_WIN64)
+typedef __int64 INT_PTR, *PINT_PTR;
+typedef unsigned __int64 UINT_PTR, *PUINT_PTR;
+
+typedef __int64 LONG_PTR, *PLONG_PTR;
+typedef unsigned __int64 ULONG_PTR, *PULONG_PTR;
+#else
+typedef __w64 int INT_PTR, *PINT_PTR;
+typedef __w64 unsigned int UINT_PTR, *PUINT_PTR;
+
+typedef __w64 long LONG_PTR, *PLONG_PTR;
+typedef __w64 unsigned long ULONG_PTR, *PULONG_PTR;
+#endif
+
+typedef UINT_PTR WPARAM;
+typedef LONG_PTR LPARAM;
+typedef LONG_PTR LRESULT;
+#define LRESULT LONG_PTR
+typedef _Return_type_success_(return >= 0) long HRESULT;
+
+typedef ULONG_PTR SIZE_T, *PSIZE_T;
+typedef LONG_PTR SSIZE_T, *PSSIZE_T;
+
+typedef DWORD ACCESS_MASK;
+typedef ACCESS_MASK REGSAM;
+
+
+// Forward declare Windows compatible handles.
+
+#define CHROME_DECLARE_HANDLE(name) \
+ struct name##__; \
+ typedef struct name##__* name
+CHROME_DECLARE_HANDLE(HGLRC);
+CHROME_DECLARE_HANDLE(HICON);
+CHROME_DECLARE_HANDLE(HINSTANCE);
+CHROME_DECLARE_HANDLE(HKEY);
+CHROME_DECLARE_HANDLE(HMENU);
+CHROME_DECLARE_HANDLE(HWND);
+typedef HINSTANCE HMODULE;
+#undef CHROME_DECLARE_HANDLE
+
+
+// Forward declare some Windows struct/typedef sets.
+
+typedef struct _OVERLAPPED OVERLAPPED;
+typedef struct tagMSG MSG, *PMSG, *NPMSG, *LPMSG;
+
+typedef struct _RTL_SRWLOCK RTL_SRWLOCK;
+typedef RTL_SRWLOCK SRWLOCK, *PSRWLOCK;
+
+typedef struct _GUID GUID;
+typedef GUID CLSID;
+
+typedef struct tagLOGFONTW LOGFONTW, *PLOGFONTW, *NPLOGFONTW, *LPLOGFONTW;
+typedef LOGFONTW LOGFONT;
+
+typedef struct _FILETIME FILETIME;
+
+typedef struct tagMENUITEMINFOW MENUITEMINFOW, MENUITEMINFO;
+
+
+// Declare Chrome versions of some Windows structures. These are needed for
+// when we need a concrete type but don't want to pull in Windows.h. We can't
+// declare the Windows types so we declare our types and cast to the Windows
+// types in a few places.
+
+struct CHROME_SRWLOCK {
+ PVOID Ptr;
+};
+
+struct CHROME_CONDITION_VARIABLE {
+ PVOID Ptr;
+};
+
+
+// Define some commonly used Windows constants. Note that the layout of these
+// macros - including internal spacing - must be 100% consistent with windows.h.
+
+#ifndef INVALID_HANDLE_VALUE
+// Work around there being two slightly different definitions in the SDK.
+#define INVALID_HANDLE_VALUE ((HANDLE)(LONG_PTR)-1)
+#endif
+#define TLS_OUT_OF_INDEXES ((DWORD)0xFFFFFFFF)
+#define HTNOWHERE 0
+#define MAX_PATH 260
+#define CS_GLOBALCLASS 0x4000
+
+#define ERROR_SUCCESS 0L
+#define ERROR_FILE_NOT_FOUND 2L
+#define ERROR_ACCESS_DENIED 5L
+#define ERROR_INVALID_HANDLE 6L
+#define ERROR_SHARING_VIOLATION 32L
+#define ERROR_LOCK_VIOLATION 33L
+#define REG_BINARY ( 3ul )
+
+#define STATUS_PENDING ((DWORD )0x00000103L)
+#define STILL_ACTIVE STATUS_PENDING
+#define SUCCEEDED(hr) (((HRESULT)(hr)) >= 0)
+#define FAILED(hr) (((HRESULT)(hr)) < 0)
+
+#define HKEY_CLASSES_ROOT (( HKEY ) (ULONG_PTR)((LONG)0x80000000) )
+#define HKEY_LOCAL_MACHINE (( HKEY ) (ULONG_PTR)((LONG)0x80000002) )
+#define HKEY_CURRENT_USER (( HKEY ) (ULONG_PTR)((LONG)0x80000001) )
+#define KEY_QUERY_VALUE (0x0001)
+#define KEY_SET_VALUE (0x0002)
+#define KEY_CREATE_SUB_KEY (0x0004)
+#define KEY_ENUMERATE_SUB_KEYS (0x0008)
+#define KEY_NOTIFY (0x0010)
+#define KEY_CREATE_LINK (0x0020)
+#define KEY_WOW64_32KEY (0x0200)
+#define KEY_WOW64_64KEY (0x0100)
+#define KEY_WOW64_RES (0x0300)
+
+#define READ_CONTROL (0x00020000L)
+#define SYNCHRONIZE (0x00100000L)
+
+#define STANDARD_RIGHTS_READ (READ_CONTROL)
+#define STANDARD_RIGHTS_WRITE (READ_CONTROL)
+#define STANDARD_RIGHTS_ALL (0x001F0000L)
+
+#define KEY_READ ((STANDARD_RIGHTS_READ |\
+ KEY_QUERY_VALUE |\
+ KEY_ENUMERATE_SUB_KEYS |\
+ KEY_NOTIFY) \
+ & \
+ (~SYNCHRONIZE))
+
+
+#define KEY_WRITE ((STANDARD_RIGHTS_WRITE |\
+ KEY_SET_VALUE |\
+ KEY_CREATE_SUB_KEY) \
+ & \
+ (~SYNCHRONIZE))
+
+#define KEY_ALL_ACCESS ((STANDARD_RIGHTS_ALL |\
+ KEY_QUERY_VALUE |\
+ KEY_SET_VALUE |\
+ KEY_CREATE_SUB_KEY |\
+ KEY_ENUMERATE_SUB_KEYS |\
+ KEY_NOTIFY |\
+ KEY_CREATE_LINK) \
+ & \
+ (~SYNCHRONIZE))
+
+// Define some macros needed when prototyping Windows functions.
+
+#define DECLSPEC_IMPORT __declspec(dllimport)
+#define WINBASEAPI DECLSPEC_IMPORT
+#define WINUSERAPI DECLSPEC_IMPORT
+#define WINAPI __stdcall
+#define CALLBACK __stdcall
+
+// Needed for optimal lock performance.
+WINBASEAPI _Releases_exclusive_lock_(*SRWLock) VOID WINAPI
+ ReleaseSRWLockExclusive(_Inout_ PSRWLOCK SRWLock);
+
+// Needed to support protobuf's GetMessage macro magic.
+WINUSERAPI BOOL WINAPI GetMessageW(_Out_ LPMSG lpMsg,
+ _In_opt_ HWND hWnd,
+ _In_ UINT wMsgFilterMin,
+ _In_ UINT wMsgFilterMax);
+
+// Needed for thread_local_storage.h
+WINBASEAPI LPVOID WINAPI TlsGetValue(_In_ DWORD dwTlsIndex);
+
+// Needed for scoped_handle.h
+WINBASEAPI _Check_return_ _Post_equals_last_error_ DWORD WINAPI
+ GetLastError(VOID);
+
+WINBASEAPI VOID WINAPI SetLastError(_In_ DWORD dwErrCode);
+
+#ifdef __cplusplus
+}
+#endif
+
+// These macros are all defined by windows.h and are also used as the names of
+// functions in the Chromium code base. Add to this list as needed whenever
+// there is a Windows macro which causes a function call to be renamed. This
+// ensures that the same renaming will happen everywhere. Includes of this file
+// can be added wherever needed to ensure this consistent renaming.
+
+#define CopyFile CopyFileW
+#define CreateDirectory CreateDirectoryW
+#define CreateEvent CreateEventW
+#define CreateFile CreateFileW
+#define CreateService CreateServiceW
+#define DeleteFile DeleteFileW
+#define DispatchMessage DispatchMessageW
+#define DrawText DrawTextW
+#define GetComputerName GetComputerNameW
+#define GetCurrentDirectory GetCurrentDirectoryW
+#define GetCurrentTime() GetTickCount()
+#define GetFileAttributes GetFileAttributesW
+#define GetMessage GetMessageW
+#define GetUserName GetUserNameW
+#define LoadIcon LoadIconW
+#define LoadImage LoadImageW
+#define PostMessage PostMessageW
+#define ReplaceFile ReplaceFileW
+#define ReportEvent ReportEventW
+#define SendMessage SendMessageW
+#define SendMessageCallback SendMessageCallbackW
+#define SetCurrentDirectory SetCurrentDirectoryW
+#define StartService StartServiceW
+
+#endif // BASE_WIN_WINDOWS_TYPES_H