summaryrefslogtreecommitdiff
path: root/chromium/base
diff options
context:
space:
mode:
authorAllan Sandfeld Jensen <allan.jensen@qt.io>2020-10-12 14:27:29 +0200
committerAllan Sandfeld Jensen <allan.jensen@qt.io>2020-10-13 09:35:20 +0000
commitc30a6232df03e1efbd9f3b226777b07e087a1122 (patch)
treee992f45784689f373bcc38d1b79a239ebe17ee23 /chromium/base
parent7b5b123ac58f58ffde0f4f6e488bcd09aa4decd3 (diff)
downloadqtwebengine-chromium-c30a6232df03e1efbd9f3b226777b07e087a1122.tar.gz
BASELINE: Update Chromium to 85.0.4183.14085-based
Change-Id: Iaa42f4680837c57725b1344f108c0196741f6057 Reviewed-by: Allan Sandfeld Jensen <allan.jensen@qt.io>
Diffstat (limited to 'chromium/base')
-rw-r--r--chromium/base/BUILD.gn381
-rw-r--r--chromium/base/DEPS4
-rw-r--r--chromium/base/OWNERS2
-rw-r--r--chromium/base/PRESUBMIT.py48
-rw-r--r--chromium/base/README.md12
-rw-r--r--chromium/base/allocator/BUILD.gn14
-rw-r--r--chromium/base/allocator/allocator.gni6
-rw-r--r--chromium/base/allocator/allocator_shim.cc10
-rw-r--r--chromium/base/allocator/allocator_shim_default_dispatch_to_partition_alloc.cc110
-rw-r--r--chromium/base/allocator/malloc_zone_functions_mac.h2
-rw-r--r--chromium/base/allocator/partition_allocator/PartitionAlloc.md39
-rw-r--r--chromium/base/allocator/partition_allocator/address_pool_manager.cc223
-rw-r--r--chromium/base/allocator/partition_allocator/address_pool_manager.h100
-rw-r--r--chromium/base/allocator/partition_allocator/address_pool_manager_unittest.cc150
-rw-r--r--chromium/base/allocator/partition_allocator/address_space_randomization.cc3
-rw-r--r--chromium/base/allocator/partition_allocator/memory_reclaimer.cc79
-rw-r--r--chromium/base/allocator/partition_allocator/memory_reclaimer.h29
-rw-r--r--chromium/base/allocator/partition_allocator/memory_reclaimer_unittest.cc52
-rw-r--r--chromium/base/allocator/partition_allocator/oom.h1
-rw-r--r--chromium/base/allocator/partition_allocator/oom_callback.cc3
-rw-r--r--chromium/base/allocator/partition_allocator/page_allocator.cc71
-rw-r--r--chromium/base/allocator/partition_allocator/page_allocator_internals_fuchsia.h7
-rw-r--r--chromium/base/allocator/partition_allocator/page_allocator_internals_posix.h22
-rw-r--r--chromium/base/allocator/partition_allocator/page_allocator_internals_win.h11
-rw-r--r--chromium/base/allocator/partition_allocator/page_allocator_unittest.cc23
-rw-r--r--chromium/base/allocator/partition_allocator/partition_address_space.cc69
-rw-r--r--chromium/base/allocator/partition_allocator/partition_address_space.h151
-rw-r--r--chromium/base/allocator/partition_allocator/partition_alloc.cc325
-rw-r--r--chromium/base/allocator/partition_allocator/partition_alloc.h750
-rw-r--r--chromium/base/allocator/partition_allocator/partition_alloc_check.h36
-rw-r--r--chromium/base/allocator/partition_allocator/partition_alloc_constants.h77
-rw-r--r--chromium/base/allocator/partition_allocator/partition_alloc_features.cc16
-rw-r--r--chromium/base/allocator/partition_allocator/partition_alloc_features.h34
-rw-r--r--chromium/base/allocator/partition_allocator/partition_alloc_forward.h6
-rw-r--r--chromium/base/allocator/partition_allocator/partition_alloc_perftest.cc8
-rw-r--r--chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc1103
-rw-r--r--chromium/base/allocator/partition_allocator/partition_bucket.cc161
-rw-r--r--chromium/base/allocator/partition_allocator/partition_bucket.h11
-rw-r--r--chromium/base/allocator/partition_allocator/partition_cookie.h57
-rw-r--r--chromium/base/allocator/partition_allocator/partition_direct_map_extent.h5
-rw-r--r--chromium/base/allocator/partition_allocator/partition_page.cc76
-rw-r--r--chromium/base/allocator/partition_allocator/partition_page.h96
-rw-r--r--chromium/base/allocator/partition_allocator/partition_root_base.cc50
-rw-r--r--chromium/base/allocator/partition_allocator/partition_root_base.h381
-rw-r--r--chromium/base/android/resource_exclusions.gni7
-rw-r--r--chromium/base/base_paths_fuchsia.cc2
-rw-r--r--chromium/base/base_paths_posix.cc1
-rw-r--r--chromium/base/base_paths_win.cc12
-rw-r--r--chromium/base/base_paths_win.h4
-rw-r--r--chromium/base/base_switches.cc4
-rw-r--r--chromium/base/base_switches.h1
-rw-r--r--chromium/base/big_endian.h3
-rw-r--r--chromium/base/bind_internal.h3
-rw-r--r--chromium/base/bits.h13
-rw-r--r--chromium/base/bits_unittest.cc27
-rw-r--r--chromium/base/callback.h2
-rw-r--r--chromium/base/callback_list.h2
-rw-r--r--chromium/base/cancelable_callback.h2
-rw-r--r--chromium/base/check.cc1
-rw-r--r--chromium/base/check.h7
-rw-r--r--chromium/base/check_op.h26
-rw-r--r--chromium/base/command_line.cc1
-rw-r--r--chromium/base/command_line_unittest.cc42
-rw-r--r--chromium/base/containers/adapters.h5
-rw-r--r--chromium/base/containers/checked_iterators.h2
-rw-r--r--chromium/base/containers/circular_deque.h2
-rw-r--r--chromium/base/containers/flat_map.h4
-rw-r--r--chromium/base/containers/flat_tree.h7
-rw-r--r--chromium/base/containers/flat_tree_unittest.cc5
-rw-r--r--chromium/base/containers/id_map.h9
-rw-r--r--chromium/base/containers/intrusive_heap.h2
-rw-r--r--chromium/base/containers/linked_list.h11
-rw-r--r--chromium/base/containers/mru_cache.h22
-rw-r--r--chromium/base/containers/mru_cache_unittest.cc8
-rw-r--r--chromium/base/containers/ring_buffer.h7
-rw-r--r--chromium/base/containers/small_map.h2
-rw-r--r--chromium/base/containers/span.h2
-rw-r--r--chromium/base/containers/stack_container.h14
-rw-r--r--chromium/base/containers/stack_container_unittest.cc20
-rw-r--r--chromium/base/containers/vector_buffer.h14
-rw-r--r--chromium/base/dcheck_is_on.h14
-rw-r--r--chromium/base/debug/alias.h15
-rw-r--r--chromium/base/debug/debugger_posix.cc2
-rw-r--r--chromium/base/debug/dump_without_crashing.cc2
-rw-r--r--chromium/base/debug/leak_tracker.h1
-rw-r--r--chromium/base/feature_list.cc3
-rw-r--r--chromium/base/files/file_descriptor_watcher_posix.h2
-rw-r--r--chromium/base/files/file_enumerator.h6
-rw-r--r--chromium/base/files/file_path.cc41
-rw-r--r--chromium/base/files/file_path.h2
-rw-r--r--chromium/base/files/file_path_watcher_linux.cc2
-rw-r--r--chromium/base/files/file_path_watcher_unittest.cc1
-rw-r--r--chromium/base/files/file_util.cc16
-rw-r--r--chromium/base/files/file_util.h54
-rw-r--r--chromium/base/files/file_util_posix.cc39
-rw-r--r--chromium/base/files/file_util_unittest.cc88
-rw-r--r--chromium/base/files/file_util_win.cc8
-rw-r--r--chromium/base/files/important_file_writer.cc23
-rw-r--r--chromium/base/files/important_file_writer_cleaner_unittest.cc2
-rw-r--r--chromium/base/files/memory_mapped_file.cc1
-rw-r--r--chromium/base/files/memory_mapped_file_win.cc1
-rw-r--r--chromium/base/files/scoped_file.h1
-rw-r--r--chromium/base/fuchsia/default_context.h4
-rw-r--r--chromium/base/fuchsia/intl_profile_watcher.cc6
-rw-r--r--chromium/base/fuchsia/process_context.cc41
-rw-r--r--chromium/base/fuchsia/process_context.h35
-rw-r--r--chromium/base/fuchsia/scoped_service_binding.h102
-rw-r--r--chromium/base/fuchsia/scoped_service_binding_unittest.cc68
-rw-r--r--chromium/base/fuchsia/scoped_service_publisher.h51
-rw-r--r--chromium/base/fuchsia/scoped_service_publisher_unittest.cc46
-rw-r--r--chromium/base/fuchsia/service_provider_impl.h1
-rw-r--r--chromium/base/fuchsia/test_component_context_for_process.cc8
-rw-r--r--chromium/base/fuchsia/test_component_context_for_process.h5
-rw-r--r--chromium/base/fuchsia/test_component_context_for_process_unittest.cc9
-rw-r--r--chromium/base/hash/hash.h1
-rw-r--r--chromium/base/hash/md5_constexpr_internal.h2
-rw-r--r--chromium/base/i18n/time_formatting.cc1
-rw-r--r--chromium/base/ios/ios_util.h3
-rw-r--r--chromium/base/ios/ios_util.mm5
-rw-r--r--chromium/base/ios/weak_nsobject.h2
-rw-r--r--chromium/base/json/json_common.h2
-rw-r--r--chromium/base/json/json_file_value_serializer.h2
-rw-r--r--chromium/base/json/json_parser.cc129
-rw-r--r--chromium/base/json/json_parser.h38
-rw-r--r--chromium/base/json/json_parser_unittest.cc161
-rw-r--r--chromium/base/json/json_perftest_decodebench.cc1
-rw-r--r--chromium/base/json/json_reader.cc94
-rw-r--r--chromium/base/json/json_reader.h73
-rw-r--r--chromium/base/json/json_reader_fuzzer.cc2
-rw-r--r--chromium/base/json/json_reader_unittest.cc52
-rw-r--r--chromium/base/json/json_string_value_serializer.cc12
-rw-r--r--chromium/base/json/json_value_serializer_unittest.cc14
-rw-r--r--chromium/base/lazy_instance.h2
-rw-r--r--chromium/base/lazy_instance_helpers.h2
-rw-r--r--chromium/base/lazy_instance_unittest.cc11
-rw-r--r--chromium/base/logging.cc8
-rw-r--r--chromium/base/logging.h4
-rw-r--r--chromium/base/logging_unittest.cc9
-rw-r--r--chromium/base/mac/foundation_util.mm1
-rw-r--r--chromium/base/mac/mach_port_rendezvous.cc1
-rw-r--r--chromium/base/mac/scoped_mach_vm.h2
-rw-r--r--chromium/base/mac/scoped_typeref.h26
-rw-r--r--chromium/base/memory/aligned_memory.cc18
-rw-r--r--chromium/base/memory/aligned_memory.h21
-rw-r--r--chromium/base/memory/aligned_memory_unittest.cc4
-rw-r--r--chromium/base/memory/checked_ptr.cc29
-rw-r--r--chromium/base/memory/checked_ptr.h441
-rw-r--r--chromium/base/memory/checked_ptr_unittest.cc387
-rw-r--r--chromium/base/memory/discardable_shared_memory.cc11
-rw-r--r--chromium/base/memory/discardable_shared_memory.h2
-rw-r--r--chromium/base/memory/discardable_shared_memory_unittest.cc10
-rw-r--r--chromium/base/memory/madv_free_discardable_memory_allocator_posix.cc12
-rw-r--r--chromium/base/memory/madv_free_discardable_memory_allocator_posix.h3
-rw-r--r--chromium/base/memory/madv_free_discardable_memory_allocator_posix_unittest.cc20
-rw-r--r--chromium/base/memory/madv_free_discardable_memory_posix.cc10
-rw-r--r--chromium/base/memory/madv_free_discardable_memory_posix.h2
-rw-r--r--chromium/base/memory/madv_free_discardable_memory_posix_unittest.cc2
-rw-r--r--chromium/base/memory/memory_pressure_listener.cc12
-rw-r--r--chromium/base/memory/memory_pressure_listener.h7
-rw-r--r--chromium/base/memory/memory_pressure_listener_unittest.cc5
-rw-r--r--chromium/base/memory/platform_shared_memory_region.cc10
-rw-r--r--chromium/base/memory/platform_shared_memory_region_android.cc1
-rw-r--r--chromium/base/memory/platform_shared_memory_region_unittest.cc1
-rw-r--r--chromium/base/memory/ref_counted.h2
-rw-r--r--chromium/base/memory/ref_counted_delete_on_sequence.h2
-rw-r--r--chromium/base/memory/scoped_refptr.h2
-rw-r--r--chromium/base/memory/shared_memory_tracker.cc15
-rw-r--r--chromium/base/memory/shared_memory_tracker.h2
-rw-r--r--chromium/base/memory/singleton.h2
-rw-r--r--chromium/base/memory/singleton_unittest.cc21
-rw-r--r--chromium/base/memory/weak_ptr.h2
-rw-r--r--chromium/base/message_loop/message_loop.cc164
-rw-r--r--chromium/base/message_loop/message_loop.h201
-rw-r--r--chromium/base/message_loop/message_loop_current.cc25
-rw-r--r--chromium/base/message_loop/message_loop_current.h57
-rw-r--r--chromium/base/message_loop/message_loop_unittest.cc2270
-rw-r--r--chromium/base/message_loop/message_pump.h2
-rw-r--r--chromium/base/message_loop/message_pump_fuchsia.cc2
-rw-r--r--chromium/base/message_loop/message_pump_glib_unittest.cc15
-rw-r--r--chromium/base/message_loop/message_pump_io_ios.cc2
-rw-r--r--chromium/base/message_loop/message_pump_io_ios_unittest.cc1
-rw-r--r--chromium/base/message_loop/message_pump_libevent.cc13
-rw-r--r--chromium/base/message_loop/message_pump_libevent_unittest.cc1
-rw-r--r--chromium/base/message_loop/message_pump_mac.h33
-rw-r--r--chromium/base/message_loop/message_pump_mac.mm147
-rw-r--r--chromium/base/message_loop/message_pump_mac_unittest.mm174
-rw-r--r--chromium/base/message_loop/message_pump_win.cc90
-rw-r--r--chromium/base/metrics/dummy_histogram.cc5
-rw-r--r--chromium/base/metrics/dummy_histogram.h3
-rw-r--r--chromium/base/metrics/field_trial.cc15
-rw-r--r--chromium/base/metrics/field_trial.h8
-rw-r--r--chromium/base/metrics/field_trial_params.h1
-rw-r--r--chromium/base/metrics/field_trial_unittest.cc81
-rw-r--r--chromium/base/metrics/histogram.cc21
-rw-r--r--chromium/base/metrics/histogram.h9
-rw-r--r--chromium/base/metrics/histogram_base.cc24
-rw-r--r--chromium/base/metrics/histogram_base.h9
-rw-r--r--chromium/base/metrics/histogram_macros_internal.h2
-rw-r--r--chromium/base/metrics/histogram_macros_local.h1
-rw-r--r--chromium/base/metrics/histogram_unittest.cc49
-rw-r--r--chromium/base/metrics/sparse_histogram.cc10
-rw-r--r--chromium/base/metrics/sparse_histogram.h6
-rw-r--r--chromium/base/metrics/sparse_histogram_unittest.cc47
-rw-r--r--chromium/base/metrics/ukm_source_id.cc3
-rw-r--r--chromium/base/metrics/ukm_source_id.h6
-rw-r--r--chromium/base/metrics/user_metrics.cc2
-rw-r--r--chromium/base/numerics/checked_math_impl.h20
-rw-r--r--chromium/base/numerics/safe_conversions.h4
-rw-r--r--chromium/base/observer_list.h3
-rw-r--r--chromium/base/observer_list_internal.h2
-rw-r--r--chromium/base/observer_list_threadsafe.h2
-rw-r--r--chromium/base/observer_list_threadsafe_unittest.cc1
-rw-r--r--chromium/base/one_shot_event.h2
-rw-r--r--chromium/base/optional.h2
-rw-r--r--chromium/base/pickle.cc11
-rw-r--r--chromium/base/pickle.h6
-rw-r--r--chromium/base/posix/file_descriptor_shuffle.cc3
-rw-r--r--chromium/base/posix/unix_domain_socket.cc1
-rw-r--r--chromium/base/power_monitor/power_monitor.cc28
-rw-r--r--chromium/base/power_monitor/power_monitor.h14
-rw-r--r--chromium/base/power_monitor/power_monitor_device_source.h9
-rw-r--r--chromium/base/power_monitor/power_monitor_device_source_mac.mm13
-rw-r--r--chromium/base/power_monitor/power_monitor_device_source_unittest.cc39
-rw-r--r--chromium/base/power_monitor/power_monitor_device_source_win.cc1
-rw-r--r--chromium/base/power_monitor/power_monitor_source.cc32
-rw-r--r--chromium/base/power_monitor/power_monitor_source.h12
-rw-r--r--chromium/base/power_monitor/power_monitor_unittest.cc60
-rw-r--r--chromium/base/power_monitor/power_observer.h24
-rw-r--r--chromium/base/power_monitor/thermal_state_observer_mac.h40
-rw-r--r--chromium/base/power_monitor/thermal_state_observer_mac.mm75
-rw-r--r--chromium/base/power_monitor/thermal_state_observer_mac_unittest.mm52
-rw-r--r--chromium/base/process/internal_linux.cc2
-rw-r--r--chromium/base/process/internal_linux.h5
-rw-r--r--chromium/base/process/kill_fuchsia.cc1
-rw-r--r--chromium/base/process/kill_win.cc1
-rw-r--r--chromium/base/process/launch_mac.cc2
-rw-r--r--chromium/base/process/launch_posix.cc2
-rw-r--r--chromium/base/process/memory_linux.cc6
-rw-r--r--chromium/base/process/process_handle.cc2
-rw-r--r--chromium/base/process/process_info_win.cc1
-rw-r--r--chromium/base/process/process_iterator_linux.cc1
-rw-r--r--chromium/base/process/process_metrics.h14
-rw-r--r--chromium/base/process/process_metrics_freebsd.cc5
-rw-r--r--chromium/base/process/process_metrics_fuchsia.cc8
-rw-r--r--chromium/base/process/process_metrics_ios.cc5
-rw-r--r--chromium/base/process/process_metrics_linux.cc75
-rw-r--r--chromium/base/process/process_metrics_mac.cc5
-rw-r--r--chromium/base/process/process_metrics_openbsd.cc5
-rw-r--r--chromium/base/process/process_metrics_unittest.cc69
-rw-r--r--chromium/base/process/process_metrics_win.cc5
-rw-r--r--chromium/base/profiler/chrome_unwinder_android.cc9
-rw-r--r--chromium/base/profiler/chrome_unwinder_android.h4
-rw-r--r--chromium/base/profiler/chrome_unwinder_android_unittest.cc12
-rw-r--r--chromium/base/profiler/metadata_recorder.cc5
-rw-r--r--chromium/base/profiler/metadata_recorder.h6
-rw-r--r--chromium/base/profiler/native_unwinder_android.cc90
-rw-r--r--chromium/base/profiler/native_unwinder_android.h34
-rw-r--r--chromium/base/profiler/native_unwinder_android_unittest.cc176
-rw-r--r--chromium/base/profiler/stack_copier_signal.cc3
-rw-r--r--chromium/base/profiler/stack_copier_unittest.cc1
-rw-r--r--chromium/base/profiler/stack_sampler.h14
-rw-r--r--chromium/base/profiler/stack_sampler_android.cc8
-rw-r--r--chromium/base/profiler/stack_sampler_impl.cc37
-rw-r--r--chromium/base/profiler/stack_sampler_impl.h2
-rw-r--r--chromium/base/profiler/stack_sampler_impl_unittest.cc42
-rw-r--r--chromium/base/profiler/stack_sampler_ios.cc2
-rw-r--r--chromium/base/profiler/stack_sampler_mac.cc8
-rw-r--r--chromium/base/profiler/stack_sampler_posix.cc2
-rw-r--r--chromium/base/profiler/stack_sampler_win.cc7
-rw-r--r--chromium/base/profiler/stack_sampling_profiler.cc62
-rw-r--r--chromium/base/profiler/stack_sampling_profiler.h39
-rw-r--r--chromium/base/profiler/stack_sampling_profiler_test_util.cc92
-rw-r--r--chromium/base/profiler/stack_sampling_profiler_test_util.h8
-rw-r--r--chromium/base/profiler/stack_sampling_profiler_unittest.cc141
-rw-r--r--chromium/base/profiler/unwindstack_internal_android.cc30
-rw-r--r--chromium/base/profiler/unwindstack_internal_android.h34
-rw-r--r--chromium/base/profiler/win32_stack_frame_unwinder.cc1
-rw-r--r--chromium/base/run_loop.cc16
-rw-r--r--chromium/base/run_loop.h43
-rw-r--r--chromium/base/safe_numerics_unittest.cc32
-rw-r--r--chromium/base/sampling_heap_profiler/lock_free_address_hash_set.h2
-rw-r--r--chromium/base/scoped_clear_last_error_unittest.cc1
-rw-r--r--chromium/base/scoped_generic.h2
-rw-r--r--chromium/base/scoped_observer.h2
-rw-r--r--chromium/base/sequence_checker.h10
-rw-r--r--chromium/base/stl_util.h34
-rw-r--r--chromium/base/strings/no_trigraphs_unittest.cc10
-rw-r--r--chromium/base/strings/strcat.cc62
-rw-r--r--chromium/base/strings/strcat.h14
-rw-r--r--chromium/base/strings/strcat_internal.h60
-rw-r--r--chromium/base/strings/strcat_win.cc35
-rw-r--r--chromium/base/strings/strcat_win.h45
-rw-r--r--chromium/base/strings/string_number_conversions.cc461
-rw-r--r--chromium/base/strings/string_number_conversions.h8
-rw-r--r--chromium/base/strings/string_number_conversions_internal.h303
-rw-r--r--chromium/base/strings/string_number_conversions_win.cc79
-rw-r--r--chromium/base/strings/string_number_conversions_win.h40
-rw-r--r--chromium/base/strings/string_piece.h6
-rw-r--r--chromium/base/strings/string_split.cc145
-rw-r--r--chromium/base/strings/string_split.h28
-rw-r--r--chromium/base/strings/string_split_internal.h100
-rw-r--r--chromium/base/strings/string_split_win.cc59
-rw-r--r--chromium/base/strings/string_split_win.h53
-rw-r--r--chromium/base/strings/string_util.cc834
-rw-r--r--chromium/base/strings/string_util.h134
-rw-r--r--chromium/base/strings/string_util_internal.h625
-rw-r--r--chromium/base/strings/string_util_posix.h2
-rw-r--r--chromium/base/strings/string_util_unittest.cc45
-rw-r--r--chromium/base/strings/string_util_win.cc145
-rw-r--r--chromium/base/strings/string_util_win.h157
-rw-r--r--chromium/base/strings/utf_string_conversions.cc12
-rw-r--r--chromium/base/strings/utf_string_conversions.h11
-rw-r--r--chromium/base/synchronization/condition_variable.h2
-rw-r--r--chromium/base/synchronization/lock.h2
-rw-r--r--chromium/base/synchronization/lock_impl.h2
-rw-r--r--chromium/base/system/sys_info.h4
-rw-r--r--chromium/base/system/sys_info_chromeos.cc17
-rw-r--r--chromium/base/system/sys_info_linux.cc1
-rw-r--r--chromium/base/system/sys_info_unittest.cc29
-rw-r--r--chromium/base/task/common/checked_lock.h20
-rw-r--r--chromium/base/task/common/checked_lock_impl.cc13
-rw-r--r--chromium/base/task/common/checked_lock_impl.h6
-rw-r--r--chromium/base/task/common/checked_lock_unittest.cc72
-rw-r--r--chromium/base/task/common/task_annotator.cc16
-rw-r--r--chromium/base/task/post_job.h2
-rw-r--r--chromium/base/task/post_task.cc13
-rw-r--r--chromium/base/task/post_task.h21
-rw-r--r--chromium/base/task/post_task_unittest.cc4
-rw-r--r--chromium/base/task/sequence_manager/lazily_deallocated_deque.h2
-rw-r--r--chromium/base/task/sequence_manager/sequence_manager_impl.cc118
-rw-r--r--chromium/base/task/sequence_manager/sequence_manager_impl.h22
-rw-r--r--chromium/base/task/sequence_manager/sequence_manager_impl_unittest.cc187
-rw-r--r--chromium/base/task/sequence_manager/sequence_manager_perftest.cc1
-rw-r--r--chromium/base/task/sequence_manager/sequenced_task_source.h13
-rw-r--r--chromium/base/task/sequence_manager/task_queue_impl.cc140
-rw-r--r--chromium/base/task/sequence_manager/task_queue_impl.h23
-rw-r--r--chromium/base/task/sequence_manager/task_queue_selector.cc55
-rw-r--r--chromium/base/task/sequence_manager/task_queue_selector.h24
-rw-r--r--chromium/base/task/sequence_manager/task_queue_selector_unittest.cc69
-rw-r--r--chromium/base/task/sequence_manager/thread_controller_impl.cc2
-rw-r--r--chromium/base/task/sequence_manager/thread_controller_power_monitor.cc91
-rw-r--r--chromium/base/task/sequence_manager/thread_controller_power_monitor.h56
-rw-r--r--chromium/base/task/sequence_manager/thread_controller_power_monitor_unittest.cc69
-rw-r--r--chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl.cc43
-rw-r--r--chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl.h38
-rw-r--r--chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl_unittest.cc172
-rw-r--r--chromium/base/task/sequence_manager/time_domain.cc17
-rw-r--r--chromium/base/task/sequence_manager/time_domain.h8
-rw-r--r--chromium/base/task/sequence_manager/time_domain_unittest.cc1
-rw-r--r--chromium/base/task/sequence_manager/work_queue.cc10
-rw-r--r--chromium/base/task/sequence_manager/work_queue.h5
-rw-r--r--chromium/base/task/sequence_manager/work_queue_sets.h4
-rw-r--r--chromium/base/task/single_thread_task_executor_unittest.cc2157
-rw-r--r--chromium/base/task/task_traits.h2
-rw-r--r--chromium/base/task/thread_pool/job_task_source.cc5
-rw-r--r--chromium/base/task/thread_pool/job_task_source.h6
-rw-r--r--chromium/base/task/thread_pool/service_thread_unittest.cc1
-rw-r--r--chromium/base/task/thread_pool/task_tracker.cc4
-rw-r--r--chromium/base/task/thread_pool/task_tracker.h1
-rw-r--r--chromium/base/task/thread_pool/task_tracker_posix.h1
-rw-r--r--chromium/base/task/thread_pool/thread_group_impl.h2
-rw-r--r--chromium/base/task/thread_pool/thread_pool_impl.h2
-rw-r--r--chromium/base/task/thread_pool/tracked_ref.h2
-rw-r--r--chromium/base/task/thread_pool/worker_thread.cc20
-rw-r--r--chromium/base/task_runner.h2
-rw-r--r--chromium/base/task_runner_util.h2
-rw-r--r--chromium/base/test/BUILD.gn29
-rw-r--r--chromium/base/test/OWNERS4
-rw-r--r--chromium/base/test/generate_fontconfig_caches.cc16
-rw-r--r--chromium/base/test/gtest_links.cc44
-rw-r--r--chromium/base/test/gtest_links.h28
-rw-r--r--chromium/base/test/gtest_links_unittest.cc24
-rw-r--r--chromium/base/test/gtest_util.h9
-rw-r--r--chromium/base/test/gtest_xml_unittest_result_printer.cc51
-rw-r--r--chromium/base/test/gtest_xml_unittest_result_printer.h14
-rw-r--r--chromium/base/test/gtest_xml_unittest_result_printer_unittest.cc51
-rw-r--r--chromium/base/test/gtest_xml_util.cc32
-rw-r--r--chromium/base/test/power_monitor_test_base.cc20
-rw-r--r--chromium/base/test/power_monitor_test_base.h19
-rw-r--r--chromium/base/test/scoped_feature_list.cc2
-rw-r--r--chromium/base/test/scoped_run_loop_timeout.cc1
-rw-r--r--chromium/base/test/test_file_util_win.cc22
-rw-r--r--chromium/base/test/test_pending_task.h2
-rw-r--r--chromium/base/test/test_pending_task_unittest.cc5
-rw-r--r--chromium/base/test/test_suite.cc13
-rw-r--r--chromium/base/test/test_suite.h14
-rw-r--r--chromium/base/test/test_switches.cc3
-rw-r--r--chromium/base/test/test_switches.h1
-rw-r--r--chromium/base/test/test_timeouts.cc45
-rw-r--r--chromium/base/test/test_timeouts.h18
-rw-r--r--chromium/base/test/trace_event_analyzer.cc1
-rw-r--r--chromium/base/test/trace_event_analyzer.h2
-rw-r--r--chromium/base/test/with_feature_override.h17
-rw-r--r--chromium/base/third_party/nspr/prtime.cc7
-rw-r--r--chromium/base/thread_annotations.h2
-rw-r--r--chromium/base/threading/hang_watcher.cc39
-rw-r--r--chromium/base/threading/hang_watcher.h6
-rw-r--r--chromium/base/threading/hang_watcher_unittest.cc34
-rw-r--r--chromium/base/threading/platform_thread_win.cc8
-rw-r--r--chromium/base/threading/scoped_blocking_call.cc2
-rw-r--r--chromium/base/threading/scoped_thread_priority.cc2
-rw-r--r--chromium/base/threading/sequence_bound.h24
-rw-r--r--chromium/base/threading/sequence_bound_unittest.cc23
-rw-r--r--chromium/base/threading/sequenced_task_runner_handle_unittest.cc2
-rw-r--r--chromium/base/threading/thread.cc8
-rw-r--r--chromium/base/threading/thread_checker.h4
-rw-r--r--chromium/base/threading/thread_checker_unittest.cc1
-rw-r--r--chromium/base/threading/thread_local.h2
-rw-r--r--chromium/base/threading/thread_restrictions.cc2
-rw-r--r--chromium/base/threading/thread_restrictions.h8
-rw-r--r--chromium/base/threading/thread_unittest.cc1
-rw-r--r--chromium/base/time/time.cc1
-rw-r--r--chromium/base/time/time.h2
-rw-r--r--chromium/base/time/time_mac.cc1
-rw-r--r--chromium/base/trace_event/base_tracing.h28
-rw-r--r--chromium/base/trace_event/builtin_categories.h9
-rw-r--r--chromium/base/trace_event/category_registry.cc1
-rw-r--r--chromium/base/trace_event/category_registry.h2
-rw-r--r--chromium/base/trace_event/etw_manifest/BUILD.gn27
-rw-r--r--chromium/base/trace_event/etw_manifest/chrome_events_win.man95
-rw-r--r--chromium/base/trace_event/features.gni12
-rw-r--r--chromium/base/trace_event/memory_allocator_dump.h1
-rw-r--r--chromium/base/trace_event/memory_dump_manager.cc1
-rw-r--r--chromium/base/trace_event/memory_infra_background_allowlist.cc3
-rw-r--r--chromium/base/trace_event/process_memory_dump.cc1
-rw-r--r--chromium/base/trace_event/trace_config.cc1
-rw-r--r--chromium/base/trace_event/trace_event_etw_export_win.cc315
-rw-r--r--chromium/base/trace_event/trace_event_etw_export_win.h20
-rw-r--r--chromium/base/trace_event/trace_event_impl.cc2
-rw-r--r--chromium/base/trace_event/trace_event_stub.cc21
-rw-r--r--chromium/base/trace_event/trace_event_stub.h176
-rw-r--r--chromium/base/trace_event/trace_event_unittest.cc5
-rw-r--r--chromium/base/trace_event/trace_log.cc7
-rw-r--r--chromium/base/trace_event/trace_logging_minimal_win.cc351
-rw-r--r--chromium/base/trace_event/trace_logging_minimal_win.h393
-rw-r--r--chromium/base/trace_event/typed_macros.h74
-rw-r--r--chromium/base/trace_event/typed_macros_embedder_support.h65
-rw-r--r--chromium/base/trace_event/typed_macros_internal.cc95
-rw-r--r--chromium/base/trace_event/typed_macros_internal.h176
-rw-r--r--chromium/base/trace_event/typed_macros_unittest.cc111
-rw-r--r--chromium/base/unguessable_token.h2
-rw-r--r--chromium/base/unguessable_token_unittest.cc10
-rw-r--r--chromium/base/util/BUILD.gn1
-rw-r--r--chromium/base/util/memory_pressure/memory_pressure_voter.cc2
-rw-r--r--chromium/base/util/memory_pressure/multi_source_memory_pressure_monitor.cc2
-rw-r--r--chromium/base/util/memory_pressure/system_memory_pressure_evaluator.cc7
-rw-r--r--chromium/base/util/memory_pressure/system_memory_pressure_evaluator_chromeos.cc17
-rw-r--r--chromium/base/util/memory_pressure/system_memory_pressure_evaluator_chromeos_unittest.cc2
-rw-r--r--chromium/base/util/memory_pressure/system_memory_pressure_evaluator_fuchsia.cc8
-rw-r--r--chromium/base/util/memory_pressure/system_memory_pressure_evaluator_fuchsia_unittest.cc11
-rw-r--r--chromium/base/util/memory_pressure/system_memory_pressure_evaluator_win_unittest.cc1
-rw-r--r--chromium/base/util/ranges/BUILD.gn26
-rw-r--r--chromium/base/util/ranges/OWNERS2
-rw-r--r--chromium/base/util/ranges/README.md146
-rw-r--r--chromium/base/util/ranges/algorithm.h1352
-rw-r--r--chromium/base/util/ranges/algorithm_unittest.cc398
-rw-r--r--chromium/base/util/ranges/functional.h71
-rw-r--r--chromium/base/util/ranges/functional_unittest.cc52
-rw-r--r--chromium/base/util/ranges/iterator.h40
-rw-r--r--chromium/base/util/ranges/iterator_unittest.cc49
-rw-r--r--chromium/base/util/timer/wall_clock_timer.cc6
-rw-r--r--chromium/base/util/type_safety/pass_key.h2
-rw-r--r--chromium/base/util/values/BUILD.gn3
-rw-r--r--chromium/base/util/values/values_util.cc59
-rw-r--r--chromium/base/util/values/values_util.h42
-rw-r--r--chromium/base/util/values/values_util_unittest.cc59
-rw-r--r--chromium/base/value_conversions.cc105
-rw-r--r--chromium/base/value_conversions.h51
-rw-r--r--chromium/base/values.cc6
-rw-r--r--chromium/base/values.h43
-rw-r--r--chromium/base/version_unittest.cc24
-rw-r--r--chromium/base/win/async_operation_unittest.cc13
-rw-r--r--chromium/base/win/atl.h8
-rw-r--r--chromium/base/win/atl_throw.cc25
-rw-r--r--chromium/base/win/atl_throw.h43
-rw-r--r--chromium/base/win/com_init_check_hook.cc1
-rw-r--r--chromium/base/win/com_init_check_hook.h2
-rw-r--r--chromium/base/win/com_init_util.cc1
-rw-r--r--chromium/base/win/com_init_util.h2
-rw-r--r--chromium/base/win/dispatch_stub.cc40
-rw-r--r--chromium/base/win/dispatch_stub.h43
-rw-r--r--chromium/base/win/map.h3
-rw-r--r--chromium/base/win/post_async_results.h1
-rw-r--r--chromium/base/win/reference_unittest.cc18
-rw-r--r--chromium/base/win/scoped_bstr.h2
-rw-r--r--chromium/base/win/scoped_co_mem.h2
-rw-r--r--chromium/base/win/scoped_devinfo.h24
-rw-r--r--chromium/base/win/scoped_handle.h2
-rw-r--r--chromium/base/win/scoped_hdc.h2
-rw-r--r--chromium/base/win/scoped_propvariant.h2
-rw-r--r--chromium/base/win/scoped_safearray.h3
-rw-r--r--chromium/base/win/scoped_select_object.h2
-rw-r--r--chromium/base/win/scoped_variant.cc1
-rw-r--r--chromium/base/win/scoped_variant_unittest.cc38
-rw-r--r--chromium/base/win/shortcut.h2
-rw-r--r--chromium/base/win/typed_event_handler.h49
-rw-r--r--chromium/base/win/typed_event_handler_unittest.cc49
-rw-r--r--chromium/base/win/variant_util.h151
-rw-r--r--chromium/base/win/variant_util_unittest.cc266
-rw-r--r--chromium/base/win/vector.h2
-rw-r--r--chromium/base/win/vector_unittest.cc37
-rw-r--r--chromium/base/win/windows_version.cc2
-rw-r--r--chromium/base/win/windows_version.h1
-rw-r--r--chromium/base/win/windows_version_unittest.cc4
504 files changed, 17022 insertions, 8856 deletions
diff --git a/chromium/base/BUILD.gn b/chromium/base/BUILD.gn
index 4a9258a9ee7..31b9d925835 100644
--- a/chromium/base/BUILD.gn
+++ b/chromium/base/BUILD.gn
@@ -18,7 +18,6 @@
# huge sequence of random-looking conditionals.
import("//base/allocator/allocator.gni")
-import("//base/trace_event/features.gni")
import("//build/buildflag_header.gni")
import("//build/config/allocator.gni")
import("//build/config/arm.gni")
@@ -34,6 +33,7 @@ import("//build/config/sysroot.gni")
import("//build/config/ui.gni")
import("//build/nocompile.gni")
import("//build/timestamp.gni")
+import("//build_overrides/build.gni")
import("//testing/libfuzzer/fuzzer_test.gni")
import("//testing/test.gni")
import("//third_party/icu/config.gni")
@@ -91,6 +91,11 @@ dep_libevent =
# Determines whether message_pump_libevent should be used.
use_libevent = dep_libevent && !is_ios
+# Whether or not cfi table should be enabled on arm.
+# TODO(crbug.com/1090409): Replace can_unwind_with_cfi_table once sampling
+# profiler is enabled on android.
+enable_arm_cfi_table = is_android && !is_component_build && current_cpu == "arm"
+
if (is_android) {
import("//build/config/android/rules.gni")
}
@@ -227,6 +232,7 @@ jumbo_component("base") {
"cpu.cc",
"cpu.h",
"critical_closure.h",
+ "dcheck_is_on.h",
"debug/activity_analyzer.cc",
"debug/activity_analyzer.h",
"debug/activity_tracker.cc",
@@ -321,6 +327,7 @@ jumbo_component("base") {
"macros.h",
"memory/aligned_memory.cc",
"memory/aligned_memory.h",
+ "memory/checked_ptr.cc",
"memory/checked_ptr.h",
"memory/discardable_memory.cc",
"memory/discardable_memory.h",
@@ -361,8 +368,6 @@ jumbo_component("base") {
"memory/weak_ptr.h",
"memory/writable_shared_memory_region.cc",
"memory/writable_shared_memory_region.h",
- "message_loop/message_loop.cc",
- "message_loop/message_loop.h",
"message_loop/message_loop_current.cc",
"message_loop/message_loop_current.h",
"message_loop/message_pump.cc",
@@ -544,19 +549,23 @@ jumbo_component("base") {
"strings/safe_sprintf.h",
"strings/strcat.cc",
"strings/strcat.h",
+ "strings/strcat_internal.h",
"strings/string16.cc",
"strings/string16.h",
"strings/string_number_conversions.cc",
"strings/string_number_conversions.h",
+ "strings/string_number_conversions_internal.h",
"strings/string_piece.cc",
"strings/string_piece.h",
"strings/string_piece_forward.h",
"strings/string_split.cc",
"strings/string_split.h",
+ "strings/string_split_internal.h",
"strings/string_tokenizer.h",
"strings/string_util.cc",
"strings/string_util.h",
"strings/string_util_constants.cc",
+ "strings/string_util_internal.h",
"strings/stringize_macros.h",
"strings/stringprintf.cc",
"strings/stringprintf.h",
@@ -637,6 +646,8 @@ jumbo_component("base") {
"task/sequence_manager/thread_controller.h",
"task/sequence_manager/thread_controller_impl.cc",
"task/sequence_manager/thread_controller_impl.h",
+ "task/sequence_manager/thread_controller_power_monitor.cc",
+ "task/sequence_manager/thread_controller_power_monitor.h",
"task/sequence_manager/thread_controller_with_message_pump_impl.cc",
"task/sequence_manager/thread_controller_with_message_pump_impl.h",
"task/sequence_manager/time_domain.cc",
@@ -787,79 +798,19 @@ jumbo_component("base") {
"timer/timer.h",
"token.cc",
"token.h",
- "trace_event/auto_open_close_event.h",
- "trace_event/blame_context.cc",
- "trace_event/blame_context.h",
- "trace_event/builtin_categories.cc",
- "trace_event/builtin_categories.h",
- "trace_event/category_registry.cc",
- "trace_event/category_registry.h",
+ "trace_event/base_tracing.h",
"trace_event/common/trace_event_common.h",
- "trace_event/event_name_filter.cc",
- "trace_event/event_name_filter.h",
- "trace_event/heap_profiler.h",
"trace_event/heap_profiler_allocation_context.cc",
"trace_event/heap_profiler_allocation_context.h",
"trace_event/heap_profiler_allocation_context_tracker.cc",
"trace_event/heap_profiler_allocation_context_tracker.h",
- "trace_event/heap_profiler_event_filter.cc",
- "trace_event/heap_profiler_event_filter.h",
- "trace_event/log_message.cc",
- "trace_event/log_message.h",
- "trace_event/malloc_dump_provider.cc",
- "trace_event/malloc_dump_provider.h",
- "trace_event/memory_allocator_dump.cc",
- "trace_event/memory_allocator_dump.h",
"trace_event/memory_allocator_dump_guid.cc",
"trace_event/memory_allocator_dump_guid.h",
- "trace_event/memory_dump_manager.cc",
- "trace_event/memory_dump_manager.h",
- "trace_event/memory_dump_manager_test_utils.h",
- "trace_event/memory_dump_provider.h",
- "trace_event/memory_dump_provider_info.cc",
- "trace_event/memory_dump_provider_info.h",
- "trace_event/memory_dump_request_args.cc",
- "trace_event/memory_dump_request_args.h",
- "trace_event/memory_dump_scheduler.cc",
- "trace_event/memory_dump_scheduler.h",
- "trace_event/memory_infra_background_allowlist.cc",
- "trace_event/memory_infra_background_allowlist.h",
- "trace_event/memory_usage_estimator.cc",
- "trace_event/memory_usage_estimator.h",
- "trace_event/process_memory_dump.cc",
- "trace_event/process_memory_dump.h",
- "trace_event/thread_instruction_count.cc",
- "trace_event/thread_instruction_count.h",
- "trace_event/trace_arguments.cc",
- "trace_event/trace_arguments.h",
- "trace_event/trace_buffer.cc",
- "trace_event/trace_buffer.h",
- "trace_event/trace_category.h",
- "trace_event/trace_config.cc",
- "trace_event/trace_config.h",
- "trace_event/trace_config_category_filter.cc",
- "trace_event/trace_config_category_filter.h",
- "trace_event/trace_event.h",
- "trace_event/trace_event_filter.cc",
- "trace_event/trace_event_filter.h",
- "trace_event/trace_event_impl.cc",
- "trace_event/trace_event_impl.h",
- "trace_event/trace_event_memory_overhead.cc",
- "trace_event/trace_event_memory_overhead.h",
- "trace_event/trace_log.cc",
- "trace_event/trace_log.h",
- "trace_event/trace_log_constants.cc",
- "trace_event/traced_value.cc",
- "trace_event/traced_value.h",
- "trace_event/tracing_agent.cc",
- "trace_event/tracing_agent.h",
"traits_bag.h",
"tuple.h",
"unguessable_token.cc",
"unguessable_token.h",
"updateable_sequenced_task_runner.h",
- "value_conversions.cc",
- "value_conversions.h",
"value_iterators.cc",
"value_iterators.h",
"values.cc",
@@ -986,6 +937,13 @@ jumbo_component("base") {
"profiler/suspendable_thread_delegate_win.cc",
"profiler/suspendable_thread_delegate_win.h",
"scoped_clear_last_error_win.cc",
+ "strings/strcat_win.cc",
+ "strings/strcat_win.h",
+ "strings/string_number_conversions_win.cc",
+ "strings/string_number_conversions_win.h",
+ "strings/string_split_win.cc",
+ "strings/string_split_win.h",
+ "strings/string_util_win.cc",
"strings/string_util_win.h",
"strings/sys_string_conversions_win.cc",
"sync_socket_win.cc",
@@ -999,10 +957,10 @@ jumbo_component("base") {
"threading/platform_thread_win.h",
"threading/thread_local_storage_win.cc",
"timer/hi_res_timer_manager_win.cc",
- "trace_event/trace_event_etw_export_win.cc",
- "trace_event/trace_event_etw_export_win.h",
"win/async_operation.h",
"win/atl.h",
+ "win/atl_throw.cc",
+ "win/atl_throw.h",
"win/com_init_check_hook.cc",
"win/com_init_check_hook.h",
"win/com_init_util.cc",
@@ -1073,7 +1031,7 @@ jumbo_component("base") {
"win/sphelper.h",
"win/startup_information.cc",
"win/startup_information.h",
- "win/typed_event_handler.h",
+ "win/variant_util.h",
"win/vector.cc",
"win/vector.h",
"win/win_util.cc",
@@ -1221,11 +1179,6 @@ jumbo_component("base") {
"os_compat_android.h",
"profiler/stack_sampler_android.cc",
"threading/platform_thread_android.cc",
- "trace_event/cpufreq_monitor_android.cc",
- "trace_event/cpufreq_monitor_android.h",
- "trace_event/java_heap_dump_provider_android.cc",
- "trace_event/java_heap_dump_provider_android.h",
- "trace_event/trace_event_android.cc",
]
}
@@ -1311,6 +1264,10 @@ jumbo_component("base") {
"//third_party/modp_b64",
]
+ # native_unwinder_android is intended for use solely via a dynamic feature
+ # module, to avoid increasing Chrome's executable size.
+ assert_no_deps = [ ":native_unwinder_android" ]
+
public_deps = [
":anchor_functions_buildflags",
":base_static",
@@ -1323,6 +1280,7 @@ jumbo_component("base") {
":partition_alloc_buildflags",
":sanitizer_buildflags",
":synchronization_buildflags",
+ ":tracing_buildflags",
"//base/numerics:base_numerics",
]
@@ -1357,12 +1315,22 @@ jumbo_component("base") {
deps += [ "//base/allocator:tcmalloc" ]
} else if (is_linux && use_allocator == "none") {
sources += [ "allocator/allocator_shim_default_dispatch_to_glibc.cc" ]
- } else if (is_android && use_allocator == "none") {
- sources += [
- "allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc",
- "allocator/allocator_shim_override_linker_wrapped_symbols.h",
- ]
+ } else if ((is_linux || is_android) && use_allocator == "partition") {
+ # Cannot use the same dispatching for host-side binaries.
+ if (is_a_target_toolchain) {
+ sources += [
+ "allocator/allocator_shim_default_dispatch_to_partition_alloc.cc",
+ ]
+ } else {
+ sources += [ "allocator/allocator_shim_default_dispatch_to_glibc.cc" ]
+ }
+ } else if (is_android) {
+ sources +=
+ [ "allocator/allocator_shim_override_linker_wrapped_symbols.h" ]
all_dependent_configs += [ "//base/allocator:wrap_malloc_symbols" ]
+ if (use_allocator == "none") {
+ sources += [ "allocator/allocator_shim_default_dispatch_to_linker_wrapped_symbols.cc" ]
+ }
} else if (is_mac || is_ios) {
sources += [
"allocator/allocator_shim_default_dispatch_to_mac_zoned_malloc.cc",
@@ -1568,8 +1536,6 @@ jumbo_component("base") {
"files/file_posix.cc",
"files/file_util_posix.cc",
"files/memory_mapped_file_posix.cc",
- "fuchsia/default_context.cc",
- "fuchsia/default_context.h",
"fuchsia/default_job.cc",
"fuchsia/default_job.h",
"fuchsia/file_utils.cc",
@@ -1580,8 +1546,10 @@ jumbo_component("base") {
"fuchsia/fuchsia_logging.h",
"fuchsia/intl_profile_watcher.cc",
"fuchsia/intl_profile_watcher.h",
- "fuchsia/scoped_service_binding.cc",
+ "fuchsia/process_context.cc",
+ "fuchsia/process_context.h",
"fuchsia/scoped_service_binding.h",
+ "fuchsia/scoped_service_publisher.h",
"fuchsia/service_provider_impl.cc",
"fuchsia/service_provider_impl.h",
"fuchsia/startup_context.cc",
@@ -1639,6 +1607,7 @@ jumbo_component("base") {
"//third_party/fuchsia-sdk/sdk/pkg/fdio",
"//third_party/fuchsia-sdk/sdk/pkg/fidl_cpp",
"//third_party/fuchsia-sdk/sdk/pkg/sys_cpp",
+ "//third_party/fuchsia-sdk/sdk/pkg/vfs_cpp",
"//third_party/fuchsia-sdk/sdk/pkg/zx",
]
@@ -1648,8 +1617,8 @@ jumbo_component("base") {
"//third_party/fuchsia-sdk/sdk/pkg/async-loop-cpp",
"//third_party/fuchsia-sdk/sdk/pkg/async-loop-default",
"//third_party/fuchsia-sdk/sdk/pkg/fidl",
+ "//third_party/fuchsia-sdk/sdk/pkg/sys_inspect_cpp",
"//third_party/fuchsia-sdk/sdk/pkg/syslog",
- "//third_party/fuchsia-sdk/sdk/pkg/vfs_cpp",
"//third_party/icu",
]
}
@@ -1766,6 +1735,8 @@ jumbo_component("base") {
# Add stuff that doesn't work in NaCl.
sources += [
# PartitionAlloc uses SpinLock, which doesn't work in NaCl (see below).
+ "allocator/partition_allocator/address_pool_manager.cc",
+ "allocator/partition_allocator/address_pool_manager.h",
"allocator/partition_allocator/address_space_randomization.cc",
"allocator/partition_allocator/address_space_randomization.h",
"allocator/partition_allocator/memory_reclaimer.cc",
@@ -1776,9 +1747,14 @@ jumbo_component("base") {
"allocator/partition_allocator/page_allocator.cc",
"allocator/partition_allocator/page_allocator.h",
"allocator/partition_allocator/page_allocator_internal.h",
+ "allocator/partition_allocator/partition_address_space.cc",
+ "allocator/partition_allocator/partition_address_space.h",
"allocator/partition_allocator/partition_alloc.cc",
"allocator/partition_allocator/partition_alloc.h",
+ "allocator/partition_allocator/partition_alloc_check.h",
"allocator/partition_allocator/partition_alloc_constants.h",
+ "allocator/partition_allocator/partition_alloc_features.cc",
+ "allocator/partition_allocator/partition_alloc_features.h",
"allocator/partition_allocator/partition_alloc_forward.h",
"allocator/partition_allocator/partition_bucket.cc",
"allocator/partition_allocator/partition_bucket.h",
@@ -1789,8 +1765,6 @@ jumbo_component("base") {
"allocator/partition_allocator/partition_oom.h",
"allocator/partition_allocator/partition_page.cc",
"allocator/partition_allocator/partition_page.h",
- "allocator/partition_allocator/partition_root_base.cc",
- "allocator/partition_allocator/partition_root_base.h",
"allocator/partition_allocator/random.cc",
"allocator/partition_allocator/random.h",
"allocator/partition_allocator/spin_lock.cc",
@@ -1836,10 +1810,7 @@ jumbo_component("base") {
# so exclude the NTSecAPI.h one.
jumbo_excluded_sources = [ "rand_util_win.cc" ]
- deps += [
- "//base/trace_event/etw_manifest:chrome_events_win",
- "//base/win:base_win_buildflags",
- ]
+ deps += [ "//base/win:base_win_buildflags" ]
data_deps += [ "//build/win:runtime_libs" ]
@@ -1884,6 +1855,8 @@ jumbo_component("base") {
"message_loop/message_pump_kqueue.cc",
"message_loop/message_pump_kqueue.h",
"power_monitor/power_monitor_device_source_mac.mm",
+ "power_monitor/thermal_state_observer_mac.h",
+ "power_monitor/thermal_state_observer_mac.mm",
"system/sys_info_mac.mm",
"time/time_conversion_posix.cc",
"time/time_exploded_posix.cc",
@@ -2093,6 +2066,104 @@ jumbo_component("base") {
]
}
+ if (enable_base_tracing) {
+ sources += [
+ "trace_event/auto_open_close_event.h",
+ "trace_event/blame_context.cc",
+ "trace_event/blame_context.h",
+ "trace_event/builtin_categories.cc",
+ "trace_event/builtin_categories.h",
+ "trace_event/category_registry.cc",
+ "trace_event/category_registry.h",
+ "trace_event/event_name_filter.cc",
+ "trace_event/event_name_filter.h",
+ "trace_event/heap_profiler.h",
+ "trace_event/heap_profiler_event_filter.cc",
+ "trace_event/heap_profiler_event_filter.h",
+ "trace_event/log_message.cc",
+ "trace_event/log_message.h",
+ "trace_event/malloc_dump_provider.cc",
+ "trace_event/malloc_dump_provider.h",
+ "trace_event/memory_allocator_dump.cc",
+ "trace_event/memory_allocator_dump.h",
+ "trace_event/memory_dump_manager.cc",
+ "trace_event/memory_dump_manager.h",
+ "trace_event/memory_dump_manager_test_utils.h",
+ "trace_event/memory_dump_provider.h",
+ "trace_event/memory_dump_provider_info.cc",
+ "trace_event/memory_dump_provider_info.h",
+ "trace_event/memory_dump_request_args.cc",
+ "trace_event/memory_dump_request_args.h",
+ "trace_event/memory_dump_scheduler.cc",
+ "trace_event/memory_dump_scheduler.h",
+ "trace_event/memory_infra_background_allowlist.cc",
+ "trace_event/memory_infra_background_allowlist.h",
+ "trace_event/memory_usage_estimator.cc",
+ "trace_event/memory_usage_estimator.h",
+ "trace_event/process_memory_dump.cc",
+ "trace_event/process_memory_dump.h",
+ "trace_event/thread_instruction_count.cc",
+ "trace_event/thread_instruction_count.h",
+ "trace_event/trace_arguments.cc",
+ "trace_event/trace_arguments.h",
+ "trace_event/trace_buffer.cc",
+ "trace_event/trace_buffer.h",
+ "trace_event/trace_category.h",
+ "trace_event/trace_config.cc",
+ "trace_event/trace_config.h",
+ "trace_event/trace_config_category_filter.cc",
+ "trace_event/trace_config_category_filter.h",
+ "trace_event/trace_event.h",
+ "trace_event/trace_event_filter.cc",
+ "trace_event/trace_event_filter.h",
+ "trace_event/trace_event_impl.cc",
+ "trace_event/trace_event_impl.h",
+ "trace_event/trace_event_memory_overhead.cc",
+ "trace_event/trace_event_memory_overhead.h",
+ "trace_event/trace_log.cc",
+ "trace_event/trace_log.h",
+ "trace_event/trace_log_constants.cc",
+ "trace_event/traced_value.cc",
+ "trace_event/traced_value.h",
+ "trace_event/tracing_agent.cc",
+ "trace_event/tracing_agent.h",
+ "trace_event/typed_macros.h",
+ "trace_event/typed_macros_embedder_support.h",
+ "trace_event/typed_macros_internal.cc",
+ "trace_event/typed_macros_internal.h",
+ ]
+
+ public_deps += [ "//third_party/perfetto:libperfetto" ]
+
+ deps += [ "//third_party/perfetto/include/perfetto/protozero" ]
+
+ all_dependent_configs += [ "//third_party/perfetto/gn:public_config" ]
+
+ if (is_win) {
+ sources += [
+ "trace_event/trace_event_etw_export_win.cc",
+ "trace_event/trace_event_etw_export_win.h",
+ "trace_event/trace_logging_minimal_win.cc",
+ "trace_event/trace_logging_minimal_win.h",
+ ]
+ }
+
+ if (is_android) {
+ sources += [
+ "trace_event/cpufreq_monitor_android.cc",
+ "trace_event/cpufreq_monitor_android.h",
+ "trace_event/java_heap_dump_provider_android.cc",
+ "trace_event/java_heap_dump_provider_android.h",
+ "trace_event/trace_event_android.cc",
+ ]
+ }
+ } else {
+ sources += [
+ "trace_event/trace_event_stub.cc",
+ "trace_event/trace_event_stub.h",
+ ]
+ }
+
if (using_sanitizer) {
data += [ "//tools/valgrind/asan/" ]
if (is_win) {
@@ -2139,6 +2210,8 @@ buildflag_header("debugging_buildflags") {
"CAN_UNWIND_WITH_FRAME_POINTERS=$can_unwind_with_frame_pointers",
"UNSAFE_DEVELOPER_BUILD=$is_unsafe_developer_build",
"CAN_UNWIND_WITH_CFI_TABLE=$can_unwind_with_cfi_table",
+ "ENABLE_ARM_CFI_TABLE=$enable_arm_cfi_table",
+ "EXCLUDE_UNWIND_TABLES=$exclude_unwind_tables",
"ENABLE_GDBINIT_WARNING=$enable_gdbinit_warning",
"ENABLE_LLDBINIT_WARNING=$enable_lldbinit_warning",
]
@@ -2154,7 +2227,8 @@ buildflag_header("orderfile_buildflags") {
header = "orderfile_buildflags.h"
header_dir = "base/android/orderfile"
using_order_profiling = is_android && use_order_profiling
- using_devtools_dumping = is_android && devtools_instrumentation_dumping
+ using_devtools_dumping =
+ is_android && devtools_instrumentation_dumping && enable_base_tracing
flags = [
"DEVTOOLS_INSTRUMENTATION_DUMPING=$using_devtools_dumping",
@@ -2208,6 +2282,12 @@ buildflag_header("sanitizer_buildflags") {
flags = [ "IS_HWASAN=$is_hwasan" ]
}
+buildflag_header("tracing_buildflags") {
+ header = "tracing_buildflags.h"
+
+ flags = [ "ENABLE_BASE_TRACING=$enable_base_tracing" ]
+}
+
# This is the subset of files from base that should not be used with a dynamic
# library. Note that this library cannot depend on base because base depends on
# base_static.
@@ -2418,19 +2498,25 @@ if ((is_win && (current_cpu == "x64" || current_cpu == "arm64")) || is_mac ||
}
}
-if (is_android && (current_cpu == "arm" || current_cpu == "arm64")) {
+if (is_android) {
source_set("native_unwinder_android") {
+ # This target is intended to be used only within the stack_unwinder dynamic
+ # feature module, to avoid binary size increase in Chrome due to the
+ # libunwindstack dependency. The additional :* visibility is needed to allow
+ # use by base test targets.
+ visibility = [
+ ":*",
+ "//chrome/android/modules/stack_unwinder/internal:*",
+ ]
sources = [
"profiler/native_unwinder_android.cc",
"profiler/native_unwinder_android.h",
- "profiler/unwindstack_internal_android.cc",
- "profiler/unwindstack_internal_android.h",
]
- include_dirs = [ "//third_party/libunwindstack/src/libunwindstack/include" ]
-
- public_deps = [ ":base" ]
- deps = [ "//third_party/libunwindstack" ]
+ public_deps = [
+ ":base",
+ "//third_party/libunwindstack",
+ ]
}
}
@@ -2445,6 +2531,9 @@ source_set("base_stack_sampling_profiler_test_util") {
"//base/test:test_support",
"//testing/gtest",
]
+ if (is_android) {
+ deps += [ ":native_unwinder_android" ]
+ }
}
bundle_data("base_unittests_bundle_data") {
@@ -2623,7 +2712,6 @@ test("base_unittests") {
"memory/shared_memory_region_unittest.cc",
"memory/singleton_unittest.cc",
"memory/weak_ptr_unittest.cc",
- "message_loop/message_loop_unittest.cc",
"message_loop/message_pump_glib_unittest.cc",
"message_loop/message_pump_unittest.cc",
"message_loop/work_id_provider_unittest.cc",
@@ -2657,6 +2745,7 @@ test("base_unittests") {
"parameter_pack_unittest.cc",
"path_service_unittest.cc",
"pickle_unittest.cc",
+ "power_monitor/power_monitor_device_source_unittest.cc",
"power_monitor/power_monitor_unittest.cc",
"process/environment_internal_unittest.cc",
"process/memory_unittest.cc",
@@ -2684,6 +2773,7 @@ test("base_unittests") {
"sequenced_task_runner_unittest.cc",
"stl_util_unittest.cc",
"strings/char_traits_unittest.cc",
+ "strings/no_trigraphs_unittest.cc",
"strings/nullable_string16_unittest.cc",
"strings/pattern_unittest.cc",
"strings/safe_sprintf_unittest.cc",
@@ -2723,6 +2813,7 @@ test("base_unittests") {
"task/sequence_manager/task_queue_selector_unittest.cc",
"task/sequence_manager/task_queue_unittest.cc",
"task/sequence_manager/test/mock_time_message_pump_unittest.cc",
+ "task/sequence_manager/thread_controller_power_monitor_unittest.cc",
"task/sequence_manager/thread_controller_with_message_pump_impl_unittest.cc",
"task/sequence_manager/time_domain_unittest.cc",
"task/sequence_manager/work_deduplicator_unittest.cc",
@@ -2757,7 +2848,10 @@ test("base_unittests") {
"template_util_unittest.cc",
"test/gmock_callback_support_unittest.cc",
"test/gmock_move_support_unittest.cc",
+ "test/gtest_links_unittest.cc",
+ "test/gtest_xml_unittest_result_printer_unittest.cc",
"test/launcher/test_launcher_unittest.cc",
+ "test/launcher/test_results_tracker_unittest.cc",
"test/metrics/histogram_enum_reader_unittest.cc",
"test/metrics/histogram_tester_unittest.cc",
"test/metrics/user_action_tester_unittest.cc",
@@ -2770,7 +2864,6 @@ test("base_unittests") {
"test/test_mock_time_task_runner_unittest.cc",
"test/test_pending_task_unittest.cc",
"test/test_waitable_event_unittest.cc",
- "test/trace_event_analyzer_unittest.cc",
"thread_annotations_unittest.cc",
"threading/hang_watcher_unittest.cc",
"threading/platform_thread_unittest.cc",
@@ -2800,22 +2893,6 @@ test("base_unittests") {
"timer/timer_unittest.cc",
"token_unittest.cc",
"tools_sanity_unittest.cc",
- "trace_event/blame_context_unittest.cc",
- "trace_event/event_name_filter_unittest.cc",
- "trace_event/heap_profiler_allocation_context_tracker_unittest.cc",
- "trace_event/memory_allocator_dump_unittest.cc",
- "trace_event/memory_dump_manager_unittest.cc",
- "trace_event/memory_dump_scheduler_unittest.cc",
- "trace_event/memory_infra_background_allowlist_unittest.cc",
- "trace_event/memory_usage_estimator_unittest.cc",
- "trace_event/process_memory_dump_unittest.cc",
- "trace_event/trace_arguments_unittest.cc",
- "trace_event/trace_category_unittest.cc",
- "trace_event/trace_config_unittest.cc",
- "trace_event/trace_event_filter_test_utils.cc",
- "trace_event/trace_event_filter_test_utils.h",
- "trace_event/trace_event_unittest.cc",
- "trace_event/traced_value_unittest.cc",
"traits_bag_unittest.cc",
"tuple_unittest.cc",
"unguessable_token_unittest.cc",
@@ -2873,6 +2950,8 @@ test("base_unittests") {
"win/com_init_check_hook_unittest.cc",
"win/com_init_util_unittest.cc",
"win/core_winrt_util_unittest.cc",
+ "win/dispatch_stub.cc",
+ "win/dispatch_stub.h",
"win/dllmain.cc",
"win/embedded_i18n/language_selector_unittest.cc",
"win/enum_variant_unittest.cc",
@@ -2897,7 +2976,7 @@ test("base_unittests") {
"win/scoped_winrt_initializer_unittest.cc",
"win/shortcut_unittest.cc",
"win/startup_information_unittest.cc",
- "win/typed_event_handler_unittest.cc",
+ "win/variant_util_unittest.cc",
"win/vector_unittest.cc",
"win/win_includes_unittest.cc",
"win/win_util_unittest.cc",
@@ -2929,6 +3008,7 @@ test("base_unittests") {
"mac/scoped_objc_class_swizzler_unittest.mm",
"mac/scoped_sending_event_unittest.mm",
"message_loop/message_pump_mac_unittest.mm",
+ "power_monitor/thermal_state_observer_mac_unittest.mm",
"process/memory_unittest_mac.h",
"process/memory_unittest_mac.mm",
"strings/sys_string_conversions_mac_unittest.mm",
@@ -2962,7 +3042,7 @@ test("base_unittests") {
# generated from debug info in the binary. Removing "default_symbols" and
# adding symbols config removes the "strip_debug" config that strips the
# debug info, on base unittests apk.
- if (can_unwind_with_cfi_table) {
+ if (can_unwind_with_cfi_table || enable_arm_cfi_table) {
configs -= [ "//build/config/compiler:default_symbols" ]
if (symbol_level == 2) {
configs += [ "//build/config/compiler:symbols" ]
@@ -2970,6 +3050,8 @@ test("base_unittests") {
configs += [ "//build/config/compiler:minimal_symbols" ]
}
add_unwind_tables_in_apk = true
+ }
+ if (can_unwind_with_cfi_table) {
sources += [ "trace_event/cfi_backtrace_android_unittest.cc" ]
}
if (current_cpu == "arm") {
@@ -2978,8 +3060,6 @@ test("base_unittests") {
if (!exclude_unwind_tables &&
(current_cpu == "arm" || current_cpu == "arm64")) {
sources += [ "profiler/native_unwinder_android_unittest.cc" ]
- include_dirs =
- [ "//third_party/libunwindstack/src/libunwindstack/include" ]
deps += [
":base_profiler_test_support_java",
":base_profiler_test_support_jni_headers",
@@ -3004,15 +3084,16 @@ test("base_unittests") {
"android/sys_utils_unittest.cc",
"android/unguessable_token_android_unittest.cc",
"os_compat_android_unittest.cc",
- "trace_event/cpufreq_monitor_android_unittest.cc",
- "trace_event/java_heap_dump_provider_android_unittest.cc",
]
# Android does not use test_launcher to run gtests.
sources -= [
"process/process_unittest.cc",
"process/process_util_unittest.cc",
+ "test/gtest_links_unittest.cc",
+ "test/gtest_xml_unittest_result_printer_unittest.cc",
"test/launcher/test_launcher_unittest.cc",
+ "test/launcher/test_results_tracker_unittest.cc",
]
deps += [
":base_java",
@@ -3047,7 +3128,10 @@ test("base_unittests") {
"process/process_util_unittest.cc",
"sync_socket_unittest.cc",
"synchronization/waitable_event_watcher_unittest.cc",
+ "test/gtest_links_unittest.cc",
+ "test/gtest_xml_unittest_result_printer_unittest.cc",
"test/launcher/test_launcher_unittest.cc",
+ "test/launcher/test_results_tracker_unittest.cc",
]
sources += [
@@ -3069,6 +3153,7 @@ test("base_unittests") {
if (use_partition_alloc) {
sources += [
+ "allocator/partition_allocator/address_pool_manager_unittest.cc",
"allocator/partition_allocator/address_space_randomization_unittest.cc",
"allocator/partition_allocator/memory_reclaimer_unittest.cc",
"allocator/partition_allocator/page_allocator_unittest.cc",
@@ -3127,6 +3212,7 @@ test("base_unittests") {
"fuchsia/filtered_service_directory_unittest.cc",
"fuchsia/intl_profile_watcher_unittest.cc",
"fuchsia/scoped_service_binding_unittest.cc",
+ "fuchsia/scoped_service_publisher_unittest.cc",
"fuchsia/service_directory_test_base.cc",
"fuchsia/service_directory_test_base.h",
"fuchsia/service_provider_impl_unittest.cc",
@@ -3164,7 +3250,6 @@ test("base_unittests") {
sources += [
"debug/elf_reader_unittest.cc",
"debug/proc_maps_linux_unittest.cc",
- "trace_event/trace_event_android_unittest.cc",
]
}
@@ -3188,6 +3273,37 @@ test("base_unittests") {
}
}
+ if (enable_base_tracing) {
+ sources += [
+ "test/trace_event_analyzer_unittest.cc",
+ "trace_event/blame_context_unittest.cc",
+ "trace_event/event_name_filter_unittest.cc",
+ "trace_event/heap_profiler_allocation_context_tracker_unittest.cc",
+ "trace_event/memory_allocator_dump_unittest.cc",
+ "trace_event/memory_dump_manager_unittest.cc",
+ "trace_event/memory_dump_scheduler_unittest.cc",
+ "trace_event/memory_infra_background_allowlist_unittest.cc",
+ "trace_event/memory_usage_estimator_unittest.cc",
+ "trace_event/process_memory_dump_unittest.cc",
+ "trace_event/trace_arguments_unittest.cc",
+ "trace_event/trace_category_unittest.cc",
+ "trace_event/trace_config_unittest.cc",
+ "trace_event/trace_event_filter_test_utils.cc",
+ "trace_event/trace_event_filter_test_utils.h",
+ "trace_event/trace_event_unittest.cc",
+ "trace_event/traced_value_unittest.cc",
+ "trace_event/typed_macros_unittest.cc",
+ ]
+
+ if (is_android) {
+ sources += [
+ "trace_event/cpufreq_monitor_android_unittest.cc",
+ "trace_event/java_heap_dump_provider_android_unittest.cc",
+ "trace_event/trace_event_android_unittest.cc",
+ ]
+ }
+ }
+
# TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
configs += [
"//build/config/compiler:no_size_t_to_int_warning",
@@ -3305,7 +3421,6 @@ if (is_android) {
}
android_library("base_java") {
- skip_jetify = true
srcjar_deps = [
":base_android_java_enums_srcjar",
":base_build_config_gen",
@@ -3376,6 +3491,9 @@ if (is_android) {
"android/java/src/org/chromium/base/TimezoneUtils.java",
"android/java/src/org/chromium/base/TraceEvent.java",
"android/java/src/org/chromium/base/UnguessableToken.java",
+ "android/java/src/org/chromium/base/UnownedUserData.java",
+ "android/java/src/org/chromium/base/UnownedUserDataHost.java",
+ "android/java/src/org/chromium/base/UnownedUserDataKey.java",
"android/java/src/org/chromium/base/UserData.java",
"android/java/src/org/chromium/base/UserDataHost.java",
"android/java/src/org/chromium/base/annotations/AccessedByNative.java",
@@ -3437,6 +3555,10 @@ if (is_android) {
"android/java/src/org/chromium/base/process_launcher/ChildProcessLauncher.java",
"android/java/src/org/chromium/base/process_launcher/ChildProcessService.java",
"android/java/src/org/chromium/base/process_launcher/ChildProcessServiceDelegate.java",
+ "android/java/src/org/chromium/base/process_launcher/ChildServiceConnection.java",
+ "android/java/src/org/chromium/base/process_launcher/ChildServiceConnectionDelegate.java",
+ "android/java/src/org/chromium/base/process_launcher/ChildServiceConnectionFactory.java",
+ "android/java/src/org/chromium/base/process_launcher/ChildServiceConnectionImpl.java",
"android/java/src/org/chromium/base/process_launcher/FileDescriptorInfo.java",
"android/java/src/org/chromium/base/supplier/DestroyableObservableSupplier.java",
"android/java/src/org/chromium/base/supplier/ObservableSupplier.java",
@@ -3499,6 +3621,7 @@ if (is_android) {
"android/javatests/src/org/chromium/base/AdvancedMockContextTest.java",
"android/javatests/src/org/chromium/base/ApiCompatibilityUtilsTest.java",
"android/javatests/src/org/chromium/base/AssertsTest.java",
+ "android/javatests/src/org/chromium/base/CommandLineFlagsTest.java",
"android/javatests/src/org/chromium/base/CommandLineInitUtilTest.java",
"android/javatests/src/org/chromium/base/CommandLineTest.java",
"android/javatests/src/org/chromium/base/EarlyTraceEventTest.java",
@@ -3536,7 +3659,6 @@ if (is_android) {
}
android_library("base_java_test_support") {
- skip_jetify = true
testonly = true
deps = [
":base_java",
@@ -3564,7 +3686,6 @@ if (is_android) {
"test/android/javatests/src/org/chromium/base/test/BaseChromiumRunnerCommon.java",
"test/android/javatests/src/org/chromium/base/test/BaseJUnit4ClassRunner.java",
"test/android/javatests/src/org/chromium/base/test/BaseJUnit4TestRule.java",
- "test/android/javatests/src/org/chromium/base/test/BaseTestResult.java",
"test/android/javatests/src/org/chromium/base/test/BundleTestRule.java",
"test/android/javatests/src/org/chromium/base/test/DestroyActivitiesRule.java",
"test/android/javatests/src/org/chromium/base/test/LifetimeAssertRule.java",
@@ -3594,6 +3715,7 @@ if (is_android) {
"test/android/javatests/src/org/chromium/base/test/util/AdvancedMockContext.java",
"test/android/javatests/src/org/chromium/base/test/util/AnnotationProcessingUtils.java",
"test/android/javatests/src/org/chromium/base/test/util/AnnotationRule.java",
+ "test/android/javatests/src/org/chromium/base/test/util/Batch.java",
"test/android/javatests/src/org/chromium/base/test/util/CallbackHelper.java",
"test/android/javatests/src/org/chromium/base/test/util/CloseableOnMainThread.java",
"test/android/javatests/src/org/chromium/base/test/util/CommandLineFlags.java",
@@ -3615,7 +3737,6 @@ if (is_android) {
"test/android/javatests/src/org/chromium/base/test/util/MinAndroidSdkLevelSkipCheck.java",
"test/android/javatests/src/org/chromium/base/test/util/Restriction.java",
"test/android/javatests/src/org/chromium/base/test/util/RestrictionSkipCheck.java",
- "test/android/javatests/src/org/chromium/base/test/util/RetryOnFailure.java",
"test/android/javatests/src/org/chromium/base/test/util/ScalableTimeout.java",
"test/android/javatests/src/org/chromium/base/test/util/SkipCheck.java",
"test/android/javatests/src/org/chromium/base/test/util/TestFileUtil.java",
@@ -3633,7 +3754,7 @@ if (is_android) {
":base_java",
":base_java_test_support",
]
- sources = [ "test/android/javatests/src/org/chromium/base/test/TestChildProcessConnection.java" ]
+ sources = [ "test/android/javatests/src/org/chromium/base/process_launcher/TestChildProcessConnection.java" ]
}
android_library("base_junit_test_support") {
@@ -3655,15 +3776,14 @@ if (is_android) {
"//testing/android/junit:junit_test_support",
"//third_party/android_deps:androidx_test_monitor_java",
"//third_party/android_deps:androidx_test_uiautomator_uiautomator_java",
+ "//third_party/android_deps:robolectric_all_java",
"//third_party/android_support_test_runner:runner_java",
"//third_party/hamcrest:hamcrest_java",
"//third_party/junit:junit",
- "//third_party/robolectric:robolectric_all_java",
]
}
junit_binary("base_junit_tests") {
- skip_jetify = true
sources = [
"android/junit/src/org/chromium/base/ApplicationStatusTest.java",
"android/junit/src/org/chromium/base/CallbackControllerTest.java",
@@ -3674,6 +3794,8 @@ if (is_android) {
"android/junit/src/org/chromium/base/NonThreadSafeTest.java",
"android/junit/src/org/chromium/base/PiiEliderTest.java",
"android/junit/src/org/chromium/base/PromiseTest.java",
+ "android/junit/src/org/chromium/base/UnownedUserDataHostTest.java",
+ "android/junit/src/org/chromium/base/UnownedUserDataKeyTest.java",
"android/junit/src/org/chromium/base/memory/MemoryPressureMonitorTest.java",
"android/junit/src/org/chromium/base/metrics/CachingUmaRecorderTest.java",
"android/junit/src/org/chromium/base/process_launcher/ChildConnectionAllocatorTest.java",
@@ -3716,8 +3838,10 @@ if (is_android) {
"memory/memory_pressure_listener.h",
"metrics/histogram_base.h",
"task/task_traits.h",
- "trace_event/trace_config.h",
]
+ if (enable_base_tracing) {
+ sources += [ "trace_event/trace_config.h" ]
+ }
}
generate_jni("base_profiler_test_support_jni_headers") {
@@ -3747,7 +3871,6 @@ if (is_android) {
}
android_library("base_java_unittest_support") {
- skip_jetify = true
testonly = true
deps = [ ":base_java" ]
sources = [
diff --git a/chromium/base/DEPS b/chromium/base/DEPS
index 477a867eafb..d88e0e50c0d 100644
--- a/chromium/base/DEPS
+++ b/chromium/base/DEPS
@@ -3,10 +3,12 @@ include_rules = [
"+third_party/apple_apsl",
"+third_party/boringssl/src/include",
"+third_party/ced",
+ "+third_party/libunwindstack/src/libunwindstack/include",
"+third_party/lss",
"+third_party/modp_b64",
+ "+third_party/perfetto/include",
+ "+third_party/perfetto/protos/perfetto",
"+third_party/tcmalloc",
- "+third_party/libunwindstack/src/libunwindstack/include",
# These are implicitly brought in from the root, and we don't want them.
"-ipc",
diff --git a/chromium/base/OWNERS b/chromium/base/OWNERS
index 327118745fd..7895973cca2 100644
--- a/chromium/base/OWNERS
+++ b/chromium/base/OWNERS
@@ -37,6 +37,6 @@ per-file rand_util*=file://ipc/SECURITY_OWNERS
per-file security_unittest.cc=jln@chromium.org
# For Value:
-per-file values*=jdoerrie@chromium.org
+per-file value*=jdoerrie@chromium.org
# COMPONENT: Internals>Core
diff --git a/chromium/base/PRESUBMIT.py b/chromium/base/PRESUBMIT.py
index 7996eab3b87..1a08a7f188a 100644
--- a/chromium/base/PRESUBMIT.py
+++ b/chromium/base/PRESUBMIT.py
@@ -32,12 +32,60 @@ def _CheckNoInterfacesInBase(input_api, output_api):
return []
+def _CheckNoTraceEventInclude(input_api, output_api):
+ """Verify that //base includes base_tracing.h instead of trace event headers.
+
+ Checks that files outside trace event implementation include the
+ base_tracing.h header instead of specific trace event implementation headers
+ to maintain compatibility with the gn flag "enable_base_tracing = false".
+ """
+ discouraged_includes = [
+ r'^#include "base/trace_event/blame_context.h"$',
+ r'^#include "base/trace_event/memory_allocator_dump_guid.h"$',
+ r'^#include "base/trace_event/memory_dump_provider.h"$',
+ r'^#include "base/trace_event/trace_event.h"$',
+ r'^#include "base/trace_event/traced_value.h"$',
+ ]
+
+ white_list = [
+ r".*\.(h|cc|mm)$",
+ ]
+ black_list = [
+ r".*[\\/]trace_event[\\/].*",
+ r".*[\\/]tracing[\\/].*",
+ ]
+
+ def FilterFile(affected_file):
+ return input_api.FilterSourceFile(
+ affected_file,
+ white_list=white_list,
+ black_list=black_list)
+
+ locations = []
+ for f in input_api.AffectedSourceFiles(FilterFile):
+ for line_num, line in f.ChangedContents():
+ for include in discouraged_includes:
+ if input_api.re.search(include, line):
+ locations.append(" %s:%d" % (f.LocalPath(), line_num))
+ break
+
+ if locations:
+ return [ output_api.PresubmitPromptWarning(
+ 'Consider replacing includes to trace_event implementation headers\n' +
+ 'in //base with "base/trace_event/base_tracing.h" and/or verify\n' +
+ 'that base_unittests still passes with gn arg\n' +
+ 'enable_base_tracing = false.\n' + '\n'.join(locations)) ]
+ return []
+
+
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
results = []
results.extend(_CheckNoInterfacesInBase(input_api, output_api))
+ results.extend(_CheckNoTraceEventInclude(input_api, output_api))
return results
+
def CheckChangeOnUpload(input_api, output_api):
results = []
results.extend(_CommonChecks(input_api, output_api))
diff --git a/chromium/base/README.md b/chromium/base/README.md
index da452ce9c70..7f6240c054b 100644
--- a/chromium/base/README.md
+++ b/chromium/base/README.md
@@ -7,10 +7,9 @@ Please add to it!
Chromium is a very mature project. Most things that are generally useful are
already here and things not here aren't generally useful.
-Base is pulled into many projects. For example, various ChromeOS daemons. So
-the bar for adding stuff is that it must have demonstrated wide
+The bar for adding stuff to base is that it must have demonstrated wide
applicability. Prefer to add things closer to where they're used (i.e. "not
-base"), and pull into base only when needed. In a project our size,
+base"), and pull into base only when needed. In a project our size,
sometimes even duplication is OK and inevitable.
Adding a new logging macro `DPVELOG_NE` is not more clear than just
@@ -21,6 +20,13 @@ If the code in question does not need to be used inside base, but will have
multiple consumers across the codebase, consider placing it in a new directory
under components/ instead.
+base is written for the Chromium project and is not intended to be used
+outside it. Using base outside of src.git is explicitly not supported,
+and base makes no guarantees about API (or even ABI) stability (like all
+other code in Chromium). New code that depends on base/ must be in
+src.git. Code that's not in src.git but pulled in through DEPS (for
+example, v8) cannot use base.
+
## Qualifications for being in //base OWNERS
* interest and ability to learn low level/high detail/complex c++ stuff
* inclination to always ask why and understand everything (including external
diff --git a/chromium/base/allocator/BUILD.gn b/chromium/base/allocator/BUILD.gn
index eb808ba556c..c6649c53e82 100644
--- a/chromium/base/allocator/BUILD.gn
+++ b/chromium/base/allocator/BUILD.gn
@@ -276,12 +276,14 @@ if (use_allocator == "tcmalloc") {
buildflag_header("buildflags") {
header = "buildflags.h"
- flags = [ "USE_ALLOCATOR_SHIM=$use_allocator_shim" ]
- if (use_allocator == "tcmalloc") {
- flags += [ "USE_TCMALLOC=1" ]
- } else {
- flags += [ "USE_TCMALLOC=0" ]
- }
+ _use_partition_alloc = use_allocator == "partition"
+ _use_tcmalloc = use_allocator == "tcmalloc"
+
+ flags = [
+ "USE_ALLOCATOR_SHIM=$use_allocator_shim",
+ "USE_TCMALLOC=$_use_tcmalloc",
+ "USE_PARTITION_ALLOC_AS_MALLOC=$_use_partition_alloc",
+ ]
}
# Used to shim malloc symbols on Android. see //base/allocator/README.md.
diff --git a/chromium/base/allocator/allocator.gni b/chromium/base/allocator/allocator.gni
index 148e37d9106..8e23e49c162 100644
--- a/chromium/base/allocator/allocator.gni
+++ b/chromium/base/allocator/allocator.gni
@@ -33,7 +33,11 @@ declare_args() {
use_allocator_shim = _default_use_allocator_shim
}
-assert(use_allocator == "none" || use_allocator == "tcmalloc")
+assert(use_allocator == "none" || use_allocator == "tcmalloc" ||
+ use_allocator == "partition")
+
+# Don't ship this configuration, not ready yet.
+assert(!(use_allocator == "partition" && is_official_build))
assert(!is_win || use_allocator == "none", "Tcmalloc doesn't work on Windows.")
assert(!is_mac || use_allocator == "none", "Tcmalloc doesn't work on macOS.")
diff --git a/chromium/base/allocator/allocator_shim.cc b/chromium/base/allocator/allocator_shim.cc
index 320bca7e168..f19ad6f94d5 100644
--- a/chromium/base/allocator/allocator_shim.cc
+++ b/chromium/base/allocator/allocator_shim.cc
@@ -67,16 +67,8 @@ bool CallNewHandler(size_t size) {
}
inline const base::allocator::AllocatorDispatch* GetChainHead() {
- // TODO(primiano): Just use NoBarrier_Load once crbug.com/593344 is fixed.
- // Unfortunately due to that bug NoBarrier_Load() is mistakenly fully
- // barriered on Linux+Clang, and that causes visible perf regressons.
return reinterpret_cast<const base::allocator::AllocatorDispatch*>(
-#if defined(OS_LINUX) && defined(__clang__)
- *static_cast<const volatile base::subtle::AtomicWord*>(&g_chain_head)
-#else
- base::subtle::NoBarrier_Load(&g_chain_head)
-#endif
- );
+ base::subtle::NoBarrier_Load(&g_chain_head));
}
} // namespace
diff --git a/chromium/base/allocator/allocator_shim_default_dispatch_to_partition_alloc.cc b/chromium/base/allocator/allocator_shim_default_dispatch_to_partition_alloc.cc
new file mode 100644
index 00000000000..07f27d8fd4e
--- /dev/null
+++ b/chromium/base/allocator/allocator_shim_default_dispatch_to_partition_alloc.cc
@@ -0,0 +1,110 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/allocator_shim.h"
+#include "base/allocator/allocator_shim_internals.h"
+#include "base/allocator/partition_allocator/partition_alloc.h"
+#include "base/bits.h"
+#include "base/no_destructor.h"
+
+namespace {
+
+base::ThreadSafePartitionRoot& Allocator() {
+ static base::NoDestructor<base::ThreadSafePartitionRoot> allocator;
+ allocator->Init();
+ return *allocator;
+}
+
+using base::allocator::AllocatorDispatch;
+
+void* PartitionMalloc(const AllocatorDispatch*, size_t size, void* context) {
+ return Allocator().Alloc(size, "");
+}
+
+void* PartitionCalloc(const AllocatorDispatch*,
+ size_t n,
+ size_t size,
+ void* context) {
+ return Allocator().AllocFlags(base::PartitionAllocZeroFill, n * size, "");
+}
+
+void* PartitionMemalign(const AllocatorDispatch*,
+ size_t alignment,
+ size_t size,
+ void* context) {
+ // This is mandated by |posix_memalign()|, so should never fire.
+ //
+ // Note: CHECK() is fine here since we are not called from malloc(), but from
+ // posix_memalign(), so there is no recursion. It is also fine to make aligned
+ // allocations slower, as they are rare.
+ CHECK(base::bits::IsPowerOfTwo(alignment));
+
+ // PartitionAlloc only guarantees alignment for power-of-two sized
+ // allocations. To make sure this applies here, round up the allocation size.
+ size_t size_rounded_up =
+ static_cast<size_t>(1)
+ << (sizeof(size_t) * 8 - base::bits::CountLeadingZeroBits(size - 1));
+
+ void* ptr = Allocator().Alloc(size_rounded_up, "");
+ CHECK_EQ(reinterpret_cast<uintptr_t>(ptr) % alignment, 0ull);
+
+ return ptr;
+}
+
+void* PartitionRealloc(const AllocatorDispatch*,
+ void* address,
+ size_t size,
+ void* context) {
+ return Allocator().Realloc(address, size, "");
+}
+
+void PartitionFree(const AllocatorDispatch*, void* address, void* context) {
+ Allocator().Free(address);
+}
+
+size_t PartitionGetSizeEstimate(const AllocatorDispatch*,
+ void* address,
+ void* context) {
+ // TODO(lizeb): Returns incorrect values for aligned allocations.
+ return base::PartitionAllocGetSize<base::internal::ThreadSafe>(address);
+}
+
+} // namespace
+
+constexpr AllocatorDispatch AllocatorDispatch::default_dispatch = {
+ &PartitionMalloc, /* alloc_function */
+ &PartitionCalloc, /* alloc_zero_initialized_function */
+ &PartitionMemalign, /* alloc_aligned_function */
+ &PartitionRealloc, /* realloc_function */
+ &PartitionFree, /* free_function */
+ &PartitionGetSizeEstimate, /* get_size_estimate_function */
+ nullptr, /* batch_malloc_function */
+ nullptr, /* batch_free_function */
+ nullptr, /* free_definite_size_function */
+ nullptr, /* aligned_malloc_function */
+ nullptr, /* aligned_realloc_function */
+ nullptr, /* aligned_free_function */
+ nullptr, /* next */
+};
+
+// Intercept diagnostics symbols as well, even though they are not part of the
+// unified shim layer.
+//
+// TODO(lizeb): Implement the ones that are doable.
+
+extern "C" {
+
+SHIM_ALWAYS_EXPORT void malloc_stats(void) __THROW {}
+
+SHIM_ALWAYS_EXPORT int mallopt(int cmd, int value) __THROW {
+ return 0;
+}
+
+#ifdef HAVE_STRUCT_MALLINFO
+SHIM_ALWAYS_EXPORT struct mallinfo mallinfo(void) __THROW {
+ return {};
+}
+#endif
+
+} // extern "C"
diff --git a/chromium/base/allocator/malloc_zone_functions_mac.h b/chromium/base/allocator/malloc_zone_functions_mac.h
index a7f55433785..1f2d990e4e1 100644
--- a/chromium/base/allocator/malloc_zone_functions_mac.h
+++ b/chromium/base/allocator/malloc_zone_functions_mac.h
@@ -9,7 +9,7 @@
#include <stddef.h>
#include "base/base_export.h"
-#include "base/logging.h"
+#include "base/immediate_crash.h"
#include "third_party/apple_apsl/malloc.h"
namespace base {
diff --git a/chromium/base/allocator/partition_allocator/PartitionAlloc.md b/chromium/base/allocator/partition_allocator/PartitionAlloc.md
index d7c283c7465..8f025b91d24 100644
--- a/chromium/base/allocator/partition_allocator/PartitionAlloc.md
+++ b/chromium/base/allocator/partition_allocator/PartitionAlloc.md
@@ -27,12 +27,6 @@ example, if a partition has 3 buckets for 64 bytes, 256 bytes, and 1024 bytes,
then PartitionAlloc will satisfy an allocation request for 128 bytes by rounding
it up to 256 bytes and allocating from the second bucket.
-The special allocator class `template <size_t N> class
-SizeSpecificPartitionAllocator` will satisfy allocations only of size
-`kMaxAllocation = N - kAllocationGranularity` or less, and contains buckets for
-all `n * kAllocationGranularity` (n = 1, 2, ..., `kMaxAllocation`). Attempts to
-allocate more than `kMaxAllocation` will fail.
-
## Performance
The current implementation is optimized for the main thread use-case. For
@@ -52,16 +46,16 @@ bucket size is 1MB = 0x100000 which is greater than 1/2 the available space in
a SuperPage meaning it would not be possible to pack even 2 sequential
allocations in a SuperPage.
-`PartitionRootGeneric::Alloc()` acquires a lock for thread safety. (The current
-implementation uses a spin lock on the assumption that thread contention will be
-rare in its callers. The original caller was Blink, where this is generally
-true. Spin locks also have the benefit of simplicity.)
+`PartitionRoot<internal::ThreadSafe>::Alloc()` acquires a lock for thread
+safety. (The current implementation uses a spin lock on the assumption that
+thread contention will be rare in its callers. The original caller was Blink,
+where this is generally true. Spin locks also have the benefit of simplicity.)
Callers can get thread-unsafe performance using a
-`SizeSpecificPartitionAllocator` or otherwise using `PartitionAlloc` (instead of
-`PartitionRootGeneric::Alloc()`). Callers can also arrange for low contention,
-such as by using a dedicated partition for single-threaded, latency-critical
-allocations.
+`PartitionRoot<internal::NotThreadSafe>::Alloc()` or otherwise using
+`PartitionAlloc<internal::NotThreadSafe>`. Callers can also arrange for low
+contention, such as by using a dedicated partition for single-threaded,
+latency-critical allocations.
Because PartitionAlloc guarantees that address space regions used for one
partition are never reused for other partitions, partitions can eat a large
@@ -100,3 +94,20 @@ hence at different addresses. One page can contain only similar-sized objects.
* Partial pointer overwrite of freelist pointer should fault.
* Large allocations have guard pages at the beginning and end.
+
+## Alignment
+
+PartitionAlloc doesn't have explicit support for a `posix_memalign()` type call,
+however it provides some guarantees on the alignment of returned pointers.
+
+All pointers are aligned on the smallest allocation granularity, namely
+`sizeof(void*)`. Additionally, for power-of-two sized allocations, the behavior
+depends on the compilation flags:
+
+* With `DCHECK_IS_ON()`, returned pointers are never guaranteed to be aligned on
+ more than 16 bytes.
+
+* Otherwise, the returned pointer is guaranteed to be aligned on
+ `min(allocation_size, system page size)`.
+
+See the tests in `partition_alloc_unittest.cc` for more details.
diff --git a/chromium/base/allocator/partition_allocator/address_pool_manager.cc b/chromium/base/allocator/partition_allocator/address_pool_manager.cc
new file mode 100644
index 00000000000..4c2a3d0fad9
--- /dev/null
+++ b/chromium/base/allocator/partition_allocator/address_pool_manager.cc
@@ -0,0 +1,223 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/address_pool_manager.h"
+
+#if defined(OS_MACOSX)
+#include <sys/mman.h>
+#endif
+
+#include <algorithm>
+#include <limits>
+
+#include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/allocator/partition_allocator/page_allocator_internal.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
+#include "base/bits.h"
+#include "base/notreached.h"
+#include "base/stl_util.h"
+
+namespace base {
+namespace internal {
+
+#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+
+namespace {
+
+void DecommitPages(void* address, size_t size) {
+#if defined(OS_MACOSX)
+ // MAP_FIXED replaces an existing mapping with a new one, when the address is
+ // already part of a mapping. Since newly-created mappings are guaranteed to
+ // be zero-filled, this has the desired effect. It is only required on macOS,
+ // as on other operating systems, |DecommitSystemPages()| provides the same
+ // behavior.
+ void* ptr = mmap(address, size, PROT_NONE,
+ MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+ PA_CHECK(ptr == address);
+#else
+ SetSystemPagesAccess(address, size, PageInaccessible);
+ DecommitSystemPages(address, size);
+#endif
+}
+
+bool WARN_UNUSED_RESULT CommitPages(void* address, size_t size) {
+#if defined(OS_MACOSX)
+ SetSystemPagesAccess(address, size, PageReadWrite);
+#else
+ if (!RecommitSystemPages(address, size, PageReadWrite))
+ return false;
+ SetSystemPagesAccess(address, size, PageReadWrite);
+#endif
+
+ return true;
+}
+
+} // namespace
+
+constexpr size_t AddressPoolManager::Pool::kMaxBits;
+
+// static
+AddressPoolManager* AddressPoolManager::GetInstance() {
+ static NoDestructor<AddressPoolManager> instance;
+ return instance.get();
+}
+
+pool_handle AddressPoolManager::Add(uintptr_t ptr, size_t length) {
+ PA_DCHECK(!(ptr & kSuperPageOffsetMask));
+ PA_DCHECK(!((ptr + length) & kSuperPageOffsetMask));
+
+ for (pool_handle i = 0; i < base::size(pools_); ++i) {
+ if (!pools_[i].IsInitialized()) {
+ pools_[i].Initialize(ptr, length);
+ return i + 1;
+ }
+ }
+ NOTREACHED();
+ return 0;
+}
+
+void AddressPoolManager::ResetForTesting() {
+ for (pool_handle i = 0; i < base::size(pools_); ++i)
+ pools_[i].Reset();
+}
+
+void AddressPoolManager::Remove(pool_handle handle) {
+ Pool* pool = GetPool(handle);
+ PA_DCHECK(pool->IsInitialized());
+ pool->Reset();
+}
+
+char* AddressPoolManager::Alloc(pool_handle handle, size_t length) {
+ Pool* pool = GetPool(handle);
+ char* ptr = reinterpret_cast<char*>(pool->FindChunk(length));
+
+ if (UNLIKELY(!ptr) || !CommitPages(ptr, length))
+ return nullptr;
+ return ptr;
+}
+
+void AddressPoolManager::Free(pool_handle handle, void* ptr, size_t length) {
+ PA_DCHECK(0 < handle && handle <= kNumPools);
+ Pool* pool = GetPool(handle);
+ PA_DCHECK(pool->IsInitialized());
+ DecommitPages(ptr, length);
+ pool->FreeChunk(reinterpret_cast<uintptr_t>(ptr), length);
+}
+
+void AddressPoolManager::Pool::Initialize(uintptr_t ptr, size_t length) {
+ PA_CHECK(ptr != 0);
+ PA_CHECK(!(ptr & kSuperPageOffsetMask));
+ PA_CHECK(!(length & kSuperPageOffsetMask));
+ address_begin_ = ptr;
+#if DCHECK_IS_ON()
+ address_end_ = ptr + length;
+ PA_DCHECK(address_begin_ < address_end_);
+#endif
+
+ total_bits_ = length / kSuperPageSize;
+ PA_CHECK(total_bits_ <= kMaxBits);
+
+ base::AutoLock scoped_lock(lock_);
+ alloc_bitset_.reset();
+ bit_hint_ = 0;
+}
+
+bool AddressPoolManager::Pool::IsInitialized() {
+ return address_begin_ != 0;
+}
+
+void AddressPoolManager::Pool::Reset() {
+ address_begin_ = 0;
+}
+
+uintptr_t AddressPoolManager::Pool::FindChunk(size_t requested_size) {
+ base::AutoLock scoped_lock(lock_);
+
+ const size_t required_size = bits::Align(requested_size, kSuperPageSize);
+ const size_t need_bits = required_size >> kSuperPageShift;
+
+ // Use first-fit policy to find an available chunk from free chunks. Start
+ // from |bit_hint_|, because we know there are no free chunks before.
+ size_t beg_bit = bit_hint_;
+ size_t curr_bit = bit_hint_;
+ while (true) {
+ // |end_bit| points 1 past the last bit that needs to be 0. If it goes past
+ // |total_bits_|, return |nullptr| to signal no free chunk was found.
+ size_t end_bit = beg_bit + need_bits;
+ if (end_bit > total_bits_)
+ return 0;
+
+ bool found = true;
+ for (; curr_bit < end_bit; ++curr_bit) {
+ if (alloc_bitset_.test(curr_bit)) {
+ // The bit was set, so this chunk isn't entirely free. Set |found=false|
+ // to ensure the outer loop continues. However, continue the inner loop
+ // to set |beg_bit| just past the last set bit in the investigated
+ // chunk. |curr_bit| is advanced all the way to |end_bit| to prevent the
+ // next outer loop pass from checking the same bits.
+ beg_bit = curr_bit + 1;
+ found = false;
+ if (bit_hint_ == curr_bit)
+ ++bit_hint_;
+ }
+ }
+
+ // An entire [beg_bit;end_bit) region of 0s was found. Fill them with 1s (to
+ // mark as allocated) and return the allocated address.
+ if (found) {
+ for (size_t i = beg_bit; i < end_bit; ++i) {
+ PA_DCHECK(!alloc_bitset_.test(i));
+ alloc_bitset_.set(i);
+ }
+ if (bit_hint_ == beg_bit) {
+ bit_hint_ = end_bit;
+ }
+ uintptr_t address = address_begin_ + beg_bit * kSuperPageSize;
+#if DCHECK_IS_ON()
+ PA_DCHECK(address + required_size <= address_end_);
+#endif
+ return address;
+ }
+ }
+
+ NOTREACHED();
+ return 0;
+}
+
+void AddressPoolManager::Pool::FreeChunk(uintptr_t address, size_t free_size) {
+ base::AutoLock scoped_lock(lock_);
+
+ PA_DCHECK(!(address & kSuperPageOffsetMask));
+
+ const size_t size = bits::Align(free_size, kSuperPageSize);
+ DCHECK_LE(address_begin_, address);
+#if DCHECK_IS_ON()
+ PA_DCHECK(address + size <= address_end_);
+#endif
+
+ const size_t beg_bit = (address - address_begin_) / kSuperPageSize;
+ const size_t end_bit = beg_bit + size / kSuperPageSize;
+ for (size_t i = beg_bit; i < end_bit; ++i) {
+ PA_DCHECK(alloc_bitset_.test(i));
+ alloc_bitset_.reset(i);
+ }
+ bit_hint_ = std::min(bit_hint_, beg_bit);
+}
+
+AddressPoolManager::Pool::Pool() = default;
+AddressPoolManager::Pool::~Pool() = default;
+
+AddressPoolManager::AddressPoolManager() = default;
+AddressPoolManager::~AddressPoolManager() = default;
+
+ALWAYS_INLINE AddressPoolManager::Pool* AddressPoolManager::GetPool(
+ pool_handle handle) {
+ PA_DCHECK(0 < handle && handle <= kNumPools);
+ return &pools_[handle - 1];
+}
+
+#endif // defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+
+} // namespace internal
+} // namespace base
diff --git a/chromium/base/allocator/partition_allocator/address_pool_manager.h b/chromium/base/allocator/partition_allocator/address_pool_manager.h
new file mode 100644
index 00000000000..cb46cb63c5a
--- /dev/null
+++ b/chromium/base/allocator/partition_allocator/address_pool_manager.h
@@ -0,0 +1,100 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_
+
+#include <bitset>
+
+#include "base/allocator/partition_allocator/partition_alloc_constants.h"
+#include "base/atomicops.h"
+#include "base/no_destructor.h"
+#include "base/synchronization/lock.h"
+#include "base/thread_annotations.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace internal {
+
+using pool_handle = unsigned;
+
+// The feature is not applicable to 32-bit address space.
+// ARCH_CPU_64_BITS implies 64-bit instruction set, but not necessarily 64-bit
+// address space. The only known case where address space is 32-bit is NaCl, so
+// eliminate it explicitly. static_assert below ensures that other won't slip
+// through.
+#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+
+static_assert(sizeof(size_t) >= 8, "Need more than 32-bit address space");
+
+// AddressPoolManager takes a reserved virtual address space and manages address
+// space allocation.
+//
+// AddressPoolManager (currently) supports up to 2 pools. Each pool manages a
+// contiguous reserved address space. Alloc() takes a pool_handle and returns
+// address regions from the specified pool. Free() also takes a pool_handle and
+// returns the address region back to the manager.
+class BASE_EXPORT AddressPoolManager {
+ public:
+ static AddressPoolManager* GetInstance();
+
+ pool_handle Add(uintptr_t address, size_t length);
+ void Remove(pool_handle handle);
+ char* Alloc(pool_handle handle, size_t length);
+ void Free(pool_handle handle, void* ptr, size_t length);
+ void ResetForTesting();
+
+ private:
+ AddressPoolManager();
+ ~AddressPoolManager();
+
+ class Pool {
+ public:
+ Pool();
+ ~Pool();
+
+ void Initialize(uintptr_t ptr, size_t length);
+ bool IsInitialized();
+ void Reset();
+
+ uintptr_t FindChunk(size_t size);
+ void FreeChunk(uintptr_t address, size_t size);
+
+ private:
+ // The bitset stores the allocation state of the address pool. 1 bit per
+ // super-page: 1 = allocated, 0 = free.
+ static constexpr size_t kGiB = 1024 * 1024 * 1024;
+ static constexpr size_t kMaxSupportedSize = 16 * kGiB;
+ static constexpr size_t kMaxBits = kMaxSupportedSize / kSuperPageSize;
+ base::Lock lock_;
+ std::bitset<kMaxBits> alloc_bitset_ GUARDED_BY(lock_);
+  // An index of a bit in the bitset before which we know for sure there are all
+ // 1s. This is a best-effort hint in the sense that there still may be lots
+ // of 1s after this index, but at least we know there is no point in
+ // starting the search before it.
+ size_t bit_hint_ GUARDED_BY(lock_);
+
+ size_t total_bits_ = 0;
+ uintptr_t address_begin_ = 0;
+#if DCHECK_IS_ON()
+ uintptr_t address_end_ = 0;
+#endif
+ };
+
+ ALWAYS_INLINE Pool* GetPool(pool_handle handle);
+
+ static constexpr size_t kNumPools = 2;
+ Pool pools_[kNumPools];
+
+ friend class NoDestructor<AddressPoolManager>;
+ DISALLOW_COPY_AND_ASSIGN(AddressPoolManager);
+};
+
+#endif // defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+
+} // namespace internal
+} // namespace base
+
+#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_ADDRESS_POOL_MANAGER_H_
diff --git a/chromium/base/allocator/partition_allocator/address_pool_manager_unittest.cc b/chromium/base/allocator/partition_allocator/address_pool_manager_unittest.cc
new file mode 100644
index 00000000000..02ca8874ccf
--- /dev/null
+++ b/chromium/base/allocator/partition_allocator/address_pool_manager_unittest.cc
@@ -0,0 +1,150 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/address_pool_manager.h"
+
+#include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/allocator/partition_allocator/page_allocator_internal.h"
+#include "base/allocator/partition_allocator/partition_alloc_constants.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace internal {
+
+#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+
+class AddressPoolManagerTest : public testing::Test {
+ protected:
+ AddressPoolManagerTest() = default;
+ ~AddressPoolManagerTest() override = default;
+
+ void SetUp() override {
+ AddressPoolManager::GetInstance()->ResetForTesting();
+ base_address_ =
+ AllocPages(nullptr, kPoolSize, kSuperPageSize, base::PageInaccessible,
+ PageTag::kPartitionAlloc, false);
+ ASSERT_TRUE(base_address_);
+ pool_ = AddressPoolManager::GetInstance()->Add(
+ reinterpret_cast<uintptr_t>(base_address_), kPoolSize);
+ }
+
+ void TearDown() override { FreePages(base_address_, kPoolSize); }
+
+ static constexpr size_t kPageCnt = 8192;
+ static constexpr size_t kPoolSize = kSuperPageSize * kPageCnt;
+
+ void* base_address_;
+ pool_handle pool_;
+};
+
+TEST_F(AddressPoolManagerTest, TooLargePool) {
+ uintptr_t base_addr = 0x4200000;
+
+ constexpr size_t kSize = 16ull * 1024 * 1024 * 1024;
+ EXPECT_DEATH_IF_SUPPORTED(
+ AddressPoolManager::GetInstance()->Add(base_addr, kSize + kSuperPageSize),
+ "");
+}
+
+TEST_F(AddressPoolManagerTest, ManyPages) {
+ char* base_ptr = reinterpret_cast<char*>(base_address_);
+
+ EXPECT_EQ(AddressPoolManager::GetInstance()->Alloc(pool_,
+ kPageCnt * kSuperPageSize),
+ base_ptr);
+ EXPECT_EQ(AddressPoolManager::GetInstance()->Alloc(pool_, kSuperPageSize),
+ nullptr);
+ AddressPoolManager::GetInstance()->Free(pool_, base_ptr,
+ kPageCnt * kSuperPageSize);
+ EXPECT_EQ(AddressPoolManager::GetInstance()->Alloc(pool_,
+ kPageCnt * kSuperPageSize),
+ base_ptr);
+}
+
+TEST_F(AddressPoolManagerTest, PagesFragmented) {
+ char* base_ptr = reinterpret_cast<char*>(base_address_);
+ void* addrs[kPageCnt];
+ for (size_t i = 0; i < kPageCnt; ++i) {
+ addrs[i] = AddressPoolManager::GetInstance()->Alloc(pool_, kSuperPageSize);
+ EXPECT_EQ(addrs[i], base_ptr + i * kSuperPageSize);
+ }
+ EXPECT_EQ(AddressPoolManager::GetInstance()->Alloc(pool_, kSuperPageSize),
+ nullptr);
+ for (size_t i = 1; i < kPageCnt; i += 2) {
+ AddressPoolManager::GetInstance()->Free(pool_, addrs[i], kSuperPageSize);
+ }
+ EXPECT_EQ(AddressPoolManager::GetInstance()->Alloc(pool_, 2 * kSuperPageSize),
+ nullptr);
+ for (size_t i = 1; i < kPageCnt; i += 2) {
+ addrs[i] = AddressPoolManager::GetInstance()->Alloc(pool_, kSuperPageSize);
+ EXPECT_EQ(addrs[i], base_ptr + i * kSuperPageSize);
+ }
+ EXPECT_EQ(AddressPoolManager::GetInstance()->Alloc(pool_, kSuperPageSize),
+ nullptr);
+}
+
+TEST_F(AddressPoolManagerTest, IrregularPattern) {
+ char* base_ptr = reinterpret_cast<char*>(base_address_);
+
+ void* a1 = AddressPoolManager::GetInstance()->Alloc(pool_, kSuperPageSize);
+ EXPECT_EQ(a1, base_ptr);
+ void* a2 =
+ AddressPoolManager::GetInstance()->Alloc(pool_, 2 * kSuperPageSize);
+ EXPECT_EQ(a2, base_ptr + 1 * kSuperPageSize);
+ void* a3 =
+ AddressPoolManager::GetInstance()->Alloc(pool_, 3 * kSuperPageSize);
+ EXPECT_EQ(a3, base_ptr + 3 * kSuperPageSize);
+ void* a4 =
+ AddressPoolManager::GetInstance()->Alloc(pool_, 4 * kSuperPageSize);
+ EXPECT_EQ(a4, base_ptr + 6 * kSuperPageSize);
+ void* a5 =
+ AddressPoolManager::GetInstance()->Alloc(pool_, 5 * kSuperPageSize);
+ EXPECT_EQ(a5, base_ptr + 10 * kSuperPageSize);
+
+ AddressPoolManager::GetInstance()->Free(pool_, a4, 4 * kSuperPageSize);
+ void* a6 =
+ AddressPoolManager::GetInstance()->Alloc(pool_, 6 * kSuperPageSize);
+ EXPECT_EQ(a6, base_ptr + 15 * kSuperPageSize);
+
+ AddressPoolManager::GetInstance()->Free(pool_, a5, 5 * kSuperPageSize);
+ void* a7 =
+ AddressPoolManager::GetInstance()->Alloc(pool_, 7 * kSuperPageSize);
+ EXPECT_EQ(a7, base_ptr + 6 * kSuperPageSize);
+ void* a8 =
+ AddressPoolManager::GetInstance()->Alloc(pool_, 3 * kSuperPageSize);
+ EXPECT_EQ(a8, base_ptr + 21 * kSuperPageSize);
+ void* a9 =
+ AddressPoolManager::GetInstance()->Alloc(pool_, 2 * kSuperPageSize);
+ EXPECT_EQ(a9, base_ptr + 13 * kSuperPageSize);
+
+ AddressPoolManager::GetInstance()->Free(pool_, a7, 7 * kSuperPageSize);
+ AddressPoolManager::GetInstance()->Free(pool_, a9, 2 * kSuperPageSize);
+ AddressPoolManager::GetInstance()->Free(pool_, a6, 6 * kSuperPageSize);
+ void* a10 =
+ AddressPoolManager::GetInstance()->Alloc(pool_, 15 * kSuperPageSize);
+ EXPECT_EQ(a10, base_ptr + 6 * kSuperPageSize);
+}
+
+TEST_F(AddressPoolManagerTest, DecommittedDataIsErased) {
+ void* data = AddressPoolManager::GetInstance()->Alloc(pool_, kSuperPageSize);
+ ASSERT_TRUE(data);
+
+ memset(data, 42, kSuperPageSize);
+ AddressPoolManager::GetInstance()->Free(pool_, data, kSuperPageSize);
+
+ void* data2 = AddressPoolManager::GetInstance()->Alloc(pool_, kSuperPageSize);
+ ASSERT_EQ(data, data2);
+
+ uint32_t sum = 0;
+ for (size_t i = 0; i < kSuperPageSize; i++) {
+ sum += reinterpret_cast<uint8_t*>(data2)[i];
+ }
+
+ EXPECT_EQ(0u, sum) << sum / 42 << " bytes were not zeroed";
+}
+#endif // defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+
+} // namespace internal
+} // namespace base
diff --git a/chromium/base/allocator/partition_allocator/address_space_randomization.cc b/chromium/base/allocator/partition_allocator/address_space_randomization.cc
index 72078fdaa50..b168f996148 100644
--- a/chromium/base/allocator/partition_allocator/address_space_randomization.cc
+++ b/chromium/base/allocator/partition_allocator/address_space_randomization.cc
@@ -5,6 +5,7 @@
#include "base/allocator/partition_allocator/address_space_randomization.h"
#include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/random.h"
#include "base/allocator/partition_allocator/spin_lock.h"
#include "base/check_op.h"
@@ -61,7 +62,7 @@ void* GetRandomPageBase() {
random += internal::kASLROffset;
#endif // defined(ARCH_CPU_32_BITS)
- DCHECK_EQ(0ULL, (random & kPageAllocationGranularityOffsetMask));
+ PA_DCHECK(!(random & kPageAllocationGranularityOffsetMask));
return reinterpret_cast<void*>(random);
}
diff --git a/chromium/base/allocator/partition_allocator/memory_reclaimer.cc b/chromium/base/allocator/partition_allocator/memory_reclaimer.cc
index 0d515b11463..3e4203abdbc 100644
--- a/chromium/base/allocator/partition_allocator/memory_reclaimer.cc
+++ b/chromium/base/allocator/partition_allocator/memory_reclaimer.cc
@@ -5,36 +5,34 @@
#include "base/allocator/partition_allocator/memory_reclaimer.h"
#include "base/allocator/partition_allocator/partition_alloc.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/bind.h"
#include "base/location.h"
#include "base/metrics/histogram_functions.h"
-#include "base/timer/elapsed_timer.h"
-#include "base/trace_event/trace_event.h"
+#include "base/trace_event/base_tracing.h"
namespace base {
namespace {
template <bool thread_safe>
-void Insert(std::set<internal::PartitionRootBase<thread_safe>*>* partitions,
- internal::PartitionRootBase<thread_safe>* partition) {
- DCHECK(partition);
+void Insert(std::set<PartitionRoot<thread_safe>*>* partitions,
+ PartitionRoot<thread_safe>* partition) {
+ PA_DCHECK(partition);
auto it_and_whether_inserted = partitions->insert(partition);
- DCHECK(it_and_whether_inserted.second);
+ PA_DCHECK(it_and_whether_inserted.second);
}
template <bool thread_safe>
-void Remove(std::set<internal::PartitionRootBase<thread_safe>*>* partitions,
- internal::PartitionRootBase<thread_safe>* partition) {
- DCHECK(partition);
+void Remove(std::set<PartitionRoot<thread_safe>*>* partitions,
+ PartitionRoot<thread_safe>* partition) {
+ PA_DCHECK(partition);
size_t erased_count = partitions->erase(partition);
- DCHECK_EQ(1u, erased_count);
+ PA_DCHECK(erased_count == 1u);
}
} // namespace
-constexpr TimeDelta PartitionAllocMemoryReclaimer::kStatsRecordingTimeDelta;
-
// static
PartitionAllocMemoryReclaimer* PartitionAllocMemoryReclaimer::Instance() {
static NoDestructor<PartitionAllocMemoryReclaimer> instance;
@@ -42,37 +40,37 @@ PartitionAllocMemoryReclaimer* PartitionAllocMemoryReclaimer::Instance() {
}
void PartitionAllocMemoryReclaimer::RegisterPartition(
- internal::PartitionRootBase<internal::ThreadSafe>* partition) {
+ PartitionRoot<internal::ThreadSafe>* partition) {
AutoLock lock(lock_);
Insert(&thread_safe_partitions_, partition);
}
void PartitionAllocMemoryReclaimer::RegisterPartition(
- internal::PartitionRootBase<internal::NotThreadSafe>* partition) {
+ PartitionRoot<internal::NotThreadSafe>* partition) {
AutoLock lock(lock_);
Insert(&thread_unsafe_partitions_, partition);
}
void PartitionAllocMemoryReclaimer::UnregisterPartition(
- internal::PartitionRootBase<internal::ThreadSafe>* partition) {
+ PartitionRoot<internal::ThreadSafe>* partition) {
AutoLock lock(lock_);
Remove(&thread_safe_partitions_, partition);
}
void PartitionAllocMemoryReclaimer::UnregisterPartition(
- internal::PartitionRootBase<internal::NotThreadSafe>* partition) {
+ PartitionRoot<internal::NotThreadSafe>* partition) {
AutoLock lock(lock_);
Remove(&thread_unsafe_partitions_, partition);
}
void PartitionAllocMemoryReclaimer::Start(
scoped_refptr<SequencedTaskRunner> task_runner) {
- DCHECK(!timer_);
- DCHECK(task_runner);
+ PA_DCHECK(!timer_);
+ PA_DCHECK(task_runner);
{
AutoLock lock(lock_);
- DCHECK(!thread_safe_partitions_.empty());
+ PA_DCHECK(!thread_safe_partitions_.empty());
}
// This does not need to run on the main thread, however there are a few
@@ -98,58 +96,27 @@ void PartitionAllocMemoryReclaimer::Start(
timer_->Start(
FROM_HERE, kInterval,
BindRepeating(&PartitionAllocMemoryReclaimer::Reclaim, Unretained(this)));
-
- task_runner->PostDelayedTask(
- FROM_HERE,
- BindOnce(&PartitionAllocMemoryReclaimer::RecordStatistics,
- Unretained(this)),
- kStatsRecordingTimeDelta);
}
PartitionAllocMemoryReclaimer::PartitionAllocMemoryReclaimer() = default;
PartitionAllocMemoryReclaimer::~PartitionAllocMemoryReclaimer() = default;
void PartitionAllocMemoryReclaimer::Reclaim() {
+ AutoLock lock(lock_); // Has to protect from concurrent (Un)Register calls.
TRACE_EVENT0("base", "PartitionAllocMemoryReclaimer::Reclaim()");
- // Reclaim will almost always call into the kernel, so tail latency of this
- // task would likely be affected by descheduling.
- //
- // On Linux (and Android) at least, ThreadTicks also includes kernel time, so
- // this is a good measure of the true cost of decommit.
- ElapsedThreadTimer timer;
+
constexpr int kFlags =
PartitionPurgeDecommitEmptyPages | PartitionPurgeDiscardUnusedSystemPages;
- {
- AutoLock lock(lock_); // Has to protect from concurrent (Un)Register calls.
- for (auto* partition : thread_safe_partitions_)
- partition->PurgeMemory(kFlags);
- for (auto* partition : thread_unsafe_partitions_)
- partition->PurgeMemory(kFlags);
- }
-
- has_called_reclaim_ = true;
- if (timer.is_supported())
- total_reclaim_thread_time_ += timer.Elapsed();
-}
-
-void PartitionAllocMemoryReclaimer::RecordStatistics() {
- if (!ElapsedThreadTimer().is_supported())
- return;
- if (!has_called_reclaim_)
- return;
-
- UmaHistogramTimes("Memory.PartitionAlloc.MainThreadTime.5min",
- total_reclaim_thread_time_);
- has_called_reclaim_ = false;
- total_reclaim_thread_time_ = TimeDelta();
+ for (auto* partition : thread_safe_partitions_)
+ partition->PurgeMemory(kFlags);
+ for (auto* partition : thread_unsafe_partitions_)
+ partition->PurgeMemory(kFlags);
}
void PartitionAllocMemoryReclaimer::ResetForTesting() {
AutoLock lock(lock_);
- has_called_reclaim_ = false;
- total_reclaim_thread_time_ = TimeDelta();
timer_ = nullptr;
thread_safe_partitions_.clear();
thread_unsafe_partitions_.clear();
diff --git a/chromium/base/allocator/partition_allocator/memory_reclaimer.h b/chromium/base/allocator/partition_allocator/memory_reclaimer.h
index 4e51332dca6..d7594aa3a83 100644
--- a/chromium/base/allocator/partition_allocator/memory_reclaimer.h
+++ b/chromium/base/allocator/partition_allocator/memory_reclaimer.h
@@ -15,8 +15,6 @@
#include "base/no_destructor.h"
#include "base/single_thread_task_runner.h"
#include "base/thread_annotations.h"
-#include "base/time/time.h"
-#include "base/timer/elapsed_timer.h"
#include "base/timer/timer.h"
namespace base {
@@ -36,42 +34,31 @@ class BASE_EXPORT PartitionAllocMemoryReclaimer {
// Internal. Do not use.
// Registers a partition to be tracked by the reclaimer.
- void RegisterPartition(
- internal::PartitionRootBase<internal::ThreadSafe>* partition);
- void RegisterPartition(
- internal::PartitionRootBase<internal::NotThreadSafe>* partition);
+ void RegisterPartition(PartitionRoot<internal::ThreadSafe>* partition);
+ void RegisterPartition(PartitionRoot<internal::NotThreadSafe>* partition);
// Internal. Do not use.
// Unregisters a partition to be tracked by the reclaimer.
- void UnregisterPartition(
- internal::PartitionRootBase<internal::ThreadSafe>* partition);
- void UnregisterPartition(
- internal::PartitionRootBase<internal::NotThreadSafe>* partition);
+ void UnregisterPartition(PartitionRoot<internal::ThreadSafe>* partition);
+ void UnregisterPartition(PartitionRoot<internal::NotThreadSafe>* partition);
// Starts the periodic reclaim. Should be called once.
void Start(scoped_refptr<SequencedTaskRunner> task_runner);
// Triggers an explicit reclaim now.
void Reclaim();
- static constexpr TimeDelta kStatsRecordingTimeDelta =
- TimeDelta::FromMinutes(5);
-
private:
PartitionAllocMemoryReclaimer();
~PartitionAllocMemoryReclaimer();
void ReclaimAndReschedule();
- void RecordStatistics();
void ResetForTesting();
- // Total time spent in |Reclaim()|.
- bool has_called_reclaim_ = false;
- TimeDelta total_reclaim_thread_time_;
// Schedules periodic |Reclaim()|.
std::unique_ptr<RepeatingTimer> timer_;
Lock lock_;
- std::set<internal::PartitionRootBase<internal::ThreadSafe>*>
- thread_safe_partitions_ GUARDED_BY(lock_);
- std::set<internal::PartitionRootBase<internal::NotThreadSafe>*>
- thread_unsafe_partitions_ GUARDED_BY(lock_);
+ std::set<PartitionRoot<internal::ThreadSafe>*> thread_safe_partitions_
+ GUARDED_BY(lock_);
+ std::set<PartitionRoot<internal::NotThreadSafe>*> thread_unsafe_partitions_
+ GUARDED_BY(lock_);
friend class NoDestructor<PartitionAllocMemoryReclaimer>;
friend class PartitionAllocMemoryReclaimerTest;
diff --git a/chromium/base/allocator/partition_allocator/memory_reclaimer_unittest.cc b/chromium/base/allocator/partition_allocator/memory_reclaimer_unittest.cc
index 72c72011b69..c41a39b3911 100644
--- a/chromium/base/allocator/partition_allocator/memory_reclaimer_unittest.cc
+++ b/chromium/base/allocator/partition_allocator/memory_reclaimer_unittest.cc
@@ -8,7 +8,6 @@
#include <utility>
#include "base/allocator/partition_allocator/partition_alloc.h"
-#include "base/test/metrics/histogram_tester.h"
#include "base/test/task_environment.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -19,6 +18,14 @@
namespace base {
+namespace {
+
+void HandleOOM(size_t unused_size) {
+ LOG(FATAL) << "Out of memory";
+}
+
+} // namespace
+
class PartitionAllocMemoryReclaimerTest : public ::testing::Test {
public:
PartitionAllocMemoryReclaimerTest()
@@ -28,8 +35,9 @@ class PartitionAllocMemoryReclaimerTest : public ::testing::Test {
protected:
void SetUp() override {
+ PartitionAllocGlobalInit(HandleOOM);
PartitionAllocMemoryReclaimer::Instance()->ResetForTesting();
- allocator_ = std::make_unique<PartitionAllocatorGeneric>();
+ allocator_ = std::make_unique<PartitionAllocator>();
allocator_->init();
}
@@ -37,6 +45,7 @@ class PartitionAllocMemoryReclaimerTest : public ::testing::Test {
allocator_ = nullptr;
PartitionAllocMemoryReclaimer::Instance()->ResetForTesting();
task_environment_.FastForwardUntilNoTasksRemain();
+ PartitionAllocGlobalUninitForTesting();
}
void StartReclaimer() {
@@ -49,32 +58,19 @@ class PartitionAllocMemoryReclaimerTest : public ::testing::Test {
allocator_->root()->Free(data);
}
- size_t GetExpectedTasksCount() const {
- // Includes the stats recording task.
- if (ElapsedThreadTimer().is_supported())
- return 2;
- return 1;
- }
-
test::TaskEnvironment task_environment_;
- std::unique_ptr<PartitionAllocatorGeneric> allocator_;
+ std::unique_ptr<PartitionAllocator> allocator_;
};
TEST_F(PartitionAllocMemoryReclaimerTest, Simple) {
StartReclaimer();
- EXPECT_EQ(GetExpectedTasksCount(),
- task_environment_.GetPendingMainThreadTaskCount());
+ EXPECT_EQ(1u, task_environment_.GetPendingMainThreadTaskCount());
EXPECT_TRUE(task_environment_.NextTaskIsDelayed());
}
-TEST_F(PartitionAllocMemoryReclaimerTest, IsEnabledByDefault) {
- StartReclaimer();
- EXPECT_EQ(2u, task_environment_.GetPendingMainThreadTaskCount());
-}
-
TEST_F(PartitionAllocMemoryReclaimerTest, FreesMemory) {
- PartitionRootGeneric* root = allocator_->root();
+ PartitionRoot<internal::ThreadSafe>* root = allocator_->root();
size_t committed_initially = root->total_size_of_committed_pages;
AllocateAndFree();
@@ -91,7 +87,7 @@ TEST_F(PartitionAllocMemoryReclaimerTest, FreesMemory) {
}
TEST_F(PartitionAllocMemoryReclaimerTest, Reclaim) {
- PartitionRootGeneric* root = allocator_->root();
+ PartitionRoot<internal::ThreadSafe>* root = allocator_->root();
size_t committed_initially = root->total_size_of_committed_pages;
{
@@ -107,23 +103,5 @@ TEST_F(PartitionAllocMemoryReclaimerTest, Reclaim) {
}
}
-TEST_F(PartitionAllocMemoryReclaimerTest, StatsRecording) {
- // No stats reported if the timer is not.
- if (!ElapsedThreadTimer().is_supported())
- return;
-
- HistogramTester histogram_tester;
- StartReclaimer();
- EXPECT_EQ(GetExpectedTasksCount(),
- task_environment_.GetPendingMainThreadTaskCount());
-
- task_environment_.FastForwardBy(
- PartitionAllocMemoryReclaimer::kStatsRecordingTimeDelta);
- // Hard to make sure that the total time is >1ms, so cannot assert that the
- // value is not 0.
- histogram_tester.ExpectTotalCount("Memory.PartitionAlloc.MainThreadTime.5min",
- 1);
-}
-
} // namespace base
#endif // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
diff --git a/chromium/base/allocator/partition_allocator/oom.h b/chromium/base/allocator/partition_allocator/oom.h
index c3a2d1b03cf..916ef125c87 100644
--- a/chromium/base/allocator/partition_allocator/oom.h
+++ b/chromium/base/allocator/partition_allocator/oom.h
@@ -6,7 +6,6 @@
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_OOM_H_
#include "base/allocator/partition_allocator/oom_callback.h"
-#include "base/logging.h"
#include "base/process/memory.h"
#include "build/build_config.h"
diff --git a/chromium/base/allocator/partition_allocator/oom_callback.cc b/chromium/base/allocator/partition_allocator/oom_callback.cc
index c734458acbb..b6efc31da02 100644
--- a/chromium/base/allocator/partition_allocator/oom_callback.cc
+++ b/chromium/base/allocator/partition_allocator/oom_callback.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "base/allocator/partition_allocator/oom_callback.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/check.h"
@@ -13,7 +14,7 @@ PartitionAllocOomCallback g_oom_callback;
} // namespace
void SetPartitionAllocOomCallback(PartitionAllocOomCallback callback) {
- DCHECK(!g_oom_callback);
+ PA_DCHECK(!g_oom_callback);
g_oom_callback = callback;
}
diff --git a/chromium/base/allocator/partition_allocator/page_allocator.cc b/chromium/base/allocator/partition_allocator/page_allocator.cc
index b7785505efc..25ab8d72875 100644
--- a/chromium/base/allocator/partition_allocator/page_allocator.cc
+++ b/chromium/base/allocator/partition_allocator/page_allocator.cc
@@ -10,11 +10,12 @@
#include "base/allocator/partition_allocator/address_space_randomization.h"
#include "base/allocator/partition_allocator/page_allocator_internal.h"
-#include "base/allocator/partition_allocator/spin_lock.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/bits.h"
#include "base/check_op.h"
#include "base/no_destructor.h"
#include "base/numerics/checked_math.h"
+#include "base/synchronization/lock.h"
#include "build/build_config.h"
#if defined(OS_WIN)
@@ -36,14 +37,14 @@ namespace base {
namespace {
// We may reserve/release address space on different threads.
-subtle::SpinLock& GetReserveLock() {
- static NoDestructor<subtle::SpinLock> s_reserveLock;
- return *s_reserveLock;
+Lock& GetReserveLock() {
+ static NoDestructor<Lock> lock;
+ return *lock;
}
// We only support a single block of reserved address space.
-void* s_reservation_address = nullptr;
-size_t s_reservation_size = 0;
+void* s_reservation_address GUARDED_BY(GetReserveLock()) = nullptr;
+size_t s_reservation_size GUARDED_BY(GetReserveLock()) = 0;
void* AllocPagesIncludingReserved(void* address,
size_t length,
@@ -78,9 +79,9 @@ void* TrimMapping(void* base,
pre_slack = alignment - pre_slack;
}
size_t post_slack = base_length - pre_slack - trim_length;
- DCHECK(base_length >= trim_length || pre_slack || post_slack);
- DCHECK(pre_slack < base_length);
- DCHECK(post_slack < base_length);
+ PA_DCHECK(base_length >= trim_length || pre_slack || post_slack);
+ PA_DCHECK(pre_slack < base_length);
+ PA_DCHECK(post_slack < base_length);
return TrimMappingInternal(base, base_length, trim_length, accessibility,
commit, pre_slack, post_slack);
}
@@ -92,10 +93,10 @@ void* SystemAllocPages(void* hint,
PageAccessibilityConfiguration accessibility,
PageTag page_tag,
bool commit) {
- DCHECK(!(length & kPageAllocationGranularityOffsetMask));
- DCHECK(!(reinterpret_cast<uintptr_t>(hint) &
- kPageAllocationGranularityOffsetMask));
- DCHECK(commit || accessibility == PageInaccessible);
+ PA_DCHECK(!(length & kPageAllocationGranularityOffsetMask));
+ PA_DCHECK(!(reinterpret_cast<uintptr_t>(hint) &
+ kPageAllocationGranularityOffsetMask));
+ PA_DCHECK(commit || accessibility == PageInaccessible);
return SystemAllocPagesInternal(hint, length, accessibility, page_tag,
commit);
}
@@ -106,16 +107,16 @@ void* AllocPages(void* address,
PageAccessibilityConfiguration accessibility,
PageTag page_tag,
bool commit) {
- DCHECK(length >= kPageAllocationGranularity);
- DCHECK(!(length & kPageAllocationGranularityOffsetMask));
- DCHECK(align >= kPageAllocationGranularity);
+ PA_DCHECK(length >= kPageAllocationGranularity);
+ PA_DCHECK(!(length & kPageAllocationGranularityOffsetMask));
+ PA_DCHECK(align >= kPageAllocationGranularity);
// Alignment must be power of 2 for masking math to work.
- DCHECK(base::bits::IsPowerOfTwo(align));
- DCHECK(!(reinterpret_cast<uintptr_t>(address) &
- kPageAllocationGranularityOffsetMask));
+ PA_DCHECK(base::bits::IsPowerOfTwo(align));
+ PA_DCHECK(!(reinterpret_cast<uintptr_t>(address) &
+ kPageAllocationGranularityOffsetMask));
uintptr_t align_offset_mask = align - 1;
uintptr_t align_base_mask = ~align_offset_mask;
- DCHECK(!(reinterpret_cast<uintptr_t>(address) & align_offset_mask));
+ PA_DCHECK(!(reinterpret_cast<uintptr_t>(address) & align_offset_mask));
// If the client passed null as the address, choose a good one.
if (address == nullptr) {
@@ -165,7 +166,7 @@ void* AllocPages(void* address,
// Make a larger allocation so we can force alignment.
size_t try_length = length + (align - kPageAllocationGranularity);
- CHECK(try_length >= length);
+ PA_CHECK(try_length >= length);
void* ret;
do {
@@ -183,54 +184,54 @@ void* AllocPages(void* address,
}
void FreePages(void* address, size_t length) {
- DCHECK(!(reinterpret_cast<uintptr_t>(address) &
- kPageAllocationGranularityOffsetMask));
- DCHECK(!(length & kPageAllocationGranularityOffsetMask));
+ PA_DCHECK(!(reinterpret_cast<uintptr_t>(address) &
+ kPageAllocationGranularityOffsetMask));
+ PA_DCHECK(!(length & kPageAllocationGranularityOffsetMask));
FreePagesInternal(address, length);
}
bool TrySetSystemPagesAccess(void* address,
size_t length,
PageAccessibilityConfiguration accessibility) {
- DCHECK(!(length & kSystemPageOffsetMask));
+ PA_DCHECK(!(length & kSystemPageOffsetMask));
return TrySetSystemPagesAccessInternal(address, length, accessibility);
}
void SetSystemPagesAccess(void* address,
size_t length,
PageAccessibilityConfiguration accessibility) {
- DCHECK(!(length & kSystemPageOffsetMask));
+ PA_DCHECK(!(length & kSystemPageOffsetMask));
SetSystemPagesAccessInternal(address, length, accessibility);
}
void DecommitSystemPages(void* address, size_t length) {
- DCHECK_EQ(0UL, length & kSystemPageOffsetMask);
+ PA_DCHECK(!(length & kSystemPageOffsetMask));
DecommitSystemPagesInternal(address, length);
}
bool RecommitSystemPages(void* address,
size_t length,
PageAccessibilityConfiguration accessibility) {
- DCHECK_EQ(0UL, length & kSystemPageOffsetMask);
- DCHECK_NE(PageInaccessible, accessibility);
+ PA_DCHECK(!(length & kSystemPageOffsetMask));
+ PA_DCHECK(accessibility != PageInaccessible);
return RecommitSystemPagesInternal(address, length, accessibility);
}
void DiscardSystemPages(void* address, size_t length) {
- DCHECK_EQ(0UL, length & kSystemPageOffsetMask);
+ PA_DCHECK(!(length & kSystemPageOffsetMask));
DiscardSystemPagesInternal(address, length);
}
bool ReserveAddressSpace(size_t size) {
// To avoid deadlock, call only SystemAllocPages.
- subtle::SpinLock::Guard guard(GetReserveLock());
+ AutoLock guard(GetReserveLock());
if (s_reservation_address == nullptr) {
void* mem = SystemAllocPages(nullptr, size, PageInaccessible,
PageTag::kChromium, false);
if (mem != nullptr) {
// We guarantee this alignment when reserving address space.
- DCHECK(!(reinterpret_cast<uintptr_t>(mem) &
- kPageAllocationGranularityOffsetMask));
+ PA_DCHECK(!(reinterpret_cast<uintptr_t>(mem) &
+ kPageAllocationGranularityOffsetMask));
s_reservation_address = mem;
s_reservation_size = size;
return true;
@@ -241,7 +242,7 @@ bool ReserveAddressSpace(size_t size) {
bool ReleaseReservation() {
// To avoid deadlock, call only FreePages.
- subtle::SpinLock::Guard guard(GetReserveLock());
+ AutoLock guard(GetReserveLock());
if (!s_reservation_address)
return false;
@@ -252,7 +253,7 @@ bool ReleaseReservation() {
}
bool HasReservationForTesting() {
- subtle::SpinLock::Guard guard(GetReserveLock());
+ AutoLock guard(GetReserveLock());
return s_reservation_address != nullptr;
}
diff --git a/chromium/base/allocator/partition_allocator/page_allocator_internals_fuchsia.h b/chromium/base/allocator/partition_allocator/page_allocator_internals_fuchsia.h
index 7e1bff18525..e2b99f18e70 100644
--- a/chromium/base/allocator/partition_allocator/page_allocator_internals_fuchsia.h
+++ b/chromium/base/allocator/partition_allocator/page_allocator_internals_fuchsia.h
@@ -16,8 +16,9 @@
#include <lib/zx/vmo.h>
#include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/fuchsia/fuchsia_logging.h"
-#include "base/logging.h"
+#include "base/notreached.h"
namespace base {
@@ -35,7 +36,7 @@ const char* PageTagToName(PageTag tag) {
case PageTag::kV8:
return "cr_v8";
default:
- DCHECK(false);
+ PA_DCHECK(false);
return "";
}
}
@@ -126,7 +127,7 @@ void* TrimMappingInternal(void* base,
bool commit,
size_t pre_slack,
size_t post_slack) {
- DCHECK_EQ(base_length, trim_length + pre_slack + post_slack);
+ PA_DCHECK(base_length == trim_length + pre_slack + post_slack);
uint64_t base_address = reinterpret_cast<uint64_t>(base);
diff --git a/chromium/base/allocator/partition_allocator/page_allocator_internals_posix.h b/chromium/base/allocator/partition_allocator/page_allocator_internals_posix.h
index 27fe5a9a1df..ebf2bcafcae 100644
--- a/chromium/base/allocator/partition_allocator/page_allocator_internals_posix.h
+++ b/chromium/base/allocator/partition_allocator/page_allocator_internals_posix.h
@@ -8,7 +8,9 @@
#include <errno.h>
#include <sys/mman.h>
-#include "base/logging.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
+#include "base/check_op.h"
+#include "base/notreached.h"
#include "build/build_config.h"
#if defined(OS_MACOSX)
@@ -56,7 +58,7 @@ const char* PageTagToName(PageTag tag) {
case PageTag::kV8:
return "v8";
default:
- DCHECK(false);
+ PA_DCHECK(false);
return "";
}
}
@@ -112,8 +114,8 @@ void* SystemAllocPagesInternal(void* hint,
#if defined(OS_MACOSX)
// Use a custom tag to make it easier to distinguish Partition Alloc regions
// in vmmap(1). Tags between 240-255 are supported.
- DCHECK_LE(PageTag::kFirst, page_tag);
- DCHECK_GE(PageTag::kLast, page_tag);
+ PA_DCHECK(PageTag::kFirst <= page_tag);
+ PA_DCHECK(PageTag::kLast >= page_tag);
int fd = VM_MAKE_TAG(static_cast<int>(page_tag));
#else
int fd = -1;
@@ -165,12 +167,12 @@ void* TrimMappingInternal(void* base,
// the aligned range.
if (pre_slack) {
int res = munmap(base, pre_slack);
- CHECK(!res);
+ PCHECK(!res);
ret = reinterpret_cast<char*>(base) + pre_slack;
}
if (post_slack) {
int res = munmap(reinterpret_cast<char*>(ret) + trim_length, post_slack);
- CHECK(!res);
+ PCHECK(!res);
}
return ret;
}
@@ -186,11 +188,11 @@ void SetSystemPagesAccessInternal(
void* address,
size_t length,
PageAccessibilityConfiguration accessibility) {
- CHECK_EQ(0, mprotect(address, length, GetAccessFlags(accessibility)));
+ PCHECK(!mprotect(address, length, GetAccessFlags(accessibility)));
}
void FreePagesInternal(void* address, size_t length) {
- CHECK(!munmap(address, length));
+ PCHECK(!munmap(address, length));
}
void DecommitSystemPagesInternal(void* address, size_t length) {
@@ -227,7 +229,7 @@ void DiscardSystemPagesInternal(void* address, size_t length) {
// MADV_FREE_REUSABLE sometimes fails, so fall back to MADV_DONTNEED.
ret = madvise(address, length, MADV_DONTNEED);
}
- CHECK(0 == ret);
+ PCHECK(0 == ret);
#else
// We have experimented with other flags, but with suboptimal results.
//
@@ -235,7 +237,7 @@ void DiscardSystemPagesInternal(void* address, size_t length) {
// performance benefits unclear.
//
// Therefore, we just do the simple thing: MADV_DONTNEED.
- CHECK(!madvise(address, length, MADV_DONTNEED));
+ PCHECK(!madvise(address, length, MADV_DONTNEED));
#endif
}
diff --git a/chromium/base/allocator/partition_allocator/page_allocator_internals_win.h b/chromium/base/allocator/partition_allocator/page_allocator_internals_win.h
index 60a3472f976..9f0fc8cedfa 100644
--- a/chromium/base/allocator/partition_allocator/page_allocator_internals_win.h
+++ b/chromium/base/allocator/partition_allocator/page_allocator_internals_win.h
@@ -7,7 +7,8 @@
#include "base/allocator/partition_allocator/oom.h"
#include "base/allocator/partition_allocator/page_allocator_internal.h"
-#include "base/logging.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
+#include "base/notreached.h"
namespace base {
@@ -84,7 +85,7 @@ void SetSystemPagesAccessInternal(
if (!VirtualFree(address, length, MEM_DECOMMIT)) {
// We check `GetLastError` for `ERROR_SUCCESS` here so that in a crash
// report we get the error number.
- CHECK_EQ(static_cast<uint32_t>(ERROR_SUCCESS), GetLastError());
+ PA_CHECK(static_cast<uint32_t>(ERROR_SUCCESS) == GetLastError());
}
} else {
if (!VirtualAlloc(address, length, MEM_COMMIT,
@@ -94,13 +95,13 @@ void SetSystemPagesAccessInternal(
OOM_CRASH(length);
// We check `GetLastError` for `ERROR_SUCCESS` here so that in a crash
// report we get the error number.
- CHECK_EQ(ERROR_SUCCESS, error);
+ PA_CHECK(ERROR_SUCCESS == error);
}
}
}
void FreePagesInternal(void* address, size_t length) {
- CHECK(VirtualFree(address, 0, MEM_RELEASE));
+ PA_CHECK(VirtualFree(address, 0, MEM_RELEASE));
}
void DecommitSystemPagesInternal(void* address, size_t length) {
@@ -135,7 +136,7 @@ void DiscardSystemPagesInternal(void* address, size_t length) {
// failure.
if (ret) {
void* ptr = VirtualAlloc(address, length, MEM_RESET, PAGE_READWRITE);
- CHECK(ptr);
+ PA_CHECK(ptr);
}
}
diff --git a/chromium/base/allocator/partition_allocator/page_allocator_unittest.cc b/chromium/base/allocator/partition_allocator/page_allocator_unittest.cc
index 7612ad26977..2067a8d6222 100644
--- a/chromium/base/allocator/partition_allocator/page_allocator_unittest.cc
+++ b/chromium/base/allocator/partition_allocator/page_allocator_unittest.cc
@@ -251,6 +251,29 @@ TEST(PageAllocatorTest, PageTagging) {
}
#endif // defined(OS_ANDROID)
+#if !defined(OS_MACOSX)
+
+TEST(PageAllocatorTest, DecommitErasesMemory) {
+ size_t size = kPageAllocationGranularity;
+ void* buffer = AllocPages(nullptr, size, kPageAllocationGranularity,
+ PageReadWrite, PageTag::kChromium, true);
+ ASSERT_TRUE(buffer);
+
+ memset(buffer, 42, size);
+
+ DecommitSystemPages(buffer, size);
+ EXPECT_TRUE(RecommitSystemPages(buffer, size, PageReadWrite));
+
+ uint8_t* recommitted_buffer = reinterpret_cast<uint8_t*>(buffer);
+ uint32_t sum = 0;
+ for (size_t i = 0; i < size; i++) {
+ sum += recommitted_buffer[i];
+ }
+ EXPECT_EQ(0u, sum) << "Data was not erased";
+}
+
+#endif  // !defined(OS_MACOSX)
+
} // namespace base
#endif // !defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
diff --git a/chromium/base/allocator/partition_allocator/partition_address_space.cc b/chromium/base/allocator/partition_allocator/partition_address_space.cc
new file mode 100644
index 00000000000..d72db634785
--- /dev/null
+++ b/chromium/base/allocator/partition_allocator/partition_address_space.cc
@@ -0,0 +1,69 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/partition_address_space.h"
+
+#include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/allocator/partition_allocator/page_allocator_internal.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/partition_alloc_constants.h"
+#include "base/bits.h"
+
+namespace base {
+
+namespace internal {
+
+#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+
+// Before PartitionAddressSpace::Init(), no allocations are made from the
+// reserved address space. So initially set reserved_base_address_ to
+// kReservedAddressSpaceOffsetMask, so that PartitionAddressSpace::Contains()
+// always returns false.
+// Do the same for normal_bucket_pool_base_address_.
+uintptr_t PartitionAddressSpace::reserved_base_address_ =
+ kReservedAddressSpaceOffsetMask;
+uintptr_t PartitionAddressSpace::normal_bucket_pool_base_address_ =
+ kNormalBucketPoolOffsetMask;
+
+pool_handle PartitionAddressSpace::direct_map_pool_ = 0;
+pool_handle PartitionAddressSpace::normal_bucket_pool_ = 0;
+
+void PartitionAddressSpace::Init() {
+ PA_DCHECK(kReservedAddressSpaceOffsetMask == reserved_base_address_);
+ reserved_base_address_ = reinterpret_cast<uintptr_t>(AllocPages(
+ nullptr, kDesiredAddressSpaceSize, kReservedAddressSpaceAlignment,
+ base::PageInaccessible, PageTag::kPartitionAlloc, false));
+ PA_CHECK(reserved_base_address_);
+ PA_DCHECK(!(reserved_base_address_ & kReservedAddressSpaceOffsetMask));
+
+ uintptr_t current = reserved_base_address_;
+
+ direct_map_pool_ = internal::AddressPoolManager::GetInstance()->Add(
+ current, kDirectMapPoolSize);
+ PA_DCHECK(direct_map_pool_);
+ current += kDirectMapPoolSize;
+
+ normal_bucket_pool_base_address_ = current;
+ normal_bucket_pool_ = internal::AddressPoolManager::GetInstance()->Add(
+ current, kNormalBucketPoolSize);
+ PA_DCHECK(normal_bucket_pool_);
+ current += kNormalBucketPoolSize;
+ PA_DCHECK(reserved_base_address_ + kDesiredAddressSpaceSize == current);
+}
+
+void PartitionAddressSpace::UninitForTesting() {
+ PA_DCHECK(kReservedAddressSpaceOffsetMask != reserved_base_address_);
+ FreePages(reinterpret_cast<void*>(reserved_base_address_),
+ kReservedAddressSpaceAlignment);
+ reserved_base_address_ = kReservedAddressSpaceOffsetMask;
+ direct_map_pool_ = 0;
+ normal_bucket_pool_ = 0;
+ internal::AddressPoolManager::GetInstance()->ResetForTesting();
+}
+
+#endif // defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+
+} // namespace internal
+
+} // namespace base
diff --git a/chromium/base/allocator/partition_allocator/partition_address_space.h b/chromium/base/allocator/partition_allocator/partition_address_space.h
new file mode 100644
index 00000000000..7297a2110db
--- /dev/null
+++ b/chromium/base/allocator/partition_allocator/partition_address_space.h
@@ -0,0 +1,151 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_
+
+#include "base/allocator/partition_allocator/address_pool_manager.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/partition_alloc_features.h"
+#include "base/base_export.h"
+#include "base/bits.h"
+#include "base/feature_list.h"
+#include "base/notreached.h"
+#include "build/build_config.h"
+
+namespace base {
+
+namespace internal {
+
+// The feature is not applicable to 32-bit address space.
+// ARCH_CPU_64_BITS implies 64-bit instruction set, but not necessarily 64-bit
+// address space. The only known case where address space is 32-bit is NaCl, so
+// eliminate it explicitly. static_assert below ensures that others won't slip
+// through.
+// TODO(tasak): define ADDRESS_SPACE_64_BITS as "defined(ARCH_CPU_64_BITS) &&
+// !defined(OS_NACL)" and use it.
+#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+
+static_assert(sizeof(size_t) >= 8, "Nee more than 32-bit address space");
+
+// Reserves address space for PartitionAllocator.
+class BASE_EXPORT PartitionAddressSpace {
+ public:
+ static ALWAYS_INLINE internal::pool_handle GetDirectMapPool() {
+ return direct_map_pool_;
+ }
+ static ALWAYS_INLINE internal::pool_handle GetNormalBucketPool() {
+ return normal_bucket_pool_;
+ }
+
+ static void Init();
+ static void UninitForTesting();
+
+ static ALWAYS_INLINE bool Contains(const void* address) {
+ return (reinterpret_cast<uintptr_t>(address) &
+ kReservedAddressSpaceBaseMask) == reserved_base_address_;
+ }
+
+ static ALWAYS_INLINE bool IsInNormalBucketPool(const void* address) {
+ return (reinterpret_cast<uintptr_t>(address) & kNormalBucketPoolBaseMask) ==
+ normal_bucket_pool_base_address_;
+ }
+
+ // PartitionAddressSpace is a static-only class.
+ PartitionAddressSpace() = delete;
+ PartitionAddressSpace(const PartitionAddressSpace&) = delete;
+ void* operator new(size_t) = delete;
+ void* operator new(size_t, void*) = delete;
+
+ private:
+ // Partition Alloc Address Space
+ // Reserves 32GiB address space for 1 direct map space (16GiB) and 1 normal
+ // bucket space (16GiB).
+ // TODO(bartekn): Look into devices with 39-bit address space that have 256GiB
+ // user-mode space. Libraries loaded at random addresses may stand in the way
+ // of reserving a contiguous 64GiB region. (even though we're requesting only
+ // 32GiB, AllocPages may under the covers reserve 64GiB to satisfy the
+ // alignment requirements)
+ //
+ // +----------------+ reserved_base_address_(32GiB aligned)
+ // | direct map |
+ // | space |
+ // +----------------+ reserved_base_address_ + 16GiB
+ // | normal buckets |
+ // | space |
+ // +----------------+ reserved_base_address_ + 32GiB
+
+ static constexpr size_t kGigaBytes = 1024 * 1024 * 1024;
+ static constexpr size_t kDirectMapPoolSize = 16 * kGigaBytes;
+ static constexpr size_t kNormalBucketPoolSize = 16 * kGigaBytes;
+ static constexpr uintptr_t kNormalBucketPoolOffsetMask =
+ static_cast<uintptr_t>(kNormalBucketPoolSize) - 1;
+ static constexpr uintptr_t kNormalBucketPoolBaseMask =
+ ~kNormalBucketPoolOffsetMask;
+
+ // Reserves 32GiB aligned address space.
+ // We align on 32GiB as well, and since it's a power of two we can check a
+ // pointer with a single bitmask operation.
+ static constexpr size_t kDesiredAddressSpaceSize =
+ kDirectMapPoolSize + kNormalBucketPoolSize;
+ static constexpr size_t kReservedAddressSpaceAlignment =
+ kDesiredAddressSpaceSize;
+ static constexpr uintptr_t kReservedAddressSpaceOffsetMask =
+ static_cast<uintptr_t>(kReservedAddressSpaceAlignment) - 1;
+ static constexpr uintptr_t kReservedAddressSpaceBaseMask =
+ ~kReservedAddressSpaceOffsetMask;
+
+ static_assert(
+ bits::IsPowerOfTwo(PartitionAddressSpace::kReservedAddressSpaceAlignment),
+ "kReservedAddressSpaceALignment should be a power of two.");
+ static_assert(PartitionAddressSpace::kReservedAddressSpaceAlignment >=
+ PartitionAddressSpace::kDesiredAddressSpaceSize,
+ "kReservedAddressSpaceAlignment should be larger or equal to "
+ "kDesiredAddressSpaceSize.");
+ static_assert(
+ PartitionAddressSpace::kReservedAddressSpaceAlignment / 2 <
+ PartitionAddressSpace::kDesiredAddressSpaceSize,
+ "kReservedAddressSpaceAlignment should be the smallest power of "
+ "two greater or equal to kDesiredAddressSpaceSize. So a half of "
+ "the alignment should be smaller than the desired size.");
+
+ // See the comment describing the address layout above.
+ static uintptr_t reserved_base_address_;
+
+ static uintptr_t normal_bucket_pool_base_address_;
+
+ static internal::pool_handle direct_map_pool_;
+ static internal::pool_handle normal_bucket_pool_;
+};
+
+ALWAYS_INLINE internal::pool_handle GetDirectMapPool() {
+ PA_DCHECK(IsPartitionAllocGigaCageEnabled());
+ return PartitionAddressSpace::GetDirectMapPool();
+}
+
+ALWAYS_INLINE internal::pool_handle GetNormalBucketPool() {
+ PA_DCHECK(IsPartitionAllocGigaCageEnabled());
+ return PartitionAddressSpace::GetNormalBucketPool();
+}
+
+#else // defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+
+ALWAYS_INLINE internal::pool_handle GetDirectMapPool() {
+ NOTREACHED();
+ return 0;
+}
+
+ALWAYS_INLINE internal::pool_handle GetNormalBucketPool() {
+ NOTREACHED();
+ return 0;
+}
+
+#endif // defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+
+} // namespace internal
+
+} // namespace base
+
+#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ADDRESS_SPACE_H_
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc.cc b/chromium/base/allocator/partition_allocator/partition_alloc.cc
index a186840ffe3..836daf90f1c 100644
--- a/chromium/base/allocator/partition_allocator/partition_alloc.cc
+++ b/chromium/base/allocator/partition_allocator/partition_alloc.cc
@@ -7,8 +7,10 @@
#include <string.h>
#include <memory>
-#include <type_traits>
+#include "base/allocator/partition_allocator/oom.h"
+#include "base/allocator/partition_allocator/page_allocator_internal.h"
+#include "base/allocator/partition_allocator/partition_alloc_features.h"
#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "base/allocator/partition_allocator/partition_oom.h"
#include "base/allocator/partition_allocator/partition_page.h"
@@ -19,20 +21,30 @@
namespace base {
-namespace {
-
template <bool thread_safe>
-bool InitializeOnce() {
- // We mark the sentinel bucket/page as free to make sure it is skipped by
- // our logic to find a new active page.
- internal::PartitionBucket<thread_safe>::get_sentinel_bucket()
- ->active_pages_head =
- internal::PartitionPage<thread_safe>::get_sentinel_page();
-
- return true;
+NOINLINE void PartitionRoot<thread_safe>::OutOfMemory(size_t size) {
+#if !defined(ARCH_CPU_64_BITS)
+ // Check whether this OOM is due to a lot of super pages that are allocated
+ // but not committed, probably due to http://crbug.com/421387.
+ if (total_size_of_super_pages + total_size_of_direct_mapped_pages -
+ total_size_of_committed_pages >
+ kReasonableSizeOfUnusedPages) {
+ internal::PartitionOutOfMemoryWithLotsOfUncommitedPages(size);
+ }
+#endif
+ if (internal::g_oom_handling_function)
+ (*internal::g_oom_handling_function)(size);
+ OOM_CRASH(size);
}
-} // namespace
+template <bool thread_safe>
+void PartitionRoot<thread_safe>::DecommitEmptyPages() {
+ for (Page*& page : global_empty_page_ring) {
+ if (page)
+ page->DecommitIfPossible(this);
+ page = nullptr;
+ }
+}
// Two partition pages are used as guard / metadata page so make sure the super
// page size is bigger.
@@ -50,10 +62,6 @@ static_assert(sizeof(internal::PartitionPage<internal::ThreadSafe>) <=
static_assert(sizeof(internal::PartitionBucket<internal::ThreadSafe>) <=
kPageMetadataSize,
"PartitionBucket should not be too big");
-static_assert(
- sizeof(internal::PartitionSuperPageExtentEntry<internal::ThreadSafe>) <=
- kPageMetadataSize,
- "PartitionSuperPageExtentEntry should not be too big");
static_assert(kPageMetadataSize * kNumPartitionPagesPerSuperPage <=
kSystemPageSize,
"page metadata fits in hole");
@@ -62,17 +70,12 @@ static_assert(kGenericMaxDirectMapped <=
(1UL << 31) + kPageAllocationGranularity,
"maximum direct mapped allocation");
// Check that some of our zanier calculations worked out as expected.
-static_assert(kGenericSmallestBucket == 8, "generic smallest bucket");
+static_assert(kGenericSmallestBucket == alignof(std::max_align_t),
+ "generic smallest bucket");
static_assert(kGenericMaxBucketed == 983040, "generic max bucketed");
static_assert(kMaxSystemPagesPerSlotSpan < (1 << 8),
"System pages per slot span must be less than 128.");
-PartitionRoot::PartitionRoot() = default;
-PartitionRoot::~PartitionRoot() = default;
-PartitionRootGeneric::PartitionRootGeneric() = default;
-PartitionRootGeneric::~PartitionRootGeneric() = default;
-PartitionAllocatorGeneric::PartitionAllocatorGeneric() = default;
-
Lock& GetHooksLock() {
static NoDestructor<Lock> lock;
return *lock;
@@ -97,8 +100,8 @@ void PartitionAllocHooks::SetObserverHooks(AllocationObserverHook* alloc_hook,
// Chained hooks are not supported. Registering a non-null hook when a
// non-null hook is already registered indicates somebody is trying to
// overwrite a hook.
- CHECK((!allocation_observer_hook_ && !free_observer_hook_) ||
- (!alloc_hook && !free_hook))
+ PA_CHECK((!allocation_observer_hook_ && !free_observer_hook_) ||
+ (!alloc_hook && !free_hook))
<< "Overwriting already set observer hooks";
allocation_observer_hook_ = alloc_hook;
free_observer_hook_ = free_hook;
@@ -111,9 +114,9 @@ void PartitionAllocHooks::SetOverrideHooks(AllocationOverrideHook* alloc_hook,
ReallocOverrideHook realloc_hook) {
AutoLock guard(GetHooksLock());
- CHECK((!allocation_override_hook_ && !free_override_hook_ &&
- !realloc_override_hook_) ||
- (!alloc_hook && !free_hook && !realloc_hook))
+ PA_CHECK((!allocation_override_hook_ && !free_override_hook_ &&
+ !realloc_override_hook_) ||
+ (!alloc_hook && !free_hook && !realloc_hook))
<< "Overwriting already set override hooks";
allocation_override_hook_ = alloc_hook;
free_override_hook_ = free_hook;
@@ -126,10 +129,8 @@ void PartitionAllocHooks::AllocationObserverHookIfEnabled(
void* address,
size_t size,
const char* type_name) {
- if (AllocationObserverHook* hook =
- allocation_observer_hook_.load(std::memory_order_relaxed)) {
+ if (auto* hook = allocation_observer_hook_.load(std::memory_order_relaxed))
hook(address, size, type_name);
- }
}
bool PartitionAllocHooks::AllocationOverrideHookIfEnabled(
@@ -137,25 +138,19 @@ bool PartitionAllocHooks::AllocationOverrideHookIfEnabled(
int flags,
size_t size,
const char* type_name) {
- if (AllocationOverrideHook* hook =
- allocation_override_hook_.load(std::memory_order_relaxed)) {
+ if (auto* hook = allocation_override_hook_.load(std::memory_order_relaxed))
return hook(out, flags, size, type_name);
- }
return false;
}
void PartitionAllocHooks::FreeObserverHookIfEnabled(void* address) {
- if (FreeObserverHook* hook =
- free_observer_hook_.load(std::memory_order_relaxed)) {
+ if (auto* hook = free_observer_hook_.load(std::memory_order_relaxed))
hook(address);
- }
}
bool PartitionAllocHooks::FreeOverrideHookIfEnabled(void* address) {
- if (FreeOverrideHook* hook =
- free_override_hook_.load(std::memory_order_relaxed)) {
+ if (auto* hook = free_override_hook_.load(std::memory_order_relaxed))
return hook(address);
- }
return false;
}
@@ -173,6 +168,7 @@ void PartitionAllocHooks::ReallocObserverHookIfEnabled(void* old_address,
allocation_hook(new_address, size, type_name);
}
}
+
bool PartitionAllocHooks::ReallocOverrideHookIfEnabled(size_t* out,
void* address) {
if (ReallocOverrideHook* hook =
@@ -182,39 +178,41 @@ bool PartitionAllocHooks::ReallocOverrideHookIfEnabled(size_t* out,
return false;
}
-template <bool thread_safe>
-static void PartitionAllocBaseInit(
- internal::PartitionRootBase<thread_safe>* root) {
- DCHECK(!root->initialized);
-
- static bool intialized = InitializeOnce<thread_safe>();
- static_cast<void>(intialized);
-
- // This is a "magic" value so we can test if a root pointer is valid.
- root->inverted_self = ~reinterpret_cast<uintptr_t>(root);
- root->initialized = true;
-}
-
void PartitionAllocGlobalInit(OomFunction on_out_of_memory) {
- DCHECK(on_out_of_memory);
+ PA_DCHECK(on_out_of_memory);
internal::g_oom_handling_function = on_out_of_memory;
-}
-void PartitionRoot::Init(size_t bucket_count, size_t maximum_allocation) {
- PartitionAllocBaseInit(this);
+#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+ // Reserve address space for partition alloc.
+ if (IsPartitionAllocGigaCageEnabled())
+ internal::PartitionAddressSpace::Init();
+#endif
+}
- num_buckets = bucket_count;
- max_allocation = maximum_allocation;
- for (size_t i = 0; i < num_buckets; ++i) {
- Bucket& bucket = buckets()[i];
- bucket.Init(i == 0 ? kAllocationGranularity : (i << kBucketShift));
- }
+void PartitionAllocGlobalUninitForTesting() {
+#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+ if (IsPartitionAllocGigaCageEnabled())
+ internal::PartitionAddressSpace::UninitForTesting();
+#endif
+ internal::g_oom_handling_function = nullptr;
}
-void PartitionRootGeneric::Init() {
+template <bool thread_safe>
+void PartitionRoot<thread_safe>::InitSlowPath() {
ScopedGuard guard{lock_};
- PartitionAllocBaseInit(this);
+ if (initialized.load(std::memory_order_relaxed))
+ return;
+
+ // We mark the sentinel bucket/page as free to make sure it is skipped by our
+ // logic to find a new active page.
+ //
+ // This may be executed several times, once per PartitionRoot. This is not an
+ // issue, as the operation is atomic and idempotent.
+ Bucket::get_sentinel_bucket()->active_pages_head = Page::get_sentinel_page();
+
+ // This is a "magic" value so we can test if a root pointer is valid.
+ inverted_self = ~reinterpret_cast<uintptr_t>(this);
// Precalculate some shift and mask constants used in the hot path.
// Example: malloc(41) == 101001 binary.
@@ -265,8 +263,8 @@ void PartitionRootGeneric::Init() {
}
current_increment <<= 1;
}
- DCHECK(current_size == 1 << kGenericMaxBucketedOrder);
- DCHECK(bucket == &buckets[0] + kGenericNumBuckets);
+ PA_DCHECK(current_size == 1 << kGenericMaxBucketedOrder);
+ PA_DCHECK(bucket == &buckets[0] + kGenericNumBuckets);
// Then set up the fast size -> bucket lookup table.
bucket = &buckets[0];
@@ -288,38 +286,37 @@ void PartitionRootGeneric::Init() {
}
}
}
- DCHECK(bucket == &buckets[0] + kGenericNumBuckets);
- DCHECK(bucket_ptr == &bucket_lookups[0] +
- ((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder));
+ PA_DCHECK(bucket == &buckets[0] + kGenericNumBuckets);
+ PA_DCHECK(bucket_ptr == &bucket_lookups[0] + ((kBitsPerSizeT + 1) *
+ kGenericNumBucketsPerOrder));
// And there's one last bucket lookup that will be hit for e.g. malloc(-1),
// which tries to overflow to a non-existent order.
*bucket_ptr = Bucket::get_sentinel_bucket();
+
+ initialized = true;
}
-bool PartitionReallocDirectMappedInPlace(
- PartitionRootGeneric* root,
- internal::PartitionPage<internal::ThreadSafe>* page,
- size_t raw_size) EXCLUSIVE_LOCKS_REQUIRED(root->lock_) {
- DCHECK(page->bucket->is_direct_mapped());
+template <bool thread_safe>
+bool PartitionRoot<thread_safe>::ReallocDirectMappedInPlace(
+ internal::PartitionPage<thread_safe>* page,
+ size_t raw_size) {
+ PA_DCHECK(page->bucket->is_direct_mapped());
raw_size = internal::PartitionCookieSizeAdjustAdd(raw_size);
// Note that the new size might be a bucketed size; this function is called
// whenever we're reallocating a direct mapped allocation.
- size_t new_size = PartitionRootGeneric::Bucket::get_direct_map_size(raw_size);
+ size_t new_size = Bucket::get_direct_map_size(raw_size);
if (new_size < kGenericMinDirectMappedDownsize)
return false;
// bucket->slot_size is the current size of the allocation.
size_t current_size = page->bucket->slot_size;
- char* char_ptr =
- static_cast<char*>(PartitionRootGeneric::Page::ToPointer(page));
+ char* char_ptr = static_cast<char*>(Page::ToPointer(page));
if (new_size == current_size) {
// No need to move any memory around, but update size and cookie below.
} else if (new_size < current_size) {
- size_t map_size =
- internal::PartitionDirectMapExtent<internal::ThreadSafe>::FromPage(page)
- ->map_size;
+ size_t map_size = DirectMapExtent::FromPage(page)->map_size;
// Don't reallocate in-place if new size is less than 80 % of the full
// map size, to avoid holding on to too much unused address space.
@@ -328,17 +325,14 @@ bool PartitionReallocDirectMappedInPlace(
// Shrink by decommitting unneeded pages and making them inaccessible.
size_t decommit_size = current_size - new_size;
- root->DecommitSystemPages(char_ptr + new_size, decommit_size);
+ DecommitSystemPages(char_ptr + new_size, decommit_size);
SetSystemPagesAccess(char_ptr + new_size, decommit_size, PageInaccessible);
- } else if (new_size <=
- internal::PartitionDirectMapExtent<internal::ThreadSafe>::FromPage(
- page)
- ->map_size) {
+ } else if (new_size <= DirectMapExtent::FromPage(page)->map_size) {
// Grow within the actually allocated memory. Just need to make the
// pages accessible again.
size_t recommit_size = new_size - current_size;
SetSystemPagesAccess(char_ptr + current_size, recommit_size, PageReadWrite);
- root->RecommitSystemPages(char_ptr + current_size, recommit_size);
+ RecommitSystemPages(char_ptr + current_size, recommit_size);
#if DCHECK_IS_ON()
memset(char_ptr + current_size, kUninitializedByte, recommit_size);
@@ -356,27 +350,27 @@ bool PartitionReallocDirectMappedInPlace(
#endif
page->set_raw_size(raw_size);
- DCHECK(page->get_raw_size() == raw_size);
+ PA_DCHECK(page->get_raw_size() == raw_size);
page->bucket->slot_size = new_size;
return true;
}
-void* PartitionReallocGenericFlags(PartitionRootGeneric* root,
- int flags,
- void* ptr,
- size_t new_size,
- const char* type_name) {
+template <bool thread_safe>
+void* PartitionRoot<thread_safe>::ReallocFlags(int flags,
+ void* ptr,
+ size_t new_size,
+ const char* type_name) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
CHECK_MAX_SIZE_OR_RETURN_NULLPTR(new_size, flags);
void* result = realloc(ptr, new_size);
- CHECK(result || flags & PartitionAllocReturnNull);
+ PA_CHECK(result || flags & PartitionAllocReturnNull);
return result;
#else
if (UNLIKELY(!ptr))
- return PartitionAllocGenericFlags(root, flags, new_size, type_name);
+ return AllocFlags(flags, new_size, type_name);
if (UNLIKELY(!new_size)) {
- root->Free(ptr);
+ Free(ptr);
return nullptr;
}
@@ -394,19 +388,19 @@ void* PartitionReallocGenericFlags(PartitionRootGeneric* root,
&actual_old_size, ptr);
}
if (LIKELY(!overridden)) {
- PartitionRootGeneric::Page* page = PartitionRootGeneric::Page::FromPointer(
- internal::PartitionCookieFreePointerAdjust(ptr));
+ auto* page =
+ Page::FromPointer(internal::PartitionCookieFreePointerAdjust(ptr));
bool success = false;
{
- PartitionRootGeneric::ScopedGuard guard{root->lock_};
+ internal::ScopedGuard<thread_safe> guard{lock_};
// TODO(palmer): See if we can afford to make this a CHECK.
- DCHECK(root->IsValidPage(page));
+ PA_DCHECK(IsValidPage(page));
if (UNLIKELY(page->bucket->is_direct_mapped())) {
// We may be able to perform the realloc in place by changing the
// accessibility of memory pages and, if reducing the size, decommitting
// them.
- success = PartitionReallocDirectMappedInPlace(root, page, new_size);
+ success = ReallocDirectMappedInPlace(page, new_size);
}
}
if (success) {
@@ -417,8 +411,8 @@ void* PartitionReallocGenericFlags(PartitionRootGeneric* root,
return ptr;
}
- const size_t actual_new_size = root->ActualSize(new_size);
- actual_old_size = PartitionAllocGetSize(ptr);
+ const size_t actual_new_size = ActualSize(new_size);
+ actual_old_size = PartitionAllocGetSize<thread_safe>(ptr);
// TODO: note that tcmalloc will "ignore" a downsizing realloc() unless the
// new size is a significant percentage smaller. We could do the same if we
@@ -439,7 +433,7 @@ void* PartitionReallocGenericFlags(PartitionRootGeneric* root,
}
// This realloc cannot be resized in-place. Sadness.
- void* ret = PartitionAllocGenericFlags(root, flags, new_size, type_name);
+ void* ret = AllocFlags(flags, new_size, type_name);
if (!ret) {
if (flags & PartitionAllocReturnNull)
return nullptr;
@@ -451,22 +445,9 @@ void* PartitionReallocGenericFlags(PartitionRootGeneric* root,
copy_size = new_size;
memcpy(ret, ptr, copy_size);
- root->Free(ptr);
+ Free(ptr);
return ret;
#endif
-} // namespace base
-
-void* PartitionRootGeneric::Realloc(void* ptr,
- size_t new_size,
- const char* type_name) {
- return PartitionReallocGenericFlags(this, 0, ptr, new_size, type_name);
-}
-
-void* PartitionRootGeneric::TryRealloc(void* ptr,
- size_t new_size,
- const char* type_name) {
- return PartitionReallocGenericFlags(this, PartitionAllocReturnNull, ptr,
- new_size, type_name);
}
template <bool thread_safe>
@@ -495,8 +476,8 @@ static size_t PartitionPurgePage(internal::PartitionPage<thread_safe>* page,
constexpr size_t kMaxSlotCount =
(kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) / kSystemPageSize;
- DCHECK(bucket_num_slots <= kMaxSlotCount);
- DCHECK(page->num_unprovisioned_slots < bucket_num_slots);
+ PA_DCHECK(bucket_num_slots <= kMaxSlotCount);
+ PA_DCHECK(page->num_unprovisioned_slots < bucket_num_slots);
size_t num_slots = bucket_num_slots - page->num_unprovisioned_slots;
char slot_usage[kMaxSlotCount];
#if !defined(OS_WIN)
@@ -512,7 +493,7 @@ static size_t PartitionPurgePage(internal::PartitionPage<thread_safe>* page,
for (internal::PartitionFreelistEntry* entry = page->freelist_head; entry;
/**/) {
size_t slot_index = (reinterpret_cast<char*>(entry) - ptr) / slot_size;
- DCHECK(slot_index < num_slots);
+ PA_DCHECK(slot_index < num_slots);
slot_usage[slot_index] = 0;
entry = internal::EncodedPartitionFreelistEntry::Decode(entry->next);
#if !defined(OS_WIN)
@@ -532,7 +513,7 @@ static size_t PartitionPurgePage(internal::PartitionPage<thread_safe>* page,
while (!slot_usage[num_slots - 1]) {
truncated_slots++;
num_slots--;
- DCHECK(num_slots);
+ PA_DCHECK(num_slots);
}
// First, do the work of calculating the discardable bytes. Don't actually
// discard anything unless the discard flag was passed in.
@@ -546,13 +527,13 @@ static size_t PartitionPurgePage(internal::PartitionPage<thread_safe>* page,
// a slot span, so we "own" all the way up the page boundary.
end_ptr = reinterpret_cast<char*>(
RoundUpToSystemPage(reinterpret_cast<size_t>(end_ptr)));
- DCHECK(end_ptr <= ptr + bucket->get_bytes_per_span());
+ PA_DCHECK(end_ptr <= ptr + bucket->get_bytes_per_span());
if (begin_ptr < end_ptr) {
unprovisioned_bytes = end_ptr - begin_ptr;
discardable_bytes += unprovisioned_bytes;
}
if (unprovisioned_bytes && discard) {
- DCHECK(truncated_slots > 0);
+ PA_DCHECK(truncated_slots > 0);
size_t num_new_entries = 0;
page->num_unprovisioned_slots += static_cast<uint16_t>(truncated_slots);
@@ -582,7 +563,7 @@ static size_t PartitionPurgePage(internal::PartitionPage<thread_safe>* page,
if (back)
back->next = internal::PartitionFreelistEntry::Encode(nullptr);
- DCHECK(num_new_entries == num_slots - page->num_allocated_slots);
+ PA_DCHECK(num_new_entries == num_slots - page->num_allocated_slots);
// Discard the memory.
DiscardSystemPages(begin_ptr, unprovisioned_bytes);
}
@@ -626,23 +607,15 @@ static void PartitionPurgeBucket(
internal::PartitionPage<thread_safe>::get_sentinel_page()) {
for (internal::PartitionPage<thread_safe>* page = bucket->active_pages_head;
page; page = page->next_page) {
- DCHECK(page != internal::PartitionPage<thread_safe>::get_sentinel_page());
+ PA_DCHECK(page !=
+ internal::PartitionPage<thread_safe>::get_sentinel_page());
PartitionPurgePage(page, true);
}
}
}
-void PartitionRoot::PurgeMemory(int flags) {
- ScopedGuard guard{lock_};
- if (flags & PartitionPurgeDecommitEmptyPages)
- DecommitEmptyPages();
- // We don't currently do anything for PartitionPurgeDiscardUnusedSystemPages
- // here because that flag is only useful for allocations >= system page size.
- // We only have allocations that large inside generic partitions at the
- // moment.
-}
-
-void PartitionRootGeneric::PurgeMemory(int flags) {
+template <bool thread_safe>
+void PartitionRoot<thread_safe>::PurgeMemory(int flags) {
ScopedGuard guard{lock_};
if (flags & PartitionPurgeDecommitEmptyPages)
DecommitEmptyPages();
@@ -685,7 +658,7 @@ static void PartitionDumpPageStats(PartitionBucketMemoryStats* stats_out,
} else if (page->is_full()) {
++stats_out->num_full_pages;
} else {
- DCHECK(page->is_active());
+ PA_DCHECK(page->is_active());
++stats_out->num_active_pages;
}
}
@@ -694,7 +667,7 @@ template <bool thread_safe>
static void PartitionDumpBucketStats(
PartitionBucketMemoryStats* stats_out,
const internal::PartitionBucket<thread_safe>* bucket) {
- DCHECK(!bucket->is_direct_mapped());
+ PA_DCHECK(!bucket->is_direct_mapped());
stats_out->is_valid = false;
// If the active page list is empty (==
// internal::PartitionPage::get_sentinel_page()), the bucket might still need
@@ -719,13 +692,13 @@ static void PartitionDumpBucketStats(
for (internal::PartitionPage<thread_safe>* page = bucket->empty_pages_head;
page; page = page->next_page) {
- DCHECK(page->is_empty() || page->is_decommitted());
+ PA_DCHECK(page->is_empty() || page->is_decommitted());
PartitionDumpPageStats(stats_out, page);
}
for (internal::PartitionPage<thread_safe>* page =
bucket->decommitted_pages_head;
page; page = page->next_page) {
- DCHECK(page->is_decommitted());
+ PA_DCHECK(page->is_decommitted());
PartitionDumpPageStats(stats_out, page);
}
@@ -733,15 +706,17 @@ static void PartitionDumpBucketStats(
internal::PartitionPage<thread_safe>::get_sentinel_page()) {
for (internal::PartitionPage<thread_safe>* page = bucket->active_pages_head;
page; page = page->next_page) {
- DCHECK(page != internal::PartitionPage<thread_safe>::get_sentinel_page());
+ PA_DCHECK(page !=
+ internal::PartitionPage<thread_safe>::get_sentinel_page());
PartitionDumpPageStats(stats_out, page);
}
}
}
-void PartitionRootGeneric::DumpStats(const char* partition_name,
- bool is_light_dump,
- PartitionStatsDumper* dumper) {
+template <bool thread_safe>
+void PartitionRoot<thread_safe>::DumpStats(const char* partition_name,
+ bool is_light_dump,
+ PartitionStatsDumper* dumper) {
ScopedGuard guard{lock_};
PartitionMemoryStats stats = {0};
stats.total_mmapped_bytes =
@@ -767,7 +742,7 @@ void PartitionRootGeneric::DumpStats(const char* partition_name,
const Bucket* bucket = &buckets[i];
// Don't report the pseudo buckets that the generic allocator sets up in
// order to preserve a fast size->bucket map (see
- // PartitionRootGeneric::Init() for details).
+ // PartitionRoot::Init() for details).
if (!bucket->active_pages_head)
bucket_stats[i].is_valid = false;
else
@@ -780,12 +755,11 @@ void PartitionRootGeneric::DumpStats(const char* partition_name,
}
}
- for (internal::PartitionDirectMapExtent<internal::ThreadSafe>* extent =
- direct_map_list;
+ for (DirectMapExtent* extent = direct_map_list;
extent && num_direct_mapped_allocations < kMaxReportableDirectMaps;
extent = extent->next_extent, ++num_direct_mapped_allocations) {
- DCHECK(!extent->next_extent ||
- extent->next_extent->prev_extent == extent);
+ PA_DCHECK(!extent->next_extent ||
+ extent->next_extent->prev_extent == extent);
size_t slot_size = extent->bucket->slot_size;
direct_mapped_allocations_total_size += slot_size;
if (is_light_dump)
@@ -796,7 +770,7 @@ void PartitionRootGeneric::DumpStats(const char* partition_name,
if (!is_light_dump) {
// Call |PartitionsDumpBucketStats| after collecting stats because it can
- // try to allocate using |PartitionRootGeneric::Alloc()| and it can't
+ // try to allocate using |PartitionRoot::Alloc()| and it can't
// obtain the lock.
for (size_t i = 0; i < kGenericNumBuckets; ++i) {
if (bucket_stats[i].is_valid)
@@ -823,52 +797,7 @@ void PartitionRootGeneric::DumpStats(const char* partition_name,
dumper->PartitionDumpTotals(partition_name, &stats);
}
-void PartitionRoot::DumpStats(const char* partition_name,
- bool is_light_dump,
- PartitionStatsDumper* dumper) {
- ScopedGuard guard{lock_};
-
- PartitionMemoryStats stats = {0};
- stats.total_mmapped_bytes = total_size_of_super_pages;
- stats.total_committed_bytes = total_size_of_committed_pages;
- DCHECK(!total_size_of_direct_mapped_pages);
-
- static constexpr size_t kMaxReportableBuckets = 4096 / sizeof(void*);
- std::unique_ptr<PartitionBucketMemoryStats[]> memory_stats;
- if (!is_light_dump) {
- memory_stats =
- std::make_unique<PartitionBucketMemoryStats[]>(kMaxReportableBuckets);
- }
-
- const size_t partition_num_buckets = num_buckets;
- DCHECK(partition_num_buckets <= kMaxReportableBuckets);
-
- for (size_t i = 0; i < partition_num_buckets; ++i) {
- PartitionBucketMemoryStats bucket_stats = {0};
- PartitionDumpBucketStats(&bucket_stats, &buckets()[i]);
- if (bucket_stats.is_valid) {
- stats.total_resident_bytes += bucket_stats.resident_bytes;
- stats.total_active_bytes += bucket_stats.active_bytes;
- stats.total_decommittable_bytes += bucket_stats.decommittable_bytes;
- stats.total_discardable_bytes += bucket_stats.discardable_bytes;
- }
- if (!is_light_dump) {
- if (bucket_stats.is_valid)
- memory_stats[i] = bucket_stats;
- else
- memory_stats[i].is_valid = false;
- }
- }
- if (!is_light_dump) {
- // PartitionsDumpBucketStats is called after collecting stats because it
- // can use PartitionRoot::Alloc() to allocate and this can affect the
- // statistics.
- for (size_t i = 0; i < partition_num_buckets; ++i) {
- if (memory_stats[i].is_valid)
- dumper->PartitionsDumpBucketStats(partition_name, &memory_stats[i]);
- }
- }
- dumper->PartitionDumpTotals(partition_name, &stats);
-}
+template struct BASE_EXPORT PartitionRoot<internal::ThreadSafe>;
+template struct BASE_EXPORT PartitionRoot<internal::NotThreadSafe>;
} // namespace base
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc.h b/chromium/base/allocator/partition_allocator/partition_alloc.h
index ef635e78291..662063ddde7 100644
--- a/chromium/base/allocator/partition_allocator/partition_alloc.h
+++ b/chromium/base/allocator/partition_allocator/partition_alloc.h
@@ -6,38 +6,29 @@
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H_
// DESCRIPTION
-// PartitionRoot::Alloc() / PartitionRootGeneric::Alloc() and PartitionFree() /
-// PartitionRootGeneric::Free() are approximately analagous to malloc() and
-// free().
+// PartitionRoot::Alloc() and PartitionRoot::Free() are approximately analogous
+// to malloc() and free().
//
-// The main difference is that a PartitionRoot / PartitionRootGeneric object
-// must be supplied to these functions, representing a specific "heap partition"
-// that will be used to satisfy the allocation. Different partitions are
-// guaranteed to exist in separate address spaces, including being separate from
-// the main system heap. If the contained objects are all freed, physical memory
-// is returned to the system but the address space remains reserved.
-// See PartitionAlloc.md for other security properties PartitionAlloc provides.
+// The main difference is that a PartitionRoot object must be supplied to these
+// functions, representing a specific "heap partition" that will be used to
+// satisfy the allocation. Different partitions are guaranteed to exist in
+// separate address spaces, including being separate from the main system
+// heap. If the contained objects are all freed, physical memory is returned to
+// the system but the address space remains reserved. See PartitionAlloc.md for
+// other security properties PartitionAlloc provides.
//
// THE ONLY LEGITIMATE WAY TO OBTAIN A PartitionRoot IS THROUGH THE
-// SizeSpecificPartitionAllocator / PartitionAllocatorGeneric classes. To
-// minimize the instruction count to the fullest extent possible, the
-// PartitionRoot is really just a header adjacent to other data areas provided
-// by the allocator class.
+// PartitionAllocator classes. To minimize the instruction count to the fullest
+// extent possible, the PartitionRoot is really just a header adjacent to other
+// data areas provided by the allocator class.
//
-// The PartitionRoot::Alloc() variant of the API has the following caveats:
-// - Allocations and frees against a single partition must be single threaded.
-// - Allocations must not exceed a max size, chosen at compile-time via a
-// templated parameter to PartitionAllocator.
-// - Allocation sizes must be aligned to the system pointer size.
-// - Allocations are bucketed exactly according to size.
-//
-// And for PartitionRootGeneric::Alloc():
+// The constraints for PartitionRoot::Alloc() are:
// - Multi-threaded use against a single partition is ok; locking is handled.
// - Allocations of any arbitrary size can be handled (subject to a limit of
-// INT_MAX bytes for security reasons).
+// INT_MAX bytes for security reasons).
// - Bucketing is by approximate size, for example an allocation of 4000 bytes
-// might be placed into a 4096-byte bucket. Bucket sizes are chosen to try and
-// keep worst-case waste to ~10%.
+// might be placed into a 4096-byte bucket. Bucket sizes are chosen to try and
+// keep worst-case waste to ~10%.
//
// The allocators are designed to be extremely fast, thanks to the following
// properties and design:
@@ -65,19 +56,25 @@
#include "base/allocator/partition_allocator/memory_reclaimer.h"
#include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/allocator/partition_allocator/partition_address_space.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_bucket.h"
#include "base/allocator/partition_allocator/partition_cookie.h"
+#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "base/allocator/partition_allocator/partition_page.h"
-#include "base/allocator/partition_allocator/partition_root_base.h"
#include "base/allocator/partition_allocator/spin_lock.h"
#include "base/base_export.h"
#include "base/bits.h"
+#include "base/check_op.h"
#include "base/compiler_specific.h"
-#include "base/logging.h"
+#include "base/notreached.h"
+#include "base/partition_alloc_buildflags.h"
#include "base/stl_util.h"
#include "base/sys_byteorder.h"
#include "build/build_config.h"
+#include "build/buildflag.h"
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
#include <stdlib.h>
@@ -90,86 +87,171 @@
if (flags & PartitionAllocReturnNull) { \
return nullptr; \
} \
- CHECK(false); \
+ PA_CHECK(false); \
}
namespace base {
-class PartitionStatsDumper;
+typedef void (*OomFunction)(size_t);
-enum PartitionPurgeFlags {
- // Decommitting the ring list of empty pages is reasonably fast.
- PartitionPurgeDecommitEmptyPages = 1 << 0,
- // Discarding unused system pages is slower, because it involves walking all
- // freelists in all active partition pages of all buckets >= system page
- // size. It often frees a similar amount of memory to decommitting the empty
- // pages, though.
- PartitionPurgeDiscardUnusedSystemPages = 1 << 1,
+// PartitionAlloc supports setting hooks to observe allocations/frees as they
+// occur as well as 'override' hooks that allow overriding those operations.
+class BASE_EXPORT PartitionAllocHooks {
+ public:
+ // Log allocation and free events.
+ typedef void AllocationObserverHook(void* address,
+ size_t size,
+ const char* type_name);
+ typedef void FreeObserverHook(void* address);
+
+ // If it returns true, the allocation has been overridden with the pointer in
+ // *out.
+ typedef bool AllocationOverrideHook(void** out,
+ int flags,
+ size_t size,
+ const char* type_name);
+ // If it returns true, then the allocation was overridden and has been freed.
+ typedef bool FreeOverrideHook(void* address);
+ // If it returns true, the underlying allocation is overridden and *out holds
+ // the size of the underlying allocation.
+ typedef bool ReallocOverrideHook(size_t* out, void* address);
+
+ // To unhook, call Set*Hooks with nullptrs.
+ static void SetObserverHooks(AllocationObserverHook* alloc_hook,
+ FreeObserverHook* free_hook);
+ static void SetOverrideHooks(AllocationOverrideHook* alloc_hook,
+ FreeOverrideHook* free_hook,
+ ReallocOverrideHook realloc_hook);
+
+ // Helper method to check whether hooks are enabled. This is an optimization
+ // so that if a function needs to call observer and override hooks in two
+ // different places this value can be cached and only loaded once.
+ static bool AreHooksEnabled() {
+ return hooks_enabled_.load(std::memory_order_relaxed);
+ }
+
+ static void AllocationObserverHookIfEnabled(void* address,
+ size_t size,
+ const char* type_name);
+ static bool AllocationOverrideHookIfEnabled(void** out,
+ int flags,
+ size_t size,
+ const char* type_name);
+
+ static void FreeObserverHookIfEnabled(void* address);
+ static bool FreeOverrideHookIfEnabled(void* address);
+
+ static void ReallocObserverHookIfEnabled(void* old_address,
+ void* new_address,
+ size_t size,
+ const char* type_name);
+ static bool ReallocOverrideHookIfEnabled(size_t* out, void* address);
+
+ private:
+ // Single bool that is used to indicate whether observer or allocation hooks
+ // are set to reduce the numbers of loads required to check whether hooking is
+ // enabled.
+ static std::atomic<bool> hooks_enabled_;
+
+ // Lock used to synchronize Set*Hooks calls.
+ static std::atomic<AllocationObserverHook*> allocation_observer_hook_;
+ static std::atomic<FreeObserverHook*> free_observer_hook_;
+
+ static std::atomic<AllocationOverrideHook*> allocation_override_hook_;
+ static std::atomic<FreeOverrideHook*> free_override_hook_;
+ static std::atomic<ReallocOverrideHook*> realloc_override_hook_;
};
-// Never instantiate a PartitionRoot directly, instead use PartitionAlloc.
-struct BASE_EXPORT PartitionRoot
- : public internal::PartitionRootBase<internal::NotThreadSafe> {
- PartitionRoot();
- ~PartitionRoot() override;
- // This references the buckets OFF the edge of this struct. All uses of
- // PartitionRoot must have the bucket array come right after.
- //
- // The PartitionAlloc templated class ensures the following is correct.
- ALWAYS_INLINE Bucket* buckets() {
- return reinterpret_cast<Bucket*>(this + 1);
- }
- ALWAYS_INLINE const Bucket* buckets() const {
- return reinterpret_cast<const Bucket*>(this + 1);
- }
+namespace internal {
- void Init(size_t bucket_count, size_t maximum_allocation);
+template <bool thread_safe>
+class LOCKABLE MaybeSpinLock {
+ public:
+ void Lock() EXCLUSIVE_LOCK_FUNCTION() {}
+ void Unlock() UNLOCK_FUNCTION() {}
+ void AssertAcquired() const ASSERT_EXCLUSIVE_LOCK() {}
+};
- ALWAYS_INLINE void* Alloc(size_t size, const char* type_name);
- ALWAYS_INLINE void* AllocFlags(int flags, size_t size, const char* type_name);
+template <bool thread_safe>
+class SCOPED_LOCKABLE ScopedGuard {
+ public:
+ explicit ScopedGuard(MaybeSpinLock<thread_safe>& lock)
+ EXCLUSIVE_LOCK_FUNCTION(lock)
+ : lock_(lock) {
+ lock_.Lock();
+ }
+ ~ScopedGuard() UNLOCK_FUNCTION() { lock_.Unlock(); }
- void PurgeMemory(int flags) override;
+ private:
+ MaybeSpinLock<thread_safe>& lock_;
+};
- void DumpStats(const char* partition_name,
- bool is_light_dump,
- PartitionStatsDumper* dumper);
+#if DCHECK_IS_ON()
+template <>
+class LOCKABLE MaybeSpinLock<ThreadSafe> {
+ public:
+ MaybeSpinLock() : lock_() {}
+ void Lock() EXCLUSIVE_LOCK_FUNCTION() { lock_->Acquire(); }
+ void Unlock() UNLOCK_FUNCTION() { lock_->Release(); }
+ void AssertAcquired() const ASSERT_EXCLUSIVE_LOCK() {
+ lock_->AssertAcquired();
+ }
+
+ private:
+ // NoDestructor to avoid issues with the "static destruction order fiasco".
+ //
+ // This also means that for DCHECK_IS_ON() builds we leak a lock when a
+ // partition is destructed. This will in practice only show in some tests, as
+ // partitons are not destructed in regular use. In addition, on most
+ // platforms, base::Lock doesn't allocate memory and neither does the OS
+ // library, and the destructor is a no-op.
+ base::NoDestructor<base::Lock> lock_;
};
-// Never instantiate a PartitionRootGeneric directly, instead use
-// PartitionAllocatorGeneric.
-struct BASE_EXPORT PartitionRootGeneric
- : public internal::PartitionRootBase<internal::ThreadSafe> {
- PartitionRootGeneric();
- ~PartitionRootGeneric() override;
- // Some pre-computed constants.
- size_t order_index_shifts[kBitsPerSizeT + 1] = {};
- size_t order_sub_index_masks[kBitsPerSizeT + 1] = {};
- // The bucket lookup table lets us map a size_t to a bucket quickly.
- // The trailing +1 caters for the overflow case for very large allocation
- // sizes. It is one flat array instead of a 2D array because in the 2D
- // world, we'd need to index array[blah][max+1] which risks undefined
- // behavior.
- Bucket* bucket_lookups[((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder) +
- 1] = {};
- Bucket buckets[kGenericNumBuckets] = {};
+#else
+template <>
+class LOCKABLE MaybeSpinLock<ThreadSafe> {
+ public:
+ void Lock() EXCLUSIVE_LOCK_FUNCTION() { lock_.lock(); }
+ void Unlock() UNLOCK_FUNCTION() { lock_.unlock(); }
+ void AssertAcquired() const ASSERT_EXCLUSIVE_LOCK() {
+ // Not supported by subtle::SpinLock.
+ }
- // Public API.
- void Init();
+ private:
+ subtle::SpinLock lock_;
+};
+#endif // DCHECK_IS_ON()
+
+// An "extent" is a span of consecutive superpages. We link to the partition's
+// next extent (if there is one) to the very start of a superpage's metadata
+// area.
+template <bool thread_safe>
+struct PartitionSuperPageExtentEntry {
+ PartitionRoot<thread_safe>* root;
+ char* super_page_base;
+ char* super_pages_end;
+ PartitionSuperPageExtentEntry<thread_safe>* next;
+};
+static_assert(
+ sizeof(PartitionSuperPageExtentEntry<ThreadSafe>) <= kPageMetadataSize,
+ "PartitionSuperPageExtentEntry must be able to fit in a metadata slot");
- ALWAYS_INLINE void* Alloc(size_t size, const char* type_name);
- ALWAYS_INLINE void* AllocFlags(int flags, size_t size, const char* type_name);
- NOINLINE void* Realloc(void* ptr, size_t new_size, const char* type_name);
- // Overload that may return nullptr if reallocation isn't possible. In this
- // case, |ptr| remains valid.
- NOINLINE void* TryRealloc(void* ptr, size_t new_size, const char* type_name);
+// g_oom_handling_function is invoked when PartitionAlloc hits OutOfMemory.
+static OomFunction g_oom_handling_function = nullptr;
- ALWAYS_INLINE size_t ActualSize(size_t size);
+} // namespace internal
- void PurgeMemory(int flags) override;
+class PartitionStatsDumper;
- void DumpStats(const char* partition_name,
- bool is_light_dump,
- PartitionStatsDumper* partition_stats_dumper);
+enum PartitionPurgeFlags {
+ // Decommitting the ring list of empty pages is reasonably fast.
+ PartitionPurgeDecommitEmptyPages = 1 << 0,
+ // Discarding unused system pages is slower, because it involves walking all
+ // freelists in all active partition pages of all buckets >= system page
+ // size. It often frees a similar amount of memory to decommitting the empty
+ // pages, though.
+ PartitionPurgeDiscardUnusedSystemPages = 1 << 1,
};
// Struct used to retrieve total memory usage of a partition. Used by
@@ -218,49 +300,286 @@ class BASE_EXPORT PartitionStatsDumper {
const PartitionBucketMemoryStats*) = 0;
};
-BASE_EXPORT void PartitionAllocGlobalInit(OomFunction on_out_of_memory);
+// Never instantiate a PartitionRoot directly, instead use
+// PartitionAllocator.
+template <bool thread_safe>
+struct BASE_EXPORT PartitionRoot {
+ using Page = internal::PartitionPage<thread_safe>;
+ using Bucket = internal::PartitionBucket<thread_safe>;
+ using SuperPageExtentEntry =
+ internal::PartitionSuperPageExtentEntry<thread_safe>;
+ using DirectMapExtent = internal::PartitionDirectMapExtent<thread_safe>;
+ using ScopedGuard = internal::ScopedGuard<thread_safe>;
+
+ internal::MaybeSpinLock<thread_safe> lock_;
+ size_t total_size_of_committed_pages = 0;
+ size_t total_size_of_super_pages = 0;
+ size_t total_size_of_direct_mapped_pages = 0;
+ // Invariant: total_size_of_committed_pages <=
+ // total_size_of_super_pages +
+ // total_size_of_direct_mapped_pages.
+ unsigned num_buckets = 0;
+ unsigned max_allocation = 0;
+ // Atomic as initialization can be concurrent.
+ std::atomic<bool> initialized = {};
+ char* next_super_page = nullptr;
+ char* next_partition_page = nullptr;
+ char* next_partition_page_end = nullptr;
+ SuperPageExtentEntry* current_extent = nullptr;
+ SuperPageExtentEntry* first_extent = nullptr;
+ DirectMapExtent* direct_map_list = nullptr;
+ Page* global_empty_page_ring[kMaxFreeableSpans] = {};
+ int16_t global_empty_page_ring_index = 0;
+ uintptr_t inverted_self = 0;
-ALWAYS_INLINE void* PartitionRoot::Alloc(size_t size, const char* type_name) {
- return AllocFlags(0, size, type_name);
+ // Some pre-computed constants.
+ size_t order_index_shifts[kBitsPerSizeT + 1] = {};
+ size_t order_sub_index_masks[kBitsPerSizeT + 1] = {};
+ // The bucket lookup table lets us map a size_t to a bucket quickly.
+ // The trailing +1 caters for the overflow case for very large allocation
+ // sizes. It is one flat array instead of a 2D array because in the 2D
+ // world, we'd need to index array[blah][max+1] which risks undefined
+ // behavior.
+ Bucket* bucket_lookups[((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder) +
+ 1] = {};
+ Bucket buckets[kGenericNumBuckets] = {};
+
+ PartitionRoot() = default;
+ ~PartitionRoot() = default;
+
+ // Public API
+ //
+ // Allocates out of the given bucket. Properly, this function should probably
+ // be in PartitionBucket, but because the implementation needs to be inlined
+ // for performance, and because it needs to inspect PartitionPage,
+ // it becomes impossible to have it in PartitionBucket as this causes a
+ // cyclical dependency on PartitionPage function implementations.
+ //
+ // Moving it a layer lower couples PartitionRoot and PartitionBucket, but
+ // preserves the layering of the includes.
+ ALWAYS_INLINE void Init() {
+ if (LIKELY(initialized.load(std::memory_order_relaxed)))
+ return;
+
+ InitSlowPath();
+ }
+
+ ALWAYS_INLINE static bool IsValidPage(Page* page);
+ ALWAYS_INLINE static PartitionRoot* FromPage(Page* page);
+
+ ALWAYS_INLINE void IncreaseCommittedPages(size_t len);
+ ALWAYS_INLINE void DecreaseCommittedPages(size_t len);
+ ALWAYS_INLINE void DecommitSystemPages(void* address, size_t length)
+ EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ ALWAYS_INLINE void RecommitSystemPages(void* address, size_t length)
+ EXCLUSIVE_LOCKS_REQUIRED(lock_);
+
+ NOINLINE void OutOfMemory(size_t size);
+
+ ALWAYS_INLINE void* Alloc(size_t size, const char* type_name);
+ ALWAYS_INLINE void* AllocFlags(int flags, size_t size, const char* type_name);
+
+ ALWAYS_INLINE void* Realloc(void* ptr,
+ size_t new_size,
+ const char* type_name);
+ // Overload that may return nullptr if reallocation isn't possible. In this
+ // case, |ptr| remains valid.
+ ALWAYS_INLINE void* TryRealloc(void* ptr,
+ size_t new_size,
+ const char* type_name);
+ NOINLINE void* ReallocFlags(int flags,
+ void* ptr,
+ size_t new_size,
+ const char* type_name);
+ ALWAYS_INLINE void Free(void* ptr);
+
+ ALWAYS_INLINE size_t ActualSize(size_t size);
+
+ // Frees memory from this partition, if possible, by decommitting pages.
+ // |flags| is an OR of base::PartitionPurgeFlags.
+ void PurgeMemory(int flags);
+
+ void DumpStats(const char* partition_name,
+ bool is_light_dump,
+ PartitionStatsDumper* partition_stats_dumper);
+
+ internal::PartitionBucket<thread_safe>* SizeToBucket(size_t size) const;
+
+ private:
+ void InitSlowPath();
+ ALWAYS_INLINE void* AllocFromBucket(Bucket* bucket, int flags, size_t size)
+ EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ bool ReallocDirectMappedInPlace(internal::PartitionPage<thread_safe>* page,
+ size_t raw_size)
+ EXCLUSIVE_LOCKS_REQUIRED(lock_);
+ void DecommitEmptyPages() EXCLUSIVE_LOCKS_REQUIRED(lock_);
+};
+
+template <bool thread_safe>
+ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFromBucket(Bucket* bucket,
+ int flags,
+ size_t size) {
+ bool zero_fill = flags & PartitionAllocZeroFill;
+ bool is_already_zeroed = false;
+
+ Page* page = bucket->active_pages_head;
+ // Check that this page is neither full nor freed.
+ PA_DCHECK(page);
+ PA_DCHECK(page->num_allocated_slots >= 0);
+ void* ret = page->freelist_head;
+ if (LIKELY(ret)) {
+ // If these DCHECKs fire, you probably corrupted memory. TODO(palmer): See
+ // if we can afford to make these CHECKs.
+ PA_DCHECK(IsValidPage(page));
+
+ // All large allocations must go through the slow path to correctly update
+ // the size metadata.
+ PA_DCHECK(page->get_raw_size() == 0);
+ internal::PartitionFreelistEntry* new_head =
+ internal::EncodedPartitionFreelistEntry::Decode(
+ page->freelist_head->next);
+ page->freelist_head = new_head;
+ page->num_allocated_slots++;
+ } else {
+ ret = bucket->SlowPathAlloc(this, flags, size, &is_already_zeroed);
+ // TODO(palmer): See if we can afford to make this a CHECK.
+ PA_DCHECK(!ret || IsValidPage(Page::FromPointer(ret)));
+ }
+
+#if DCHECK_IS_ON() && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+ if (!ret) {
+ return nullptr;
+ }
+
+ page = Page::FromPointer(ret);
+ // TODO(ajwong): Can |page->bucket| ever not be |bucket|? If not, can this
+ // just be bucket->slot_size?
+ size_t new_slot_size = page->bucket->slot_size;
+ size_t raw_size = page->get_raw_size();
+ if (raw_size) {
+ PA_DCHECK(raw_size == size);
+ new_slot_size = raw_size;
+ }
+ size_t no_cookie_size =
+ internal::PartitionCookieSizeAdjustSubtract(new_slot_size);
+ char* char_ret = static_cast<char*>(ret);
+ // The value given to the application is actually just after the cookie.
+ ret = char_ret + internal::kCookieSize;
+
+ // Fill the region kUninitializedByte or 0, and surround it with 2 cookies.
+ internal::PartitionCookieWriteValue(char_ret);
+ if (!zero_fill) {
+ memset(ret, kUninitializedByte, no_cookie_size);
+ } else if (!is_already_zeroed) {
+ memset(ret, 0, no_cookie_size);
+ }
+ internal::PartitionCookieWriteValue(char_ret + internal::kCookieSize +
+ no_cookie_size);
+#else
+ if (ret && zero_fill && !is_already_zeroed) {
+ memset(ret, 0, size);
+ }
+#endif
+
+ return ret;
}
-ALWAYS_INLINE void* PartitionRoot::AllocFlags(int flags,
- size_t size,
- const char* type_name) {
+template <bool thread_safe>
+ALWAYS_INLINE void PartitionRoot<thread_safe>::Free(void* ptr) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
- CHECK_MAX_SIZE_OR_RETURN_NULLPTR(size, flags);
- void* result = malloc(size);
- CHECK(result);
- return result;
+ free(ptr);
#else
- DCHECK(max_allocation == 0 || size <= max_allocation);
- void* result;
- const bool hooks_enabled = PartitionAllocHooks::AreHooksEnabled();
- if (UNLIKELY(hooks_enabled)) {
- if (PartitionAllocHooks::AllocationOverrideHookIfEnabled(&result, flags,
- size, type_name)) {
- PartitionAllocHooks::AllocationObserverHookIfEnabled(result, size,
- type_name);
- return result;
- }
+ PA_DCHECK(initialized);
+
+ if (UNLIKELY(!ptr))
+ return;
+
+ if (PartitionAllocHooks::AreHooksEnabled()) {
+ PartitionAllocHooks::FreeObserverHookIfEnabled(ptr);
+ if (PartitionAllocHooks::FreeOverrideHookIfEnabled(ptr))
+ return;
}
- size_t requested_size = size;
- size = internal::PartitionCookieSizeAdjustAdd(size);
- DCHECK(initialized);
- size_t index = size >> kBucketShift;
- DCHECK(index < num_buckets);
- DCHECK(size == index << kBucketShift);
+
+ ptr = internal::PartitionCookieFreePointerAdjust(ptr);
+ Page* page = Page::FromPointer(ptr);
+ // TODO(palmer): See if we can afford to make this a CHECK.
+ PA_DCHECK(IsValidPage(page));
+ internal::DeferredUnmap deferred_unmap;
{
ScopedGuard guard{lock_};
- Bucket* bucket = &buckets()[index];
- result = AllocFromBucket(bucket, flags, size);
- }
- if (UNLIKELY(hooks_enabled)) {
- PartitionAllocHooks::AllocationObserverHookIfEnabled(result, requested_size,
- type_name);
+ deferred_unmap = page->Free(ptr);
}
- return result;
-#endif // defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
+ deferred_unmap.Run();
+#endif
+}
+
+// static
+template <bool thread_safe>
+ALWAYS_INLINE bool PartitionRoot<thread_safe>::IsValidPage(Page* page) {
+ PartitionRoot* root = FromPage(page);
+ return root->inverted_self == ~reinterpret_cast<uintptr_t>(root);
+}
+
+template <bool thread_safe>
+ALWAYS_INLINE PartitionRoot<thread_safe>* PartitionRoot<thread_safe>::FromPage(
+ Page* page) {
+ auto* extent_entry = reinterpret_cast<SuperPageExtentEntry*>(
+ reinterpret_cast<uintptr_t>(page) & kSystemPageBaseMask);
+ return extent_entry->root;
+}
+
+template <bool thread_safe>
+ALWAYS_INLINE void PartitionRoot<thread_safe>::IncreaseCommittedPages(
+ size_t len) {
+ total_size_of_committed_pages += len;
+ PA_DCHECK(total_size_of_committed_pages <=
+ total_size_of_super_pages + total_size_of_direct_mapped_pages);
+}
+
+template <bool thread_safe>
+ALWAYS_INLINE void PartitionRoot<thread_safe>::DecreaseCommittedPages(
+ size_t len) {
+ total_size_of_committed_pages -= len;
+ PA_DCHECK(total_size_of_committed_pages <=
+ total_size_of_super_pages + total_size_of_direct_mapped_pages);
+}
+
+template <bool thread_safe>
+ALWAYS_INLINE void PartitionRoot<thread_safe>::DecommitSystemPages(
+ void* address,
+ size_t length) {
+ ::base::DecommitSystemPages(address, length);
+ DecreaseCommittedPages(length);
+}
+
+template <bool thread_safe>
+ALWAYS_INLINE void PartitionRoot<thread_safe>::RecommitSystemPages(
+ void* address,
+ size_t length) {
+ PA_CHECK(::base::RecommitSystemPages(address, length, PageReadWrite));
+ IncreaseCommittedPages(length);
+}
+
+BASE_EXPORT void PartitionAllocGlobalInit(OomFunction on_out_of_memory);
+BASE_EXPORT void PartitionAllocGlobalUninitForTesting();
+
+ALWAYS_INLINE bool IsManagedByPartitionAlloc(const void* address) {
+#if BUILDFLAG(USE_PARTITION_ALLOC) && defined(ARCH_CPU_64_BITS) && \
+ !defined(OS_NACL)
+ return internal::PartitionAddressSpace::Contains(address);
+#else
+ return false;
+#endif
+}
+
+ALWAYS_INLINE bool IsManagedByPartitionAllocAndNotDirectMapped(
+ const void* address) {
+#if BUILDFLAG(USE_PARTITION_ALLOC) && defined(ARCH_CPU_64_BITS) && \
+ !defined(OS_NACL)
+ return internal::PartitionAddressSpace::IsInNormalBucketPool(address);
+#else
+ return false;
+#endif
}
ALWAYS_INLINE bool PartitionAllocSupportsGetSize() {
@@ -271,52 +590,91 @@ ALWAYS_INLINE bool PartitionAllocSupportsGetSize() {
#endif
}
-ALWAYS_INLINE size_t PartitionAllocGetSize(void* ptr) {
+namespace internal {
+// Gets the PartitionPage object for the first partition page of the slot span
+// that contains |ptr|. It's used with intention to do obtain the slot size.
+// CAUTION! It works well for normal buckets, but for direct-mapped allocations
+// it'll only work if |ptr| is in the first partition page of the allocation.
+template <bool thread_safe>
+ALWAYS_INLINE internal::PartitionPage<thread_safe>*
+PartitionAllocGetPageForSize(void* ptr) {
// No need to lock here. Only |ptr| being freed by another thread could
// cause trouble, and the caller is responsible for that not happening.
- DCHECK(PartitionAllocSupportsGetSize());
- ptr = internal::PartitionCookieFreePointerAdjust(ptr);
- internal::PartitionPage<internal::ThreadSafe>* page =
- internal::PartitionPage<internal::ThreadSafe>::FromPointer(ptr);
+ PA_DCHECK(PartitionAllocSupportsGetSize());
+ auto* page =
+ internal::PartitionPage<thread_safe>::FromPointerNoAlignmentCheck(ptr);
// TODO(palmer): See if we can afford to make this a CHECK.
- DCHECK(internal::PartitionRootBase<internal::ThreadSafe>::IsValidPage(page));
- size_t size = page->bucket->slot_size;
- return internal::PartitionCookieSizeAdjustSubtract(size);
+ PA_DCHECK(PartitionRoot<thread_safe>::IsValidPage(page));
+ return page;
+}
+} // namespace internal
+
+// Gets the size of the allocated slot that contains |ptr|, adjusted for cookie
+// (if any).
+// CAUTION! For direct-mapped allocation, |ptr| has to be within the first
+// partition page.
+template <bool thread_safe>
+ALWAYS_INLINE size_t PartitionAllocGetSize(void* ptr) {
+ ptr = internal::PartitionCookieFreePointerAdjust(ptr);
+ auto* page = internal::PartitionAllocGetPageForSize<thread_safe>(ptr);
+ return internal::PartitionCookieSizeAdjustSubtract(page->bucket->slot_size);
}
-ALWAYS_INLINE internal::PartitionBucket<internal::ThreadSafe>*
-PartitionGenericSizeToBucket(PartitionRootGeneric* root, size_t size) {
+// Gets the offset from the beginning of the allocated slot, adjusted for cookie
+// (if any).
+// CAUTION! Use only for normal buckets. Using on direct-mapped allocations may
+// lead to undefined behavior.
+template <bool thread_safe>
+ALWAYS_INLINE size_t PartitionAllocGetSlotOffset(void* ptr) {
+ PA_DCHECK(IsManagedByPartitionAllocAndNotDirectMapped(ptr));
+ ptr = internal::PartitionCookieFreePointerAdjust(ptr);
+ auto* page = internal::PartitionAllocGetPageForSize<thread_safe>(ptr);
+ size_t slot_size = page->bucket->slot_size;
+
+ // Get the offset from the beginning of the slot span.
+ uintptr_t ptr_addr = reinterpret_cast<uintptr_t>(ptr);
+ uintptr_t slot_span_start = reinterpret_cast<uintptr_t>(
+ internal::PartitionPage<thread_safe>::ToPointer(page));
+ size_t offset_in_slot_span = ptr_addr - slot_span_start;
+ // Knowing that slots are tightly packed in a slot span, calculate an offset
+ // within a slot using simple % operation.
+ // TODO(bartekn): Try to replace % with multiplication&shift magic.
+ size_t offset_in_slot = offset_in_slot_span % slot_size;
+ return offset_in_slot;
+}
+
+template <bool thread_safe>
+ALWAYS_INLINE internal::PartitionBucket<thread_safe>*
+PartitionRoot<thread_safe>::SizeToBucket(size_t size) const {
size_t order = kBitsPerSizeT - bits::CountLeadingZeroBitsSizeT(size);
// The order index is simply the next few bits after the most significant bit.
- size_t order_index = (size >> root->order_index_shifts[order]) &
- (kGenericNumBucketsPerOrder - 1);
+ size_t order_index =
+ (size >> order_index_shifts[order]) & (kGenericNumBucketsPerOrder - 1);
// And if the remaining bits are non-zero we must bump the bucket up.
- size_t sub_order_index = size & root->order_sub_index_masks[order];
- internal::PartitionBucket<internal::ThreadSafe>* bucket =
- root->bucket_lookups[(order << kGenericNumBucketsPerOrderBits) +
- order_index + !!sub_order_index];
- CHECK(bucket);
- DCHECK(!bucket->slot_size || bucket->slot_size >= size);
- DCHECK(!(bucket->slot_size % kGenericSmallestBucket));
+ size_t sub_order_index = size & order_sub_index_masks[order];
+ Bucket* bucket = bucket_lookups[(order << kGenericNumBucketsPerOrderBits) +
+ order_index + !!sub_order_index];
+ PA_CHECK(bucket);
+ PA_DCHECK(!bucket->slot_size || bucket->slot_size >= size);
+ PA_DCHECK(!(bucket->slot_size % kGenericSmallestBucket));
return bucket;
}
-ALWAYS_INLINE void* PartitionAllocGenericFlags(PartitionRootGeneric* root,
- int flags,
- size_t size,
- const char* type_name) {
- DCHECK_LT(flags, PartitionAllocLastFlag << 1);
+template <bool thread_safe>
+ALWAYS_INLINE void* PartitionRoot<thread_safe>::AllocFlags(
+ int flags,
+ size_t size,
+ const char* type_name) {
+ PA_DCHECK(flags < PartitionAllocLastFlag << 1);
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
CHECK_MAX_SIZE_OR_RETURN_NULLPTR(size, flags);
const bool zero_fill = flags & PartitionAllocZeroFill;
void* result = zero_fill ? calloc(1, size) : malloc(size);
- CHECK(result || flags & PartitionAllocReturnNull);
+ PA_CHECK(result || flags & PartitionAllocReturnNull);
return result;
#else
- DCHECK(root->initialized);
- // Only SizeSpecificPartitionAllocator should use max_allocation.
- DCHECK(root->max_allocation == 0);
+ PA_DCHECK(initialized);
void* result;
const bool hooks_enabled = PartitionAllocHooks::AreHooksEnabled();
if (UNLIKELY(hooks_enabled)) {
@@ -329,12 +687,11 @@ ALWAYS_INLINE void* PartitionAllocGenericFlags(PartitionRootGeneric* root,
}
size_t requested_size = size;
size = internal::PartitionCookieSizeAdjustAdd(size);
- internal::PartitionBucket<internal::ThreadSafe>* bucket =
- PartitionGenericSizeToBucket(root, size);
- DCHECK(bucket);
+ auto* bucket = SizeToBucket(size);
+ PA_DCHECK(bucket);
{
- PartitionRootGeneric::ScopedGuard guard{root->lock_};
- result = root->AllocFromBucket(bucket, flags, size);
+ internal::ScopedGuard<thread_safe> guard{lock_};
+ result = AllocFromBucket(bucket, flags, size);
}
if (UNLIKELY(hooks_enabled)) {
PartitionAllocHooks::AllocationObserverHookIfEnabled(result, requested_size,
@@ -345,30 +702,35 @@ ALWAYS_INLINE void* PartitionAllocGenericFlags(PartitionRootGeneric* root,
#endif
}
-ALWAYS_INLINE void* PartitionRootGeneric::Alloc(size_t size,
- const char* type_name) {
- return PartitionAllocGenericFlags(this, 0, size, type_name);
+template <bool thread_safe>
+ALWAYS_INLINE void* PartitionRoot<thread_safe>::Alloc(size_t size,
+ const char* type_name) {
+ return AllocFlags(0, size, type_name);
}
-ALWAYS_INLINE void* PartitionRootGeneric::AllocFlags(int flags,
- size_t size,
- const char* type_name) {
- return PartitionAllocGenericFlags(this, flags, size, type_name);
+template <bool thread_safe>
+ALWAYS_INLINE void* PartitionRoot<thread_safe>::Realloc(void* ptr,
+ size_t new_size,
+ const char* type_name) {
+ return ReallocFlags(0, ptr, new_size, type_name);
}
-BASE_EXPORT void* PartitionReallocGenericFlags(PartitionRootGeneric* root,
- int flags,
- void* ptr,
- size_t new_size,
- const char* type_name);
+template <bool thread_safe>
+ALWAYS_INLINE void* PartitionRoot<thread_safe>::TryRealloc(
+ void* ptr,
+ size_t new_size,
+ const char* type_name) {
+ return ReallocFlags(PartitionAllocReturnNull, ptr, new_size, type_name);
+}
-ALWAYS_INLINE size_t PartitionRootGeneric::ActualSize(size_t size) {
+template <bool thread_safe>
+ALWAYS_INLINE size_t PartitionRoot<thread_safe>::ActualSize(size_t size) {
#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
return size;
#else
- DCHECK(initialized);
+ PA_DCHECK(PartitionRoot<thread_safe>::initialized);
size = internal::PartitionCookieSizeAdjustAdd(size);
- Bucket* bucket = PartitionGenericSizeToBucket(this, size);
+ auto* bucket = SizeToBucket(size);
if (LIKELY(!bucket->is_direct_mapped())) {
size = bucket->slot_size;
} else if (size > kGenericMaxDirectMapped) {
@@ -380,35 +742,11 @@ ALWAYS_INLINE size_t PartitionRootGeneric::ActualSize(size_t size) {
#endif
}
-template <size_t N>
-class SizeSpecificPartitionAllocator {
- public:
- SizeSpecificPartitionAllocator() {
- memset(actual_buckets_, 0,
- sizeof(PartitionRoot::Bucket) * base::size(actual_buckets_));
- }
- ~SizeSpecificPartitionAllocator() {
- PartitionAllocMemoryReclaimer::Instance()->UnregisterPartition(
- &partition_root_);
- }
- static const size_t kMaxAllocation = N - kAllocationGranularity;
- static const size_t kNumBuckets = N / kAllocationGranularity;
- void init() {
- partition_root_.Init(kNumBuckets, kMaxAllocation);
- PartitionAllocMemoryReclaimer::Instance()->RegisterPartition(
- &partition_root_);
- }
- ALWAYS_INLINE PartitionRoot* root() { return &partition_root_; }
-
- private:
- PartitionRoot partition_root_;
- PartitionRoot::Bucket actual_buckets_[kNumBuckets];
-};
-
-class BASE_EXPORT PartitionAllocatorGeneric {
- public:
- PartitionAllocatorGeneric();
- ~PartitionAllocatorGeneric() {
+namespace internal {
+template <bool thread_safe>
+struct BASE_EXPORT PartitionAllocator {
+ PartitionAllocator() = default;
+ ~PartitionAllocator() {
PartitionAllocMemoryReclaimer::Instance()->UnregisterPartition(
&partition_root_);
}
@@ -418,11 +756,19 @@ class BASE_EXPORT PartitionAllocatorGeneric {
PartitionAllocMemoryReclaimer::Instance()->RegisterPartition(
&partition_root_);
}
- ALWAYS_INLINE PartitionRootGeneric* root() { return &partition_root_; }
+ ALWAYS_INLINE PartitionRoot<thread_safe>* root() { return &partition_root_; }
private:
- PartitionRootGeneric partition_root_;
+ PartitionRoot<thread_safe> partition_root_;
};
+} // namespace internal
+
+using PartitionAllocator = internal::PartitionAllocator<internal::ThreadSafe>;
+using ThreadUnsafePartitionAllocator =
+ internal::PartitionAllocator<internal::NotThreadSafe>;
+
+using ThreadSafePartitionRoot = PartitionRoot<internal::ThreadSafe>;
+using ThreadUnsafePartitionRoot = PartitionRoot<internal::NotThreadSafe>;
} // namespace base
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc_check.h b/chromium/base/allocator/partition_allocator/partition_alloc_check.h
new file mode 100644
index 00000000000..e8003498489
--- /dev/null
+++ b/chromium/base/allocator/partition_allocator/partition_alloc_check.h
@@ -0,0 +1,36 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CHECK_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CHECK_H_
+
+#include "base/allocator/buildflags.h"
+#include "base/check.h"
+
+// When PartitionAlloc is used as the default allocator, we cannot use the
+// regular (D)CHECK() macros, as they allocate internally. When an assertion is
+// triggered, they format strings, leading to reentrancy in the code, which none
+// of PartitionAlloc is designed to support (and especially not for error
+// paths).
+//
+// As a consequence:
+// - When PartitionAlloc is not malloc(), use the regular macros
+// - Otherwise, crash immediately. This provides worse error messages though.
+#if BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+// See base/check.h for implementation details.
+#define PA_CHECK(condition) \
+ UNLIKELY(!(condition)) ? IMMEDIATE_CRASH() : EAT_CHECK_STREAM_PARAMS()
+
+#if DCHECK_IS_ON()
+#define PA_DCHECK(condition) PA_CHECK(condition)
+#else
+#define PA_DCHECK(condition) EAT_CHECK_STREAM_PARAMS(!(condition))
+#endif // DCHECK_IS_ON()
+
+#else
+#define PA_CHECK(condition) CHECK(condition)
+#define PA_DCHECK(condition) DCHECK(condition)
+#endif // BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
+#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CHECK_H_
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc_constants.h b/chromium/base/allocator/partition_allocator/partition_alloc_constants.h
index fbc851c15f9..ab001f168c8 100644
--- a/chromium/base/allocator/partition_allocator/partition_alloc_constants.h
+++ b/chromium/base/allocator/partition_allocator/partition_alloc_constants.h
@@ -6,19 +6,14 @@
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_CONSTANTS_H_
#include <limits.h>
+#include <cstddef>
#include "base/allocator/partition_allocator/page_allocator_constants.h"
-#include "base/logging.h"
#include "build/build_config.h"
namespace base {
-// Allocation granularity of sizeof(void*) bytes.
-static const size_t kAllocationGranularity = sizeof(void*);
-static const size_t kAllocationGranularityMask = kAllocationGranularity - 1;
-static const size_t kBucketShift = (kAllocationGranularity == 8) ? 3 : 2;
-
// Underlying partition storage pages (`PartitionPage`s) are a power-of-2 size.
// It is typical for a `PartitionPage` to be based on multiple system pages.
// Most references to "page" refer to `PartitionPage`s.
@@ -63,9 +58,12 @@ static const size_t kMaxSystemPagesPerSlotSpan =
// We reserve virtual address space in 2 MiB chunks (aligned to 2 MiB as well).
// These chunks are called *super pages*. We do this so that we can store
// metadata in the first few pages of each 2 MiB-aligned section. This makes
-// freeing memory very fast. We specifically choose 2 MiB because this virtual
-// address block represents a full but single PTE allocation on ARM, ia32 and
-// x64.
+// freeing memory very fast. 2 MiB size & alignment were chosen, because this
+// virtual address block represents a full but single page table allocation on
+// ARM, ia32 and x64, which may be slightly more performance- and memory-efficient.
+// (Note, these super pages are backed by 4 KiB system pages and have nothing to
+// do with OS concept of "huge pages"/"large pages", even though the size
+// coincides.)
//
// The layout of the super page is as follows. The sizes below are the same for
// 32- and 64-bit platforms.
@@ -78,16 +76,20 @@ static const size_t kMaxSystemPagesPerSlotSpan =
// | Slot span |
// | ... |
// | Slot span |
-// | Guard page (4 KiB) |
+// | Guard pages (16 KiB) |
// +-----------------------+
//
-// Each slot span is a contiguous range of one or more `PartitionPage`s.
+// Each slot span is a contiguous range of one or more `PartitionPage`s. Note
+// that slot spans of different sizes may co-exist within one super page. Even
+// slot spans of the same size may support different slot sizes. However, all
+// slots within a span have to be of the same size.
//
// The metadata page has the following format. Note that the `PartitionPage`
-// that is not at the head of a slot span is "unused". In other words, the
-// metadata for the slot span is stored only in the first `PartitionPage` of the
-// slot span. Metadata accesses to other `PartitionPage`s are redirected to the
-// first `PartitionPage`.
+// that is not at the head of a slot span is "unused" (by most part, it only
+// stores the offset from the head page). In other words, the metadata for the
+// slot span is stored only in the first `PartitionPage` of the slot span.
+// Metadata accesses to other `PartitionPage`s are redirected to the first
+// `PartitionPage`.
//
// +---------------------------------------------+
// | SuperPageExtentEntry (32 B) |
@@ -97,28 +99,30 @@ static const size_t kMaxSystemPagesPerSlotSpan =
// | PartitionPage of slot span 2 (32 B, used) |
// | PartitionPage of slot span 3 (32 B, used) |
// | ... |
+// | PartitionPage of slot span N (32 B, used) |
+// | PartitionPage of slot span N (32 B, unused) |
// | PartitionPage of slot span N (32 B, unused) |
// +---------------------------------------------+
//
-// A direct-mapped page has a similar layout to fake it looking like a super
-// page:
+// A direct-mapped page has an identical layout at the beginning to fake it
+// looking like a super page:
//
-// +-----------------------+
-// | Guard page (4 KiB) |
-// | Metadata page (4 KiB) |
-// | Guard pages (8 KiB) |
-// | Direct mapped object |
-// | Guard page (4 KiB) |
-// +-----------------------+
+// +---------------------------------+
+// | Guard page (4 KiB) |
+// | Metadata page (4 KiB) |
+// | Guard pages (8 KiB) |
+// | Direct mapped object |
+// | Guard page (4 KiB, 32-bit only) |
+// +---------------------------------+
//
// A direct-mapped page's metadata page has the following layout:
//
-// +--------------------------------+
-// | SuperPageExtentEntry (32 B) |
-// | PartitionPage (32 B) |
-// | PartitionBucket (32 B) |
-// | PartitionDirectMapExtent (8 B) |
-// +--------------------------------+
+// +---------------------------------+
+// | SuperPageExtentEntry (32 B) |
+// | PartitionPage (32 B) |
+// | PartitionBucket (32 B) |
+// | PartitionDirectMapExtent (32 B) |
+// +---------------------------------+
static const size_t kSuperPageShift = 21; // 2 MiB
static const size_t kSuperPageSize = 1 << kSuperPageShift;
@@ -136,7 +140,18 @@ static const size_t kNumPartitionPagesPerSuperPage =
// In terms of allocation sizes, order 0 covers 0, order 1 covers 1, order 2
// covers 2->3, order 3 covers 4->7, order 4 covers 8->15.
-static const size_t kGenericMinBucketedOrder = 4; // 8 bytes.
+static_assert(alignof(std::max_align_t) <= 16,
+ "PartitionAlloc doesn't support a fundamental alignment larger "
+ "than 16 bytes.");
+// PartitionAlloc should return memory properly aligned for any type, to behave
+// properly as a generic allocator. This is not strictly required as long as
+// types are explicitly allocated with PartitionAlloc, but is needed to use it as a
+// malloc() implementation, and generally to match malloc()'s behavior.
+//
+// In practice, this means 8 bytes alignment on 32 bit architectures, and 16
+// bytes on 64 bit ones.
+static const size_t kGenericMinBucketedOrder =
+ alignof(std::max_align_t) == 16 ? 5 : 4; // 2^(order - 1), that is 16 or 8.
// The largest bucketed order is 1 << (20 - 1), storing [512 KiB, 1 MiB):
static const size_t kGenericMaxBucketedOrder = 20;
static const size_t kGenericNumBucketedOrders =
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc_features.cc b/chromium/base/allocator/partition_allocator/partition_alloc_features.cc
new file mode 100644
index 00000000000..7f8facf979a
--- /dev/null
+++ b/chromium/base/allocator/partition_allocator/partition_alloc_features.cc
@@ -0,0 +1,16 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/partition_allocator/partition_alloc_features.h"
+
+#include "base/feature_list.h"
+
+namespace base {
+
+// If enabled, PartitionAllocator reserves an address space (named GigaCage)
+// initially and uses a part of the address space for each allocation.
+const Feature kPartitionAllocGigaCage{"PartitionAllocGigaCage",
+ FEATURE_DISABLED_BY_DEFAULT};
+
+} // namespace base
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc_features.h b/chromium/base/allocator/partition_allocator/partition_alloc_features.h
new file mode 100644
index 00000000000..7cf1f547afc
--- /dev/null
+++ b/chromium/base/allocator/partition_allocator/partition_alloc_features.h
@@ -0,0 +1,34 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_
+#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_
+
+#include "base/allocator/buildflags.h"
+#include "base/base_export.h"
+#include "base/feature_list.h"
+#include "base/metrics/field_trial_params.h"
+#include "build/build_config.h"
+
+namespace base {
+
+struct Feature;
+
+extern const BASE_EXPORT Feature kPartitionAllocGigaCage;
+
+ALWAYS_INLINE bool IsPartitionAllocGigaCageEnabled() {
+ // The feature is not applicable to 32 bit architectures (not enough address
+ // space). It is also incompatible with PartitionAlloc as malloc(), as the
+ // base::Feature code allocates, leading to reentrancy in PartitionAlloc.
+#if !(defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)) || \
+ BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+ return false;
+#else
+ return FeatureList::IsEnabled(kPartitionAllocGigaCage);
+#endif
+}
+
+} // namespace base
+
+#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FEATURES_H_
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc_forward.h b/chromium/base/allocator/partition_allocator/partition_alloc_forward.h
index c2019e511bf..0737d282400 100644
--- a/chromium/base/allocator/partition_allocator/partition_alloc_forward.h
+++ b/chromium/base/allocator/partition_allocator/partition_alloc_forward.h
@@ -13,10 +13,12 @@ struct PartitionPage;
constexpr bool ThreadSafe = true;
constexpr bool NotThreadSafe = false;
-template <bool thread_safe>
-struct PartitionRootBase;
} // namespace internal
+
+template <bool thread_safe>
+struct PartitionRoot;
+
} // namespace base
#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_FORWARD_H_
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc_perftest.cc b/chromium/base/allocator/partition_allocator/partition_alloc_perftest.cc
index e6afbd949b3..d3ab0b36edb 100644
--- a/chromium/base/allocator/partition_allocator/partition_alloc_perftest.cc
+++ b/chromium/base/allocator/partition_allocator/partition_alloc_perftest.cc
@@ -6,6 +6,7 @@
#include <vector>
#include "base/allocator/partition_allocator/partition_alloc.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/strings/stringprintf.h"
#include "base/threading/platform_thread.h"
#include "base/time/time.h"
@@ -63,8 +64,7 @@ class SystemAllocator : public Allocator {
class PartitionAllocator : public Allocator {
public:
- PartitionAllocator()
- : alloc_(std::make_unique<PartitionAllocatorGeneric>()) {}
+ PartitionAllocator() : alloc_(std::make_unique<base::PartitionAllocator>()) {}
~PartitionAllocator() override = default;
void Init() override { alloc_->init(); }
@@ -72,14 +72,14 @@ class PartitionAllocator : public Allocator {
void Free(void* data) override { return alloc_->root()->Free(data); }
private:
- std::unique_ptr<PartitionAllocatorGeneric> alloc_;
+ std::unique_ptr<base::PartitionAllocator> alloc_;
};
class TestLoopThread : public PlatformThread::Delegate {
public:
explicit TestLoopThread(OnceCallback<float()> test_fn)
: test_fn_(std::move(test_fn)) {
- CHECK(PlatformThread::Create(0, this, &thread_handle_));
+ PA_CHECK(PlatformThread::Create(0, this, &thread_handle_));
}
float Run() {
diff --git a/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc b/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc
index e6fae94cfcc..99219a8d559 100644
--- a/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc
+++ b/chromium/base/allocator/partition_allocator/partition_alloc_unittest.cc
@@ -7,15 +7,22 @@
#include <stdlib.h>
#include <string.h>
+#include <algorithm>
+#include <cstddef>
#include <limits>
#include <memory>
#include <vector>
#include "base/allocator/partition_allocator/address_space_randomization.h"
+#include "base/allocator/partition_allocator/page_allocator_constants.h"
+#include "base/allocator/partition_allocator/partition_alloc.h"
+#include "base/allocator/partition_allocator/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/partition_alloc_features.h"
#include "base/logging.h"
#include "base/rand_util.h"
#include "base/stl_util.h"
#include "base/system/sys_info.h"
+#include "base/test/scoped_feature_list.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -29,8 +36,6 @@
namespace {
-constexpr size_t kTestMaxAllocation = base::kSystemPageSize;
-
bool IsLargeMemoryDevice() {
// Treat any device with 2GiB or more of physical memory as a "large memory
// device". We check for slightly less than 2GiB so that devices with a small
@@ -86,19 +91,20 @@ const size_t kTestSizes[] = {
100,
base::kSystemPageSize,
base::kSystemPageSize + 1,
- base::PartitionRootGeneric::Bucket::get_direct_map_size(100),
+ base::PartitionRoot<
+ base::internal::ThreadSafe>::Bucket::get_direct_map_size(100),
1 << 20,
1 << 21,
};
constexpr size_t kTestSizesCount = base::size(kTestSizes);
-void AllocateRandomly(base::PartitionRootGeneric* root,
+void AllocateRandomly(base::PartitionRoot<base::internal::ThreadSafe>* root,
size_t count,
int flags) {
std::vector<void*> allocations(count, nullptr);
for (size_t i = 0; i < count; ++i) {
const size_t size = kTestSizes[base::RandGenerator(kTestSizesCount)];
- allocations[i] = PartitionAllocGenericFlags(root, flags, size, nullptr);
+ allocations[i] = root->AllocFlags(flags, size, nullptr);
EXPECT_NE(nullptr, allocations[i]) << " size: " << size << " i: " << i;
}
@@ -108,6 +114,10 @@ void AllocateRandomly(base::PartitionRootGeneric* root,
}
}
+void HandleOOM(size_t unused_size) {
+ LOG(FATAL) << "Out of memory";
+}
+
} // namespace
namespace base {
@@ -129,7 +139,6 @@ const size_t kPointerOffset = kCookieSize;
const size_t kExtraAllocSize = kCookieSize * 2;
#endif
const size_t kRealAllocSize = kTestAllocSize + kExtraAllocSize;
-const size_t kTestBucketIndex = kRealAllocSize >> kBucketShift;
const char* type_name = nullptr;
@@ -140,16 +149,30 @@ class PartitionAllocTest : public testing::Test {
~PartitionAllocTest() override = default;
void SetUp() override {
+ scoped_feature_list.InitWithFeatures({kPartitionAllocGigaCage}, {});
+ PartitionAllocGlobalInit(HandleOOM);
allocator.init();
- generic_allocator.init();
+ test_bucket_index_ = SizeToIndex(kRealAllocSize);
+ }
+
+ size_t SizeToIndex(size_t size) {
+ return allocator.root()->SizeToBucket(size) - allocator.root()->buckets;
+ }
+
+ void TearDown() override {
+ allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages |
+ PartitionPurgeDiscardUnusedSystemPages);
+ PartitionAllocGlobalUninitForTesting();
}
- PartitionRoot::Page* GetFullPage(size_t size) {
+ PartitionRoot<ThreadSafe>::Page* GetFullPage(size_t size) {
size_t real_size = size + kExtraAllocSize;
- size_t bucket_index = real_size >> kBucketShift;
- PartitionRoot::Bucket* bucket = &allocator.root()->buckets()[bucket_index];
+ size_t bucket_index = SizeToIndex(real_size);
+ PartitionRoot<ThreadSafe>::Bucket* bucket =
+ &allocator.root()->buckets[bucket_index];
size_t num_slots =
- (bucket->num_system_pages_per_slot_span * kSystemPageSize) / real_size;
+ (bucket->num_system_pages_per_slot_span * kSystemPageSize) /
+ bucket->slot_size;
void* first = nullptr;
void* last = nullptr;
size_t i;
@@ -161,8 +184,8 @@ class PartitionAllocTest : public testing::Test {
else if (i == num_slots - 1)
last = PartitionCookieFreePointerAdjust(ptr);
}
- EXPECT_EQ(PartitionRoot::Page::FromPointer(first),
- PartitionRoot::Page::FromPointer(last));
+ EXPECT_EQ(PartitionRoot<ThreadSafe>::Page::FromPointer(first),
+ PartitionRoot<ThreadSafe>::Page::FromPointer(last));
if (bucket->num_system_pages_per_slot_span ==
kNumSystemPagesPerPartitionPage)
EXPECT_EQ(reinterpret_cast<size_t>(first) & kPartitionPageBaseMask,
@@ -172,18 +195,16 @@ class PartitionAllocTest : public testing::Test {
EXPECT_EQ(nullptr, bucket->active_pages_head->freelist_head);
EXPECT_TRUE(bucket->active_pages_head);
EXPECT_TRUE(bucket->active_pages_head !=
- PartitionRoot::Page::get_sentinel_page());
+ PartitionRoot<ThreadSafe>::Page::get_sentinel_page());
return bucket->active_pages_head;
}
void CycleFreeCache(size_t size) {
- size_t real_size = size + kExtraAllocSize;
- size_t bucket_index = real_size >> kBucketShift;
- PartitionRoot::Bucket* bucket = &allocator.root()->buckets()[bucket_index];
- DCHECK(!bucket->active_pages_head->num_allocated_slots);
-
for (size_t i = 0; i < kMaxFreeableSpans; ++i) {
void* ptr = allocator.root()->Alloc(size, type_name);
+ auto* page = PartitionRoot<base::internal::ThreadSafe>::Page::FromPointer(
+ PartitionCookieFreePointerAdjust(ptr));
+ auto* bucket = page->bucket;
EXPECT_EQ(1, bucket->active_pages_head->num_allocated_slots);
allocator.root()->Free(ptr);
EXPECT_EQ(0, bucket->active_pages_head->num_allocated_slots);
@@ -191,20 +212,6 @@ class PartitionAllocTest : public testing::Test {
}
}
- void CycleGenericFreeCache(size_t size) {
- for (size_t i = 0; i < kMaxFreeableSpans; ++i) {
- void* ptr = generic_allocator.root()->Alloc(size, type_name);
- PartitionRootGeneric::Page* page =
- PartitionRootGeneric::Page::FromPointer(
- PartitionCookieFreePointerAdjust(ptr));
- PartitionRootGeneric::Bucket* bucket = page->bucket;
- EXPECT_EQ(1, bucket->active_pages_head->num_allocated_slots);
- generic_allocator.root()->Free(ptr);
- EXPECT_EQ(0, bucket->active_pages_head->num_allocated_slots);
- EXPECT_NE(-1, bucket->active_pages_head->empty_cache_index);
- }
- }
-
enum ReturnNullTestMode {
kPartitionAllocGenericFlags,
kPartitionReallocGenericFlags,
@@ -226,40 +233,37 @@ class PartitionAllocTest : public testing::Test {
// Work out the number of allocations for 6 GB of memory.
const int num_allocations = (6 * 1024 * 1024) / (alloc_size / 1024);
- void** ptrs = reinterpret_cast<void**>(generic_allocator.root()->Alloc(
- num_allocations * sizeof(void*), type_name));
+ void** ptrs = reinterpret_cast<void**>(
+ allocator.root()->Alloc(num_allocations * sizeof(void*), type_name));
int i;
for (i = 0; i < num_allocations; ++i) {
switch (mode) {
case kPartitionAllocGenericFlags: {
- ptrs[i] = PartitionAllocGenericFlags(generic_allocator.root(),
- PartitionAllocReturnNull,
- alloc_size, type_name);
+ ptrs[i] = allocator.root()->AllocFlags(PartitionAllocReturnNull,
+ alloc_size, type_name);
break;
}
case kPartitionReallocGenericFlags: {
- ptrs[i] = PartitionAllocGenericFlags(
- generic_allocator.root(), PartitionAllocReturnNull, 1, type_name);
- ptrs[i] = PartitionReallocGenericFlags(
- generic_allocator.root(), PartitionAllocReturnNull, ptrs[i],
- alloc_size, type_name);
+ ptrs[i] = allocator.root()->AllocFlags(PartitionAllocReturnNull, 1,
+ type_name);
+ ptrs[i] = allocator.root()->ReallocFlags(
+ PartitionAllocReturnNull, ptrs[i], alloc_size, type_name);
break;
}
case kPartitionRootGenericTryRealloc: {
- ptrs[i] = PartitionAllocGenericFlags(
- generic_allocator.root(), PartitionAllocReturnNull, 1, type_name);
- ptrs[i] = generic_allocator.root()->TryRealloc(ptrs[i], alloc_size,
- type_name);
+ ptrs[i] = allocator.root()->AllocFlags(PartitionAllocReturnNull, 1,
+ type_name);
+ ptrs[i] =
+ allocator.root()->TryRealloc(ptrs[i], alloc_size, type_name);
}
}
if (!i)
EXPECT_TRUE(ptrs[0]);
if (!ptrs[i]) {
- ptrs[i] = PartitionAllocGenericFlags(generic_allocator.root(),
- PartitionAllocReturnNull,
- alloc_size, type_name);
+ ptrs[i] = allocator.root()->AllocFlags(PartitionAllocReturnNull,
+ alloc_size, type_name);
EXPECT_FALSE(ptrs[i]);
break;
}
@@ -272,34 +276,36 @@ class PartitionAllocTest : public testing::Test {
// Free, reallocate and free again each block we allocated. We do this to
// check that freeing memory also works correctly after a failed allocation.
for (--i; i >= 0; --i) {
- generic_allocator.root()->Free(ptrs[i]);
- ptrs[i] = PartitionAllocGenericFlags(generic_allocator.root(),
- PartitionAllocReturnNull, alloc_size,
- type_name);
+ allocator.root()->Free(ptrs[i]);
+ ptrs[i] = allocator.root()->AllocFlags(PartitionAllocReturnNull,
+ alloc_size, type_name);
EXPECT_TRUE(ptrs[i]);
- generic_allocator.root()->Free(ptrs[i]);
+ allocator.root()->Free(ptrs[i]);
}
- generic_allocator.root()->Free(ptrs);
+ allocator.root()->Free(ptrs);
EXPECT_TRUE(ClearAddressSpaceLimit());
LOG(FATAL) << "DoReturnNullTest";
}
- SizeSpecificPartitionAllocator<kTestMaxAllocation> allocator;
- PartitionAllocatorGeneric generic_allocator;
+ base::test::ScopedFeatureList scoped_feature_list;
+ PartitionAllocator<base::internal::ThreadSafe> allocator;
+ size_t test_bucket_index_;
};
class PartitionAllocDeathTest : public PartitionAllocTest {};
namespace {
-void FreeFullPage(PartitionRoot* root, PartitionRoot::Page* page) {
+void FreeFullPage(PartitionRoot<base::internal::ThreadSafe>* root,
+ PartitionRoot<base::internal::ThreadSafe>::Page* page) {
size_t size = page->bucket->slot_size;
size_t num_slots =
(page->bucket->num_system_pages_per_slot_span * kSystemPageSize) / size;
EXPECT_EQ(num_slots, static_cast<size_t>(abs(page->num_allocated_slots)));
- char* ptr = reinterpret_cast<char*>(PartitionRoot::Page::ToPointer(page));
+ char* ptr = reinterpret_cast<char*>(
+ PartitionRoot<base::internal::ThreadSafe>::Page::ToPointer(page));
size_t i;
for (i = 0; i < num_slots; ++i) {
root->Free(ptr + kPointerOffset);
@@ -342,7 +348,7 @@ class MockPartitionStatsDumper : public PartitionStatsDumper {
const PartitionBucketMemoryStats* stats) override {
(void)partition_name;
EXPECT_TRUE(stats->is_valid);
- EXPECT_EQ(0u, stats->bucket_slot_size & kAllocationGranularityMask);
+ EXPECT_EQ(0u, stats->bucket_slot_size & sizeof(void*));
bucket_stats.push_back(*stats);
total_resident_bytes += stats->resident_bytes;
total_active_bytes += stats->active_bytes;
@@ -375,9 +381,10 @@ class MockPartitionStatsDumper : public PartitionStatsDumper {
// Check that the most basic of allocate / free pairs work.
TEST_F(PartitionAllocTest, Basic) {
- PartitionRoot::Bucket* bucket =
- &allocator.root()->buckets()[kTestBucketIndex];
- PartitionRoot::Page* seed_page = PartitionRoot::Page::get_sentinel_page();
+ PartitionRoot<ThreadSafe>::Bucket* bucket =
+ &allocator.root()->buckets[test_bucket_index_];
+ PartitionRoot<ThreadSafe>::Page* seed_page =
+ PartitionRoot<ThreadSafe>::Page::get_sentinel_page();
EXPECT_FALSE(bucket->empty_pages_head);
EXPECT_FALSE(bucket->decommitted_pages_head);
@@ -437,25 +444,27 @@ TEST_F(PartitionAllocTest, MultiAlloc) {
// Test a bucket with multiple pages.
TEST_F(PartitionAllocTest, MultiPages) {
- PartitionRoot::Bucket* bucket =
- &allocator.root()->buckets()[kTestBucketIndex];
+ PartitionRoot<ThreadSafe>::Bucket* bucket =
+ &allocator.root()->buckets[test_bucket_index_];
- PartitionRoot::Page* page = GetFullPage(kTestAllocSize);
+ PartitionRoot<ThreadSafe>::Page* page = GetFullPage(kTestAllocSize);
FreeFullPage(allocator.root(), page);
EXPECT_TRUE(bucket->empty_pages_head);
- EXPECT_EQ(PartitionRoot::Page::get_sentinel_page(),
+ EXPECT_EQ(PartitionRoot<ThreadSafe>::Page::get_sentinel_page(),
bucket->active_pages_head);
EXPECT_EQ(nullptr, page->next_page);
EXPECT_EQ(0, page->num_allocated_slots);
page = GetFullPage(kTestAllocSize);
- PartitionRoot::Page* page2 = GetFullPage(kTestAllocSize);
+ PartitionRoot<ThreadSafe>::Page* page2 = GetFullPage(kTestAllocSize);
EXPECT_EQ(page2, bucket->active_pages_head);
EXPECT_EQ(nullptr, page2->next_page);
- EXPECT_EQ(reinterpret_cast<uintptr_t>(PartitionRoot::Page::ToPointer(page)) &
+ EXPECT_EQ(reinterpret_cast<uintptr_t>(
+ PartitionRoot<ThreadSafe>::Page::ToPointer(page)) &
kSuperPageBaseMask,
- reinterpret_cast<uintptr_t>(PartitionRoot::Page::ToPointer(page2)) &
+ reinterpret_cast<uintptr_t>(
+ PartitionRoot<ThreadSafe>::Page::ToPointer(page2)) &
kSuperPageBaseMask);
// Fully free the non-current page. This will leave us with no current
@@ -463,7 +472,7 @@ TEST_F(PartitionAllocTest, MultiPages) {
FreeFullPage(allocator.root(), page);
EXPECT_EQ(0, page->num_allocated_slots);
EXPECT_TRUE(bucket->empty_pages_head);
- EXPECT_EQ(PartitionPage<base::internal::NotThreadSafe>::get_sentinel_page(),
+ EXPECT_EQ(PartitionPage<ThreadSafe>::get_sentinel_page(),
bucket->active_pages_head);
// Allocate a new page, it should pull from the freelist.
@@ -481,18 +490,19 @@ TEST_F(PartitionAllocTest, MultiPages) {
// Test some finer aspects of internal page transitions.
TEST_F(PartitionAllocTest, PageTransitions) {
- PartitionRoot::Bucket* bucket =
- &allocator.root()->buckets()[kTestBucketIndex];
+ PartitionRoot<ThreadSafe>::Bucket* bucket =
+ &allocator.root()->buckets[test_bucket_index_];
- PartitionRoot::Page* page1 = GetFullPage(kTestAllocSize);
+ PartitionRoot<ThreadSafe>::Page* page1 = GetFullPage(kTestAllocSize);
EXPECT_EQ(page1, bucket->active_pages_head);
EXPECT_EQ(nullptr, page1->next_page);
- PartitionRoot::Page* page2 = GetFullPage(kTestAllocSize);
+ PartitionRoot<ThreadSafe>::Page* page2 = GetFullPage(kTestAllocSize);
EXPECT_EQ(page2, bucket->active_pages_head);
EXPECT_EQ(nullptr, page2->next_page);
// Bounce page1 back into the non-full list then fill it up again.
- char* ptr = reinterpret_cast<char*>(PartitionRoot::Page::ToPointer(page1)) +
+ char* ptr = reinterpret_cast<char*>(
+ PartitionRoot<ThreadSafe>::Page::ToPointer(page1)) +
kPointerOffset;
allocator.root()->Free(ptr);
EXPECT_EQ(page1, bucket->active_pages_head);
@@ -503,12 +513,13 @@ TEST_F(PartitionAllocTest, PageTransitions) {
// Allocating another page at this point should cause us to scan over page1
// (which is both full and NOT our current page), and evict it from the
// freelist. Older code had a O(n^2) condition due to failure to do this.
- PartitionRoot::Page* page3 = GetFullPage(kTestAllocSize);
+ PartitionRoot<ThreadSafe>::Page* page3 = GetFullPage(kTestAllocSize);
EXPECT_EQ(page3, bucket->active_pages_head);
EXPECT_EQ(nullptr, page3->next_page);
// Work out a pointer into page2 and free it.
- ptr = reinterpret_cast<char*>(PartitionRoot::Page::ToPointer(page2)) +
+ ptr = reinterpret_cast<char*>(
+ PartitionRoot<ThreadSafe>::Page::ToPointer(page2)) +
kPointerOffset;
allocator.root()->Free(ptr);
// Trying to allocate at this time should cause us to cycle around to page2
@@ -521,7 +532,8 @@ TEST_F(PartitionAllocTest, PageTransitions) {
// Work out a pointer into page1 and free it. This should pull the page
// back into the list of available pages.
- ptr = reinterpret_cast<char*>(PartitionRoot::Page::ToPointer(page1)) +
+ ptr = reinterpret_cast<char*>(
+ PartitionRoot<ThreadSafe>::Page::ToPointer(page1)) +
kPointerOffset;
allocator.root()->Free(ptr);
// This allocation should be satisfied by page1.
@@ -544,16 +556,17 @@ TEST_F(PartitionAllocTest, PageTransitions) {
// Test some corner cases relating to page transitions in the internal
// free page list metadata bucket.
TEST_F(PartitionAllocTest, FreePageListPageTransitions) {
- PartitionRoot::Bucket* bucket =
- &allocator.root()->buckets()[kTestBucketIndex];
+ PartitionRoot<ThreadSafe>::Bucket* bucket =
+ &allocator.root()->buckets[test_bucket_index_];
size_t num_to_fill_free_list_page =
- kPartitionPageSize / (sizeof(PartitionRoot::Page) + kExtraAllocSize);
+ kPartitionPageSize /
+ (sizeof(PartitionRoot<ThreadSafe>::Page) + kExtraAllocSize);
// The +1 is because we need to account for the fact that the current page
// never gets thrown on the freelist.
++num_to_fill_free_list_page;
- auto pages =
- std::make_unique<PartitionRoot::Page*[]>(num_to_fill_free_list_page);
+ auto pages = std::make_unique<PartitionRoot<ThreadSafe>::Page*[]>(
+ num_to_fill_free_list_page);
size_t i;
for (i = 0; i < num_to_fill_free_list_page; ++i) {
@@ -562,15 +575,15 @@ TEST_F(PartitionAllocTest, FreePageListPageTransitions) {
EXPECT_EQ(pages[num_to_fill_free_list_page - 1], bucket->active_pages_head);
for (i = 0; i < num_to_fill_free_list_page; ++i)
FreeFullPage(allocator.root(), pages[i]);
- EXPECT_EQ(PartitionRoot::Page::get_sentinel_page(),
+ EXPECT_EQ(PartitionRoot<ThreadSafe>::Page::get_sentinel_page(),
bucket->active_pages_head);
EXPECT_TRUE(bucket->empty_pages_head);
// Allocate / free in a different bucket size so we get control of a
// different free page list. We need two pages because one will be the last
// active page and not get freed.
- PartitionRoot::Page* page1 = GetFullPage(kTestAllocSize * 2);
- PartitionRoot::Page* page2 = GetFullPage(kTestAllocSize * 2);
+ PartitionRoot<ThreadSafe>::Page* page1 = GetFullPage(kTestAllocSize * 2);
+ PartitionRoot<ThreadSafe>::Page* page2 = GetFullPage(kTestAllocSize * 2);
FreeFullPage(allocator.root(), page1);
FreeFullPage(allocator.root(), page2);
@@ -581,7 +594,7 @@ TEST_F(PartitionAllocTest, FreePageListPageTransitions) {
for (i = 0; i < num_to_fill_free_list_page; ++i)
FreeFullPage(allocator.root(), pages[i]);
- EXPECT_EQ(PartitionRoot::Page::get_sentinel_page(),
+ EXPECT_EQ(PartitionRoot<ThreadSafe>::Page::get_sentinel_page(),
bucket->active_pages_head);
EXPECT_TRUE(bucket->empty_pages_head);
}
@@ -597,12 +610,13 @@ TEST_F(PartitionAllocTest, MultiPageAllocs) {
--num_pages_needed;
EXPECT_GT(num_pages_needed, 1u);
- auto pages = std::make_unique<PartitionRoot::Page*[]>(num_pages_needed);
+ auto pages =
+ std::make_unique<PartitionRoot<ThreadSafe>::Page*[]>(num_pages_needed);
uintptr_t first_super_page_base = 0;
size_t i;
for (i = 0; i < num_pages_needed; ++i) {
pages[i] = GetFullPage(kTestAllocSize);
- void* storage_ptr = PartitionRoot::Page::ToPointer(pages[i]);
+ void* storage_ptr = PartitionRoot<ThreadSafe>::Page::ToPointer(pages[i]);
if (!i)
first_super_page_base =
reinterpret_cast<uintptr_t>(storage_ptr) & kSuperPageBaseMask;
@@ -623,31 +637,30 @@ TEST_F(PartitionAllocTest, MultiPageAllocs) {
// Test the generic allocation functions that can handle arbitrary sizes and
// reallocing etc.
TEST_F(PartitionAllocTest, GenericAlloc) {
- void* ptr = generic_allocator.root()->Alloc(1, type_name);
+ void* ptr = allocator.root()->Alloc(1, type_name);
EXPECT_TRUE(ptr);
- generic_allocator.root()->Free(ptr);
- ptr = generic_allocator.root()->Alloc(kGenericMaxBucketed + 1, type_name);
+ allocator.root()->Free(ptr);
+ ptr = allocator.root()->Alloc(kGenericMaxBucketed + 1, type_name);
EXPECT_TRUE(ptr);
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr);
- ptr = generic_allocator.root()->Alloc(1, type_name);
+ ptr = allocator.root()->Alloc(1, type_name);
EXPECT_TRUE(ptr);
void* orig_ptr = ptr;
char* char_ptr = static_cast<char*>(ptr);
*char_ptr = 'A';
// Change the size of the realloc, remaining inside the same bucket.
- void* new_ptr = generic_allocator.root()->Realloc(ptr, 2, type_name);
+ void* new_ptr = allocator.root()->Realloc(ptr, 2, type_name);
EXPECT_EQ(ptr, new_ptr);
- new_ptr = generic_allocator.root()->Realloc(ptr, 1, type_name);
+ new_ptr = allocator.root()->Realloc(ptr, 1, type_name);
EXPECT_EQ(ptr, new_ptr);
- new_ptr =
- generic_allocator.root()->Realloc(ptr, kGenericSmallestBucket, type_name);
+ new_ptr = allocator.root()->Realloc(ptr, kGenericSmallestBucket, type_name);
EXPECT_EQ(ptr, new_ptr);
// Change the size of the realloc, switching buckets.
- new_ptr = generic_allocator.root()->Realloc(ptr, kGenericSmallestBucket + 1,
- type_name);
+ new_ptr =
+ allocator.root()->Realloc(ptr, kGenericSmallestBucket + 1, type_name);
EXPECT_NE(new_ptr, ptr);
// Check that the realloc copied correctly.
char* new_char_ptr = static_cast<char*>(new_ptr);
@@ -663,13 +676,13 @@ TEST_F(PartitionAllocTest, GenericAlloc) {
// The realloc moved. To check that the old allocation was freed, we can
// do an alloc of the old allocation size and check that the old allocation
// address is at the head of the freelist and reused.
- void* reused_ptr = generic_allocator.root()->Alloc(1, type_name);
+ void* reused_ptr = allocator.root()->Alloc(1, type_name);
EXPECT_EQ(reused_ptr, orig_ptr);
- generic_allocator.root()->Free(reused_ptr);
+ allocator.root()->Free(reused_ptr);
// Downsize the realloc.
ptr = new_ptr;
- new_ptr = generic_allocator.root()->Realloc(ptr, 1, type_name);
+ new_ptr = allocator.root()->Realloc(ptr, 1, type_name);
EXPECT_EQ(new_ptr, orig_ptr);
new_char_ptr = static_cast<char*>(new_ptr);
EXPECT_EQ(*new_char_ptr, 'B');
@@ -677,8 +690,7 @@ TEST_F(PartitionAllocTest, GenericAlloc) {
// Upsize the realloc to outside the partition.
ptr = new_ptr;
- new_ptr = generic_allocator.root()->Realloc(ptr, kGenericMaxBucketed + 1,
- type_name);
+ new_ptr = allocator.root()->Realloc(ptr, kGenericMaxBucketed + 1, type_name);
EXPECT_NE(new_ptr, ptr);
new_char_ptr = static_cast<char*>(new_ptr);
EXPECT_EQ(*new_char_ptr, 'C');
@@ -686,85 +698,85 @@ TEST_F(PartitionAllocTest, GenericAlloc) {
// Upsize and downsize the realloc, remaining outside the partition.
ptr = new_ptr;
- new_ptr = generic_allocator.root()->Realloc(ptr, kGenericMaxBucketed * 10,
- type_name);
+ new_ptr = allocator.root()->Realloc(ptr, kGenericMaxBucketed * 10, type_name);
new_char_ptr = static_cast<char*>(new_ptr);
EXPECT_EQ(*new_char_ptr, 'D');
*new_char_ptr = 'E';
ptr = new_ptr;
- new_ptr = generic_allocator.root()->Realloc(ptr, kGenericMaxBucketed * 2,
- type_name);
+ new_ptr = allocator.root()->Realloc(ptr, kGenericMaxBucketed * 2, type_name);
new_char_ptr = static_cast<char*>(new_ptr);
EXPECT_EQ(*new_char_ptr, 'E');
*new_char_ptr = 'F';
// Downsize the realloc to inside the partition.
ptr = new_ptr;
- new_ptr = generic_allocator.root()->Realloc(ptr, 1, type_name);
+ new_ptr = allocator.root()->Realloc(ptr, 1, type_name);
EXPECT_NE(new_ptr, ptr);
EXPECT_EQ(new_ptr, orig_ptr);
new_char_ptr = static_cast<char*>(new_ptr);
EXPECT_EQ(*new_char_ptr, 'F');
- generic_allocator.root()->Free(new_ptr);
+ allocator.root()->Free(new_ptr);
}
// Test the generic allocation functions can handle some specific sizes of
// interest.
TEST_F(PartitionAllocTest, GenericAllocSizes) {
- void* ptr = generic_allocator.root()->Alloc(0, type_name);
+ void* ptr = allocator.root()->Alloc(0, type_name);
EXPECT_TRUE(ptr);
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr);
// kPartitionPageSize is interesting because it results in just one
// allocation per page, which tripped up some corner cases.
size_t size = kPartitionPageSize - kExtraAllocSize;
- ptr = generic_allocator.root()->Alloc(size, type_name);
+ ptr = allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr);
- void* ptr2 = generic_allocator.root()->Alloc(size, type_name);
+ void* ptr2 = allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr2);
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr);
// Should be freeable at this point.
- PartitionRootGeneric::Page* page = PartitionRootGeneric::Page::FromPointer(
- PartitionCookieFreePointerAdjust(ptr));
+ PartitionRoot<ThreadSafe>::Page* page =
+ PartitionRoot<ThreadSafe>::Page::FromPointer(
+ PartitionCookieFreePointerAdjust(ptr));
EXPECT_NE(-1, page->empty_cache_index);
- generic_allocator.root()->Free(ptr2);
+ allocator.root()->Free(ptr2);
size = (((kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) -
kSystemPageSize) /
2) -
kExtraAllocSize;
- ptr = generic_allocator.root()->Alloc(size, type_name);
+ ptr = allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr);
memset(ptr, 'A', size);
- ptr2 = generic_allocator.root()->Alloc(size, type_name);
+ ptr2 = allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr2);
- void* ptr3 = generic_allocator.root()->Alloc(size, type_name);
+ void* ptr3 = allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr3);
- void* ptr4 = generic_allocator.root()->Alloc(size, type_name);
+ void* ptr4 = allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr4);
page = PartitionPage<base::internal::ThreadSafe>::FromPointer(
PartitionCookieFreePointerAdjust(ptr));
- PartitionRootGeneric::Page* page2 = PartitionRootGeneric::Page::FromPointer(
- PartitionCookieFreePointerAdjust(ptr3));
+ PartitionRoot<ThreadSafe>::Page* page2 =
+ PartitionRoot<ThreadSafe>::Page::FromPointer(
+ PartitionCookieFreePointerAdjust(ptr3));
EXPECT_NE(page, page2);
- generic_allocator.root()->Free(ptr);
- generic_allocator.root()->Free(ptr3);
- generic_allocator.root()->Free(ptr2);
+ allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr3);
+ allocator.root()->Free(ptr2);
// Should be freeable at this point.
EXPECT_NE(-1, page->empty_cache_index);
EXPECT_EQ(0, page->num_allocated_slots);
EXPECT_EQ(0, page->num_unprovisioned_slots);
- void* new_ptr = generic_allocator.root()->Alloc(size, type_name);
+ void* new_ptr = allocator.root()->Alloc(size, type_name);
EXPECT_EQ(ptr3, new_ptr);
- new_ptr = generic_allocator.root()->Alloc(size, type_name);
+ new_ptr = allocator.root()->Alloc(size, type_name);
EXPECT_EQ(ptr2, new_ptr);
- generic_allocator.root()->Free(new_ptr);
- generic_allocator.root()->Free(ptr3);
- generic_allocator.root()->Free(ptr4);
+ allocator.root()->Free(new_ptr);
+ allocator.root()->Free(ptr3);
+ allocator.root()->Free(ptr4);
#if DCHECK_IS_ON()
// |PartitionPage::Free| must poison the slot's contents with |kFreedByte|.
@@ -777,8 +789,8 @@ TEST_F(PartitionAllocTest, GenericAllocSizes) {
// Test this only if the device has enough memory or it might fail due
// to OOM.
if (IsLargeMemoryDevice()) {
- ptr = generic_allocator.root()->Alloc(512 * 1024 * 1024 + 1, type_name);
- generic_allocator.root()->Free(ptr);
+ ptr = allocator.root()->Alloc(512 * 1024 * 1024 + 1, type_name);
+ allocator.root()->Free(ptr);
}
// Check a more reasonable, but still direct mapped, size.
@@ -786,22 +798,22 @@ TEST_F(PartitionAllocTest, GenericAllocSizes) {
size = 20 * 1024 * 1024;
size -= kSystemPageSize;
size -= 1;
- ptr = generic_allocator.root()->Alloc(size, type_name);
+ ptr = allocator.root()->Alloc(size, type_name);
char* char_ptr = reinterpret_cast<char*>(ptr);
*(char_ptr + (size - 1)) = 'A';
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr);
// Can we free null?
- generic_allocator.root()->Free(nullptr);
+ allocator.root()->Free(nullptr);
// Do we correctly get a null for a failed allocation?
- EXPECT_EQ(nullptr, PartitionAllocGenericFlags(
- generic_allocator.root(), PartitionAllocReturnNull,
- 3u * 1024 * 1024 * 1024, type_name));
+ EXPECT_EQ(nullptr,
+ allocator.root()->AllocFlags(PartitionAllocReturnNull,
+ 3u * 1024 * 1024 * 1024, type_name));
}
// Test that we can fetch the real allocated size after an allocation.
-TEST_F(PartitionAllocTest, GenericAllocGetSize) {
+TEST_F(PartitionAllocTest, GenericAllocGetSizeAndOffset) {
void* ptr;
size_t requested_size, actual_size, predicted_size;
@@ -809,24 +821,38 @@ TEST_F(PartitionAllocTest, GenericAllocGetSize) {
// Allocate something small.
requested_size = 511 - kExtraAllocSize;
- predicted_size = generic_allocator.root()->ActualSize(requested_size);
- ptr = generic_allocator.root()->Alloc(requested_size, type_name);
+ predicted_size = allocator.root()->ActualSize(requested_size);
+ ptr = allocator.root()->Alloc(requested_size, type_name);
EXPECT_TRUE(ptr);
- actual_size = PartitionAllocGetSize(ptr);
+ actual_size = PartitionAllocGetSize<ThreadSafe>(ptr);
EXPECT_EQ(predicted_size, actual_size);
EXPECT_LT(requested_size, actual_size);
- generic_allocator.root()->Free(ptr);
+#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+ for (size_t offset = 0; offset < requested_size; ++offset) {
+ size_t actual_offset = PartitionAllocGetSlotOffset<ThreadSafe>(
+ static_cast<char*>(ptr) + offset);
+ EXPECT_EQ(actual_offset, offset);
+ }
+#endif
+ allocator.root()->Free(ptr);
// Allocate a size that should be a perfect match for a bucket, because it
// is an exact power of 2.
requested_size = (256 * 1024) - kExtraAllocSize;
- predicted_size = generic_allocator.root()->ActualSize(requested_size);
- ptr = generic_allocator.root()->Alloc(requested_size, type_name);
+ predicted_size = allocator.root()->ActualSize(requested_size);
+ ptr = allocator.root()->Alloc(requested_size, type_name);
EXPECT_TRUE(ptr);
- actual_size = PartitionAllocGetSize(ptr);
+ actual_size = PartitionAllocGetSize<ThreadSafe>(ptr);
EXPECT_EQ(predicted_size, actual_size);
EXPECT_EQ(requested_size, actual_size);
- generic_allocator.root()->Free(ptr);
+#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+ for (size_t offset = 0; offset < requested_size; offset += 877) {
+ size_t actual_offset = PartitionAllocGetSlotOffset<ThreadSafe>(
+ static_cast<char*>(ptr) + offset);
+ EXPECT_EQ(actual_offset, offset);
+ }
+#endif
+ allocator.root()->Free(ptr);
// Allocate a size that is a system page smaller than a bucket. GetSize()
// should return a larger size than we asked for now.
@@ -835,55 +861,93 @@ TEST_F(PartitionAllocTest, GenericAllocGetSize) {
num /= 2;
}
requested_size = num * kSystemPageSize - kSystemPageSize - kExtraAllocSize;
- predicted_size = generic_allocator.root()->ActualSize(requested_size);
- ptr = generic_allocator.root()->Alloc(requested_size, type_name);
+ predicted_size = allocator.root()->ActualSize(requested_size);
+ ptr = allocator.root()->Alloc(requested_size, type_name);
EXPECT_TRUE(ptr);
- actual_size = PartitionAllocGetSize(ptr);
+ actual_size = PartitionAllocGetSize<ThreadSafe>(ptr);
EXPECT_EQ(predicted_size, actual_size);
EXPECT_EQ(requested_size + kSystemPageSize, actual_size);
+#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+ for (size_t offset = 0; offset < requested_size; offset += 4999) {
+ size_t actual_offset = PartitionAllocGetSlotOffset<ThreadSafe>(
+ static_cast<char*>(ptr) + offset);
+ EXPECT_EQ(actual_offset, offset);
+ }
+#endif
// Check that we can write at the end of the reported size too.
char* char_ptr = reinterpret_cast<char*>(ptr);
*(char_ptr + (actual_size - 1)) = 'A';
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr);
// Allocate something very large, and uneven.
if (IsLargeMemoryDevice()) {
requested_size = 512 * 1024 * 1024 - 1;
- predicted_size = generic_allocator.root()->ActualSize(requested_size);
- ptr = generic_allocator.root()->Alloc(requested_size, type_name);
+ predicted_size = allocator.root()->ActualSize(requested_size);
+ ptr = allocator.root()->Alloc(requested_size, type_name);
EXPECT_TRUE(ptr);
- actual_size = PartitionAllocGetSize(ptr);
+ actual_size = PartitionAllocGetSize<ThreadSafe>(ptr);
EXPECT_EQ(predicted_size, actual_size);
EXPECT_LT(requested_size, actual_size);
- generic_allocator.root()->Free(ptr);
+ // Unlike above, don't test for PartitionAllocGetSlotOffset. Such large
+ // allocations are direct-mapped, for which one can't easily obtain the
+ // offset.
+ allocator.root()->Free(ptr);
}
// Too large allocation.
requested_size = kGenericMaxDirectMapped + 1;
- predicted_size = generic_allocator.root()->ActualSize(requested_size);
+ predicted_size = allocator.root()->ActualSize(requested_size);
EXPECT_EQ(requested_size, predicted_size);
}
+#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+TEST_F(PartitionAllocTest, GetOffsetMultiplePages) {
+ size_t size = 48;
+ size_t real_size = size + kExtraAllocSize;
+ PartitionBucket<ThreadSafe>* bucket =
+ allocator.root()->SizeToBucket(real_size);
+ // Make sure the test is testing multiple partition pages case.
+ EXPECT_GT(bucket->num_system_pages_per_slot_span,
+ kPartitionPageSize / kSystemPageSize);
+ size_t num_slots =
+ (bucket->num_system_pages_per_slot_span * kSystemPageSize) / real_size;
+ std::vector<void*> ptrs;
+ for (size_t i = 0; i < num_slots; ++i) {
+ ptrs.push_back(allocator.root()->Alloc(size, type_name));
+ }
+ for (size_t i = 0; i < num_slots; ++i) {
+ char* ptr = static_cast<char*>(ptrs[i]);
+ for (size_t offset = 0; offset < size; offset += 13) {
+ EXPECT_EQ(PartitionAllocGetSize<ThreadSafe>(ptr), size);
+ size_t actual_offset =
+ PartitionAllocGetSlotOffset<ThreadSafe>(ptr + offset);
+ EXPECT_EQ(actual_offset, offset);
+ }
+ allocator.root()->Free(ptr);
+ }
+}
+#endif // defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+
// Test the realloc() contract.
TEST_F(PartitionAllocTest, Realloc) {
// realloc(0, size) should be equivalent to malloc().
- void* ptr =
- generic_allocator.root()->Realloc(nullptr, kTestAllocSize, type_name);
+ void* ptr = allocator.root()->Realloc(nullptr, kTestAllocSize, type_name);
memset(ptr, 'A', kTestAllocSize);
- PartitionRootGeneric::Page* page = PartitionRootGeneric::Page::FromPointer(
- PartitionCookieFreePointerAdjust(ptr));
+ PartitionRoot<ThreadSafe>::Page* page =
+ PartitionRoot<ThreadSafe>::Page::FromPointer(
+ PartitionCookieFreePointerAdjust(ptr));
// realloc(ptr, 0) should be equivalent to free().
- void* ptr2 = generic_allocator.root()->Realloc(ptr, 0, type_name);
+ void* ptr2 = allocator.root()->Realloc(ptr, 0, type_name);
EXPECT_EQ(nullptr, ptr2);
EXPECT_EQ(PartitionCookieFreePointerAdjust(ptr), page->freelist_head);
// Test that growing an allocation with realloc() copies everything from the
// old allocation.
size_t size = kSystemPageSize - kExtraAllocSize;
- EXPECT_EQ(size, generic_allocator.root()->ActualSize(size));
- ptr = generic_allocator.root()->Alloc(size, type_name);
+ EXPECT_EQ(size, allocator.root()->ActualSize(size));
+ ptr = allocator.root()->Alloc(size, type_name);
memset(ptr, 'A', size);
- ptr2 = generic_allocator.root()->Realloc(ptr, size + 1, type_name);
+ ptr2 = allocator.root()->Realloc(ptr, size + 1, type_name);
EXPECT_NE(ptr, ptr2);
char* char_ptr2 = static_cast<char*>(ptr2);
EXPECT_EQ('A', char_ptr2[0]);
@@ -894,7 +958,7 @@ TEST_F(PartitionAllocTest, Realloc) {
// Test that shrinking an allocation with realloc() also copies everything
// from the old allocation.
- ptr = generic_allocator.root()->Realloc(ptr2, size - 1, type_name);
+ ptr = allocator.root()->Realloc(ptr2, size - 1, type_name);
EXPECT_NE(ptr2, ptr);
char* char_ptr = static_cast<char*>(ptr);
EXPECT_EQ('A', char_ptr[0]);
@@ -903,47 +967,47 @@ TEST_F(PartitionAllocTest, Realloc) {
EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(char_ptr[size - 1]));
#endif
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr);
// Test that shrinking a direct mapped allocation happens in-place.
size = kGenericMaxBucketed + 16 * kSystemPageSize;
- ptr = generic_allocator.root()->Alloc(size, type_name);
- size_t actual_size = PartitionAllocGetSize(ptr);
- ptr2 = generic_allocator.root()->Realloc(
+ ptr = allocator.root()->Alloc(size, type_name);
+ size_t actual_size = PartitionAllocGetSize<ThreadSafe>(ptr);
+ ptr2 = allocator.root()->Realloc(
ptr, kGenericMaxBucketed + 8 * kSystemPageSize, type_name);
EXPECT_EQ(ptr, ptr2);
- EXPECT_EQ(actual_size - 8 * kSystemPageSize, PartitionAllocGetSize(ptr2));
+ EXPECT_EQ(actual_size - 8 * kSystemPageSize,
+ PartitionAllocGetSize<ThreadSafe>(ptr2));
// Test that a previously in-place shrunk direct mapped allocation can be
// expanded up again within its original size.
- ptr = generic_allocator.root()->Realloc(ptr2, size - kSystemPageSize,
- type_name);
+ ptr = allocator.root()->Realloc(ptr2, size - kSystemPageSize, type_name);
EXPECT_EQ(ptr2, ptr);
- EXPECT_EQ(actual_size - kSystemPageSize, PartitionAllocGetSize(ptr));
+ EXPECT_EQ(actual_size - kSystemPageSize,
+ PartitionAllocGetSize<ThreadSafe>(ptr));
// Test that a direct mapped allocation is performed not in-place when the
// new size is small enough.
- ptr2 = generic_allocator.root()->Realloc(ptr, kSystemPageSize, type_name);
+ ptr2 = allocator.root()->Realloc(ptr, kSystemPageSize, type_name);
EXPECT_NE(ptr, ptr2);
- generic_allocator.root()->Free(ptr2);
+ allocator.root()->Free(ptr2);
}
// Tests the handing out of freelists for partial pages.
TEST_F(PartitionAllocTest, PartialPageFreelists) {
- size_t big_size = allocator.root()->max_allocation - kExtraAllocSize;
- EXPECT_EQ(kSystemPageSize - kAllocationGranularity,
- big_size + kExtraAllocSize);
- size_t bucket_index = (big_size + kExtraAllocSize) >> kBucketShift;
- PartitionBucket<base::internal::NotThreadSafe>* bucket =
- &allocator.root()->buckets()[bucket_index];
+ size_t big_size = kSystemPageSize - kExtraAllocSize;
+ size_t bucket_index = SizeToIndex(big_size + kExtraAllocSize);
+ PartitionRoot<ThreadSafe>::Bucket* bucket =
+ &allocator.root()->buckets[bucket_index];
EXPECT_EQ(nullptr, bucket->empty_pages_head);
void* ptr = allocator.root()->Alloc(big_size, type_name);
EXPECT_TRUE(ptr);
- PartitionRoot::Page* page =
- PartitionRoot::Page::FromPointer(PartitionCookieFreePointerAdjust(ptr));
+ PartitionRoot<ThreadSafe>::Page* page =
+ PartitionRoot<ThreadSafe>::Page::FromPointer(
+ PartitionCookieFreePointerAdjust(ptr));
size_t total_slots =
(page->bucket->num_system_pages_per_slot_span * kSystemPageSize) /
(big_size + kExtraAllocSize);
@@ -951,9 +1015,9 @@ TEST_F(PartitionAllocTest, PartialPageFreelists) {
// The freelist should have one entry, because we were able to exactly fit
// one object slot and one freelist pointer (the null that the head points
// to) into a system page.
- EXPECT_TRUE(page->freelist_head);
+ EXPECT_FALSE(page->freelist_head);
EXPECT_EQ(1, page->num_allocated_slots);
- EXPECT_EQ(2, page->num_unprovisioned_slots);
+ EXPECT_EQ(3, page->num_unprovisioned_slots);
void* ptr2 = allocator.root()->Alloc(big_size, type_name);
EXPECT_TRUE(ptr2);
@@ -963,9 +1027,9 @@ TEST_F(PartitionAllocTest, PartialPageFreelists) {
void* ptr3 = allocator.root()->Alloc(big_size, type_name);
EXPECT_TRUE(ptr3);
- EXPECT_TRUE(page->freelist_head);
+ EXPECT_FALSE(page->freelist_head);
EXPECT_EQ(3, page->num_allocated_slots);
- EXPECT_EQ(0, page->num_unprovisioned_slots);
+ EXPECT_EQ(1, page->num_unprovisioned_slots);
void* ptr4 = allocator.root()->Alloc(big_size, type_name);
EXPECT_TRUE(ptr4);
@@ -976,8 +1040,9 @@ TEST_F(PartitionAllocTest, PartialPageFreelists) {
void* ptr5 = allocator.root()->Alloc(big_size, type_name);
EXPECT_TRUE(ptr5);
- PartitionRoot::Page* page2 =
- PartitionRoot::Page::FromPointer(PartitionCookieFreePointerAdjust(ptr5));
+ PartitionRoot<ThreadSafe>::Page* page2 =
+ PartitionRoot<ThreadSafe>::Page::FromPointer(
+ PartitionCookieFreePointerAdjust(ptr5));
EXPECT_EQ(1, page2->num_allocated_slots);
// Churn things a little whilst there's a partial page freelist.
@@ -998,59 +1063,59 @@ TEST_F(PartitionAllocTest, PartialPageFreelists) {
// And test a couple of sizes that do not cross kSystemPageSize with a single
// allocation.
- size_t mediumSize = (kSystemPageSize / 2) - kExtraAllocSize;
- bucket_index = (mediumSize + kExtraAllocSize) >> kBucketShift;
- bucket = &allocator.root()->buckets()[bucket_index];
+ size_t medium_size = (kSystemPageSize / 2) - kExtraAllocSize;
+ bucket_index = SizeToIndex(medium_size + kExtraAllocSize);
+ bucket = &allocator.root()->buckets[bucket_index];
EXPECT_EQ(nullptr, bucket->empty_pages_head);
- ptr = allocator.root()->Alloc(mediumSize, type_name);
+ ptr = allocator.root()->Alloc(medium_size, type_name);
EXPECT_TRUE(ptr);
- page =
- PartitionRoot::Page::FromPointer(PartitionCookieFreePointerAdjust(ptr));
+ page = PartitionRoot<ThreadSafe>::Page::FromPointer(
+ PartitionCookieFreePointerAdjust(ptr));
EXPECT_EQ(1, page->num_allocated_slots);
total_slots =
(page->bucket->num_system_pages_per_slot_span * kSystemPageSize) /
- (mediumSize + kExtraAllocSize);
- size_t first_page_slots = kSystemPageSize / (mediumSize + kExtraAllocSize);
+ (medium_size + kExtraAllocSize);
+ size_t first_page_slots = kSystemPageSize / (medium_size + kExtraAllocSize);
EXPECT_EQ(2u, first_page_slots);
EXPECT_EQ(total_slots - first_page_slots, page->num_unprovisioned_slots);
allocator.root()->Free(ptr);
- size_t smallSize = (kSystemPageSize / 4) - kExtraAllocSize;
- bucket_index = (smallSize + kExtraAllocSize) >> kBucketShift;
- bucket = &allocator.root()->buckets()[bucket_index];
+ size_t small_size = (kSystemPageSize / 4) - kExtraAllocSize;
+ bucket_index = SizeToIndex(small_size + kExtraAllocSize);
+ bucket = &allocator.root()->buckets[bucket_index];
EXPECT_EQ(nullptr, bucket->empty_pages_head);
- ptr = allocator.root()->Alloc(smallSize, type_name);
+ ptr = allocator.root()->Alloc(small_size, type_name);
EXPECT_TRUE(ptr);
- page =
- PartitionRoot::Page::FromPointer(PartitionCookieFreePointerAdjust(ptr));
+ page = PartitionRoot<ThreadSafe>::Page::FromPointer(
+ PartitionCookieFreePointerAdjust(ptr));
EXPECT_EQ(1, page->num_allocated_slots);
total_slots =
(page->bucket->num_system_pages_per_slot_span * kSystemPageSize) /
- (smallSize + kExtraAllocSize);
- first_page_slots = kSystemPageSize / (smallSize + kExtraAllocSize);
+ (small_size + kExtraAllocSize);
+ first_page_slots = kSystemPageSize / (small_size + kExtraAllocSize);
EXPECT_EQ(total_slots - first_page_slots, page->num_unprovisioned_slots);
allocator.root()->Free(ptr);
EXPECT_TRUE(page->freelist_head);
EXPECT_EQ(0, page->num_allocated_slots);
- size_t verySmallSize = 32 - kExtraAllocSize;
- bucket_index = (verySmallSize + kExtraAllocSize) >> kBucketShift;
- bucket = &allocator.root()->buckets()[bucket_index];
+ size_t very_small_size = 32 - kExtraAllocSize;
+ bucket_index = SizeToIndex(very_small_size + kExtraAllocSize);
+ bucket = &allocator.root()->buckets[bucket_index];
EXPECT_EQ(nullptr, bucket->empty_pages_head);
- ptr = allocator.root()->Alloc(verySmallSize, type_name);
+ ptr = allocator.root()->Alloc(very_small_size, type_name);
EXPECT_TRUE(ptr);
- page =
- PartitionRoot::Page::FromPointer(PartitionCookieFreePointerAdjust(ptr));
+ page = PartitionRoot<ThreadSafe>::Page::FromPointer(
+ PartitionCookieFreePointerAdjust(ptr));
EXPECT_EQ(1, page->num_allocated_slots);
total_slots =
(page->bucket->num_system_pages_per_slot_span * kSystemPageSize) /
- (verySmallSize + kExtraAllocSize);
- first_page_slots = kSystemPageSize / (verySmallSize + kExtraAllocSize);
+ (very_small_size + kExtraAllocSize);
+ first_page_slots = kSystemPageSize / (very_small_size + kExtraAllocSize);
EXPECT_EQ(total_slots - first_page_slots, page->num_unprovisioned_slots);
allocator.root()->Free(ptr);
@@ -1061,54 +1126,57 @@ TEST_F(PartitionAllocTest, PartialPageFreelists) {
// larger than a system page.
size_t page_and_a_half_size =
(kSystemPageSize + (kSystemPageSize / 2)) - kExtraAllocSize;
- ptr = generic_allocator.root()->Alloc(page_and_a_half_size, type_name);
+ ptr = allocator.root()->Alloc(page_and_a_half_size, type_name);
EXPECT_TRUE(ptr);
- page =
- PartitionRoot::Page::FromPointer(PartitionCookieFreePointerAdjust(ptr));
+ page = PartitionRoot<ThreadSafe>::Page::FromPointer(
+ PartitionCookieFreePointerAdjust(ptr));
EXPECT_EQ(1, page->num_allocated_slots);
EXPECT_TRUE(page->freelist_head);
total_slots =
(page->bucket->num_system_pages_per_slot_span * kSystemPageSize) /
(page_and_a_half_size + kExtraAllocSize);
EXPECT_EQ(total_slots - 2, page->num_unprovisioned_slots);
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr);
// And then make sure than exactly the page size only faults one page.
size_t pageSize = kSystemPageSize - kExtraAllocSize;
- ptr = generic_allocator.root()->Alloc(pageSize, type_name);
+ ptr = allocator.root()->Alloc(pageSize, type_name);
EXPECT_TRUE(ptr);
- page =
- PartitionRoot::Page::FromPointer(PartitionCookieFreePointerAdjust(ptr));
+ page = PartitionRoot<ThreadSafe>::Page::FromPointer(
+ PartitionCookieFreePointerAdjust(ptr));
EXPECT_EQ(1, page->num_allocated_slots);
- EXPECT_FALSE(page->freelist_head);
+ EXPECT_TRUE(page->freelist_head);
total_slots =
(page->bucket->num_system_pages_per_slot_span * kSystemPageSize) /
(pageSize + kExtraAllocSize);
- EXPECT_EQ(total_slots - 1, page->num_unprovisioned_slots);
- generic_allocator.root()->Free(ptr);
+ EXPECT_EQ(total_slots - 2, page->num_unprovisioned_slots);
+ allocator.root()->Free(ptr);
}
// Test some of the fragmentation-resistant properties of the allocator.
TEST_F(PartitionAllocTest, PageRefilling) {
- PartitionRoot::Bucket* bucket =
- &allocator.root()->buckets()[kTestBucketIndex];
+ PartitionRoot<ThreadSafe>::Bucket* bucket =
+ &allocator.root()->buckets[test_bucket_index_];
// Grab two full pages and a non-full page.
- PartitionRoot::Page* page1 = GetFullPage(kTestAllocSize);
- PartitionRoot::Page* page2 = GetFullPage(kTestAllocSize);
+ PartitionRoot<ThreadSafe>::Page* page1 = GetFullPage(kTestAllocSize);
+ PartitionRoot<ThreadSafe>::Page* page2 = GetFullPage(kTestAllocSize);
void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
EXPECT_TRUE(ptr);
EXPECT_NE(page1, bucket->active_pages_head);
EXPECT_NE(page2, bucket->active_pages_head);
- PartitionRoot::Page* page =
- PartitionRoot::Page::FromPointer(PartitionCookieFreePointerAdjust(ptr));
+ PartitionRoot<ThreadSafe>::Page* page =
+ PartitionRoot<ThreadSafe>::Page::FromPointer(
+ PartitionCookieFreePointerAdjust(ptr));
EXPECT_EQ(1, page->num_allocated_slots);
// Work out a pointer into page2 and free it; and then page1 and free it.
- char* ptr2 = reinterpret_cast<char*>(PartitionRoot::Page::ToPointer(page1)) +
+ char* ptr2 = reinterpret_cast<char*>(
+ PartitionRoot<ThreadSafe>::Page::ToPointer(page1)) +
kPointerOffset;
allocator.root()->Free(ptr2);
- ptr2 = reinterpret_cast<char*>(PartitionRoot::Page::ToPointer(page2)) +
+ ptr2 = reinterpret_cast<char*>(
+ PartitionRoot<ThreadSafe>::Page::ToPointer(page2)) +
kPointerOffset;
allocator.root()->Free(ptr2);
@@ -1127,18 +1195,21 @@ TEST_F(PartitionAllocTest, PageRefilling) {
TEST_F(PartitionAllocTest, PartialPages) {
// Find a size that is backed by a partial partition page.
size_t size = sizeof(void*);
- PartitionRoot::Bucket* bucket = nullptr;
- while (size < kTestMaxAllocation) {
- bucket = &allocator.root()->buckets()[size >> kBucketShift];
+ size_t bucket_index;
+
+ PartitionRoot<ThreadSafe>::Bucket* bucket = nullptr;
+ while (size < 1000u) {
+ bucket_index = SizeToIndex(size + kExtraAllocSize);
+ bucket = &allocator.root()->buckets[bucket_index];
if (bucket->num_system_pages_per_slot_span %
kNumSystemPagesPerPartitionPage)
break;
size += sizeof(void*);
}
- EXPECT_LT(size, kTestMaxAllocation);
+ EXPECT_LT(size, 1000u);
- PartitionRoot::Page* page1 = GetFullPage(size);
- PartitionRoot::Page* page2 = GetFullPage(size);
+ PartitionRoot<ThreadSafe>::Page* page1 = GetFullPage(size);
+ PartitionRoot<ThreadSafe>::Page* page2 = GetFullPage(size);
FreeFullPage(allocator.root(), page2);
FreeFullPage(allocator.root(), page1);
}
@@ -1149,16 +1220,18 @@ TEST_F(PartitionAllocTest, MappingCollision) {
// guard pages.
size_t num_partition_pages_needed = kNumPartitionPagesPerSuperPage - 2;
auto first_super_page_pages =
- std::make_unique<PartitionRoot::Page*[]>(num_partition_pages_needed);
+ std::make_unique<PartitionRoot<ThreadSafe>::Page*[]>(
+ num_partition_pages_needed);
auto second_super_page_pages =
- std::make_unique<PartitionRoot::Page*[]>(num_partition_pages_needed);
+ std::make_unique<PartitionRoot<ThreadSafe>::Page*[]>(
+ num_partition_pages_needed);
size_t i;
for (i = 0; i < num_partition_pages_needed; ++i)
first_super_page_pages[i] = GetFullPage(kTestAllocSize);
char* page_base = reinterpret_cast<char*>(
- PartitionRoot::Page::ToPointer(first_super_page_pages[0]));
+ PartitionRoot<ThreadSafe>::Page::ToPointer(first_super_page_pages[0]));
EXPECT_EQ(kPartitionPageSize,
reinterpret_cast<uintptr_t>(page_base) & kSuperPageOffsetMask);
page_base -= kPartitionPageSize;
@@ -1180,7 +1253,7 @@ TEST_F(PartitionAllocTest, MappingCollision) {
FreePages(map2, kPageAllocationGranularity);
page_base = reinterpret_cast<char*>(
- PartitionRoot::Page::ToPointer(second_super_page_pages[0]));
+ PartitionRoot<ThreadSafe>::Page::ToPointer(second_super_page_pages[0]));
EXPECT_EQ(kPartitionPageSize,
reinterpret_cast<uintptr_t>(page_base) & kSuperPageOffsetMask);
page_base -= kPartitionPageSize;
@@ -1199,27 +1272,31 @@ TEST_F(PartitionAllocTest, MappingCollision) {
EXPECT_TRUE(TrySetSystemPagesAccess(map2, kPageAllocationGranularity,
PageInaccessible));
- PartitionRoot::Page* page_in_third_super_page = GetFullPage(kTestAllocSize);
+ PartitionRoot<ThreadSafe>::Page* page_in_third_super_page =
+ GetFullPage(kTestAllocSize);
FreePages(map1, kPageAllocationGranularity);
FreePages(map2, kPageAllocationGranularity);
EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(
- PartitionRoot::Page::ToPointer(page_in_third_super_page)) &
+ PartitionRoot<ThreadSafe>::Page::ToPointer(
+ page_in_third_super_page)) &
kPartitionPageOffsetMask);
// And make sure we really did get a page in a new superpage.
- EXPECT_NE(reinterpret_cast<uintptr_t>(
- PartitionRoot::Page::ToPointer(first_super_page_pages[0])) &
- kSuperPageBaseMask,
- reinterpret_cast<uintptr_t>(
- PartitionRoot::Page::ToPointer(page_in_third_super_page)) &
- kSuperPageBaseMask);
- EXPECT_NE(reinterpret_cast<uintptr_t>(
- PartitionRoot::Page::ToPointer(second_super_page_pages[0])) &
- kSuperPageBaseMask,
- reinterpret_cast<uintptr_t>(
- PartitionRoot::Page::ToPointer(page_in_third_super_page)) &
- kSuperPageBaseMask);
+ EXPECT_NE(
+ reinterpret_cast<uintptr_t>(PartitionRoot<ThreadSafe>::Page::ToPointer(
+ first_super_page_pages[0])) &
+ kSuperPageBaseMask,
+ reinterpret_cast<uintptr_t>(PartitionRoot<ThreadSafe>::Page::ToPointer(
+ page_in_third_super_page)) &
+ kSuperPageBaseMask);
+ EXPECT_NE(
+ reinterpret_cast<uintptr_t>(PartitionRoot<ThreadSafe>::Page::ToPointer(
+ second_super_page_pages[0])) &
+ kSuperPageBaseMask,
+ reinterpret_cast<uintptr_t>(PartitionRoot<ThreadSafe>::Page::ToPointer(
+ page_in_third_super_page)) &
+ kSuperPageBaseMask);
FreeFullPage(allocator.root(), page_in_third_super_page);
for (i = 0; i < num_partition_pages_needed; ++i) {
@@ -1232,18 +1309,20 @@ TEST_F(PartitionAllocTest, MappingCollision) {
TEST_F(PartitionAllocTest, FreeCache) {
EXPECT_EQ(0U, allocator.root()->total_size_of_committed_pages);
- size_t big_size = allocator.root()->max_allocation - kExtraAllocSize;
- size_t bucket_index = (big_size + kExtraAllocSize) >> kBucketShift;
- PartitionBucket<base::internal::NotThreadSafe>* bucket =
- &allocator.root()->buckets()[bucket_index];
+ size_t big_size = 1000 - kExtraAllocSize;
+ size_t bucket_index = SizeToIndex(big_size + kExtraAllocSize);
+ PartitionBucket<base::internal::ThreadSafe>* bucket =
+ &allocator.root()->buckets[bucket_index];
void* ptr = allocator.root()->Alloc(big_size, type_name);
EXPECT_TRUE(ptr);
- PartitionRoot::Page* page =
- PartitionRoot::Page::FromPointer(PartitionCookieFreePointerAdjust(ptr));
+ PartitionRoot<ThreadSafe>::Page* page =
+ PartitionRoot<ThreadSafe>::Page::FromPointer(
+ PartitionCookieFreePointerAdjust(ptr));
EXPECT_EQ(nullptr, bucket->empty_pages_head);
EXPECT_EQ(1, page->num_allocated_slots);
- EXPECT_EQ(kPartitionPageSize,
+ size_t expected_committed_size = kPartitionPageSize;
+ EXPECT_EQ(expected_committed_size,
allocator.root()->total_size_of_committed_pages);
allocator.root()->Free(ptr);
EXPECT_EQ(0, page->num_allocated_slots);
@@ -1256,11 +1335,11 @@ TEST_F(PartitionAllocTest, FreeCache) {
EXPECT_FALSE(page->freelist_head);
EXPECT_EQ(-1, page->empty_cache_index);
EXPECT_EQ(0, page->num_allocated_slots);
- PartitionBucket<base::internal::NotThreadSafe>* cycle_free_cache_bucket =
- &allocator.root()->buckets()[kTestBucketIndex];
- EXPECT_EQ(
- cycle_free_cache_bucket->num_system_pages_per_slot_span * kSystemPageSize,
- allocator.root()->total_size_of_committed_pages);
+ PartitionBucket<base::internal::ThreadSafe>* cycle_free_cache_bucket =
+ &allocator.root()->buckets[test_bucket_index_];
+ size_t expected_size =
+ cycle_free_cache_bucket->num_system_pages_per_slot_span * kSystemPageSize;
+ EXPECT_EQ(expected_size, allocator.root()->total_size_of_committed_pages);
// Check that an allocation works ok whilst in this state (a free'd page
// as the active pages head).
@@ -1276,7 +1355,7 @@ TEST_F(PartitionAllocTest, FreeCache) {
allocator.root()->Free(ptr);
EXPECT_TRUE(page->freelist_head);
}
- EXPECT_EQ(kPartitionPageSize,
+ EXPECT_EQ(expected_committed_size,
allocator.root()->total_size_of_committed_pages);
}
@@ -1284,9 +1363,9 @@ TEST_F(PartitionAllocTest, FreeCache) {
TEST_F(PartitionAllocTest, LostFreePagesBug) {
size_t size = kPartitionPageSize - kExtraAllocSize;
- void* ptr = generic_allocator.root()->Alloc(size, type_name);
+ void* ptr = allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr);
- void* ptr2 = generic_allocator.root()->Alloc(size, type_name);
+ void* ptr2 = allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr2);
PartitionPage<base::internal::ThreadSafe>* page =
@@ -1301,8 +1380,8 @@ TEST_F(PartitionAllocTest, LostFreePagesBug) {
EXPECT_EQ(-1, page->num_allocated_slots);
EXPECT_EQ(1, page2->num_allocated_slots);
- generic_allocator.root()->Free(ptr);
- generic_allocator.root()->Free(ptr2);
+ allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr2);
EXPECT_TRUE(bucket->empty_pages_head);
EXPECT_TRUE(bucket->empty_pages_head->next_page);
@@ -1311,7 +1390,7 @@ TEST_F(PartitionAllocTest, LostFreePagesBug) {
EXPECT_TRUE(page->freelist_head);
EXPECT_TRUE(page2->freelist_head);
- CycleGenericFreeCache(kTestAllocSize);
+ CycleFreeCache(kTestAllocSize);
EXPECT_FALSE(page->freelist_head);
EXPECT_FALSE(page2->freelist_head);
@@ -1322,23 +1401,23 @@ TEST_F(PartitionAllocTest, LostFreePagesBug) {
bucket->active_pages_head);
// At this moment, we have two decommitted pages, on the empty list.
- ptr = generic_allocator.root()->Alloc(size, type_name);
+ ptr = allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr);
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr);
EXPECT_EQ(PartitionPage<base::internal::ThreadSafe>::get_sentinel_page(),
bucket->active_pages_head);
EXPECT_TRUE(bucket->empty_pages_head);
EXPECT_TRUE(bucket->decommitted_pages_head);
- CycleGenericFreeCache(kTestAllocSize);
+ CycleFreeCache(kTestAllocSize);
// We're now set up to trigger a historical bug by scanning over the active
// pages list. The current code gets into a different state, but we'll keep
// the test as being an interesting corner case.
- ptr = generic_allocator.root()->Alloc(size, type_name);
+ ptr = allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr);
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr);
EXPECT_TRUE(bucket->active_pages_head);
EXPECT_TRUE(bucket->empty_pages_head);
@@ -1421,35 +1500,33 @@ TEST_F(PartitionAllocDeathTest, RepeatedTryReallocReturnNull) {
// malloc(0), which is not good.
TEST_F(PartitionAllocDeathTest, LargeAllocs) {
// Largest alloc.
- EXPECT_DEATH(
- generic_allocator.root()->Alloc(static_cast<size_t>(-1), type_name), "");
+ EXPECT_DEATH(allocator.root()->Alloc(static_cast<size_t>(-1), type_name), "");
// And the smallest allocation we expect to die.
- EXPECT_DEATH(
- generic_allocator.root()->Alloc(kGenericMaxDirectMapped + 1, type_name),
- "");
+ EXPECT_DEATH(allocator.root()->Alloc(kGenericMaxDirectMapped + 1, type_name),
+ "");
}
// Check that our immediate double-free detection works.
TEST_F(PartitionAllocDeathTest, ImmediateDoubleFree) {
- void* ptr = generic_allocator.root()->Alloc(kTestAllocSize, type_name);
+ void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
EXPECT_TRUE(ptr);
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr);
- EXPECT_DEATH(generic_allocator.root()->Free(ptr), "");
+ EXPECT_DEATH(allocator.root()->Free(ptr), "");
}
// Check that our refcount-based double-free detection works.
TEST_F(PartitionAllocDeathTest, RefcountDoubleFree) {
- void* ptr = generic_allocator.root()->Alloc(kTestAllocSize, type_name);
+ void* ptr = allocator.root()->Alloc(kTestAllocSize, type_name);
EXPECT_TRUE(ptr);
- void* ptr2 = generic_allocator.root()->Alloc(kTestAllocSize, type_name);
+ void* ptr2 = allocator.root()->Alloc(kTestAllocSize, type_name);
EXPECT_TRUE(ptr2);
- generic_allocator.root()->Free(ptr);
- generic_allocator.root()->Free(ptr2);
+ allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr2);
// This is not an immediate double-free so our immediate detection won't
// fire. However, it does take the "refcount" of the partition page to -1,
// which is illegal and should be trapped.
- EXPECT_DEATH(generic_allocator.root()->Free(ptr), "");
+ EXPECT_DEATH(allocator.root()->Free(ptr), "");
}
// Check that guard pages are present where expected.
@@ -1474,7 +1551,7 @@ TEST_F(PartitionAllocDeathTest, GuardPages) {
static_assert(kSize > kGenericMaxBucketed,
"allocation not large enough for direct allocation");
size_t size = kSize - kExtraAllocSize;
- void* ptr = generic_allocator.root()->Alloc(size, type_name);
+ void* ptr = allocator.root()->Alloc(size, type_name);
EXPECT_TRUE(ptr);
char* char_ptr = reinterpret_cast<char*>(ptr) - kPointerOffset;
@@ -1482,22 +1559,7 @@ TEST_F(PartitionAllocDeathTest, GuardPages) {
EXPECT_DEATH(*(char_ptr - 1) = 'A', "");
EXPECT_DEATH(*(char_ptr + size + kExtraAllocSize) = 'A', "");
- generic_allocator.root()->Free(ptr);
-}
-
-// Check that a bad free() is caught where the free() refers to an unused
-// partition page of a large allocation.
-TEST_F(PartitionAllocDeathTest, FreeWrongPartitionPage) {
- // This large size will result in a direct mapped allocation with guard
- // pages at either end.
- void* ptr =
- generic_allocator.root()->Alloc(kPartitionPageSize * 2, type_name);
- EXPECT_TRUE(ptr);
- char* badPtr = reinterpret_cast<char*>(ptr) + kPartitionPageSize;
-
- EXPECT_DEATH(generic_allocator.root()->Free(badPtr), "");
-
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr);
}
#endif // !defined(OS_ANDROID) && !defined(OS_IOS)
@@ -1517,11 +1579,10 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
// This series of tests checks the active -> empty -> decommitted states.
{
{
- void* ptr =
- generic_allocator.root()->Alloc(2048 - kExtraAllocSize, type_name);
+ void* ptr = allocator.root()->Alloc(2048 - kExtraAllocSize, type_name);
MockPartitionStatsDumper dumper;
- generic_allocator.root()->DumpStats("mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+ &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
@@ -1536,13 +1597,13 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
EXPECT_EQ(1u, stats->num_active_pages);
EXPECT_EQ(0u, stats->num_empty_pages);
EXPECT_EQ(0u, stats->num_decommitted_pages);
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr);
}
{
MockPartitionStatsDumper dumper;
- generic_allocator.root()->DumpStats("mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+ &dumper);
EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
@@ -1562,12 +1623,12 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
// TODO(crbug.com/722911): Commenting this out causes this test to fail when
// run singly (--gtest_filter=PartitionAllocTest.DumpMemoryStats), but not
// when run with the others (--gtest_filter=PartitionAllocTest.*).
- CycleGenericFreeCache(kTestAllocSize);
+ CycleFreeCache(kTestAllocSize);
{
MockPartitionStatsDumper dumper;
- generic_allocator.root()->DumpStats("mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+ &dumper);
EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
@@ -1588,19 +1649,19 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
// This test checks for correct empty page list accounting.
{
size_t size = kPartitionPageSize - kExtraAllocSize;
- void* ptr1 = generic_allocator.root()->Alloc(size, type_name);
- void* ptr2 = generic_allocator.root()->Alloc(size, type_name);
- generic_allocator.root()->Free(ptr1);
- generic_allocator.root()->Free(ptr2);
+ void* ptr1 = allocator.root()->Alloc(size, type_name);
+ void* ptr2 = allocator.root()->Alloc(size, type_name);
+ allocator.root()->Free(ptr1);
+ allocator.root()->Free(ptr2);
- CycleGenericFreeCache(kTestAllocSize);
+ CycleFreeCache(kTestAllocSize);
- ptr1 = generic_allocator.root()->Alloc(size, type_name);
+ ptr1 = allocator.root()->Alloc(size, type_name);
{
MockPartitionStatsDumper dumper;
- generic_allocator.root()->DumpStats("mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+ &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats =
@@ -1617,7 +1678,7 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
EXPECT_EQ(0u, stats->num_empty_pages);
EXPECT_EQ(1u, stats->num_decommitted_pages);
}
- generic_allocator.root()->Free(ptr1);
+ allocator.root()->Free(ptr1);
}
// This test checks for correct direct mapped accounting.
@@ -1628,13 +1689,13 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
(size_smaller + kSystemPageOffsetMask) & kSystemPageBaseMask;
size_t real_size_bigger =
(size_bigger + kSystemPageOffsetMask) & kSystemPageBaseMask;
- void* ptr = generic_allocator.root()->Alloc(size_smaller, type_name);
- void* ptr2 = generic_allocator.root()->Alloc(size_bigger, type_name);
+ void* ptr = allocator.root()->Alloc(size_smaller, type_name);
+ void* ptr2 = allocator.root()->Alloc(size_bigger, type_name);
{
MockPartitionStatsDumper dumper;
- generic_allocator.root()->DumpStats("mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+ &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats =
@@ -1667,26 +1728,26 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
EXPECT_EQ(0u, stats->num_decommitted_pages);
}
- generic_allocator.root()->Free(ptr2);
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr2);
+ allocator.root()->Free(ptr);
// Whilst we're here, allocate again and free with different ordering to
// give a workout to our linked list code.
- ptr = generic_allocator.root()->Alloc(size_smaller, type_name);
- ptr2 = generic_allocator.root()->Alloc(size_bigger, type_name);
- generic_allocator.root()->Free(ptr);
- generic_allocator.root()->Free(ptr2);
+ ptr = allocator.root()->Alloc(size_smaller, type_name);
+ ptr2 = allocator.root()->Alloc(size_bigger, type_name);
+ allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr2);
}
// This test checks large-but-not-quite-direct allocations.
{
constexpr size_t requested_size = 16 * kSystemPageSize;
- void* ptr = generic_allocator.root()->Alloc(requested_size + 1, type_name);
+ void* ptr = allocator.root()->Alloc(requested_size + 1, type_name);
{
MockPartitionStatsDumper dumper;
- generic_allocator.root()->DumpStats("mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+ &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
size_t slot_size =
@@ -1707,12 +1768,12 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
EXPECT_EQ(0u, stats->num_decommitted_pages);
}
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr);
{
MockPartitionStatsDumper dumper;
- generic_allocator.root()->DumpStats("mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+ &dumper);
EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
size_t slot_size =
@@ -1732,14 +1793,14 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
EXPECT_EQ(0u, stats->num_decommitted_pages);
}
- void* ptr2 = generic_allocator.root()->Alloc(
- requested_size + kSystemPageSize + 1, type_name);
+ void* ptr2 = allocator.root()->Alloc(requested_size + kSystemPageSize + 1,
+ type_name);
EXPECT_EQ(ptr, ptr2);
{
MockPartitionStatsDumper dumper;
- generic_allocator.root()->DumpStats("mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+ &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
size_t slot_size =
@@ -1761,19 +1822,19 @@ TEST_F(PartitionAllocTest, DumpMemoryStats) {
EXPECT_EQ(0u, stats->num_decommitted_pages);
}
- generic_allocator.root()->Free(ptr2);
+ allocator.root()->Free(ptr2);
}
}
// Tests the API to purge freeable memory.
TEST_F(PartitionAllocTest, Purge) {
char* ptr = reinterpret_cast<char*>(
- generic_allocator.root()->Alloc(2048 - kExtraAllocSize, type_name));
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Alloc(2048 - kExtraAllocSize, type_name));
+ allocator.root()->Free(ptr);
{
MockPartitionStatsDumper dumper;
- generic_allocator.root()->DumpStats("mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+ &dumper);
EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
@@ -1782,11 +1843,11 @@ TEST_F(PartitionAllocTest, Purge) {
EXPECT_EQ(kSystemPageSize, stats->decommittable_bytes);
EXPECT_EQ(kSystemPageSize, stats->resident_bytes);
}
- generic_allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages);
+ allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages);
{
MockPartitionStatsDumper dumper;
- generic_allocator.root()->DumpStats("mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+ &dumper);
EXPECT_FALSE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048);
@@ -1797,12 +1858,12 @@ TEST_F(PartitionAllocTest, Purge) {
}
// Calling purge again here is a good way of testing we didn't mess up the
// state of the free cache ring.
- generic_allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages);
+ allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages);
- char* big_ptr = reinterpret_cast<char*>(
- generic_allocator.root()->Alloc(256 * 1024, type_name));
- generic_allocator.root()->Free(big_ptr);
- generic_allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages);
+ char* big_ptr =
+ reinterpret_cast<char*>(allocator.root()->Alloc(256 * 1024, type_name));
+ allocator.root()->Free(big_ptr);
+ allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages);
CHECK_PAGE_IN_CORE(ptr - kPointerOffset, false);
CHECK_PAGE_IN_CORE(big_ptr - kPointerOffset, false);
@@ -1816,12 +1877,12 @@ TEST_F(PartitionAllocTest, PreferActiveOverEmpty) {
// Allocate 3 full slot spans worth of 8192-byte allocations.
// Each slot span for this size is 16384 bytes, or 1 partition page and 2
// slots.
- void* ptr1 = generic_allocator.root()->Alloc(size, type_name);
- void* ptr2 = generic_allocator.root()->Alloc(size, type_name);
- void* ptr3 = generic_allocator.root()->Alloc(size, type_name);
- void* ptr4 = generic_allocator.root()->Alloc(size, type_name);
- void* ptr5 = generic_allocator.root()->Alloc(size, type_name);
- void* ptr6 = generic_allocator.root()->Alloc(size, type_name);
+ void* ptr1 = allocator.root()->Alloc(size, type_name);
+ void* ptr2 = allocator.root()->Alloc(size, type_name);
+ void* ptr3 = allocator.root()->Alloc(size, type_name);
+ void* ptr4 = allocator.root()->Alloc(size, type_name);
+ void* ptr5 = allocator.root()->Alloc(size, type_name);
+ void* ptr6 = allocator.root()->Alloc(size, type_name);
PartitionPage<base::internal::ThreadSafe>* page1 =
PartitionPage<base::internal::ThreadSafe>::FromPointer(
@@ -1840,45 +1901,45 @@ TEST_F(PartitionAllocTest, PreferActiveOverEmpty) {
// Free up the 2nd slot in each slot span.
// This leaves the active list containing 3 pages, each with 1 used and 1
// free slot. The active page will be the one containing ptr1.
- generic_allocator.root()->Free(ptr6);
- generic_allocator.root()->Free(ptr4);
- generic_allocator.root()->Free(ptr2);
+ allocator.root()->Free(ptr6);
+ allocator.root()->Free(ptr4);
+ allocator.root()->Free(ptr2);
EXPECT_EQ(page1, bucket->active_pages_head);
// Empty the middle page in the active list.
- generic_allocator.root()->Free(ptr3);
+ allocator.root()->Free(ptr3);
EXPECT_EQ(page1, bucket->active_pages_head);
// Empty the the first page in the active list -- also the current page.
- generic_allocator.root()->Free(ptr1);
+ allocator.root()->Free(ptr1);
// A good choice here is to re-fill the third page since the first two are
// empty. We used to fail that.
- void* ptr7 = generic_allocator.root()->Alloc(size, type_name);
+ void* ptr7 = allocator.root()->Alloc(size, type_name);
EXPECT_EQ(ptr6, ptr7);
EXPECT_EQ(page3, bucket->active_pages_head);
- generic_allocator.root()->Free(ptr5);
- generic_allocator.root()->Free(ptr7);
+ allocator.root()->Free(ptr5);
+ allocator.root()->Free(ptr7);
}
// Tests the API to purge discardable memory.
TEST_F(PartitionAllocTest, PurgeDiscardable) {
// Free the second of two 4096 byte allocations and then purge.
{
- void* ptr1 = generic_allocator.root()->Alloc(
- kSystemPageSize - kExtraAllocSize, type_name);
- char* ptr2 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
- kSystemPageSize - kExtraAllocSize, type_name));
- generic_allocator.root()->Free(ptr2);
+ void* ptr1 =
+ allocator.root()->Alloc(kSystemPageSize - kExtraAllocSize, type_name);
+ char* ptr2 = reinterpret_cast<char*>(
+ allocator.root()->Alloc(kSystemPageSize - kExtraAllocSize, type_name));
+ allocator.root()->Free(ptr2);
PartitionPage<base::internal::ThreadSafe>* page =
PartitionPage<base::internal::ThreadSafe>::FromPointer(
PartitionCookieFreePointerAdjust(ptr1));
EXPECT_EQ(2u, page->num_unprovisioned_slots);
{
MockPartitionStatsDumper dumper;
- generic_allocator.root()->DumpStats("mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+ &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats =
@@ -1891,24 +1952,23 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
EXPECT_EQ(2 * kSystemPageSize, stats->resident_bytes);
}
CHECK_PAGE_IN_CORE(ptr2 - kPointerOffset, true);
- generic_allocator.root()->PurgeMemory(
- PartitionPurgeDiscardUnusedSystemPages);
+ allocator.root()->PurgeMemory(PartitionPurgeDiscardUnusedSystemPages);
CHECK_PAGE_IN_CORE(ptr2 - kPointerOffset, false);
EXPECT_EQ(3u, page->num_unprovisioned_slots);
- generic_allocator.root()->Free(ptr1);
+ allocator.root()->Free(ptr1);
}
// Free the first of two 4096 byte allocations and then purge.
{
- char* ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
- kSystemPageSize - kExtraAllocSize, type_name));
- void* ptr2 = generic_allocator.root()->Alloc(
- kSystemPageSize - kExtraAllocSize, type_name);
- generic_allocator.root()->Free(ptr1);
+ char* ptr1 = reinterpret_cast<char*>(
+ allocator.root()->Alloc(kSystemPageSize - kExtraAllocSize, type_name));
+ void* ptr2 =
+ allocator.root()->Alloc(kSystemPageSize - kExtraAllocSize, type_name);
+ allocator.root()->Free(ptr1);
{
MockPartitionStatsDumper dumper;
- generic_allocator.root()->DumpStats("mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+ &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats =
@@ -1925,30 +1985,29 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
EXPECT_EQ(2 * kSystemPageSize, stats->resident_bytes);
}
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
- generic_allocator.root()->PurgeMemory(
- PartitionPurgeDiscardUnusedSystemPages);
+ allocator.root()->PurgeMemory(PartitionPurgeDiscardUnusedSystemPages);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, false);
- generic_allocator.root()->Free(ptr2);
+ allocator.root()->Free(ptr2);
}
{
constexpr size_t requested_size = 2.25 * kSystemPageSize;
- char* ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
- requested_size - kExtraAllocSize, type_name));
- void* ptr2 = generic_allocator.root()->Alloc(
- requested_size - kExtraAllocSize, type_name);
- void* ptr3 = generic_allocator.root()->Alloc(
- requested_size - kExtraAllocSize, type_name);
- void* ptr4 = generic_allocator.root()->Alloc(
- requested_size - kExtraAllocSize, type_name);
+ char* ptr1 = reinterpret_cast<char*>(
+ allocator.root()->Alloc(requested_size - kExtraAllocSize, type_name));
+ void* ptr2 =
+ allocator.root()->Alloc(requested_size - kExtraAllocSize, type_name);
+ void* ptr3 =
+ allocator.root()->Alloc(requested_size - kExtraAllocSize, type_name);
+ void* ptr4 =
+ allocator.root()->Alloc(requested_size - kExtraAllocSize, type_name);
memset(ptr1, 'A', requested_size - kExtraAllocSize);
memset(ptr2, 'A', requested_size - kExtraAllocSize);
- generic_allocator.root()->Free(ptr2);
- generic_allocator.root()->Free(ptr1);
+ allocator.root()->Free(ptr2);
+ allocator.root()->Free(ptr1);
{
MockPartitionStatsDumper dumper;
- generic_allocator.root()->DumpStats("mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+ &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats =
@@ -1965,16 +2024,15 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 3), true);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 4), true);
- generic_allocator.root()->PurgeMemory(
- PartitionPurgeDiscardUnusedSystemPages);
+ allocator.root()->PurgeMemory(PartitionPurgeDiscardUnusedSystemPages);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + kSystemPageSize, false);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 3), false);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 4), true);
- generic_allocator.root()->Free(ptr3);
- generic_allocator.root()->Free(ptr4);
+ allocator.root()->Free(ptr3);
+ allocator.root()->Free(ptr4);
}
// When kSystemPageSize = 16384 (as on _MIPS_ARCH_LOONGSON), 64 *
@@ -1985,16 +2043,16 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
// for clarity of purpose and for applicability to more architectures.
#if defined(_MIPS_ARCH_LOONGSON)
{
- char* ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
+ char* ptr1 = reinterpret_cast<char*>(allocator.root()->Alloc(
(32 * kSystemPageSize) - kExtraAllocSize, type_name));
memset(ptr1, 'A', (32 * kSystemPageSize) - kExtraAllocSize);
- generic_allocator.root()->Free(ptr1);
- ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
+ allocator.root()->Free(ptr1);
+ ptr1 = reinterpret_cast<char*>(allocator.root()->Alloc(
(31 * kSystemPageSize) - kExtraAllocSize, type_name));
{
MockPartitionStatsDumper dumper;
- generic_allocator.root()->DumpStats("mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+ &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats =
@@ -2008,25 +2066,24 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
}
CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 30), true);
CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 31), true);
- generic_allocator.root()->PurgeMemory(
- PartitionPurgeDiscardUnusedSystemPages);
+ allocator.root()->PurgeMemory(PartitionPurgeDiscardUnusedSystemPages);
CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 30), true);
CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 31), false);
- generic_allocator.root()->Free(ptr1);
+ allocator.root()->Free(ptr1);
}
#else
{
- char* ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
+ char* ptr1 = reinterpret_cast<char*>(allocator.root()->Alloc(
(64 * kSystemPageSize) - kExtraAllocSize, type_name));
memset(ptr1, 'A', (64 * kSystemPageSize) - kExtraAllocSize);
- generic_allocator.root()->Free(ptr1);
- ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
+ allocator.root()->Free(ptr1);
+ ptr1 = reinterpret_cast<char*>(allocator.root()->Alloc(
(61 * kSystemPageSize) - kExtraAllocSize, type_name));
{
MockPartitionStatsDumper dumper;
- generic_allocator.root()->DumpStats("mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+ &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats =
@@ -2042,28 +2099,27 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 61), true);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 62), true);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 63), true);
- generic_allocator.root()->PurgeMemory(
- PartitionPurgeDiscardUnusedSystemPages);
+ allocator.root()->PurgeMemory(PartitionPurgeDiscardUnusedSystemPages);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 60), true);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 61), false);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 62), false);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 63), false);
- generic_allocator.root()->Free(ptr1);
+ allocator.root()->Free(ptr1);
}
#endif
// This sub-test tests truncation of the provisioned slots in a trickier
// case where the freelist is rewritten.
- generic_allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages);
+ allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages);
{
- char* ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
- kSystemPageSize - kExtraAllocSize, type_name));
- void* ptr2 = generic_allocator.root()->Alloc(
- kSystemPageSize - kExtraAllocSize, type_name);
- void* ptr3 = generic_allocator.root()->Alloc(
- kSystemPageSize - kExtraAllocSize, type_name);
- void* ptr4 = generic_allocator.root()->Alloc(
- kSystemPageSize - kExtraAllocSize, type_name);
+ char* ptr1 = reinterpret_cast<char*>(
+ allocator.root()->Alloc(kSystemPageSize - kExtraAllocSize, type_name));
+ void* ptr2 =
+ allocator.root()->Alloc(kSystemPageSize - kExtraAllocSize, type_name);
+ void* ptr3 =
+ allocator.root()->Alloc(kSystemPageSize - kExtraAllocSize, type_name);
+ void* ptr4 =
+ allocator.root()->Alloc(kSystemPageSize - kExtraAllocSize, type_name);
ptr1[0] = 'A';
ptr1[kSystemPageSize] = 'A';
ptr1[kSystemPageSize * 2] = 'A';
@@ -2071,15 +2127,15 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
PartitionPage<base::internal::ThreadSafe>* page =
PartitionPage<base::internal::ThreadSafe>::FromPointer(
PartitionCookieFreePointerAdjust(ptr1));
- generic_allocator.root()->Free(ptr2);
- generic_allocator.root()->Free(ptr4);
- generic_allocator.root()->Free(ptr1);
+ allocator.root()->Free(ptr2);
+ allocator.root()->Free(ptr4);
+ allocator.root()->Free(ptr1);
EXPECT_EQ(0u, page->num_unprovisioned_slots);
{
MockPartitionStatsDumper dumper;
- generic_allocator.root()->DumpStats("mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+ &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats =
@@ -2099,8 +2155,7 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + kSystemPageSize, true);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 3), true);
- generic_allocator.root()->PurgeMemory(
- PartitionPurgeDiscardUnusedSystemPages);
+ allocator.root()->PurgeMemory(PartitionPurgeDiscardUnusedSystemPages);
EXPECT_EQ(1u, page->num_unprovisioned_slots);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + kSystemPageSize, false);
@@ -2108,29 +2163,29 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 3), false);
// Let's check we didn't brick the freelist.
- void* ptr1b = generic_allocator.root()->Alloc(
- kSystemPageSize - kExtraAllocSize, type_name);
+ void* ptr1b =
+ allocator.root()->Alloc(kSystemPageSize - kExtraAllocSize, type_name);
EXPECT_EQ(ptr1, ptr1b);
- void* ptr2b = generic_allocator.root()->Alloc(
- kSystemPageSize - kExtraAllocSize, type_name);
+ void* ptr2b =
+ allocator.root()->Alloc(kSystemPageSize - kExtraAllocSize, type_name);
EXPECT_EQ(ptr2, ptr2b);
EXPECT_FALSE(page->freelist_head);
- generic_allocator.root()->Free(ptr1);
- generic_allocator.root()->Free(ptr2);
- generic_allocator.root()->Free(ptr3);
+ allocator.root()->Free(ptr1);
+ allocator.root()->Free(ptr2);
+ allocator.root()->Free(ptr3);
}
// This sub-test is similar, but tests a double-truncation.
- generic_allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages);
+ allocator.root()->PurgeMemory(PartitionPurgeDecommitEmptyPages);
{
- char* ptr1 = reinterpret_cast<char*>(generic_allocator.root()->Alloc(
- kSystemPageSize - kExtraAllocSize, type_name));
- void* ptr2 = generic_allocator.root()->Alloc(
- kSystemPageSize - kExtraAllocSize, type_name);
- void* ptr3 = generic_allocator.root()->Alloc(
- kSystemPageSize - kExtraAllocSize, type_name);
- void* ptr4 = generic_allocator.root()->Alloc(
- kSystemPageSize - kExtraAllocSize, type_name);
+ char* ptr1 = reinterpret_cast<char*>(
+ allocator.root()->Alloc(kSystemPageSize - kExtraAllocSize, type_name));
+ void* ptr2 =
+ allocator.root()->Alloc(kSystemPageSize - kExtraAllocSize, type_name);
+ void* ptr3 =
+ allocator.root()->Alloc(kSystemPageSize - kExtraAllocSize, type_name);
+ void* ptr4 =
+ allocator.root()->Alloc(kSystemPageSize - kExtraAllocSize, type_name);
ptr1[0] = 'A';
ptr1[kSystemPageSize] = 'A';
ptr1[kSystemPageSize * 2] = 'A';
@@ -2138,14 +2193,14 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
PartitionPage<base::internal::ThreadSafe>* page =
PartitionPage<base::internal::ThreadSafe>::FromPointer(
PartitionCookieFreePointerAdjust(ptr1));
- generic_allocator.root()->Free(ptr4);
- generic_allocator.root()->Free(ptr3);
+ allocator.root()->Free(ptr4);
+ allocator.root()->Free(ptr3);
EXPECT_EQ(0u, page->num_unprovisioned_slots);
{
MockPartitionStatsDumper dumper;
- generic_allocator.root()->DumpStats("mock_generic_allocator",
- false /* detailed dump */, &dumper);
+ allocator.root()->DumpStats("mock_allocator", false /* detailed dump */,
+ &dumper);
EXPECT_TRUE(dumper.IsMemoryAllocationRecorded());
const PartitionBucketMemoryStats* stats =
@@ -2161,8 +2216,7 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + kSystemPageSize, true);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 2), true);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + (kSystemPageSize * 3), true);
- generic_allocator.root()->PurgeMemory(
- PartitionPurgeDiscardUnusedSystemPages);
+ allocator.root()->PurgeMemory(PartitionPurgeDiscardUnusedSystemPages);
EXPECT_EQ(2u, page->num_unprovisioned_slots);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset, true);
CHECK_PAGE_IN_CORE(ptr1 - kPointerOffset + kSystemPageSize, true);
@@ -2171,8 +2225,8 @@ TEST_F(PartitionAllocTest, PurgeDiscardable) {
EXPECT_FALSE(page->freelist_head);
- generic_allocator.root()->Free(ptr1);
- generic_allocator.root()->Free(ptr2);
+ allocator.root()->Free(ptr1);
+ allocator.root()->Free(ptr2);
}
}
@@ -2183,35 +2237,35 @@ TEST_F(PartitionAllocTest, ReallocMovesCookies) {
// and we can track the "raw" size. See https://crbug.com/709271
static constexpr size_t kSize =
base::kMaxSystemPagesPerSlotSpan * base::kSystemPageSize;
- void* ptr = generic_allocator.root()->Alloc(kSize + 1, type_name);
+ void* ptr = allocator.root()->Alloc(kSize + 1, type_name);
EXPECT_TRUE(ptr);
memset(ptr, 0xbd, kSize + 1);
- ptr = generic_allocator.root()->Realloc(ptr, kSize + 2, type_name);
+ ptr = allocator.root()->Realloc(ptr, kSize + 2, type_name);
EXPECT_TRUE(ptr);
memset(ptr, 0xbd, kSize + 2);
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr);
}
TEST_F(PartitionAllocTest, SmallReallocDoesNotMoveTrailingCookie) {
// For crbug.com/781473
static constexpr size_t kSize = 264;
- void* ptr = generic_allocator.root()->Alloc(kSize, type_name);
+ void* ptr = allocator.root()->Alloc(kSize, type_name);
EXPECT_TRUE(ptr);
- ptr = generic_allocator.root()->Realloc(ptr, kSize + 16, type_name);
+ ptr = allocator.root()->Realloc(ptr, kSize + 16, type_name);
EXPECT_TRUE(ptr);
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr);
}
TEST_F(PartitionAllocTest, ZeroFill) {
constexpr static size_t kAllZerosSentinel =
std::numeric_limits<size_t>::max();
for (size_t size : kTestSizes) {
- char* p = static_cast<char*>(PartitionAllocGenericFlags(
- generic_allocator.root(), PartitionAllocZeroFill, size, nullptr));
+ char* p = static_cast<char*>(
+ allocator.root()->AllocFlags(PartitionAllocZeroFill, size, nullptr));
size_t non_zero_position = kAllZerosSentinel;
for (size_t i = 0; i < size; ++i) {
if (0 != p[i]) {
@@ -2221,12 +2275,12 @@ TEST_F(PartitionAllocTest, ZeroFill) {
}
EXPECT_EQ(kAllZerosSentinel, non_zero_position)
<< "test allocation size: " << size;
- generic_allocator.root()->Free(p);
+ allocator.root()->Free(p);
}
for (int i = 0; i < 10; ++i) {
SCOPED_TRACE(i);
- AllocateRandomly(generic_allocator.root(), 250, PartitionAllocZeroFill);
+ AllocateRandomly(allocator.root(), 250, PartitionAllocZeroFill);
}
}
@@ -2236,16 +2290,14 @@ TEST_F(PartitionAllocTest, Bug_897585) {
// test case in the indicated bug.
size_t kInitialSize = 983040;
size_t kDesiredSize = 983100;
- void* ptr = PartitionAllocGenericFlags(generic_allocator.root(),
- PartitionAllocReturnNull, kInitialSize,
- nullptr);
+ void* ptr = allocator.root()->AllocFlags(PartitionAllocReturnNull,
+ kInitialSize, nullptr);
ASSERT_NE(nullptr, ptr);
- ptr = PartitionReallocGenericFlags(generic_allocator.root(),
- PartitionAllocReturnNull, ptr,
- kDesiredSize, nullptr);
+ ptr = allocator.root()->ReallocFlags(PartitionAllocReturnNull, ptr,
+ kDesiredSize, nullptr);
ASSERT_NE(nullptr, ptr);
memset(ptr, 0xbd, kDesiredSize);
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr);
}
TEST_F(PartitionAllocTest, OverrideHooks) {
@@ -2282,29 +2334,90 @@ TEST_F(PartitionAllocTest, OverrideHooks) {
return false;
});
- void* ptr = PartitionAllocGenericFlags(generic_allocator.root(),
- PartitionAllocReturnNull,
- kOverriddenSize, kOverriddenType);
+ void* ptr = allocator.root()->AllocFlags(PartitionAllocReturnNull,
+ kOverriddenSize, kOverriddenType);
ASSERT_EQ(ptr, overridden_allocation);
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr);
EXPECT_TRUE(free_called);
// overridden_allocation has not actually been freed so we can now immediately
// realloc it.
free_called = false;
- ptr = PartitionReallocGenericFlags(generic_allocator.root(),
- PartitionAllocReturnNull, ptr, 1, nullptr);
+ ptr =
+ allocator.root()->ReallocFlags(PartitionAllocReturnNull, ptr, 1, nullptr);
ASSERT_NE(ptr, nullptr);
EXPECT_NE(ptr, overridden_allocation);
EXPECT_TRUE(free_called);
EXPECT_EQ(*(char*)ptr, kOverriddenChar);
- generic_allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr);
PartitionAllocHooks::SetOverrideHooks(nullptr, nullptr, nullptr);
free(overridden_allocation);
}
+TEST_F(PartitionAllocTest, Alignment) {
+ std::vector<void*> allocated_ptrs;
+
+ for (size_t size = 1; size <= base::kSystemPageSize; size <<= 1) {
+ // All allocations which are not direct-mapped occupy contiguous slots of a
+ // span, starting on a page boundary. This means that allocations are first
+ // rounded up to the nearest bucket size, then have an address of the form:
+ //
+ // (page-aligned address) + i * bucket_size.
+#if DCHECK_IS_ON()
+ // When DCHECK_IS_ON(), a kCookieSize (16) cookie is added on both sides
+ // before rounding up the allocation size. The returned pointer points after
+ // the cookie.
+ //
+ // All in all, a power-of-two allocation is aligned on
+ // min(16, requested_size).
+ size_t expected_alignment = std::min(size, static_cast<size_t>(16));
+#else
+ // All powers of two are bucket sizes, meaning that all power of two
+ // allocations smaller than a page will be aligned on the allocation size.
+ size_t expected_alignment = size;
+#endif
+ for (int index = 0; index < 3; index++) {
+ void* ptr = allocator.root()->Alloc(size, "");
+ allocated_ptrs.push_back(ptr);
+ EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(ptr) % expected_alignment)
+ << index << "-th allocation of size = " << size;
+ }
+ }
+
+ for (void* ptr : allocated_ptrs)
+ allocator.root()->Free(ptr);
+}
+
+TEST_F(PartitionAllocTest, FundamentalAlignment) {
+ // See the test above for details. Essentially, checking the bucket size is
+ // sufficient to ensure that alignment will always be respected, as long as
+ // the fundamental alignment is <= 16 bytes.
+ size_t fundamental_alignment = alignof(std::max_align_t);
+ for (size_t size = 0; size < base::kSystemPageSize; size++) {
+ // Allocate several pointers, as the first one in use in a size class will
+ // be aligned on a page boundary.
+ void* ptr = allocator.root()->Alloc(size, "");
+ void* ptr2 = allocator.root()->Alloc(size, "");
+ void* ptr3 = allocator.root()->Alloc(size, "");
+
+ EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr) % fundamental_alignment,
+ static_cast<uintptr_t>(0));
+ EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr2) % fundamental_alignment,
+ static_cast<uintptr_t>(0));
+ EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr3) % fundamental_alignment,
+ static_cast<uintptr_t>(0));
+
+ EXPECT_EQ(PartitionAllocGetSize<ThreadSafe>(ptr) % fundamental_alignment,
+ static_cast<uintptr_t>(0));
+
+ allocator.root()->Free(ptr);
+ allocator.root()->Free(ptr2);
+ allocator.root()->Free(ptr3);
+ }
+}
+
} // namespace internal
} // namespace base
diff --git a/chromium/base/allocator/partition_allocator/partition_bucket.cc b/chromium/base/allocator/partition_allocator/partition_bucket.cc
index a52efccbf6a..f73a43eef8d 100644
--- a/chromium/base/allocator/partition_allocator/partition_bucket.cc
+++ b/chromium/base/allocator/partition_allocator/partition_bucket.cc
@@ -4,13 +4,18 @@
#include "base/allocator/partition_allocator/partition_bucket.h"
+#include "base/allocator/partition_allocator/address_pool_manager.h"
#include "base/allocator/partition_allocator/oom.h"
#include "base/allocator/partition_allocator/page_allocator.h"
+#include "base/allocator/partition_allocator/partition_address_space.h"
+#include "base/allocator/partition_allocator/partition_alloc.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
+#include "base/allocator/partition_allocator/partition_alloc_features.h"
#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
#include "base/allocator/partition_allocator/partition_oom.h"
#include "base/allocator/partition_allocator/partition_page.h"
-#include "base/allocator/partition_allocator/partition_root_base.h"
+
#include "base/check.h"
#include "build/build_config.h"
@@ -21,7 +26,7 @@ namespace {
template <bool thread_safe>
ALWAYS_INLINE PartitionPage<thread_safe>* PartitionDirectMap(
- PartitionRootBase<thread_safe>* root,
+ PartitionRoot<thread_safe>* root,
int flags,
size_t raw_size) {
size_t size = PartitionBucket<thread_safe>::get_direct_map_size(raw_size);
@@ -41,9 +46,19 @@ ALWAYS_INLINE PartitionPage<thread_safe>* PartitionDirectMap(
map_size += kPageAllocationGranularityOffsetMask;
map_size &= kPageAllocationGranularityBaseMask;
- char* ptr = reinterpret_cast<char*>(AllocPages(nullptr, map_size,
- kSuperPageSize, PageReadWrite,
- PageTag::kPartitionAlloc));
+ char* ptr = nullptr;
+ if (IsPartitionAllocGigaCageEnabled()) {
+#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+ ptr = internal::AddressPoolManager::GetInstance()->Alloc(GetDirectMapPool(),
+ map_size);
+#else
+ NOTREACHED();
+#endif // defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+ } else {
+ ptr = reinterpret_cast<char*>(AllocPages(nullptr, map_size, kSuperPageSize,
+ PageReadWrite,
+ PageTag::kPartitionAlloc));
+ }
if (UNLIKELY(!ptr))
return nullptr;
@@ -65,29 +80,29 @@ ALWAYS_INLINE PartitionPage<thread_safe>* PartitionDirectMap(
extent->root = root;
// The new structures are all located inside a fresh system page so they
// will all be zeroed out. These DCHECKs are for documentation.
- DCHECK(!extent->super_page_base);
- DCHECK(!extent->super_pages_end);
- DCHECK(!extent->next);
+ PA_DCHECK(!extent->super_page_base);
+ PA_DCHECK(!extent->super_pages_end);
+ PA_DCHECK(!extent->next);
PartitionPage<thread_safe>* page =
PartitionPage<thread_safe>::FromPointerNoAlignmentCheck(slot);
auto* bucket = reinterpret_cast<PartitionBucket<thread_safe>*>(
reinterpret_cast<char*>(page) + (kPageMetadataSize * 2));
- DCHECK(!page->next_page);
- DCHECK(!page->num_allocated_slots);
- DCHECK(!page->num_unprovisioned_slots);
- DCHECK(!page->page_offset);
- DCHECK(!page->empty_cache_index);
+ PA_DCHECK(!page->next_page);
+ PA_DCHECK(!page->num_allocated_slots);
+ PA_DCHECK(!page->num_unprovisioned_slots);
+ PA_DCHECK(!page->page_offset);
+ PA_DCHECK(!page->empty_cache_index);
page->bucket = bucket;
page->freelist_head = reinterpret_cast<PartitionFreelistEntry*>(slot);
PartitionFreelistEntry* next_entry =
reinterpret_cast<PartitionFreelistEntry*>(slot);
next_entry->next = PartitionFreelistEntry::Encode(nullptr);
- DCHECK(!bucket->active_pages_head);
- DCHECK(!bucket->empty_pages_head);
- DCHECK(!bucket->decommitted_pages_head);
- DCHECK(!bucket->num_system_pages_per_slot_span);
- DCHECK(!bucket->num_full_pages);
+ PA_DCHECK(!bucket->active_pages_head);
+ PA_DCHECK(!bucket->empty_pages_head);
+ PA_DCHECK(!bucket->decommitted_pages_head);
+ PA_DCHECK(!bucket->num_system_pages_per_slot_span);
+ PA_DCHECK(!bucket->num_full_pages);
bucket->slot_size = size;
PartitionDirectMapExtent<thread_safe>* map_extent =
@@ -142,15 +157,15 @@ uint8_t PartitionBucket<thread_safe>::get_system_pages_per_slot_span() {
if (slot_size > kMaxSystemPagesPerSlotSpan * kSystemPageSize) {
// TODO(ajwong): Why is there a DCHECK here for this?
// http://crbug.com/776537
- DCHECK(!(slot_size % kSystemPageSize));
+ PA_DCHECK(!(slot_size % kSystemPageSize));
best_pages = static_cast<uint16_t>(slot_size / kSystemPageSize);
// TODO(ajwong): Should this be checking against
// kMaxSystemPagesPerSlotSpan or numeric_limits<uint8_t>::max?
// http://crbug.com/776537
- CHECK(best_pages < (1 << 8));
+ PA_CHECK(best_pages < (1 << 8));
return static_cast<uint8_t>(best_pages);
}
- DCHECK(slot_size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize);
+ PA_DCHECK(slot_size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize);
for (uint16_t i = kNumSystemPagesPerPartitionPage - 1;
i <= kMaxSystemPagesPerSlotSpan; ++i) {
size_t page_size = kSystemPageSize * i;
@@ -176,8 +191,8 @@ uint8_t PartitionBucket<thread_safe>::get_system_pages_per_slot_span() {
best_pages = i;
}
}
- DCHECK(best_pages > 0);
- CHECK(best_pages <= kMaxSystemPagesPerSlotSpan);
+ PA_DCHECK(best_pages > 0);
+ PA_CHECK(best_pages <= kMaxSystemPagesPerSlotSpan);
return static_cast<uint8_t>(best_pages);
}
@@ -198,14 +213,14 @@ NOINLINE void PartitionBucket<thread_safe>::OnFull() {
template <bool thread_safe>
ALWAYS_INLINE void* PartitionBucket<thread_safe>::AllocNewSlotSpan(
- PartitionRootBase<thread_safe>* root,
+ PartitionRoot<thread_safe>* root,
int flags,
uint16_t num_partition_pages) {
- DCHECK(!(reinterpret_cast<uintptr_t>(root->next_partition_page) %
- kPartitionPageSize));
- DCHECK(!(reinterpret_cast<uintptr_t>(root->next_partition_page_end) %
- kPartitionPageSize));
- DCHECK(num_partition_pages <= kNumPartitionPagesPerSuperPage);
+ PA_DCHECK(!(reinterpret_cast<uintptr_t>(root->next_partition_page) %
+ kPartitionPageSize));
+ PA_DCHECK(!(reinterpret_cast<uintptr_t>(root->next_partition_page_end) %
+ kPartitionPageSize));
+ PA_DCHECK(num_partition_pages <= kNumPartitionPagesPerSuperPage);
size_t total_size = kPartitionPageSize * num_partition_pages;
size_t num_partition_pages_left =
(root->next_partition_page_end - root->next_partition_page) >>
@@ -229,9 +244,19 @@ ALWAYS_INLINE void* PartitionBucket<thread_safe>::AllocNewSlotSpan(
// page table bloat and not fragmenting address spaces in 32 bit
// architectures.
char* requested_address = root->next_super_page;
- char* super_page = reinterpret_cast<char*>(
- AllocPages(requested_address, kSuperPageSize, kSuperPageSize,
- PageReadWrite, PageTag::kPartitionAlloc));
+ char* super_page = nullptr;
+ if (IsPartitionAllocGigaCageEnabled()) {
+#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+ super_page = AddressPoolManager::GetInstance()->Alloc(GetNormalBucketPool(),
+ kSuperPageSize);
+#else
+ NOTREACHED();
+#endif
+ } else {
+ super_page = reinterpret_cast<char*>(
+ AllocPages(requested_address, kSuperPageSize, kSuperPageSize,
+ PageReadWrite, PageTag::kPartitionAlloc));
+ }
if (UNLIKELY(!super_page))
return nullptr;
@@ -294,13 +319,13 @@ ALWAYS_INLINE void* PartitionBucket<thread_safe>::AllocNewSlotSpan(
PartitionSuperPageExtentEntry<thread_safe>* current_extent =
root->current_extent;
- bool is_new_extent = (super_page != requested_address);
+ const bool is_new_extent = super_page != requested_address;
if (UNLIKELY(is_new_extent)) {
if (UNLIKELY(!current_extent)) {
- DCHECK(!root->first_extent);
+ PA_DCHECK(!root->first_extent);
root->first_extent = latest_extent;
} else {
- DCHECK(current_extent->super_page_base);
+ PA_DCHECK(current_extent->super_page_base);
current_extent->next = latest_extent;
}
root->current_extent = latest_extent;
@@ -309,10 +334,10 @@ ALWAYS_INLINE void* PartitionBucket<thread_safe>::AllocNewSlotSpan(
} else {
// We allocated next to an existing extent so just nudge the size up a
// little.
- DCHECK(current_extent->super_pages_end);
+ PA_DCHECK(current_extent->super_pages_end);
current_extent->super_pages_end += kSuperPageSize;
- DCHECK(ret >= current_extent->super_page_base &&
- ret < current_extent->super_pages_end);
+ PA_DCHECK(ret >= current_extent->super_page_base &&
+ ret < current_extent->super_pages_end);
}
return ret;
}
@@ -334,12 +359,6 @@ ALWAYS_INLINE void PartitionBucket<thread_safe>::InitializeSlotSpan(
page->Reset();
- // If this page has just a single slot, do not set up page offsets for any
- // page metadata other than the first one. This ensures that attempts to
- // touch invalid page metadata fail.
- if (page->num_unprovisioned_slots == 1)
- return;
-
uint16_t num_partition_pages = get_pages_per_slot_span();
char* page_char_ptr = reinterpret_cast<char*>(page);
for (uint16_t i = 1; i < num_partition_pages; ++i) {
@@ -353,16 +372,16 @@ ALWAYS_INLINE void PartitionBucket<thread_safe>::InitializeSlotSpan(
template <bool thread_safe>
ALWAYS_INLINE char* PartitionBucket<thread_safe>::AllocAndFillFreelist(
PartitionPage<thread_safe>* page) {
- DCHECK(page != PartitionPage<thread_safe>::get_sentinel_page());
+ PA_DCHECK(page != PartitionPage<thread_safe>::get_sentinel_page());
uint16_t num_slots = page->num_unprovisioned_slots;
- DCHECK(num_slots);
+ PA_DCHECK(num_slots);
// We should only get here when _every_ slot is either used or unprovisioned.
// (The third state is "on the freelist". If we have a non-empty freelist, we
// should not get here.)
- DCHECK(num_slots + page->num_allocated_slots == get_slots_per_span());
+ PA_DCHECK(num_slots + page->num_allocated_slots == get_slots_per_span());
// Similarly, make explicitly sure that the freelist is empty.
- DCHECK(!page->freelist_head);
- DCHECK(page->num_allocated_slots >= 0);
+ PA_DCHECK(!page->freelist_head);
+ PA_DCHECK(page->num_allocated_slots >= 0);
size_t size = slot_size;
char* base =
@@ -396,7 +415,7 @@ ALWAYS_INLINE char* PartitionBucket<thread_safe>::AllocAndFillFreelist(
// We always return an object slot -- that's the +1 below.
// We do not neccessarily create any new freelist entries, because we cross
// sub page boundaries frequently for large bucket sizes.
- DCHECK(num_new_freelist_entries + 1 <= num_slots);
+ PA_DCHECK(num_new_freelist_entries + 1 <= num_slots);
num_slots -= (num_new_freelist_entries + 1);
page->num_unprovisioned_slots = num_slots;
page->num_allocated_slots++;
@@ -429,9 +448,9 @@ bool PartitionBucket<thread_safe>::SetNewActivePage() {
for (; page; page = next_page) {
next_page = page->next_page;
- DCHECK(page->bucket == this);
- DCHECK(page != empty_pages_head);
- DCHECK(page != decommitted_pages_head);
+ PA_DCHECK(page->bucket == this);
+ PA_DCHECK(page != empty_pages_head);
+ PA_DCHECK(page != decommitted_pages_head);
if (LIKELY(page->is_active())) {
// This page is usable because it has freelist entries, or has
@@ -448,7 +467,7 @@ bool PartitionBucket<thread_safe>::SetNewActivePage() {
page->next_page = decommitted_pages_head;
decommitted_pages_head = page;
} else {
- DCHECK(page->is_full());
+ PA_DCHECK(page->is_full());
// If we get here, we found a full page. Skip over it too, and also
// tag it as full (via a negative value). We need it tagged so that
// free'ing can tell, and move it back into the active page list.
@@ -469,12 +488,12 @@ bool PartitionBucket<thread_safe>::SetNewActivePage() {
template <bool thread_safe>
void* PartitionBucket<thread_safe>::SlowPathAlloc(
- PartitionRootBase<thread_safe>* root,
+ PartitionRoot<thread_safe>* root,
int flags,
size_t size,
bool* is_already_zeroed) {
// The slow path is called when the freelist is empty.
- DCHECK(!active_pages_head->freelist_head);
+ PA_DCHECK(!active_pages_head->freelist_head);
PartitionPage<thread_safe>* new_page = nullptr;
*is_already_zeroed = false;
@@ -490,10 +509,10 @@ void* PartitionBucket<thread_safe>::SlowPathAlloc(
// the empty or decommitted lists which affects the subsequent conditional.
bool return_null = flags & PartitionAllocReturnNull;
if (UNLIKELY(is_direct_mapped())) {
- DCHECK(size > kGenericMaxBucketed);
- DCHECK(this == get_sentinel_bucket());
- DCHECK(active_pages_head ==
- PartitionPage<thread_safe>::get_sentinel_page());
+ PA_DCHECK(size > kGenericMaxBucketed);
+ PA_DCHECK(this == get_sentinel_bucket());
+ PA_DCHECK(active_pages_head ==
+ PartitionPage<thread_safe>::get_sentinel_page());
if (size > kGenericMaxDirectMapped) {
if (return_null)
return nullptr;
@@ -504,29 +523,29 @@ void* PartitionBucket<thread_safe>::SlowPathAlloc(
} else if (LIKELY(SetNewActivePage())) {
// First, did we find an active page in the active pages list?
new_page = active_pages_head;
- DCHECK(new_page->is_active());
+ PA_DCHECK(new_page->is_active());
} else if (LIKELY(empty_pages_head != nullptr) ||
LIKELY(decommitted_pages_head != nullptr)) {
// Second, look in our lists of empty and decommitted pages.
// Check empty pages first, which are preferred, but beware that an
// empty page might have been decommitted.
while (LIKELY((new_page = empty_pages_head) != nullptr)) {
- DCHECK(new_page->bucket == this);
- DCHECK(new_page->is_empty() || new_page->is_decommitted());
+ PA_DCHECK(new_page->bucket == this);
+ PA_DCHECK(new_page->is_empty() || new_page->is_decommitted());
empty_pages_head = new_page->next_page;
// Accept the empty page unless it got decommitted.
if (new_page->freelist_head) {
new_page->next_page = nullptr;
break;
}
- DCHECK(new_page->is_decommitted());
+ PA_DCHECK(new_page->is_decommitted());
new_page->next_page = decommitted_pages_head;
decommitted_pages_head = new_page;
}
if (UNLIKELY(!new_page) && LIKELY(decommitted_pages_head != nullptr)) {
new_page = decommitted_pages_head;
- DCHECK(new_page->bucket == this);
- DCHECK(new_page->is_decommitted());
+ PA_DCHECK(new_page->bucket == this);
+ PA_DCHECK(new_page->is_decommitted());
decommitted_pages_head = new_page->next_page;
void* addr = PartitionPage<thread_safe>::ToPointer(new_page);
root->RecommitSystemPages(addr, new_page->bucket->get_bytes_per_span());
@@ -535,7 +554,7 @@ void* PartitionBucket<thread_safe>::SlowPathAlloc(
// not be zeroed.
// *is_already_zeroed = true;
}
- DCHECK(new_page);
+ PA_DCHECK(new_page);
} else {
// Third. If we get here, we need a brand new page.
uint16_t num_partition_pages = get_pages_per_slot_span();
@@ -552,8 +571,8 @@ void* PartitionBucket<thread_safe>::SlowPathAlloc(
// Bail if we had a memory allocation failure.
if (UNLIKELY(!new_page)) {
- DCHECK(active_pages_head ==
- PartitionPage<thread_safe>::get_sentinel_page());
+ PA_DCHECK(active_pages_head ==
+ PartitionPage<thread_safe>::get_sentinel_page());
if (return_null)
return nullptr;
root->OutOfMemory(size);
@@ -563,7 +582,7 @@ void* PartitionBucket<thread_safe>::SlowPathAlloc(
// It seems like in many of the conditional branches above, |this| ==
// |new_page->bucket|. Maybe pull this into another function?
PartitionBucket* bucket = new_page->bucket;
- DCHECK(bucket != get_sentinel_bucket());
+ PA_DCHECK(bucket != get_sentinel_bucket());
bucket->active_pages_head = new_page;
new_page->set_raw_size(size);
@@ -578,7 +597,7 @@ void* PartitionBucket<thread_safe>::SlowPathAlloc(
return entry;
}
// Otherwise, we need to build the freelist.
- DCHECK(new_page->num_unprovisioned_slots);
+ PA_DCHECK(new_page->num_unprovisioned_slots);
return AllocAndFillFreelist(new_page);
}
diff --git a/chromium/base/allocator/partition_allocator/partition_bucket.h b/chromium/base/allocator/partition_allocator/partition_bucket.h
index 608b81b0dd8..030a98b1c9a 100644
--- a/chromium/base/allocator/partition_allocator/partition_bucket.h
+++ b/chromium/base/allocator/partition_allocator/partition_bucket.h
@@ -8,11 +8,12 @@
#include <stddef.h>
#include <stdint.h>
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/base_export.h"
+#include "base/check.h"
#include "base/compiler_specific.h"
-#include "base/logging.h"
#include "base/thread_annotations.h"
namespace base {
@@ -36,10 +37,10 @@ struct PartitionBucket {
// requesting (a) new page(s) from the operating system, or false otherwise.
// This enables an optimization for when callers use |PartitionAllocZeroFill|:
// there is no need to call memset on fresh pages; the OS has already zeroed
- // them. (See |PartitionRootBase::AllocFromBucket|.)
+ // them. (See |PartitionRoot::AllocFromBucket|.)
//
// Note the matching Free() functions are in PartitionPage.
- BASE_EXPORT NOINLINE void* SlowPathAlloc(PartitionRootBase<thread_safe>* root,
+ BASE_EXPORT NOINLINE void* SlowPathAlloc(PartitionRoot<thread_safe>* root,
int flags,
size_t size,
bool* is_already_zeroed)
@@ -63,7 +64,7 @@ struct PartitionBucket {
// Caller must check that the size is not above the kGenericMaxDirectMapped
// limit before calling. This also guards against integer overflow in the
// calculation here.
- DCHECK(size <= kGenericMaxDirectMapped);
+ PA_DCHECK(size <= kGenericMaxDirectMapped);
return (size + kSystemPageOffsetMask) & kSystemPageBaseMask;
}
@@ -101,7 +102,7 @@ struct PartitionBucket {
// Allocates a new slot span with size |num_partition_pages| from the
// current extent. Metadata within this slot span will be uninitialized.
// Returns nullptr on error.
- ALWAYS_INLINE void* AllocNewSlotSpan(PartitionRootBase<thread_safe>* root,
+ ALWAYS_INLINE void* AllocNewSlotSpan(PartitionRoot<thread_safe>* root,
int flags,
uint16_t num_partition_pages)
EXCLUSIVE_LOCKS_REQUIRED(root->lock_);
diff --git a/chromium/base/allocator/partition_allocator/partition_cookie.h b/chromium/base/allocator/partition_allocator/partition_cookie.h
index 750ac6154f8..ca29ab64f73 100644
--- a/chromium/base/allocator/partition_allocator/partition_cookie.h
+++ b/chromium/base/allocator/partition_allocator/partition_cookie.h
@@ -5,65 +5,90 @@
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_COOKIE_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_COOKIE_H_
+#include "base/allocator/buildflags.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/compiler_specific.h"
-#include "base/logging.h"
namespace base {
namespace internal {
-#if DCHECK_IS_ON()
// Handles alignment up to XMM instructions on Intel.
static constexpr size_t kCookieSize = 16;
+// Cookies are enabled for debug builds, unless PartitionAlloc is used as the
+// malloc() implementation. This is a temporary workaround the alignment issues
+// caused by cookies. With them, PartitionAlloc cannot support posix_memalign(),
+// which is required.
+//
+// TODO(lizeb): Support cookies when used as the malloc() implementation.
+#if DCHECK_IS_ON() && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
static constexpr unsigned char kCookieValue[kCookieSize] = {
0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xD0, 0x0D,
0x13, 0x37, 0xF0, 0x05, 0xBA, 0x11, 0xAB, 0x1E};
-#endif
ALWAYS_INLINE void PartitionCookieCheckValue(void* ptr) {
-#if DCHECK_IS_ON()
unsigned char* cookie_ptr = reinterpret_cast<unsigned char*>(ptr);
for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr)
- DCHECK(*cookie_ptr == kCookieValue[i]);
-#endif
+ PA_DCHECK(*cookie_ptr == kCookieValue[i]);
}
ALWAYS_INLINE size_t PartitionCookieSizeAdjustAdd(size_t size) {
-#if DCHECK_IS_ON()
// Add space for cookies, checking for integer overflow. TODO(palmer):
// Investigate the performance and code size implications of using
// CheckedNumeric throughout PA.
- DCHECK(size + (2 * kCookieSize) > size);
+ PA_DCHECK(size + (2 * kCookieSize) > size);
size += 2 * kCookieSize;
-#endif
return size;
}
ALWAYS_INLINE void* PartitionCookieFreePointerAdjust(void* ptr) {
-#if DCHECK_IS_ON()
// The value given to the application is actually just after the cookie.
ptr = static_cast<char*>(ptr) - kCookieSize;
-#endif
return ptr;
}
ALWAYS_INLINE size_t PartitionCookieSizeAdjustSubtract(size_t size) {
-#if DCHECK_IS_ON()
// Remove space for cookies.
- DCHECK(size >= 2 * kCookieSize);
+ PA_DCHECK(size >= 2 * kCookieSize);
size -= 2 * kCookieSize;
-#endif
return size;
}
-ALWAYS_INLINE void PartitionCookieWriteValue(void* ptr) {
+ALWAYS_INLINE size_t PartitionCookieOffsetSubtract(size_t offset) {
#if DCHECK_IS_ON()
+ // Convert offset from the beginning of the allocated slot to offset from
+ // the value given to the application, which is just after the cookie.
+ offset -= kCookieSize;
+#endif
+ return offset;
+}
+
+ALWAYS_INLINE void PartitionCookieWriteValue(void* ptr) {
unsigned char* cookie_ptr = reinterpret_cast<unsigned char*>(ptr);
for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr)
*cookie_ptr = kCookieValue[i];
-#endif
}
+#else
+
+ALWAYS_INLINE void PartitionCookieCheckValue(void* ptr) {}
+
+ALWAYS_INLINE size_t PartitionCookieSizeAdjustAdd(size_t size) {
+ return size;
+}
+
+ALWAYS_INLINE void* PartitionCookieFreePointerAdjust(void* ptr) {
+ return ptr;
+}
+
+ALWAYS_INLINE size_t PartitionCookieSizeAdjustSubtract(size_t size) {
+ return size;
+}
+
+ALWAYS_INLINE void PartitionCookieWriteValue(void* ptr) {}
+#endif // DCHECK_IS_ON() && !BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
+
} // namespace internal
} // namespace base
diff --git a/chromium/base/allocator/partition_allocator/partition_direct_map_extent.h b/chromium/base/allocator/partition_allocator/partition_direct_map_extent.h
index c9b6d13b6f0..494f23526f0 100644
--- a/chromium/base/allocator/partition_allocator/partition_direct_map_extent.h
+++ b/chromium/base/allocator/partition_allocator/partition_direct_map_extent.h
@@ -5,9 +5,10 @@
#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DIRECT_MAP_EXTENT_H_
#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_DIRECT_MAP_EXTENT_H_
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_bucket.h"
#include "base/allocator/partition_allocator/partition_page.h"
-#include "base/logging.h"
+#include "base/check.h"
namespace base {
namespace internal {
@@ -27,7 +28,7 @@ template <bool thread_safe>
ALWAYS_INLINE PartitionDirectMapExtent<thread_safe>*
PartitionDirectMapExtent<thread_safe>::FromPage(
PartitionPage<thread_safe>* page) {
- DCHECK(page->bucket->is_direct_mapped());
+ PA_DCHECK(page->bucket->is_direct_mapped());
return reinterpret_cast<PartitionDirectMapExtent<thread_safe>*>(
reinterpret_cast<char*>(page) + 3 * kPageMetadataSize);
}
diff --git a/chromium/base/allocator/partition_allocator/partition_page.cc b/chromium/base/allocator/partition_allocator/partition_page.cc
index b094abc05b6..e40230c201d 100644
--- a/chromium/base/allocator/partition_allocator/partition_page.cc
+++ b/chromium/base/allocator/partition_allocator/partition_page.cc
@@ -4,9 +4,15 @@
#include "base/allocator/partition_allocator/partition_page.h"
+#include "base/allocator/partition_allocator/address_pool_manager.h"
+#include "base/allocator/partition_allocator/partition_alloc.h"
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
+#include "base/allocator/partition_allocator/partition_alloc_features.h"
#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
-#include "base/allocator/partition_allocator/partition_root_base.h"
#include "base/check.h"
+#include "base/feature_list.h"
+#include "base/notreached.h"
+#include "build/build_config.h"
namespace base {
namespace internal {
@@ -16,21 +22,20 @@ namespace {
template <bool thread_safe>
ALWAYS_INLINE DeferredUnmap
PartitionDirectUnmap(PartitionPage<thread_safe>* page) {
- PartitionRootBase<thread_safe>* root =
- PartitionRootBase<thread_safe>::FromPage(page);
+ PartitionRoot<thread_safe>* root = PartitionRoot<thread_safe>::FromPage(page);
const PartitionDirectMapExtent<thread_safe>* extent =
PartitionDirectMapExtent<thread_safe>::FromPage(page);
size_t unmap_size = extent->map_size;
// Maintain the doubly-linked list of all direct mappings.
if (extent->prev_extent) {
- DCHECK(extent->prev_extent->next_extent == extent);
+ PA_DCHECK(extent->prev_extent->next_extent == extent);
extent->prev_extent->next_extent = extent->next_extent;
} else {
root->direct_map_list = extent->next_extent;
}
if (extent->next_extent) {
- DCHECK(extent->next_extent->prev_extent == extent);
+ PA_DCHECK(extent->next_extent->prev_extent == extent);
extent->next_extent->prev_extent = extent->prev_extent;
}
@@ -40,10 +45,10 @@ PartitionDirectUnmap(PartitionPage<thread_safe>* page) {
size_t uncommitted_page_size = page->bucket->slot_size + kSystemPageSize;
root->DecreaseCommittedPages(uncommitted_page_size);
- DCHECK(root->total_size_of_direct_mapped_pages >= uncommitted_page_size);
+ PA_DCHECK(root->total_size_of_direct_mapped_pages >= uncommitted_page_size);
root->total_size_of_direct_mapped_pages -= uncommitted_page_size;
- DCHECK(!(unmap_size & kPageAllocationGranularityOffsetMask));
+ PA_DCHECK(!(unmap_size & kPageAllocationGranularityOffsetMask));
char* ptr =
reinterpret_cast<char*>(PartitionPage<thread_safe>::ToPointer(page));
@@ -56,16 +61,16 @@ PartitionDirectUnmap(PartitionPage<thread_safe>* page) {
template <bool thread_safe>
ALWAYS_INLINE void PartitionRegisterEmptyPage(
PartitionPage<thread_safe>* page) {
- DCHECK(page->is_empty());
- PartitionRootBase<thread_safe>* root =
- PartitionRootBase<thread_safe>::FromPage(page);
+ PA_DCHECK(page->is_empty());
+ PartitionRoot<thread_safe>* root = PartitionRoot<thread_safe>::FromPage(page);
root->lock_.AssertAcquired();
// If the page is already registered as empty, give it another life.
if (page->empty_cache_index != -1) {
- DCHECK(page->empty_cache_index >= 0);
- DCHECK(static_cast<unsigned>(page->empty_cache_index) < kMaxFreeableSpans);
- DCHECK(root->global_empty_page_ring[page->empty_cache_index] == page);
+ PA_DCHECK(page->empty_cache_index >= 0);
+ PA_DCHECK(static_cast<unsigned>(page->empty_cache_index) <
+ kMaxFreeableSpans);
+ PA_DCHECK(root->global_empty_page_ring[page->empty_cache_index] == page);
root->global_empty_page_ring[page->empty_cache_index] = nullptr;
}
@@ -103,7 +108,7 @@ PartitionPage<thread_safe>* PartitionPage<thread_safe>::get_sentinel_page() {
template <bool thread_safe>
DeferredUnmap PartitionPage<thread_safe>::FreeSlowPath() {
- DCHECK(this != get_sentinel_page());
+ PA_DCHECK(this != get_sentinel_page());
if (LIKELY(num_allocated_slots == 0)) {
// Page became fully unused.
if (UNLIKELY(bucket->is_direct_mapped())) {
@@ -113,27 +118,27 @@ DeferredUnmap PartitionPage<thread_safe>::FreeSlowPath() {
// the empty list as a force towards defragmentation.
if (LIKELY(this == bucket->active_pages_head))
bucket->SetNewActivePage();
- DCHECK(bucket->active_pages_head != this);
+ PA_DCHECK(bucket->active_pages_head != this);
set_raw_size(0);
- DCHECK(!get_raw_size());
+ PA_DCHECK(!get_raw_size());
PartitionRegisterEmptyPage(this);
} else {
- DCHECK(!bucket->is_direct_mapped());
+ PA_DCHECK(!bucket->is_direct_mapped());
// Ensure that the page is full. That's the only valid case if we
// arrive here.
- DCHECK(num_allocated_slots < 0);
+ PA_DCHECK(num_allocated_slots < 0);
// A transition of num_allocated_slots from 0 to -1 is not legal, and
// likely indicates a double-free.
- CHECK(num_allocated_slots != -1);
+ PA_CHECK(num_allocated_slots != -1);
num_allocated_slots = -num_allocated_slots - 2;
- DCHECK(num_allocated_slots == bucket->get_slots_per_span() - 1);
+ PA_DCHECK(num_allocated_slots == bucket->get_slots_per_span() - 1);
// Fully used page became partially used. It must be put back on the
// non-full page list. Also make it the current page to increase the
// chances of it being filled up again. The old current page will be
// the next page.
- DCHECK(!next_page);
+ PA_DCHECK(!next_page);
if (LIKELY(bucket->active_pages_head != get_sentinel_page()))
next_page = bucket->active_pages_head;
bucket->active_pages_head = this;
@@ -147,11 +152,10 @@ DeferredUnmap PartitionPage<thread_safe>::FreeSlowPath() {
}
template <bool thread_safe>
-void PartitionPage<thread_safe>::Decommit(
- PartitionRootBase<thread_safe>* root) {
+void PartitionPage<thread_safe>::Decommit(PartitionRoot<thread_safe>* root) {
root->lock_.AssertAcquired();
- DCHECK(is_empty());
- DCHECK(!bucket->is_direct_mapped());
+ PA_DCHECK(is_empty());
+ PA_DCHECK(!bucket->is_direct_mapped());
void* addr = PartitionPage::ToPointer(this);
root->DecommitSystemPages(addr, bucket->get_bytes_per_span());
@@ -163,23 +167,33 @@ void PartitionPage<thread_safe>::Decommit(
// 32 bytes in size.
freelist_head = nullptr;
num_unprovisioned_slots = 0;
- DCHECK(is_decommitted());
+ PA_DCHECK(is_decommitted());
}
template <bool thread_safe>
void PartitionPage<thread_safe>::DecommitIfPossible(
- PartitionRootBase<thread_safe>* root) {
+ PartitionRoot<thread_safe>* root) {
root->lock_.AssertAcquired();
- DCHECK(empty_cache_index >= 0);
- DCHECK(static_cast<unsigned>(empty_cache_index) < kMaxFreeableSpans);
- DCHECK(this == root->global_empty_page_ring[empty_cache_index]);
+ PA_DCHECK(empty_cache_index >= 0);
+ PA_DCHECK(static_cast<unsigned>(empty_cache_index) < kMaxFreeableSpans);
+ PA_DCHECK(this == root->global_empty_page_ring[empty_cache_index]);
empty_cache_index = -1;
if (is_empty())
Decommit(root);
}
void DeferredUnmap::Unmap() {
- FreePages(ptr, size);
+ PA_DCHECK(ptr && size > 0);
+ if (IsManagedByPartitionAlloc(ptr)) {
+#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+ internal::AddressPoolManager::GetInstance()->Free(
+ internal::GetDirectMapPool(), ptr, size);
+#else
+ NOTREACHED();
+#endif // defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+ } else {
+ FreePages(ptr, size);
+ }
}
template struct PartitionPage<ThreadSafe>;
diff --git a/chromium/base/allocator/partition_allocator/partition_page.h b/chromium/base/allocator/partition_allocator/partition_page.h
index cc04e78fb50..642b3d93115 100644
--- a/chromium/base/allocator/partition_allocator/partition_page.h
+++ b/chromium/base/allocator/partition_allocator/partition_page.h
@@ -7,13 +7,14 @@
#include <string.h>
+#include "base/allocator/partition_allocator/partition_alloc_check.h"
#include "base/allocator/partition_allocator/partition_alloc_constants.h"
#include "base/allocator/partition_allocator/partition_alloc_forward.h"
#include "base/allocator/partition_allocator/partition_bucket.h"
#include "base/allocator/partition_allocator/partition_cookie.h"
#include "base/allocator/partition_allocator/partition_freelist_entry.h"
#include "base/allocator/partition_allocator/random.h"
-#include "base/logging.h"
+#include "base/check_op.h"
#include "base/thread_annotations.h"
namespace base {
@@ -81,8 +82,8 @@ struct PartitionPage {
BASE_EXPORT NOINLINE DeferredUnmap FreeSlowPath() WARN_UNUSED_RESULT;
ALWAYS_INLINE DeferredUnmap Free(void* ptr) WARN_UNUSED_RESULT;
- void Decommit(PartitionRootBase<thread_safe>* root);
- void DecommitIfPossible(PartitionRootBase<thread_safe>* root);
+ void Decommit(PartitionRoot<thread_safe>* root);
+ void DecommitIfPossible(PartitionRoot<thread_safe>* root);
// Pointer manipulation functions. These must be static as the input |page|
// pointer may be the result of an offset calculation and therefore cannot
@@ -134,12 +135,13 @@ static_assert(sizeof(PartitionPage<ThreadSafe>) <= kPageMetadataSize,
ALWAYS_INLINE char* PartitionSuperPageToMetadataArea(char* ptr) {
uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(ptr);
- DCHECK(!(pointer_as_uint & kSuperPageOffsetMask));
+ PA_DCHECK(!(pointer_as_uint & kSuperPageOffsetMask));
// The metadata area is exactly one system page (the guard page) into the
// super page.
return reinterpret_cast<char*>(pointer_as_uint + kSystemPageSize);
}
+// See the comment for |FromPointer|.
template <bool thread_safe>
ALWAYS_INLINE PartitionPage<thread_safe>*
PartitionPage<thread_safe>::FromPointerNoAlignmentCheck(void* ptr) {
@@ -148,22 +150,25 @@ PartitionPage<thread_safe>::FromPointerNoAlignmentCheck(void* ptr) {
reinterpret_cast<char*>(pointer_as_uint & kSuperPageBaseMask);
uintptr_t partition_page_index =
(pointer_as_uint & kSuperPageOffsetMask) >> kPartitionPageShift;
- // Index 0 is invalid because it is the metadata and guard area and
- // the last index is invalid because it is a guard page.
- DCHECK(partition_page_index);
- DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1);
+ // Index 0 is invalid because it is the super page extent metadata and the
+ // last index is invalid because the whole PartitionPage is set as guard
+ // pages.
+ PA_DCHECK(partition_page_index);
+ PA_DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1);
auto* page = reinterpret_cast<PartitionPage*>(
PartitionSuperPageToMetadataArea(super_page_ptr) +
(partition_page_index << kPageMetadataShift));
- // Partition pages in the same slot span can share the same page object.
- // Adjust for that.
+ // Partition pages in the same slot span share the same page object. Adjust
+ // for that.
size_t delta = page->page_offset << kPageMetadataShift;
page =
reinterpret_cast<PartitionPage*>(reinterpret_cast<char*>(page) - delta);
return page;
}
-// Returns: start of the slot span for the PartitionPage.
+// Converts from a pointer to the PartitionPage object (within the super
+// page's metadata) into a pointer to the beginning of the partition page.
+// This doesn't have to be the first page in the slot span.
template <bool thread_safe>
ALWAYS_INLINE void* PartitionPage<thread_safe>::ToPointer(
const PartitionPage<thread_safe>* page) {
@@ -173,31 +178,36 @@ ALWAYS_INLINE void* PartitionPage<thread_safe>::ToPointer(
// A valid |page| must be past the first guard System page and within
// the following metadata region.
- DCHECK(super_page_offset > kSystemPageSize);
+ PA_DCHECK(super_page_offset > kSystemPageSize);
// Must be less than total metadata region.
- DCHECK(super_page_offset < kSystemPageSize + (kNumPartitionPagesPerSuperPage *
- kPageMetadataSize));
+ PA_DCHECK(super_page_offset <
+ kSystemPageSize +
+ (kNumPartitionPagesPerSuperPage * kPageMetadataSize));
uintptr_t partition_page_index =
(super_page_offset - kSystemPageSize) >> kPageMetadataShift;
- // Index 0 is invalid because it is the superpage extent metadata and the
+ // Index 0 is invalid because it is the super page extent metadata and the
// last index is invalid because the whole PartitionPage is set as guard
- // pages for the metadata region.
- DCHECK(partition_page_index);
- DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1);
+ // pages.
+ PA_DCHECK(partition_page_index);
+ PA_DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1);
uintptr_t super_page_base = (pointer_as_uint & kSuperPageBaseMask);
void* ret = reinterpret_cast<void*>(
super_page_base + (partition_page_index << kPartitionPageShift));
return ret;
}
+// Converts from a pointer inside a partition page into a pointer to the
+// PartitionPage object (within the super page's metadata).
+// The first PartitionPage of the slot span will be returned, regardless of
+// where inside the slot span |ptr| points to.
template <bool thread_safe>
ALWAYS_INLINE PartitionPage<thread_safe>*
PartitionPage<thread_safe>::FromPointer(void* ptr) {
PartitionPage* page = PartitionPage::FromPointerNoAlignmentCheck(ptr);
// Checks that the pointer is a multiple of bucket size.
- DCHECK(!((reinterpret_cast<uintptr_t>(ptr) -
- reinterpret_cast<uintptr_t>(PartitionPage::ToPointer(page))) %
- page->bucket->slot_size));
+ PA_DCHECK(!((reinterpret_cast<uintptr_t>(ptr) -
+ reinterpret_cast<uintptr_t>(PartitionPage::ToPointer(page))) %
+ page->bucket->slot_size));
return page;
}
@@ -210,8 +220,8 @@ ALWAYS_INLINE const size_t* PartitionPage<thread_safe>::get_raw_size_ptr()
if (bucket->slot_size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize)
return nullptr;
- DCHECK((bucket->slot_size % kSystemPageSize) == 0);
- DCHECK(bucket->is_direct_mapped() || bucket->get_slots_per_span() == 1);
+ PA_DCHECK((bucket->slot_size % kSystemPageSize) == 0);
+ PA_DCHECK(bucket->is_direct_mapped() || bucket->get_slots_per_span() == 1);
const PartitionPage* the_next_page = this + 1;
return reinterpret_cast<const size_t*>(&the_next_page->freelist_head);
@@ -228,7 +238,7 @@ ALWAYS_INLINE size_t PartitionPage<thread_safe>::get_raw_size() const {
template <bool thread_safe>
ALWAYS_INLINE DeferredUnmap PartitionPage<thread_safe>::Free(void* ptr) {
#if DCHECK_IS_ON()
- PartitionRootBase<thread_safe>::FromPage(this)->lock_.AssertAcquired();
+ PartitionRoot<thread_safe>::FromPage(this)->lock_.AssertAcquired();
size_t slot_size = bucket->slot_size;
const size_t raw_size = get_raw_size();
@@ -244,12 +254,12 @@ ALWAYS_INLINE DeferredUnmap PartitionPage<thread_safe>::Free(void* ptr) {
memset(ptr, kFreedByte, slot_size);
#endif
- DCHECK(num_allocated_slots);
+ PA_DCHECK(num_allocated_slots);
// Catches an immediate double free.
- CHECK(ptr != freelist_head);
+ PA_CHECK(ptr != freelist_head);
// Look for double free one level deeper in debug.
- DCHECK(!freelist_head ||
- ptr != EncodedPartitionFreelistEntry::Decode(freelist_head->next));
+ PA_DCHECK(!freelist_head ||
+ ptr != EncodedPartitionFreelistEntry::Decode(freelist_head->next));
auto* entry = static_cast<internal::PartitionFreelistEntry*>(ptr);
entry->next = internal::PartitionFreelistEntry::Encode(freelist_head);
freelist_head = entry;
@@ -259,46 +269,46 @@ ALWAYS_INLINE DeferredUnmap PartitionPage<thread_safe>::Free(void* ptr) {
} else {
// All single-slot allocations must go through the slow path to
// correctly update the size metadata.
- DCHECK(get_raw_size() == 0);
+ PA_DCHECK(get_raw_size() == 0);
}
return {};
}
template <bool thread_safe>
ALWAYS_INLINE bool PartitionPage<thread_safe>::is_active() const {
- DCHECK(this != get_sentinel_page());
- DCHECK(!page_offset);
+ PA_DCHECK(this != get_sentinel_page());
+ PA_DCHECK(!page_offset);
return (num_allocated_slots > 0 &&
(freelist_head || num_unprovisioned_slots));
}
template <bool thread_safe>
ALWAYS_INLINE bool PartitionPage<thread_safe>::is_full() const {
- DCHECK(this != get_sentinel_page());
- DCHECK(!page_offset);
+ PA_DCHECK(this != get_sentinel_page());
+ PA_DCHECK(!page_offset);
bool ret = (num_allocated_slots == bucket->get_slots_per_span());
if (ret) {
- DCHECK(!freelist_head);
- DCHECK(!num_unprovisioned_slots);
+ PA_DCHECK(!freelist_head);
+ PA_DCHECK(!num_unprovisioned_slots);
}
return ret;
}
template <bool thread_safe>
ALWAYS_INLINE bool PartitionPage<thread_safe>::is_empty() const {
- DCHECK(this != get_sentinel_page());
- DCHECK(!page_offset);
+ PA_DCHECK(this != get_sentinel_page());
+ PA_DCHECK(!page_offset);
return (!num_allocated_slots && freelist_head);
}
template <bool thread_safe>
ALWAYS_INLINE bool PartitionPage<thread_safe>::is_decommitted() const {
- DCHECK(this != get_sentinel_page());
- DCHECK(!page_offset);
+ PA_DCHECK(this != get_sentinel_page());
+ PA_DCHECK(!page_offset);
bool ret = (!num_allocated_slots && !freelist_head);
if (ret) {
- DCHECK(!num_unprovisioned_slots);
- DCHECK(empty_cache_index == -1);
+ PA_DCHECK(!num_unprovisioned_slots);
+ PA_DCHECK(empty_cache_index == -1);
}
return ret;
}
@@ -312,10 +322,10 @@ ALWAYS_INLINE void PartitionPage<thread_safe>::set_raw_size(size_t size) {
template <bool thread_safe>
ALWAYS_INLINE void PartitionPage<thread_safe>::Reset() {
- DCHECK(is_decommitted());
+ PA_DCHECK(is_decommitted());
num_unprovisioned_slots = bucket->get_slots_per_span();
- DCHECK(num_unprovisioned_slots);
+ PA_DCHECK(num_unprovisioned_slots);
next_page = nullptr;
}
diff --git a/chromium/base/allocator/partition_allocator/partition_root_base.cc b/chromium/base/allocator/partition_allocator/partition_root_base.cc
deleted file mode 100644
index c55a166b415..00000000000
--- a/chromium/base/allocator/partition_allocator/partition_root_base.cc
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright (c) 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/allocator/partition_allocator/partition_root_base.h"
-
-#include "base/allocator/partition_allocator/oom.h"
-#include "base/allocator/partition_allocator/partition_oom.h"
-#include "base/allocator/partition_allocator/partition_page.h"
-#include "build/build_config.h"
-
-namespace base {
-namespace internal {
-
-template <bool thread_safety>
-NOINLINE void PartitionRootBase<thread_safety>::OutOfMemory(size_t size) {
-#if !defined(ARCH_CPU_64_BITS)
- // Check whether this OOM is due to a lot of super pages that are allocated
- // but not committed, probably due to http://crbug.com/421387.
- if (total_size_of_super_pages + total_size_of_direct_mapped_pages -
- total_size_of_committed_pages >
- kReasonableSizeOfUnusedPages) {
- PartitionOutOfMemoryWithLotsOfUncommitedPages(size);
- }
-#endif
- if (g_oom_handling_function)
- (*g_oom_handling_function)(size);
- OOM_CRASH(size);
-}
-
-template <bool thread_safe>
-void PartitionRootBase<thread_safe>::DecommitEmptyPages() {
- for (size_t i = 0; i < kMaxFreeableSpans; ++i) {
- Page* page = global_empty_page_ring[i];
- if (page)
- page->DecommitIfPossible(this);
- global_empty_page_ring[i] = nullptr;
- }
-}
-
-template <bool thread_safe>
-internal::PartitionRootBase<thread_safe>::PartitionRootBase() = default;
-template <bool thread_safe>
-internal::PartitionRootBase<thread_safe>::~PartitionRootBase() = default;
-
-template struct PartitionRootBase<ThreadSafe>;
-template struct PartitionRootBase<NotThreadSafe>;
-
-} // namespace internal
-} // namespace base
diff --git a/chromium/base/allocator/partition_allocator/partition_root_base.h b/chromium/base/allocator/partition_allocator/partition_root_base.h
deleted file mode 100644
index de9551c71d5..00000000000
--- a/chromium/base/allocator/partition_allocator/partition_root_base.h
+++ /dev/null
@@ -1,381 +0,0 @@
-// Copyright (c) 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_BASE_H_
-#define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_BASE_H_
-
-#include "base/allocator/partition_allocator/page_allocator.h"
-#include "base/allocator/partition_allocator/partition_alloc_constants.h"
-#include "base/allocator/partition_allocator/partition_alloc_forward.h"
-#include "base/allocator/partition_allocator/partition_bucket.h"
-#include "base/allocator/partition_allocator/partition_direct_map_extent.h"
-#include "base/allocator/partition_allocator/partition_page.h"
-#include "base/allocator/partition_allocator/spin_lock.h"
-#include "base/logging.h"
-#include "base/no_destructor.h"
-#include "base/synchronization/lock.h"
-#include "base/thread_annotations.h"
-#include "build/build_config.h"
-
-namespace base {
-
-typedef void (*OomFunction)(size_t);
-
-// PartitionAlloc supports setting hooks to observe allocations/frees as they
-// occur as well as 'override' hooks that allow overriding those operations.
-class BASE_EXPORT PartitionAllocHooks {
- public:
- // Log allocation and free events.
- typedef void AllocationObserverHook(void* address,
- size_t size,
- const char* type_name);
- typedef void FreeObserverHook(void* address);
-
- // If it returns true, the allocation has been overridden with the pointer in
- // *out.
- typedef bool AllocationOverrideHook(void** out,
- int flags,
- size_t size,
- const char* type_name);
- // If it returns true, then the allocation was overridden and has been freed.
- typedef bool FreeOverrideHook(void* address);
- // If it returns true, the underlying allocation is overridden and *out holds
- // the size of the underlying allocation.
- typedef bool ReallocOverrideHook(size_t* out, void* address);
-
- // To unhook, call Set*Hooks with nullptrs.
- static void SetObserverHooks(AllocationObserverHook* alloc_hook,
- FreeObserverHook* free_hook);
- static void SetOverrideHooks(AllocationOverrideHook* alloc_hook,
- FreeOverrideHook* free_hook,
- ReallocOverrideHook realloc_hook);
-
- // Helper method to check whether hooks are enabled. This is an optimization
- // so that if a function needs to call observer and override hooks in two
- // different places this value can be cached and only loaded once.
- static bool AreHooksEnabled() {
- return hooks_enabled_.load(std::memory_order_relaxed);
- }
-
- static void AllocationObserverHookIfEnabled(void* address,
- size_t size,
- const char* type_name);
- static bool AllocationOverrideHookIfEnabled(void** out,
- int flags,
- size_t size,
- const char* type_name);
-
- static void FreeObserverHookIfEnabled(void* address);
- static bool FreeOverrideHookIfEnabled(void* address);
-
- static void ReallocObserverHookIfEnabled(void* old_address,
- void* new_address,
- size_t size,
- const char* type_name);
- static bool ReallocOverrideHookIfEnabled(size_t* out, void* address);
-
- private:
- // Single bool that is used to indicate whether observer or allocation hooks
- // are set to reduce the numbers of loads required to check whether hooking is
- // enabled.
- static std::atomic<bool> hooks_enabled_;
-
- // Lock used to synchronize Set*Hooks calls.
- static std::atomic<AllocationObserverHook*> allocation_observer_hook_;
- static std::atomic<FreeObserverHook*> free_observer_hook_;
-
- static std::atomic<AllocationOverrideHook*> allocation_override_hook_;
- static std::atomic<FreeOverrideHook*> free_override_hook_;
- static std::atomic<ReallocOverrideHook*> realloc_override_hook_;
-};
-
-namespace internal {
-
-template <bool thread_safe>
-class LOCKABLE MaybeSpinLock {
- public:
- void Lock() EXCLUSIVE_LOCK_FUNCTION() {}
- void Unlock() UNLOCK_FUNCTION() {}
- void AssertAcquired() const ASSERT_EXCLUSIVE_LOCK() {}
-};
-
-template <bool thread_safe>
-class SCOPED_LOCKABLE ScopedGuard {
- public:
- explicit ScopedGuard(MaybeSpinLock<thread_safe>& lock)
- EXCLUSIVE_LOCK_FUNCTION(lock)
- : lock_(lock) {
- lock_.Lock();
- }
- ~ScopedGuard() UNLOCK_FUNCTION() { lock_.Unlock(); }
-
- private:
- MaybeSpinLock<thread_safe>& lock_;
-};
-
-#if DCHECK_IS_ON()
-template <>
-class LOCKABLE MaybeSpinLock<ThreadSafe> {
- public:
- MaybeSpinLock() : lock_() {}
- void Lock() EXCLUSIVE_LOCK_FUNCTION() { lock_->Acquire(); }
- void Unlock() UNLOCK_FUNCTION() { lock_->Release(); }
- void AssertAcquired() const ASSERT_EXCLUSIVE_LOCK() {
- lock_->AssertAcquired();
- }
-
- private:
- // NoDestructor to avoid issues with the "static destruction order fiasco".
- //
- // This also means that for DCHECK_IS_ON() builds we leak a lock when a
- // partition is destructed. This will in practice only show in some tests, as
- // partitons are not destructed in regular use. In addition, on most
- // platforms, base::Lock doesn't allocate memory and neither does the OS
- // library, and the destructor is a no-op.
- base::NoDestructor<base::Lock> lock_;
-};
-
-#else
-template <>
-class LOCKABLE MaybeSpinLock<ThreadSafe> {
- public:
- void Lock() EXCLUSIVE_LOCK_FUNCTION() { lock_.lock(); }
- void Unlock() UNLOCK_FUNCTION() { lock_.unlock(); }
- void AssertAcquired() const ASSERT_EXCLUSIVE_LOCK() {
- // Not supported by subtle::SpinLock.
- }
-
- private:
- subtle::SpinLock lock_;
-};
-#endif // DCHECK_IS_ON()
-
-// An "extent" is a span of consecutive superpages. We link to the partition's
-// next extent (if there is one) to the very start of a superpage's metadata
-// area.
-template <bool thread_safety>
-struct PartitionSuperPageExtentEntry {
- PartitionRootBase<thread_safety>* root;
- char* super_page_base;
- char* super_pages_end;
- PartitionSuperPageExtentEntry<thread_safety>* next;
-};
-static_assert(
- sizeof(PartitionSuperPageExtentEntry<ThreadSafe>) <= kPageMetadataSize,
- "PartitionSuperPageExtentEntry must be able to fit in a metadata slot");
-
-// g_oom_handling_function is invoked when PartitionAlloc hits OutOfMemory.
-static OomFunction g_oom_handling_function = nullptr;
-
-template <bool thread_safety>
-struct BASE_EXPORT PartitionRootBase {
- using Page = PartitionPage<thread_safety>;
- using Bucket = PartitionBucket<thread_safety>;
- using ScopedGuard = internal::ScopedGuard<thread_safety>;
-
- PartitionRootBase();
- virtual ~PartitionRootBase();
- MaybeSpinLock<thread_safety> lock_;
- size_t total_size_of_committed_pages = 0;
- size_t total_size_of_super_pages = 0;
- size_t total_size_of_direct_mapped_pages = 0;
- // Invariant: total_size_of_committed_pages <=
- // total_size_of_super_pages +
- // total_size_of_direct_mapped_pages.
- unsigned num_buckets = 0;
- unsigned max_allocation = 0;
- bool initialized = false;
- char* next_super_page = nullptr;
- char* next_partition_page = nullptr;
- char* next_partition_page_end = nullptr;
- PartitionSuperPageExtentEntry<thread_safety>* current_extent = nullptr;
- PartitionSuperPageExtentEntry<thread_safety>* first_extent = nullptr;
- PartitionDirectMapExtent<thread_safety>* direct_map_list = nullptr;
- Page* global_empty_page_ring[kMaxFreeableSpans] = {};
- int16_t global_empty_page_ring_index = 0;
- uintptr_t inverted_self = 0;
-
- // Public API
-
- // Allocates out of the given bucket. Properly, this function should probably
- // be in PartitionBucket, but because the implementation needs to be inlined
- // for performance, and because it needs to inspect PartitionPage,
- // it becomes impossible to have it in PartitionBucket as this causes a
- // cyclical dependency on PartitionPage function implementations.
- //
- // Moving it a layer lower couples PartitionRootBase and PartitionBucket, but
- // preserves the layering of the includes.
- //
- // Note the matching Free() functions are in PartitionPage.
- ALWAYS_INLINE void* AllocFromBucket(Bucket* bucket, int flags, size_t size)
- EXCLUSIVE_LOCKS_REQUIRED(lock_);
- ALWAYS_INLINE void Free(void* ptr);
-
- ALWAYS_INLINE static bool IsValidPage(Page* page);
- ALWAYS_INLINE static PartitionRootBase* FromPage(Page* page);
-
- ALWAYS_INLINE void IncreaseCommittedPages(size_t len);
- ALWAYS_INLINE void DecreaseCommittedPages(size_t len);
- ALWAYS_INLINE void DecommitSystemPages(void* address, size_t length)
- EXCLUSIVE_LOCKS_REQUIRED(lock_);
- ALWAYS_INLINE void RecommitSystemPages(void* address, size_t length)
- EXCLUSIVE_LOCKS_REQUIRED(lock_);
-
- // Frees memory from this partition, if possible, by decommitting pages.
- // |flags| is an OR of base::PartitionPurgeFlags.
- virtual void PurgeMemory(int flags) = 0;
- NOINLINE void OutOfMemory(size_t size);
-
- protected:
- void DecommitEmptyPages() EXCLUSIVE_LOCKS_REQUIRED(lock_);
-};
-
-template <bool thread_safety>
-ALWAYS_INLINE void* PartitionRootBase<thread_safety>::AllocFromBucket(
- Bucket* bucket,
- int flags,
- size_t size) {
- bool zero_fill = flags & PartitionAllocZeroFill;
- bool is_already_zeroed = false;
-
- Page* page = bucket->active_pages_head;
- // Check that this page is neither full nor freed.
- DCHECK(page);
- DCHECK(page->num_allocated_slots >= 0);
- void* ret = page->freelist_head;
- if (LIKELY(ret != 0)) {
- // If these DCHECKs fire, you probably corrupted memory. TODO(palmer): See
- // if we can afford to make these CHECKs.
- DCHECK(IsValidPage(page));
-
- // All large allocations must go through the slow path to correctly update
- // the size metadata.
- DCHECK(page->get_raw_size() == 0);
- internal::PartitionFreelistEntry* new_head =
- internal::EncodedPartitionFreelistEntry::Decode(
- page->freelist_head->next);
- page->freelist_head = new_head;
- page->num_allocated_slots++;
- } else {
- ret = bucket->SlowPathAlloc(this, flags, size, &is_already_zeroed);
- // TODO(palmer): See if we can afford to make this a CHECK.
- DCHECK(!ret || IsValidPage(Page::FromPointer(ret)));
- }
-
-#if DCHECK_IS_ON()
- if (!ret) {
- return nullptr;
- }
-
- page = Page::FromPointer(ret);
- // TODO(ajwong): Can |page->bucket| ever not be |this|? If not, can this just
- // be bucket->slot_size?
- size_t new_slot_size = page->bucket->slot_size;
- size_t raw_size = page->get_raw_size();
- if (raw_size) {
- DCHECK(raw_size == size);
- new_slot_size = raw_size;
- }
- size_t no_cookie_size = PartitionCookieSizeAdjustSubtract(new_slot_size);
- char* char_ret = static_cast<char*>(ret);
- // The value given to the application is actually just after the cookie.
- ret = char_ret + kCookieSize;
-
- // Fill the region kUninitializedByte or 0, and surround it with 2 cookies.
- PartitionCookieWriteValue(char_ret);
- if (!zero_fill) {
- memset(ret, kUninitializedByte, no_cookie_size);
- } else if (!is_already_zeroed) {
- memset(ret, 0, no_cookie_size);
- }
- PartitionCookieWriteValue(char_ret + kCookieSize + no_cookie_size);
-#else
- if (ret && zero_fill && !is_already_zeroed) {
- memset(ret, 0, size);
- }
-#endif
-
- return ret;
-}
-
-template <bool thread_safety>
-ALWAYS_INLINE void PartitionRootBase<thread_safety>::Free(void* ptr) {
-#if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
- free(ptr);
-#else
- DCHECK(initialized);
-
- if (UNLIKELY(!ptr))
- return;
-
- if (PartitionAllocHooks::AreHooksEnabled()) {
- PartitionAllocHooks::FreeObserverHookIfEnabled(ptr);
- if (PartitionAllocHooks::FreeOverrideHookIfEnabled(ptr))
- return;
- }
-
- ptr = internal::PartitionCookieFreePointerAdjust(ptr);
- Page* page = Page::FromPointer(ptr);
- // TODO(palmer): See if we can afford to make this a CHECK.
- DCHECK(IsValidPage(page));
- internal::DeferredUnmap deferred_unmap;
- {
- ScopedGuard guard{lock_};
- deferred_unmap = page->Free(ptr);
- }
- deferred_unmap.Run();
-#endif
-}
-
-template <bool thread_safety>
-ALWAYS_INLINE bool PartitionRootBase<thread_safety>::IsValidPage(Page* page) {
- PartitionRootBase* root = PartitionRootBase::FromPage(page);
- return root->inverted_self == ~reinterpret_cast<uintptr_t>(root);
-}
-
-template <bool thread_safety>
-ALWAYS_INLINE PartitionRootBase<thread_safety>*
-PartitionRootBase<thread_safety>::FromPage(Page* page) {
- auto* extent_entry =
- reinterpret_cast<PartitionSuperPageExtentEntry<thread_safety>*>(
- reinterpret_cast<uintptr_t>(page) & kSystemPageBaseMask);
- return extent_entry->root;
-}
-
-template <bool thread_safety>
-ALWAYS_INLINE void PartitionRootBase<thread_safety>::IncreaseCommittedPages(
- size_t len) {
- total_size_of_committed_pages += len;
- DCHECK(total_size_of_committed_pages <=
- total_size_of_super_pages + total_size_of_direct_mapped_pages);
-}
-
-template <bool thread_safety>
-ALWAYS_INLINE void PartitionRootBase<thread_safety>::DecreaseCommittedPages(
- size_t len) {
- total_size_of_committed_pages -= len;
- DCHECK(total_size_of_committed_pages <=
- total_size_of_super_pages + total_size_of_direct_mapped_pages);
-}
-
-template <bool thread_safety>
-ALWAYS_INLINE void PartitionRootBase<thread_safety>::DecommitSystemPages(
- void* address,
- size_t length) {
- ::base::DecommitSystemPages(address, length);
- DecreaseCommittedPages(length);
-}
-
-template <bool thread_safety>
-ALWAYS_INLINE void PartitionRootBase<thread_safety>::RecommitSystemPages(
- void* address,
- size_t length) {
- CHECK(::base::RecommitSystemPages(address, length, PageReadWrite));
- IncreaseCommittedPages(length);
-}
-
-} // namespace internal
-} // namespace base
-
-#endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ROOT_BASE_H_
diff --git a/chromium/base/android/resource_exclusions.gni b/chromium/base/android/resource_exclusions.gni
index f5b8d140241..b70bc0ff1bb 100644
--- a/chromium/base/android/resource_exclusions.gni
+++ b/chromium/base/android/resource_exclusions.gni
@@ -13,3 +13,10 @@ common_resource_exclusion_exceptions = [
# Remove WearOS resources (a couple exist in appcompat).
common_resource_exclusion_regex += "|-watch\b"
+
+# As of May 2020, 0.2% of devices are ldpi:
+# https://developer.android.com/about/dashboards/index.html
+# Only a single resource is defined in this config:
+# ic_arrow_down_24dp
+# It saves a small amount of APK size to omit the config.
+common_resource_exclusion_regex += "|-ldpi\b"
diff --git a/chromium/base/base_paths_fuchsia.cc b/chromium/base/base_paths_fuchsia.cc
index 0d948b3aace..a7ea21af7f4 100644
--- a/chromium/base/base_paths_fuchsia.cc
+++ b/chromium/base/base_paths_fuchsia.cc
@@ -10,7 +10,7 @@
#include "base/command_line.h"
#include "base/files/file_util.h"
#include "base/fuchsia/file_utils.h"
-#include "base/logging.h"
+#include "base/notreached.h"
#include "base/path_service.h"
#include "base/process/process.h"
diff --git a/chromium/base/base_paths_posix.cc b/chromium/base/base_paths_posix.cc
index 00a15696cac..5b9f34b77e0 100644
--- a/chromium/base/base_paths_posix.cc
+++ b/chromium/base/base_paths_posix.cc
@@ -20,6 +20,7 @@
#include "base/files/file_util.h"
#include "base/logging.h"
#include "base/nix/xdg_util.h"
+#include "base/notreached.h"
#include "base/path_service.h"
#include "base/process/process_metrics.h"
#include "build/build_config.h"
diff --git a/chromium/base/base_paths_win.cc b/chromium/base/base_paths_win.cc
index 453b249c047..1fe49b61493 100644
--- a/chromium/base/base_paths_win.cc
+++ b/chromium/base/base_paths_win.cc
@@ -107,6 +107,18 @@ bool PathProviderWin(int key, FilePath* result) {
return false;
cur = FilePath(system_buffer);
break;
+ case base::DIR_COMMON_STARTUP:
+ if (FAILED(SHGetFolderPath(nullptr, CSIDL_COMMON_STARTUP, nullptr,
+ SHGFP_TYPE_CURRENT, system_buffer)))
+ return false;
+ cur = FilePath(system_buffer);
+ break;
+ case base::DIR_USER_STARTUP:
+ if (FAILED(SHGetFolderPath(nullptr, CSIDL_STARTUP, nullptr,
+ SHGFP_TYPE_CURRENT, system_buffer)))
+ return false;
+ cur = FilePath(system_buffer);
+ break;
case base::DIR_APP_DATA:
if (FAILED(SHGetFolderPath(NULL, CSIDL_APPDATA, NULL, SHGFP_TYPE_CURRENT,
system_buffer)))
diff --git a/chromium/base/base_paths_win.h b/chromium/base/base_paths_win.h
index 2db16a62719..df1991c0da3 100644
--- a/chromium/base/base_paths_win.h
+++ b/chromium/base/base_paths_win.h
@@ -30,6 +30,10 @@ enum {
// Start Menu\Programs"
DIR_START_MENU, // Usually "C:\Users\<user>\AppData\Roaming\
// Microsoft\Windows\Start Menu\Programs"
+ DIR_COMMON_STARTUP, // Usually "C:\ProgramData\Microsoft\Windows\
+ // Start Menu\Programs\Startup"
+ DIR_USER_STARTUP, // Usually "C:\Users\<user>\AppData\Roaming\
+ // Microsoft\Windows\Start Menu\Programs\Startup"
DIR_APP_DATA, // Application Data directory under the user
// profile.
DIR_LOCAL_APP_DATA, // "Local Settings\Application Data" directory
diff --git a/chromium/base/base_switches.cc b/chromium/base/base_switches.cc
index 6a474879612..4d45e975751 100644
--- a/chromium/base/base_switches.cc
+++ b/chromium/base/base_switches.cc
@@ -136,6 +136,10 @@ const char kEnableCrashReporterForTesting[] =
// Enables the reached code profiler that samples all threads in all processes
// to determine which functions are almost never executed.
const char kEnableReachedCodeProfiler[] = "enable-reached-code-profiler";
+
+// Specifies the profiling interval in microseconds for reached code profiler.
+const char kReachedCodeSamplingIntervalUs[] =
+ "reached-code-sampling-interval-us";
#endif
#if defined(OS_LINUX)
diff --git a/chromium/base/base_switches.h b/chromium/base/base_switches.h
index b1923efc1e6..ccfb87d3acc 100644
--- a/chromium/base/base_switches.h
+++ b/chromium/base/base_switches.h
@@ -48,6 +48,7 @@ extern const char kEnableCrashReporterForTesting[];
#if defined(OS_ANDROID)
extern const char kEnableReachedCodeProfiler[];
+extern const char kReachedCodeSamplingIntervalUs[];
extern const char kOrderfileMemoryOptimization[];
#endif
diff --git a/chromium/base/big_endian.h b/chromium/base/big_endian.h
index 96650a6955f..5e232f426f2 100644
--- a/chromium/base/big_endian.h
+++ b/chromium/base/big_endian.h
@@ -7,6 +7,7 @@
#include <stddef.h>
#include <stdint.h>
+#include <type_traits>
#include "base/base_export.h"
#include "base/strings/string_piece.h"
@@ -20,6 +21,7 @@ namespace base {
// This would cause SIGBUS on ARMv5 or earlier and ARMv6-M.
template<typename T>
inline void ReadBigEndian(const char buf[], T* out) {
+ static_assert(std::is_integral<T>::value, "T has to be an integral type.");
*out = buf[0];
for (size_t i = 1; i < sizeof(T); ++i) {
*out <<= 8;
@@ -32,6 +34,7 @@ inline void ReadBigEndian(const char buf[], T* out) {
// Note: this loop is unrolled with -O1 and above.
template<typename T>
inline void WriteBigEndian(char buf[], T val) {
+ static_assert(std::is_integral<T>::value, "T has to be an integral type.");
for (size_t i = 0; i < sizeof(T); ++i) {
buf[sizeof(T)-i-1] = static_cast<char>(val & 0xFF);
val >>= 8;
diff --git a/chromium/base/bind_internal.h b/chromium/base/bind_internal.h
index d9a6a23a5c2..d8efc568c05 100644
--- a/chromium/base/bind_internal.h
+++ b/chromium/base/bind_internal.h
@@ -15,10 +15,11 @@
#include "base/bind.h"
#include "base/callback_internal.h"
+#include "base/check.h"
#include "base/compiler_specific.h"
-#include "base/logging.h"
#include "base/memory/raw_scoped_refptr_mismatch_checker.h"
#include "base/memory/weak_ptr.h"
+#include "base/notreached.h"
#include "base/template_util.h"
#include "build/build_config.h"
diff --git a/chromium/base/bits.h b/chromium/base/bits.h
index d2c5ac9caa7..20f2ff278f3 100644
--- a/chromium/base/bits.h
+++ b/chromium/base/bits.h
@@ -7,13 +7,14 @@
#ifndef BASE_BITS_H_
#define BASE_BITS_H_
+#include <limits.h>
#include <stddef.h>
#include <stdint.h>
#include <type_traits>
+#include "base/check.h"
#include "base/compiler_specific.h"
-#include "base/logging.h"
#include "build/build_config.h"
#if defined(COMPILER_MSVC)
@@ -203,6 +204,16 @@ inline int Log2Ceiling(uint32_t n) {
return (n ? 32 : -1) - CountLeadingZeroBits(n - 1);
}
+// Returns a value of type T with a single bit set in the left-most position.
+// Can be used instead of manually shifting a 1 to the left.
+template <typename T>
+constexpr T LeftmostBit() {
+ static_assert(std::is_integral<T>::value,
+ "This function can only be used with integral types.");
+ T one(1u);
+ return one << ((CHAR_BIT * sizeof(T) - 1));
+}
+
} // namespace bits
} // namespace base
diff --git a/chromium/base/bits_unittest.cc b/chromium/base/bits_unittest.cc
index 71becbe510b..249bbf7c457 100644
--- a/chromium/base/bits_unittest.cc
+++ b/chromium/base/bits_unittest.cc
@@ -202,5 +202,32 @@ TEST(BitsTest, PowerOfTwo) {
EXPECT_FALSE(IsPowerOfTwo(int64_t{1} << 63));
}
+TEST(BitsTest, LeftMostBit) {
+ // Construction of a signed type from an unsigned one of the same width
+ // preserves all bits. Explicitily confirming this behavior here to illustrate
+ // correctness of reusing unsigned literals to test behavior of signed types.
+ // Using signed literals does not work with EXPECT_EQ.
+ static_assert(int64_t(0xFFFFFFFFFFFFFFFFu) == 0xFFFFFFFFFFFFFFFFl,
+ "Comparing signed with unsigned literals compares bits.");
+ static_assert((0xFFFFFFFFFFFFFFFFu ^ 0xFFFFFFFFFFFFFFFFl) == 0,
+ "Signed and unsigned literals have the same bits set");
+
+ uint64_t unsigned_long_long_value = 0x8000000000000000u;
+ EXPECT_EQ(LeftmostBit<uint64_t>(), unsigned_long_long_value);
+ EXPECT_EQ(LeftmostBit<int64_t>(), int64_t(unsigned_long_long_value));
+
+ uint32_t unsigned_long_value = 0x80000000u;
+ EXPECT_EQ(LeftmostBit<uint32_t>(), unsigned_long_value);
+ EXPECT_EQ(LeftmostBit<int32_t>(), int32_t(unsigned_long_value));
+
+ uint16_t unsigned_short_value = 0x8000u;
+ EXPECT_EQ(LeftmostBit<uint16_t>(), unsigned_short_value);
+ EXPECT_EQ(LeftmostBit<int16_t>(), int16_t(unsigned_short_value));
+
+ uint8_t unsigned_byte_value = 0x80u;
+ EXPECT_EQ(LeftmostBit<uint8_t>(), unsigned_byte_value);
+ EXPECT_EQ(LeftmostBit<int8_t>(), int8_t(unsigned_byte_value));
+}
+
} // namespace bits
} // namespace base
diff --git a/chromium/base/callback.h b/chromium/base/callback.h
index 478a353eba9..b7cf48bab84 100644
--- a/chromium/base/callback.h
+++ b/chromium/base/callback.h
@@ -13,7 +13,7 @@
#include "base/callback_forward.h"
#include "base/callback_internal.h"
-#include "base/logging.h"
+#include "base/notreached.h"
// -----------------------------------------------------------------------------
// Usage documentation
diff --git a/chromium/base/callback_list.h b/chromium/base/callback_list.h
index 15cf0b291db..f03c46fa763 100644
--- a/chromium/base/callback_list.h
+++ b/chromium/base/callback_list.h
@@ -14,8 +14,8 @@
#include "base/bind.h"
#include "base/callback.h"
#include "base/callback_helpers.h"
+#include "base/check.h"
#include "base/compiler_specific.h"
-#include "base/logging.h"
#include "base/memory/weak_ptr.h"
#include "base/stl_util.h"
diff --git a/chromium/base/cancelable_callback.h b/chromium/base/cancelable_callback.h
index b4c40b1175f..aafa6d7504f 100644
--- a/chromium/base/cancelable_callback.h
+++ b/chromium/base/cancelable_callback.h
@@ -51,8 +51,8 @@
#include "base/bind.h"
#include "base/callback.h"
#include "base/callback_internal.h"
+#include "base/check.h"
#include "base/compiler_specific.h"
-#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
diff --git a/chromium/base/check.cc b/chromium/base/check.cc
index 5035cd52b3c..9bc5fc85c66 100644
--- a/chromium/base/check.cc
+++ b/chromium/base/check.cc
@@ -11,6 +11,7 @@
#pragma clang max_tokens_here 17000
#endif
+#include "base/check_op.h"
#include "base/logging.h"
#include "build/build_config.h"
diff --git a/chromium/base/check.h b/chromium/base/check.h
index 086846d33a8..c94ab68db90 100644
--- a/chromium/base/check.h
+++ b/chromium/base/check.h
@@ -9,6 +9,7 @@
#include "base/base_export.h"
#include "base/compiler_specific.h"
+#include "base/dcheck_is_on.h"
#include "base/immediate_crash.h"
// This header defines the CHECK, DCHECK, and DPCHECK macros.
@@ -126,12 +127,6 @@ class BASE_EXPORT CheckError {
#endif
-#if defined(NDEBUG) && !defined(DCHECK_ALWAYS_ON)
-#define DCHECK_IS_ON() false
-#else
-#define DCHECK_IS_ON() true
-#endif
-
#if DCHECK_IS_ON()
#define DCHECK(condition) \
diff --git a/chromium/base/check_op.h b/chromium/base/check_op.h
index 4816df4e8b0..1acfa614e95 100644
--- a/chromium/base/check_op.h
+++ b/chromium/base/check_op.h
@@ -48,8 +48,10 @@ BASE_EXPORT char* StreamValToStr(const void* v,
void (*stream_func)(std::ostream&,
const void*));
-#ifndef __has_builtin
-#define __has_builtin(x) 0 // Compatibility with non-clang compilers.
+#ifdef __has_builtin
+#define SUPPORTS_BUILTIN_ADDRESSOF (__has_builtin(__builtin_addressof))
+#else
+#define SUPPORTS_BUILTIN_ADDRESSOF 0
#endif
template <typename T>
@@ -65,7 +67,7 @@ CheckOpValueStr(const T& v) {
// operator& might be overloaded, so do the std::addressof dance.
// __builtin_addressof is preferred since it also handles Obj-C ARC pointers.
// Some casting is still needed, because T might be volatile.
-#if __has_builtin(__builtin_addressof)
+#if SUPPORTS_BUILTIN_ADDRESSOF
const void* vp = const_cast<const void*>(
reinterpret_cast<const volatile void*>(__builtin_addressof(v)));
#else
@@ -75,6 +77,8 @@ CheckOpValueStr(const T& v) {
return StreamValToStr(vp, f);
}
+#undef SUPPORTS_BUILTIN_ADDRESSOF
+
// Overload for types that have no operator<< but do have .ToString() defined.
template <typename T>
inline typename std::enable_if<
@@ -115,7 +119,7 @@ CheckOpValueStr(const T& v) {
class CheckOpResult {
public:
// An empty result signals success.
- constexpr CheckOpResult() = default;
+ constexpr CheckOpResult() {}
// A non-success result. expr_str is something like "foo != bar". v1_str and
// v2_str are the stringified run-time values of foo and bar. Takes ownership
@@ -157,9 +161,13 @@ class CheckOpResult {
#endif
-// The int-int overload avoids address-taking static int members.
+// The second overload avoids address-taking of static members for
+// fundamental types.
#define DEFINE_CHECK_OP_IMPL(name, op) \
- template <typename T, typename U> \
+ template <typename T, typename U, \
+ std::enable_if_t<!std::is_fundamental<T>::value || \
+ !std::is_fundamental<U>::value, \
+ int> = 0> \
constexpr ::logging::CheckOpResult Check##name##Impl( \
const T& v1, const U& v2, const char* expr_str) { \
if (ANALYZER_ASSUME_TRUE(v1 op v2)) \
@@ -167,7 +175,11 @@ class CheckOpResult {
return ::logging::CheckOpResult(expr_str, CheckOpValueStr(v1), \
CheckOpValueStr(v2)); \
} \
- constexpr ::logging::CheckOpResult Check##name##Impl(int v1, int v2, \
+ template <typename T, typename U, \
+ std::enable_if_t<std::is_fundamental<T>::value && \
+ std::is_fundamental<U>::value, \
+ int> = 0> \
+ constexpr ::logging::CheckOpResult Check##name##Impl(T v1, U v2, \
const char* expr_str) { \
if (ANALYZER_ASSUME_TRUE(v1 op v2)) \
return ::logging::CheckOpResult(); \
diff --git a/chromium/base/command_line.cc b/chromium/base/command_line.cc
index dc24444155b..aea15a436ed 100644
--- a/chromium/base/command_line.cc
+++ b/chromium/base/command_line.cc
@@ -10,6 +10,7 @@
#include "base/containers/span.h"
#include "base/files/file_path.h"
#include "base/logging.h"
+#include "base/notreached.h"
#include "base/stl_util.h"
#include "base/strings/string_split.h"
#include "base/strings/string_tokenizer.h"
diff --git a/chromium/base/command_line_unittest.cc b/chromium/base/command_line_unittest.cc
index a4107b59ba3..72854518d80 100644
--- a/chromium/base/command_line_unittest.cc
+++ b/chromium/base/command_line_unittest.cc
@@ -226,25 +226,25 @@ TEST(CommandLineTest, GetArgumentsString) {
CommandLine::StringType expected_str;
expected_str.append(FILE_PATH_LITERAL("--"))
- .append(expected_first_arg)
- .append(FILE_PATH_LITERAL("="))
- .append(QUOTE_ON_WIN)
- .append(kPath1)
- .append(QUOTE_ON_WIN)
- .append(FILE_PATH_LITERAL(" "))
- .append(FILE_PATH_LITERAL("--"))
- .append(expected_second_arg)
- .append(FILE_PATH_LITERAL("="))
- .append(QUOTE_ON_WIN)
- .append(kPath2)
- .append(QUOTE_ON_WIN)
- .append(FILE_PATH_LITERAL(" "))
- .append(QUOTE_ON_WIN)
- .append(expected_third_arg)
- .append(QUOTE_ON_WIN)
- .append(FILE_PATH_LITERAL(" "))
- .append(expected_fourth_arg)
- .append(FILE_PATH_LITERAL(" "));
+ .append(expected_first_arg)
+ .append(FILE_PATH_LITERAL("="))
+ .append(QUOTE_ON_WIN)
+ .append(kPath1)
+ .append(QUOTE_ON_WIN)
+ .append(FILE_PATH_LITERAL(" "))
+ .append(FILE_PATH_LITERAL("--"))
+ .append(expected_second_arg)
+ .append(FILE_PATH_LITERAL("="))
+ .append(QUOTE_ON_WIN)
+ .append(kPath2)
+ .append(QUOTE_ON_WIN)
+ .append(FILE_PATH_LITERAL(" "))
+ .append(QUOTE_ON_WIN)
+ .append(expected_third_arg)
+ .append(QUOTE_ON_WIN)
+ .append(FILE_PATH_LITERAL(" "))
+ .append(expected_fourth_arg)
+ .append(FILE_PATH_LITERAL(" "));
CommandLine::StringType expected_str_no_quote_placeholders(expected_str);
expected_str_no_quote_placeholders.append(expected_fifth_arg);
@@ -253,8 +253,8 @@ TEST(CommandLineTest, GetArgumentsString) {
#if defined(OS_WIN)
CommandLine::StringType expected_str_quote_placeholders(expected_str);
expected_str_quote_placeholders.append(QUOTE_ON_WIN)
- .append(expected_fifth_arg)
- .append(QUOTE_ON_WIN);
+ .append(expected_fifth_arg)
+ .append(QUOTE_ON_WIN);
EXPECT_EQ(expected_str_quote_placeholders,
cl.GetArgumentsStringWithPlaceholders());
#endif
diff --git a/chromium/base/containers/adapters.h b/chromium/base/containers/adapters.h
index ec33481752f..ae22de2b473 100644
--- a/chromium/base/containers/adapters.h
+++ b/chromium/base/containers/adapters.h
@@ -10,8 +10,6 @@
#include <iterator>
#include <utility>
-#include "base/macros.h"
-
namespace base {
namespace internal {
@@ -24,14 +22,13 @@ class ReversedAdapter {
explicit ReversedAdapter(T& t) : t_(t) {}
ReversedAdapter(const ReversedAdapter& ra) : t_(ra.t_) {}
+ ReversedAdapter& operator=(const ReversedAdapter&) = delete;
Iterator begin() const { return std::rbegin(t_); }
Iterator end() const { return std::rend(t_); }
private:
T& t_;
-
- DISALLOW_ASSIGN(ReversedAdapter);
};
} // namespace internal
diff --git a/chromium/base/containers/checked_iterators.h b/chromium/base/containers/checked_iterators.h
index 986f6fad102..ce3fcfc0322 100644
--- a/chromium/base/containers/checked_iterators.h
+++ b/chromium/base/containers/checked_iterators.h
@@ -9,8 +9,8 @@
#include <memory>
#include <type_traits>
+#include "base/check_op.h"
#include "base/containers/util.h"
-#include "base/logging.h"
namespace base {
diff --git a/chromium/base/containers/circular_deque.h b/chromium/base/containers/circular_deque.h
index 6c2c3a885a6..4fc61c2346b 100644
--- a/chromium/base/containers/circular_deque.h
+++ b/chromium/base/containers/circular_deque.h
@@ -11,8 +11,8 @@
#include <type_traits>
#include <utility>
+#include "base/check_op.h"
#include "base/containers/vector_buffer.h"
-#include "base/logging.h"
#include "base/macros.h"
#include "base/stl_util.h"
#include "base/template_util.h"
diff --git a/chromium/base/containers/flat_map.h b/chromium/base/containers/flat_map.h
index ed82c5d516c..4a0def5a378 100644
--- a/chromium/base/containers/flat_map.h
+++ b/chromium/base/containers/flat_map.h
@@ -9,8 +9,8 @@
#include <tuple>
#include <utility>
+#include "base/check.h"
#include "base/containers/flat_tree.h"
-#include "base/logging.h"
#include "base/template_util.h"
namespace base {
@@ -202,7 +202,7 @@ class flat_map : public ::base::internal::flat_tree<
~flat_map() = default;
flat_map& operator=(const flat_map&) = default;
- flat_map& operator=(flat_map&&) = default;
+ flat_map& operator=(flat_map&&) noexcept = default;
// Takes the first if there are duplicates in the initializer list.
flat_map& operator=(std::initializer_list<value_type> ilist);
diff --git a/chromium/base/containers/flat_tree.h b/chromium/base/containers/flat_tree.h
index 9412ff6af74..ce6e92b4d36 100644
--- a/chromium/base/containers/flat_tree.h
+++ b/chromium/base/containers/flat_tree.h
@@ -125,7 +125,8 @@ class flat_tree {
// Assume that move assignment invalidates iterators and references.
flat_tree& operator=(const flat_tree&);
- flat_tree& operator=(flat_tree&&);
+ flat_tree& operator=(flat_tree&&) noexcept(
+ std::is_nothrow_move_assignable<underlying_type>::value);
// Takes the first if there are duplicates in the initializer list.
flat_tree& operator=(std::initializer_list<value_type> ilist);
@@ -518,7 +519,9 @@ auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::operator=(
const flat_tree&) -> flat_tree& = default;
template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
-auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::operator=(flat_tree &&)
+auto flat_tree<Key, Value, GetKeyFromValue, KeyCompare>::
+operator=(flat_tree&&) noexcept(
+ std::is_nothrow_move_assignable<underlying_type>::value)
-> flat_tree& = default;
template <class Key, class Value, class GetKeyFromValue, class KeyCompare>
diff --git a/chromium/base/containers/flat_tree_unittest.cc b/chromium/base/containers/flat_tree_unittest.cc
index ea6af1f76fc..8eab5b6f682 100644
--- a/chromium/base/containers/flat_tree_unittest.cc
+++ b/chromium/base/containers/flat_tree_unittest.cc
@@ -35,7 +35,6 @@
#include <string>
#include <vector>
-#include "base/macros.h"
#include "base/template_util.h"
#include "base/test/move_only_int.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -95,6 +94,8 @@ class Emplaceable {
other.int_ = 0;
other.double_ = 0.0;
}
+ Emplaceable(const Emplaceable&) = delete;
+ Emplaceable& operator=(const Emplaceable&) = delete;
Emplaceable& operator=(Emplaceable&& other) {
int_ = other.int_;
@@ -115,8 +116,6 @@ class Emplaceable {
private:
int int_;
double double_;
-
- DISALLOW_COPY_AND_ASSIGN(Emplaceable);
};
struct TemplateConstructor {
diff --git a/chromium/base/containers/id_map.h b/chromium/base/containers/id_map.h
index 4c816da3767..4caf8a2e2d3 100644
--- a/chromium/base/containers/id_map.h
+++ b/chromium/base/containers/id_map.h
@@ -14,9 +14,9 @@
#include <unordered_map>
#include <utility>
+#include "base/check_op.h"
#include "base/containers/flat_set.h"
-#include "base/logging.h"
-#include "base/macros.h"
+#include "base/notreached.h"
#include "base/sequence_checker.h"
namespace base {
@@ -51,6 +51,9 @@ class IDMap final {
DETACH_FROM_SEQUENCE(sequence_checker_);
}
+ IDMap(const IDMap&) = delete;
+ IDMap& operator=(const IDMap&) = delete;
+
~IDMap() {
// Many IDMap's are static, and hence will be destroyed on the main
// thread. However, all the accesses may take place on another thread (or
@@ -281,8 +284,6 @@ class IDMap final {
bool check_on_null_data_;
SEQUENCE_CHECKER(sequence_checker_);
-
- DISALLOW_COPY_AND_ASSIGN(IDMap);
};
} // namespace base
diff --git a/chromium/base/containers/intrusive_heap.h b/chromium/base/containers/intrusive_heap.h
index 96ad0b207d0..320eea0f364 100644
--- a/chromium/base/containers/intrusive_heap.h
+++ b/chromium/base/containers/intrusive_heap.h
@@ -137,7 +137,7 @@
#include <vector>
#include "base/base_export.h"
-#include "base/logging.h"
+#include "base/check_op.h"
#include "base/macros.h"
#include "base/memory/ptr_util.h"
diff --git a/chromium/base/containers/linked_list.h b/chromium/base/containers/linked_list.h
index a913badb887..c18bda3268d 100644
--- a/chromium/base/containers/linked_list.h
+++ b/chromium/base/containers/linked_list.h
@@ -5,8 +5,6 @@
#ifndef BASE_CONTAINERS_LINKED_LIST_H_
#define BASE_CONTAINERS_LINKED_LIST_H_
-#include "base/macros.h"
-
// Simple LinkedList type. (See the Q&A section to understand how this
// differs from std::list).
//
@@ -102,6 +100,9 @@ class LinkNode {
}
}
+ LinkNode(const LinkNode&) = delete;
+ LinkNode& operator=(const LinkNode&) = delete;
+
// Insert |this| into the linked list, before |e|.
void InsertBefore(LinkNode<T>* e) {
this->next_ = e;
@@ -148,8 +149,6 @@ class LinkNode {
private:
LinkNode<T>* previous_;
LinkNode<T>* next_;
-
- DISALLOW_COPY_AND_ASSIGN(LinkNode);
};
template <typename T>
@@ -159,6 +158,8 @@ class LinkedList {
// list (root_.next() will point back to the start of the list,
// and root_->previous() wraps around to the end of the list).
LinkedList() : root_(&root_, &root_) {}
+ LinkedList(const LinkedList&) = delete;
+ LinkedList& operator=(const LinkedList&) = delete;
// Appends |e| to the end of the linked list.
void Append(LinkNode<T>* e) {
@@ -181,8 +182,6 @@ class LinkedList {
private:
LinkNode<T> root_;
-
- DISALLOW_COPY_AND_ASSIGN(LinkedList);
};
} // namespace base
diff --git a/chromium/base/containers/mru_cache.h b/chromium/base/containers/mru_cache.h
index 4a9f44e868e..fac49e909af 100644
--- a/chromium/base/containers/mru_cache.h
+++ b/chromium/base/containers/mru_cache.h
@@ -25,8 +25,7 @@
#include <unordered_map>
#include <utility>
-#include "base/logging.h"
-#include "base/macros.h"
+#include "base/check.h"
namespace base {
namespace trace_event {
@@ -82,6 +81,9 @@ class MRUCacheBase {
// can pass NO_AUTO_EVICT to not restrict the cache size.
explicit MRUCacheBase(size_type max_size) : max_size_(max_size) {}
+ MRUCacheBase(const MRUCacheBase&) = delete;
+ MRUCacheBase& operator=(const MRUCacheBase&) = delete;
+
virtual ~MRUCacheBase() = default;
size_type max_size() const { return max_size_; }
@@ -211,8 +213,6 @@ class MRUCacheBase {
KeyIndex index_;
size_type max_size_;
-
- DISALLOW_COPY_AND_ASSIGN(MRUCacheBase);
};
// MRUCache --------------------------------------------------------------------
@@ -230,10 +230,9 @@ class MRUCache : public MRUCacheBase<KeyType, PayloadType, CompareType> {
// See MRUCacheBase, noting the possibility of using NO_AUTO_EVICT.
explicit MRUCache(typename ParentType::size_type max_size)
: ParentType(max_size) {}
- virtual ~MRUCache() = default;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MRUCache);
+ MRUCache(const MRUCache&) = delete;
+ MRUCache& operator=(const MRUCache&) = delete;
+ ~MRUCache() override = default;
};
// HashingMRUCache ------------------------------------------------------------
@@ -257,10 +256,9 @@ class HashingMRUCache
// See MRUCacheBase, noting the possibility of using NO_AUTO_EVICT.
explicit HashingMRUCache(typename ParentType::size_type max_size)
: ParentType(max_size) {}
- virtual ~HashingMRUCache() = default;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(HashingMRUCache);
+ HashingMRUCache(const HashingMRUCache&) = delete;
+ HashingMRUCache& operator=(const HashingMRUCache&) = delete;
+ ~HashingMRUCache() override = default;
};
} // namespace base
diff --git a/chromium/base/containers/mru_cache_unittest.cc b/chromium/base/containers/mru_cache_unittest.cc
index d4ee4827d56..cd4330e247a 100644
--- a/chromium/base/containers/mru_cache_unittest.cc
+++ b/chromium/base/containers/mru_cache_unittest.cc
@@ -7,9 +7,13 @@
#include <cstddef>
#include <memory>
-#include "base/trace_event/memory_usage_estimator.h"
+#include "base/tracing_buildflags.h"
#include "testing/gtest/include/gtest/gtest.h"
+#if BUILDFLAG(ENABLE_BASE_TRACING)
+#include "base/trace_event/memory_usage_estimator.h"
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
+
namespace base {
namespace {
@@ -380,6 +384,7 @@ TEST(MRUCacheTest, Swap) {
}
}
+#if BUILDFLAG(ENABLE_BASE_TRACING)
TEST(MRUCacheTest, EstimateMemory) {
base::MRUCache<std::string, int> cache(10);
@@ -389,5 +394,6 @@ TEST(MRUCacheTest, EstimateMemory) {
EXPECT_GT(trace_event::EstimateMemoryUsage(cache),
trace_event::EstimateMemoryUsage(key));
}
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
} // namespace base
diff --git a/chromium/base/containers/ring_buffer.h b/chromium/base/containers/ring_buffer.h
index ca4a48ddc9e..5c4e0aeb1e9 100644
--- a/chromium/base/containers/ring_buffer.h
+++ b/chromium/base/containers/ring_buffer.h
@@ -7,8 +7,7 @@
#include <stddef.h>
-#include "base/logging.h"
-#include "base/macros.h"
+#include "base/check.h"
namespace base {
@@ -25,6 +24,8 @@ template <typename T, size_t kSize>
class RingBuffer {
public:
RingBuffer() : current_index_(0) {}
+ RingBuffer(const RingBuffer&) = delete;
+ RingBuffer& operator=(const RingBuffer&) = delete;
size_t BufferSize() const { return kSize; }
@@ -124,8 +125,6 @@ class RingBuffer {
T buffer_[kSize];
size_t current_index_;
-
- DISALLOW_COPY_AND_ASSIGN(RingBuffer);
};
} // namespace base
diff --git a/chromium/base/containers/small_map.h b/chromium/base/containers/small_map.h
index 50ce7646dfe..e4433d3012a 100644
--- a/chromium/base/containers/small_map.h
+++ b/chromium/base/containers/small_map.h
@@ -14,7 +14,7 @@
#include <unordered_map>
#include <utility>
-#include "base/logging.h"
+#include "base/check_op.h"
namespace {
constexpr size_t kUsingFullMapSentinel = std::numeric_limits<size_t>::max();
diff --git a/chromium/base/containers/span.h b/chromium/base/containers/span.h
index 5aa95aae582..b322c85904d 100644
--- a/chromium/base/containers/span.h
+++ b/chromium/base/containers/span.h
@@ -14,8 +14,8 @@
#include <type_traits>
#include <utility>
+#include "base/check_op.h"
#include "base/containers/checked_iterators.h"
-#include "base/logging.h"
#include "base/macros.h"
#include "base/stl_util.h"
#include "base/template_util.h"
diff --git a/chromium/base/containers/stack_container.h b/chromium/base/containers/stack_container.h
index 46ced5b1879..c58558e3944 100644
--- a/chromium/base/containers/stack_container.h
+++ b/chromium/base/containers/stack_container.h
@@ -9,7 +9,6 @@
#include <vector>
-#include "base/macros.h"
#include "build/build_config.h"
namespace base {
@@ -84,17 +83,15 @@ class StackAllocator : public std::allocator<T> {
// for Us.
// TODO: If we were fancy pants, perhaps we could share storage
// iff sizeof(T) == sizeof(U).
- template<typename U, size_t other_capacity>
+ template <typename U, size_t other_capacity>
StackAllocator(const StackAllocator<U, other_capacity>& other)
- : source_(NULL) {
- }
+ : source_(nullptr) {}
// This constructor must exist. It creates a default allocator that doesn't
// actually have a stack buffer. glibc's std::string() will compare the
// current allocator against the default-constructed allocator, so this
// should be fast.
- StackAllocator() : source_(NULL) {
- }
+ StackAllocator() : source_(nullptr) {}
explicit StackAllocator(Source* source) : source_(source) {
}
@@ -149,6 +146,8 @@ class StackContainer {
// before doing anything else.
container_.reserve(stack_capacity);
}
+ StackContainer(const StackContainer&) = delete;
+ StackContainer& operator=(const StackContainer&) = delete;
// Getters for the actual container.
//
@@ -177,9 +176,6 @@ class StackContainer {
typename Allocator::Source stack_data_;
Allocator allocator_;
ContainerType container_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(StackContainer);
};
// Range-based iteration support for StackContainer.
diff --git a/chromium/base/containers/stack_container_unittest.cc b/chromium/base/containers/stack_container_unittest.cc
index 7fa609556a7..e0f5dc5baeb 100644
--- a/chromium/base/containers/stack_container_unittest.cc
+++ b/chromium/base/containers/stack_container_unittest.cc
@@ -8,6 +8,7 @@
#include <algorithm>
+#include "base/memory/aligned_memory.h"
#include "base/memory/ref_counted.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -16,14 +17,14 @@ namespace base {
namespace {
-class Dummy : public base::RefCounted<Dummy> {
+class Dummy : public RefCounted<Dummy> {
public:
explicit Dummy(int* alive) : alive_(alive) {
++*alive_;
}
private:
- friend class base::RefCounted<Dummy>;
+ friend class RefCounted<Dummy>;
~Dummy() {
--*alive_;
@@ -110,31 +111,28 @@ class AlignedData {
alignas(alignment) char data_[alignment];
};
-} // anonymous namespace
-
-#define EXPECT_ALIGNED(ptr, align) \
- EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(ptr) & (align - 1))
+} // namespace
TEST(StackContainer, BufferAlignment) {
StackVector<wchar_t, 16> text;
text->push_back(L'A');
- EXPECT_ALIGNED(&text[0], alignof(wchar_t));
+ EXPECT_TRUE(IsAligned(&text[0], alignof(wchar_t)));
StackVector<double, 1> doubles;
doubles->push_back(0.0);
- EXPECT_ALIGNED(&doubles[0], alignof(double));
+ EXPECT_TRUE(IsAligned(&doubles[0], alignof(double)));
StackVector<AlignedData<16>, 1> aligned16;
aligned16->push_back(AlignedData<16>());
- EXPECT_ALIGNED(&aligned16[0], 16);
+ EXPECT_TRUE(IsAligned(&aligned16[0], 16));
#if !defined(__GNUC__) || defined(ARCH_CPU_X86_FAMILY)
// It seems that non-X86 gcc doesn't respect greater than 16 byte alignment.
// See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=33721 for details.
- // TODO(sbc):re-enable this if GCC starts respecting higher alignments.
+ // TODO(sbc): Re-enable this if GCC starts respecting higher alignments.
StackVector<AlignedData<256>, 1> aligned256;
aligned256->push_back(AlignedData<256>());
- EXPECT_ALIGNED(&aligned256[0], 256);
+ EXPECT_TRUE(IsAligned(&aligned256[0], 256));
#endif
}
diff --git a/chromium/base/containers/vector_buffer.h b/chromium/base/containers/vector_buffer.h
index 83cd2ac139e..019913cce23 100644
--- a/chromium/base/containers/vector_buffer.h
+++ b/chromium/base/containers/vector_buffer.h
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#ifndef BASE_CONTAINERS_VECTOR_BUFFERS_H_
-#define BASE_CONTAINERS_VECTOR_BUFFERS_H_
+#ifndef BASE_CONTAINERS_VECTOR_BUFFER_H_
+#define BASE_CONTAINERS_VECTOR_BUFFER_H_
#include <stdlib.h>
#include <string.h>
@@ -11,9 +11,8 @@
#include <type_traits>
#include <utility>
+#include "base/check_op.h"
#include "base/containers/util.h"
-#include "base/logging.h"
-#include "base/macros.h"
#include "base/numerics/checked_math.h"
namespace base {
@@ -57,6 +56,9 @@ class VectorBuffer {
other.capacity_ = 0;
}
+ VectorBuffer(const VectorBuffer&) = delete;
+ VectorBuffer& operator=(const VectorBuffer&) = delete;
+
~VectorBuffer() { free(buffer_); }
VectorBuffer& operator=(VectorBuffer&& other) {
@@ -178,11 +180,9 @@ class VectorBuffer {
T* buffer_ = nullptr;
size_t capacity_ = 0;
-
- DISALLOW_COPY_AND_ASSIGN(VectorBuffer);
};
} // namespace internal
} // namespace base
-#endif // BASE_CONTAINERS_VECTOR_BUFFERS_H_
+#endif // BASE_CONTAINERS_VECTOR_BUFFER_H_
diff --git a/chromium/base/dcheck_is_on.h b/chromium/base/dcheck_is_on.h
new file mode 100644
index 00000000000..ee352053cc6
--- /dev/null
+++ b/chromium/base/dcheck_is_on.h
@@ -0,0 +1,14 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_DCHECK_IS_ON_H_
+#define BASE_DCHECK_IS_ON_H_
+
+#if defined(NDEBUG) && !defined(DCHECK_ALWAYS_ON)
+#define DCHECK_IS_ON() false
+#else
+#define DCHECK_IS_ON() true
+#endif
+
+#endif // BASE_DCHECK_IS_ON_H_
diff --git a/chromium/base/debug/alias.h b/chromium/base/debug/alias.h
index cdd2372ea87..bd0904e5ab7 100644
--- a/chromium/base/debug/alias.h
+++ b/chromium/base/debug/alias.h
@@ -19,12 +19,15 @@ namespace debug {
// otherwise be live at the point of a potential crash. This can only be done
// with local variables, not globals, object members, or function return values
// - these must be copied to locals if you want to ensure they are recorded in
-// crash dumps. Note that if the local variable is a pointer then its value will
-// be retained but the memory that it points to will probably not be saved in
-// the crash dump - by default only stack memory is saved. Therefore the
-// aliasing technique is usually only worthwhile with non-pointer variables. If
-// you have a pointer to an object and you want to retain the object's state you
-// need to copy the object or its fields to local variables.
+// crash dumps. Function arguments are fine to use since the
+// base::debug::Alias() call on them will make sure they are copied to the stack
+// even if they were passed in a register. Note that if the local variable is a
+// pointer then its value will be retained but the memory that it points to will
+// probably not be saved in the crash dump - by default only stack memory is
+// saved. Therefore the aliasing technique is usually only worthwhile with
+// non-pointer variables. If you have a pointer to an object and you want to
+// retain the object's state you need to copy the object or its fields to local
+// variables.
//
// Example usage:
// int last_error = err_;
diff --git a/chromium/base/debug/debugger_posix.cc b/chromium/base/debug/debugger_posix.cc
index de383b80821..4636a1ea9b9 100644
--- a/chromium/base/debug/debugger_posix.cc
+++ b/chromium/base/debug/debugger_posix.cc
@@ -49,11 +49,11 @@
#include <ostream>
+#include "base/check.h"
#include "base/debug/alias.h"
#include "base/debug/debugging_buildflags.h"
#include "base/environment.h"
#include "base/files/file_util.h"
-#include "base/logging.h"
#include "base/posix/eintr_wrapper.h"
#include "base/process/process.h"
#include "base/strings/string_number_conversions.h"
diff --git a/chromium/base/debug/dump_without_crashing.cc b/chromium/base/debug/dump_without_crashing.cc
index 3e2fa17ca68..4aa94a15d80 100644
--- a/chromium/base/debug/dump_without_crashing.cc
+++ b/chromium/base/debug/dump_without_crashing.cc
@@ -5,6 +5,7 @@
#include "base/debug/dump_without_crashing.h"
#include "base/check.h"
+#include "base/trace_event/base_tracing.h"
namespace {
@@ -19,6 +20,7 @@ namespace base {
namespace debug {
bool DumpWithoutCrashing() {
+ TRACE_EVENT0("base", "DumpWithoutCrashing");
if (dump_without_crashing_function_) {
(*dump_without_crashing_function_)();
return true;
diff --git a/chromium/base/debug/leak_tracker.h b/chromium/base/debug/leak_tracker.h
index 7ddd5b62d1a..7013264101a 100644
--- a/chromium/base/debug/leak_tracker.h
+++ b/chromium/base/debug/leak_tracker.h
@@ -15,6 +15,7 @@
#endif
#ifdef ENABLE_LEAK_TRACKER
+#include "base/check_op.h"
#include "base/containers/linked_list.h"
#include "base/debug/stack_trace.h"
#include "base/logging.h"
diff --git a/chromium/base/feature_list.cc b/chromium/base/feature_list.cc
index 30d609505a7..3b8636c2849 100644
--- a/chromium/base/feature_list.cc
+++ b/chromium/base/feature_list.cc
@@ -321,6 +321,9 @@ void FeatureList::SetInstance(std::unique_ptr<FeatureList> instance) {
#if defined(DCHECK_IS_CONFIGURABLE)
// Update the behaviour of LOG_DCHECK to match the Feature configuration.
// DCHECK is also forced to be FATAL if we are running a death-test.
+ // TODO(crbug.com/1057995#c11): --gtest_internal_run_death_test doesn't
+ // currently run through this codepath, mitigated in
+ // base::TestSuite::Initialize() for now.
// TODO(asvitkine): If we find other use-cases that need integrating here
// then define a proper API/hook for the purpose.
if (FeatureList::IsEnabled(kDCheckIsFatalFeature) ||
diff --git a/chromium/base/files/file_descriptor_watcher_posix.h b/chromium/base/files/file_descriptor_watcher_posix.h
index 3230350efb6..51958d19ccb 100644
--- a/chromium/base/files/file_descriptor_watcher_posix.h
+++ b/chromium/base/files/file_descriptor_watcher_posix.h
@@ -9,7 +9,7 @@
#include "base/base_export.h"
#include "base/callback.h"
-#include "base/logging.h"
+#include "base/check_op.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
diff --git a/chromium/base/files/file_enumerator.h b/chromium/base/files/file_enumerator.h
index de702e1ca6c..b2bd5ab8dd9 100644
--- a/chromium/base/files/file_enumerator.h
+++ b/chromium/base/files/file_enumerator.h
@@ -37,9 +37,9 @@ namespace base {
//
// Example:
//
-// base::FileEnumerator enum(my_dir, false, base::FileEnumerator::FILES,
-// FILE_PATH_LITERAL("*.txt"));
-// for (base::FilePath name = enum.Next(); !name.empty(); name = enum.Next())
+// base::FileEnumerator e(my_dir, false, base::FileEnumerator::FILES,
+// FILE_PATH_LITERAL("*.txt"));
+// for (base::FilePath name = e.Next(); !name.empty(); name = e.Next())
// ...
class BASE_EXPORT FileEnumerator {
public:
diff --git a/chromium/base/files/file_path.cc b/chromium/base/files/file_path.cc
index f1a4b0a9bf6..56c02d2bfc7 100644
--- a/chromium/base/files/file_path.cc
+++ b/chromium/base/files/file_path.cc
@@ -40,20 +40,6 @@ const char* const kCommonDoubleExtensionSuffixes[] = {"gz", "xz", "bz2", "z",
"bz"};
const char* const kCommonDoubleExtensions[] = { "user.js" };
-// Compatibility shim for cross-platform code that passes a StringPieceType to a
-// string utility function. Most of these functions are only implemented for
-// base::StringPiece and base::StringPiece16, which is why base::WStringPieces
-// need to be converted.
-#if defined(OS_WIN)
-StringPiece16 AsCommonStringPiece(WStringPiece str) {
- return AsStringPiece16(str);
-}
-#else
-StringPiece AsCommonStringPiece(StringPiece str) {
- return str;
-}
-#endif
-
const FilePath::CharType kStringTerminator = FILE_PATH_LITERAL('\0');
// If this FilePath contains a drive letter specification, returns the
@@ -84,8 +70,7 @@ bool EqualDriveLetterCaseInsensitive(StringPieceType a, StringPieceType b) {
StringPieceType a_letter(a.substr(0, a_letter_pos + 1));
StringPieceType b_letter(b.substr(0, b_letter_pos + 1));
- if (!StartsWith(AsCommonStringPiece(a_letter), AsCommonStringPiece(b_letter),
- CompareCase::INSENSITIVE_ASCII))
+ if (!StartsWith(a_letter, b_letter, CompareCase::INSENSITIVE_ASCII))
return false;
StringPieceType a_rest(a.substr(a_letter_pos + 1));
@@ -156,13 +141,13 @@ StringType::size_type ExtensionSeparatorPosition(const StringType& path) {
for (auto* i : kCommonDoubleExtensions) {
StringType extension(path, penultimate_dot + 1);
- if (LowerCaseEqualsASCII(AsCommonStringPiece(extension), i))
+ if (LowerCaseEqualsASCII(extension, i))
return penultimate_dot;
}
StringType extension(path, last_dot + 1);
for (auto* i : kCommonDoubleExtensionSuffixes) {
- if (LowerCaseEqualsASCII(AsCommonStringPiece(extension), i)) {
+ if (LowerCaseEqualsASCII(extension, i)) {
if ((last_dot - penultimate_dot) <= 5U &&
(last_dot - penultimate_dot) > 1U) {
return penultimate_dot;
@@ -201,7 +186,7 @@ FilePath::~FilePath() = default;
FilePath& FilePath::operator=(const FilePath& that) = default;
-FilePath& FilePath::operator=(FilePath&& that) = default;
+FilePath& FilePath::operator=(FilePath&& that) noexcept = default;
bool FilePath::operator==(const FilePath& that) const {
#if defined(FILE_PATH_USES_DRIVE_LETTERS)
@@ -295,9 +280,7 @@ bool FilePath::AppendRelativePath(const FilePath& child,
// never case sensitive.
if ((FindDriveLetter(*parent_comp) != StringType::npos) &&
(FindDriveLetter(*child_comp) != StringType::npos)) {
- if (!StartsWith(AsCommonStringPiece(*parent_comp),
- AsCommonStringPiece(*child_comp),
- CompareCase::INSENSITIVE_ASCII))
+ if (!StartsWith(*parent_comp, *child_comp, CompareCase::INSENSITIVE_ASCII))
return false;
++parent_comp;
++child_comp;
@@ -613,21 +596,19 @@ bool FilePath::ReferencesParent() const {
#if defined(OS_WIN)
string16 FilePath::LossyDisplayName() const {
- return string16(as_u16cstr(path_.data()), path_.size());
+ return AsString16(path_);
}
std::string FilePath::MaybeAsASCII() const {
- if (base::IsStringASCII(AsCommonStringPiece(path_)))
- return UTF16ToASCII(AsCommonStringPiece(path_));
- return std::string();
+ return base::IsStringASCII(path_) ? WideToASCII(path_) : std::string();
}
std::string FilePath::AsUTF8Unsafe() const {
- return UTF16ToUTF8(AsCommonStringPiece(value()));
+ return WideToUTF8(value());
}
string16 FilePath::AsUTF16Unsafe() const {
- return string16(AsCommonStringPiece(value()));
+ return WideToUTF16(value());
}
// static
@@ -637,7 +618,7 @@ FilePath FilePath::FromUTF8Unsafe(StringPiece utf8) {
// static
FilePath FilePath::FromUTF16Unsafe(StringPiece16 utf16) {
- return FilePath(WStringPiece(as_wcstr(utf16.data()), utf16.size()));
+ return FilePath(AsWStringPiece(utf16));
}
#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
@@ -693,7 +674,7 @@ FilePath FilePath::FromUTF16Unsafe(StringPiece16 utf16) {
void FilePath::WriteToPickle(Pickle* pickle) const {
#if defined(OS_WIN)
- pickle->WriteString16(AsCommonStringPiece(path_));
+ pickle->WriteString16(AsStringPiece16(path_));
#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
pickle->WriteString(path_);
#else
diff --git a/chromium/base/files/file_path.h b/chromium/base/files/file_path.h
index 4e23f71a92a..2160fdaca31 100644
--- a/chromium/base/files/file_path.h
+++ b/chromium/base/files/file_path.h
@@ -193,7 +193,7 @@ class BASE_EXPORT FilePath {
FilePath(FilePath&& that) noexcept;
// Replaces the contents with those of |that|, which is left in valid but
// unspecified state.
- FilePath& operator=(FilePath&& that);
+ FilePath& operator=(FilePath&& that) noexcept;
bool operator==(const FilePath& that) const;
diff --git a/chromium/base/files/file_path_watcher_linux.cc b/chromium/base/files/file_path_watcher_linux.cc
index 563e0806758..7a04ac7dfef 100644
--- a/chromium/base/files/file_path_watcher_linux.cc
+++ b/chromium/base/files/file_path_watcher_linux.cc
@@ -38,7 +38,7 @@
#include "base/threading/platform_thread.h"
#include "base/threading/scoped_blocking_call.h"
#include "base/threading/sequenced_task_runner_handle.h"
-#include "base/trace_event/trace_event.h"
+#include "base/trace_event/base_tracing.h"
namespace base {
diff --git a/chromium/base/files/file_path_watcher_unittest.cc b/chromium/base/files/file_path_watcher_unittest.cc
index ccefc27666b..055e0af45d9 100644
--- a/chromium/base/files/file_path_watcher_unittest.cc
+++ b/chromium/base/files/file_path_watcher_unittest.cc
@@ -20,6 +20,7 @@
#include "base/files/file_util.h"
#include "base/files/scoped_temp_dir.h"
#include "base/location.h"
+#include "base/logging.h"
#include "base/macros.h"
#include "base/run_loop.h"
#include "base/single_thread_task_runner.h"
diff --git a/chromium/base/files/file_util.cc b/chromium/base/files/file_util.cc
index 546934b7084..27a2e428d54 100644
--- a/chromium/base/files/file_util.cc
+++ b/chromium/base/files/file_util.cc
@@ -27,6 +27,22 @@
namespace base {
#if !defined(OS_NACL_NONSFI)
+namespace {
+
+void DeleteFileHelper(const FilePath& path) {
+ DeleteFile(path, /*recursive=*/false);
+}
+
+} // namespace
+
+OnceCallback<void(const FilePath&)> GetDeleteFileCallback() {
+ return BindOnce(&DeleteFileHelper);
+}
+
+OnceCallback<void(const FilePath&)> GetDeletePathRecursivelyCallback() {
+ return BindOnce(IgnoreResult(&DeleteFileRecursively));
+}
+
int64_t ComputeDirectorySize(const FilePath& root_path) {
int64_t running_size = 0;
FileEnumerator file_iter(root_path, true, FileEnumerator::FILES);
diff --git a/chromium/base/files/file_util.h b/chromium/base/files/file_util.h
index 76bd98767f5..c9c2470942f 100644
--- a/chromium/base/files/file_util.h
+++ b/chromium/base/files/file_util.h
@@ -23,6 +23,7 @@
#endif
#include "base/base_export.h"
+#include "base/callback_forward.h"
#include "base/containers/span.h"
#include "base/files/file.h"
#include "base/files/file_path.h"
@@ -34,7 +35,6 @@
#include "base/win/windows_types.h"
#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
#include "base/file_descriptor_posix.h"
-#include "base/logging.h"
#include "base/posix/eintr_wrapper.h"
#endif
@@ -59,33 +59,54 @@ BASE_EXPORT FilePath MakeAbsoluteFilePath(const FilePath& input);
BASE_EXPORT int64_t ComputeDirectorySize(const FilePath& root_path);
// Deletes the given path, whether it's a file or a directory.
+// If it's a directory, it's perfectly happy to delete all of the directory's
+// contents, but it will not recursively delete subdirectories and their
+// contents.
+// Returns true if successful, false otherwise. It is considered successful to
+// attempt to delete a file that does not exist.
+//
+// In POSIX environment and if |path| is a symbolic link, this deletes only
+// the symlink. (even if the symlink points to a non-existent file)
+BASE_EXPORT bool DeleteFile(const FilePath& path);
+
+// Deletes the given path, whether it's a file or a directory.
// If it's a directory, it's perfectly happy to delete all of the
-// directory's contents. Passing true to recursive deletes
-// subdirectories and their contents as well.
+// directory's contents, including subdirectories and their contents.
// Returns true if successful, false otherwise. It is considered successful
// to attempt to delete a file that does not exist.
//
// In POSIX environment and if |path| is a symbolic link, this deletes only
// the symlink. (even if the symlink points to a non-existent file)
//
-// WARNING: USING THIS WITH recursive==true IS EQUIVALENT
-// TO "rm -rf", SO USE WITH CAUTION.
-//
-// Note: The |recursive| parameter is in the process of being removed. Use
-// DeleteFileRecursively() instead. See https://crbug.com/1009837
-BASE_EXPORT bool DeleteFile(const FilePath& path, bool recursive);
+// WARNING: USING THIS EQUIVALENT TO "rm -rf", SO USE WITH CAUTION.
+// TODO(thestig): Rename to DeletePathRecursively().
+BASE_EXPORT bool DeleteFileRecursively(const FilePath& path);
+// DEPRECATED. Please use the functions immediately above.
+// https://crbug.com/1009837
+//
// Deletes the given path, whether it's a file or a directory.
// If it's a directory, it's perfectly happy to delete all of the
-// directory's contents, including subdirectories and their contents.
+// directory's contents. Passing true to recursively delete
+// subdirectories and their contents as well.
// Returns true if successful, false otherwise. It is considered successful
// to attempt to delete a file that does not exist.
//
// In POSIX environment and if |path| is a symbolic link, this deletes only
// the symlink. (even if the symlink points to a non-existent file)
//
-// WARNING: USING THIS EQUIVALENT TO "rm -rf", SO USE WITH CAUTION.
-BASE_EXPORT bool DeleteFileRecursively(const FilePath& path);
+// WARNING: USING THIS WITH recursive==true IS EQUIVALENT
+// TO "rm -rf", SO USE WITH CAUTION.
+BASE_EXPORT bool DeleteFile(const FilePath& path, bool recursive);
+
+// Simplified way to get a callback to do DeleteFile(path) and ignore the
+// DeleteFile() result.
+BASE_EXPORT OnceCallback<void(const FilePath&)> GetDeleteFileCallback();
+
+// Simplified way to get a callback to do DeleteFileRecursively(path) and ignore
+// the DeleteFileRecursively() result.
+BASE_EXPORT OnceCallback<void(const FilePath&)>
+GetDeletePathRecursivelyCallback();
#if defined(OS_WIN)
// Schedules to delete the given path, whether it's a file or a directory, until
@@ -223,6 +244,15 @@ BASE_EXPORT ScopedFD CreateAndOpenFdForTemporaryFileInDir(const FilePath& dir,
#if defined(OS_POSIX)
+// ReadFileToStringNonBlocking is identical to ReadFileToString except it
+// guarantees that it will not block. This guarantee is provided on POSIX by
+// opening the file as O_NONBLOCK. This variant should only be used on files
+// which are guaranteed not to block (such as kernel files). Or in situations
+// where a partial read would be acceptable because the backing store returned
+// EWOULDBLOCK.
+BASE_EXPORT bool ReadFileToStringNonBlocking(const base::FilePath& file,
+ std::string* ret);
+
// Creates a symbolic link at |symlink| pointing to |target|. Returns
// false on failure.
BASE_EXPORT bool CreateSymbolicLink(const FilePath& target,
diff --git a/chromium/base/files/file_util_posix.cc b/chromium/base/files/file_util_posix.cc
index 91514251d9c..dea1a945826 100644
--- a/chromium/base/files/file_util_posix.cc
+++ b/chromium/base/files/file_util_posix.cc
@@ -367,14 +367,18 @@ FilePath MakeAbsoluteFilePath(const FilePath& input) {
return FilePath(full_path);
}
-bool DeleteFile(const FilePath& path, bool recursive) {
- return DoDeleteFile(path, recursive);
+bool DeleteFile(const FilePath& path) {
+ return DoDeleteFile(path, /*recursive=*/false);
}
bool DeleteFileRecursively(const FilePath& path) {
return DoDeleteFile(path, /*recursive=*/true);
}
+bool DeleteFile(const FilePath& path, bool recursive) {
+ return DoDeleteFile(path, recursive);
+}
+
bool ReplaceFile(const FilePath& from_path,
const FilePath& to_path,
File::Error* error) {
@@ -736,6 +740,37 @@ bool CreateDirectoryAndGetError(const FilePath& full_path,
return true;
}
+// ReadFileToStringNonBlockingNonBlocking will read a file to a string. This
+// method should only be used on files which are known to be non-blocking such
+// as procfs or sysfs nodes. Additionally, the file is opened as O_NONBLOCK so
+// it WILL NOT block even if opened on a blocking file. It will return true if
+// the file read until EOF and it will return false otherwise, errno will remain
+// set on error conditions. |ret| will be populated with the contents of the
+// file.
+bool ReadFileToStringNonBlocking(const base::FilePath& file, std::string* ret) {
+ DCHECK(ret);
+ ret->clear();
+
+ base::ScopedFD fd(HANDLE_EINTR(
+ open(file.MaybeAsASCII().c_str(), O_CLOEXEC | O_NONBLOCK | O_RDONLY)));
+ if (!fd.is_valid()) {
+ return false;
+ }
+
+ ssize_t bytes_read = 0;
+ do {
+ char buf[4096];
+ bytes_read = HANDLE_EINTR(read(fd.get(), buf, sizeof(buf)));
+ if (bytes_read < 0) {
+ return false;
+ } else if (bytes_read > 0) {
+ ret->append(buf, bytes_read);
+ }
+ } while (bytes_read > 0);
+
+ return true;
+}
+
bool NormalizeFilePath(const FilePath& path, FilePath* normalized_path) {
FilePath real_path_result = MakeAbsoluteFilePath(path);
if (real_path_result.empty())
diff --git a/chromium/base/files/file_util_unittest.cc b/chromium/base/files/file_util_unittest.cc
index 2ade5709855..701f8b4b6cd 100644
--- a/chromium/base/files/file_util_unittest.cc
+++ b/chromium/base/files/file_util_unittest.cc
@@ -28,6 +28,7 @@
#include "base/files/scoped_file.h"
#include "base/files/scoped_temp_dir.h"
#include "base/guid.h"
+#include "base/logging.h"
#include "base/path_service.h"
#include "base/stl_util.h"
#include "base/strings/string_util.h"
@@ -738,11 +739,11 @@ TEST_F(FileUtilTest, MakeLongFilePathTest) {
EXPECT_EQ(long_test_file, MakeLongFilePath(short_test_file));
// MakeLongFilePath should return empty path if file does not exist.
- EXPECT_TRUE(DeleteFile(short_test_file, false));
+ EXPECT_TRUE(DeleteFile(short_test_file));
EXPECT_TRUE(MakeLongFilePath(short_test_file).empty());
// MakeLongFilePath should return empty path if directory does not exist.
- EXPECT_TRUE(DeleteFile(short_test_dir, false));
+ EXPECT_TRUE(DeleteFile(short_test_dir));
EXPECT_TRUE(MakeLongFilePath(short_test_dir).empty());
}
@@ -850,7 +851,7 @@ TEST_F(FileUtilTest, DeleteSymlinkToExistentFile) {
<< "Failed to create symlink.";
// Delete the symbolic link.
- EXPECT_TRUE(DeleteFile(file_link, false));
+ EXPECT_TRUE(DeleteFile(file_link));
// Make sure original file is not deleted.
EXPECT_FALSE(PathExists(file_link));
@@ -873,7 +874,7 @@ TEST_F(FileUtilTest, DeleteSymlinkToNonExistentFile) {
EXPECT_FALSE(PathExists(file_link));
// Delete the symbolic link.
- EXPECT_TRUE(DeleteFile(file_link, false));
+ EXPECT_TRUE(DeleteFile(file_link));
// Make sure the symbolic link is deleted.
EXPECT_FALSE(IsLink(file_link));
@@ -941,7 +942,7 @@ TEST_F(FileUtilTest, ChangeFilePermissionsAndRead) {
EXPECT_EQ(kDataSize, ReadFile(file_name, buffer, kDataSize));
// Delete the file.
- EXPECT_TRUE(DeleteFile(file_name, false));
+ EXPECT_TRUE(DeleteFile(file_name));
EXPECT_FALSE(PathExists(file_name));
}
@@ -981,7 +982,7 @@ TEST_F(FileUtilTest, ChangeFilePermissionsAndWrite) {
EXPECT_TRUE(PathIsWritable(file_name));
// Delete the file.
- EXPECT_TRUE(DeleteFile(file_name, false));
+ EXPECT_TRUE(DeleteFile(file_name));
EXPECT_FALSE(PathExists(file_name));
}
@@ -1291,7 +1292,7 @@ TEST_F(FileUtilTest, CopyFileExecutablePermission) {
expected_mode = 0600;
#endif
EXPECT_EQ(expected_mode, mode);
- ASSERT_TRUE(DeleteFile(dst, false));
+ ASSERT_TRUE(DeleteFile(dst));
ASSERT_TRUE(SetPosixFilePermissions(src, 0777));
ASSERT_TRUE(GetPosixFilePermissions(src, &mode));
@@ -1309,7 +1310,7 @@ TEST_F(FileUtilTest, CopyFileExecutablePermission) {
expected_mode = 0600;
#endif
EXPECT_EQ(expected_mode, mode);
- ASSERT_TRUE(DeleteFile(dst, false));
+ ASSERT_TRUE(DeleteFile(dst));
ASSERT_TRUE(SetPosixFilePermissions(src, 0400));
ASSERT_TRUE(GetPosixFilePermissions(src, &mode));
@@ -1403,7 +1404,7 @@ TEST_F(FileUtilTest, DeleteNonExistent) {
temp_dir_.GetPath().AppendASCII("bogus_file_dne.foobar");
ASSERT_FALSE(PathExists(non_existent));
- EXPECT_TRUE(DeleteFile(non_existent, false));
+ EXPECT_TRUE(DeleteFile(non_existent));
ASSERT_FALSE(PathExists(non_existent));
EXPECT_TRUE(DeleteFileRecursively(non_existent));
ASSERT_FALSE(PathExists(non_existent));
@@ -1414,7 +1415,7 @@ TEST_F(FileUtilTest, DeleteNonExistentWithNonExistentParent) {
non_existent = non_existent.AppendASCII("bogus_subdir");
ASSERT_FALSE(PathExists(non_existent));
- EXPECT_TRUE(DeleteFile(non_existent, false));
+ EXPECT_TRUE(DeleteFile(non_existent));
ASSERT_FALSE(PathExists(non_existent));
EXPECT_TRUE(DeleteFileRecursively(non_existent));
ASSERT_FALSE(PathExists(non_existent));
@@ -1427,7 +1428,7 @@ TEST_F(FileUtilTest, DeleteFile) {
ASSERT_TRUE(PathExists(file_name));
// Make sure it's deleted
- EXPECT_TRUE(DeleteFile(file_name, false));
+ EXPECT_TRUE(DeleteFile(file_name));
EXPECT_FALSE(PathExists(file_name));
// Test recursive case, create a new file
@@ -1461,7 +1462,7 @@ TEST_F(FileUtilTest, DeleteContentUri) {
ASSERT_TRUE(PathExists(uri_path));
// Try deleting the content URI.
- EXPECT_TRUE(DeleteFile(uri_path, false));
+ EXPECT_TRUE(DeleteFile(uri_path));
EXPECT_FALSE(PathExists(image_copy));
EXPECT_FALSE(PathExists(uri_path));
}
@@ -1487,7 +1488,7 @@ TEST_F(FileUtilTest, DeleteWildCard) {
directory_contents = directory_contents.Append(FPL("*"));
// Delete non-recursively and check that only the file is deleted
- EXPECT_TRUE(DeleteFile(directory_contents, false));
+ EXPECT_TRUE(DeleteFile(directory_contents));
EXPECT_FALSE(PathExists(file_name));
EXPECT_TRUE(PathExists(subdir_path));
@@ -1510,7 +1511,7 @@ TEST_F(FileUtilTest, DeleteNonExistantWildCard) {
directory_contents = directory_contents.Append(FPL("*"));
// Delete non-recursively and check nothing got deleted
- EXPECT_TRUE(DeleteFile(directory_contents, false));
+ EXPECT_TRUE(DeleteFile(directory_contents));
EXPECT_TRUE(PathExists(subdir_path));
// Delete recursively and check nothing got deleted
@@ -1540,11 +1541,11 @@ TEST_F(FileUtilTest, DeleteDirNonRecursive) {
ASSERT_TRUE(PathExists(subdir_path2));
// Delete non-recursively and check that the empty dir got deleted
- EXPECT_TRUE(DeleteFile(subdir_path2, false));
+ EXPECT_TRUE(DeleteFile(subdir_path2));
EXPECT_FALSE(PathExists(subdir_path2));
// Delete non-recursively and check that nothing got deleted
- EXPECT_FALSE(DeleteFile(test_subdir, false));
+ EXPECT_FALSE(DeleteFile(test_subdir));
EXPECT_TRUE(PathExists(test_subdir));
EXPECT_TRUE(PathExists(file_name));
EXPECT_TRUE(PathExists(subdir_path1));
@@ -1632,6 +1633,41 @@ TEST_F(FileUtilTest, DeleteDirRecursiveWithOpenFile) {
#endif
}
+#if defined(OS_LINUX)
+// This test will validate that files which would block when read result in a
+// failure on a call to ReadFileToStringNonBlocking. To accomplish this we will
+// use a named pipe because it appears as a file on disk and we can control how
+// much data is available to read. This allows us to simulate a file which would
+// block.
+TEST_F(FileUtilTest, TestNonBlockingFileReadLinux) {
+ FilePath fifo_path = temp_dir_.GetPath().Append(FPL("fifo"));
+ int res = mkfifo(fifo_path.MaybeAsASCII().c_str(),
+ S_IWUSR | S_IRUSR | S_IWGRP | S_IWGRP);
+ ASSERT_NE(res, -1);
+
+ base::ScopedFD fd(open(fifo_path.MaybeAsASCII().c_str(), O_RDWR));
+ ASSERT_TRUE(fd.is_valid());
+
+ std::string result;
+ // We will try to read when nothing is available on the fifo, the output
+ // string will be unmodified and it will fail with EWOULDBLOCK.
+ ASSERT_FALSE(ReadFileToStringNonBlocking(fifo_path, &result));
+ EXPECT_EQ(errno, EWOULDBLOCK);
+ EXPECT_TRUE(result.empty());
+
+ // Make a single byte available to read on the FIFO.
+ ASSERT_EQ(write(fd.get(), "a", 1), 1);
+
+ // Now the key part of the test we will call ReadFromFileNonBlocking which
+ // should fail, errno will be EWOULDBLOCK and the output string will contain
+ // the single 'a' byte.
+ ASSERT_FALSE(ReadFileToStringNonBlocking(fifo_path, &result));
+ EXPECT_EQ(errno, EWOULDBLOCK);
+ ASSERT_EQ(result.size(), 1u);
+ EXPECT_EQ(result[0], 'a');
+}
+#endif // defined(OS_LINUX)
+
TEST_F(FileUtilTest, MoveFileNew) {
// Create a file
FilePath file_name_from =
@@ -2223,7 +2259,7 @@ TEST_F(FileUtilTest, CopyDirectoryExclFileOverDanglingSymlink) {
dir_name_to.Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
ASSERT_TRUE(CreateSymbolicLink(symlink_target, symlink_name_to));
ASSERT_TRUE(PathExists(symlink_name_to));
- ASSERT_TRUE(DeleteFile(symlink_target, false));
+ ASSERT_TRUE(DeleteFile(symlink_target));
// Check that copying fails and that no file was created for the symlink's
// referent.
@@ -2258,7 +2294,7 @@ TEST_F(FileUtilTest, CopyDirectoryExclDirectoryOverDanglingSymlink) {
dir_name_to.Append(FILE_PATH_LITERAL("Copy_Test_File.txt"));
ASSERT_TRUE(CreateSymbolicLink(symlink_target, symlink_name_to));
ASSERT_TRUE(PathExists(symlink_name_to));
- ASSERT_TRUE(DeleteFile(symlink_target, false));
+ ASSERT_TRUE(DeleteFile(symlink_target));
// Check that copying fails and that no directory was created for the
// symlink's referent.
@@ -2533,7 +2569,7 @@ TEST_F(FileUtilTest, OpenFileNoInheritance) {
ASSERT_NO_FATAL_FAILURE(GetIsInheritable(file, &is_inheritable));
EXPECT_FALSE(is_inheritable);
}
- ASSERT_TRUE(DeleteFile(file_path, false));
+ ASSERT_TRUE(DeleteFile(file_path));
}
}
@@ -2565,7 +2601,7 @@ TEST_F(FileUtilTest, CreateTemporaryFileTest) {
for (int i = 0; i < 3; i++)
EXPECT_FALSE(temp_files[i] == temp_files[(i+1)%3]);
for (const auto& i : temp_files)
- EXPECT_TRUE(DeleteFile(i, false));
+ EXPECT_TRUE(DeleteFile(i));
}
TEST_F(FileUtilTest, CreateAndOpenTemporaryStreamTest) {
@@ -2588,7 +2624,7 @@ TEST_F(FileUtilTest, CreateAndOpenTemporaryStreamTest) {
// Close and delete.
for (i = 0; i < 3; ++i) {
fps[i].reset();
- EXPECT_TRUE(DeleteFile(names[i], false));
+ EXPECT_TRUE(DeleteFile(names[i]));
}
}
@@ -2666,7 +2702,7 @@ TEST_F(FileUtilTest, CreateNewTempDirectoryTest) {
FilePath temp_dir;
ASSERT_TRUE(CreateNewTempDirectory(FilePath::StringType(), &temp_dir));
EXPECT_TRUE(PathExists(temp_dir));
- EXPECT_TRUE(DeleteFile(temp_dir, false));
+ EXPECT_TRUE(DeleteFile(temp_dir));
}
TEST_F(FileUtilTest, CreateNewTemporaryDirInDirTest) {
@@ -2676,7 +2712,7 @@ TEST_F(FileUtilTest, CreateNewTemporaryDirInDirTest) {
&new_dir));
EXPECT_TRUE(PathExists(new_dir));
EXPECT_TRUE(temp_dir_.GetPath().IsParent(new_dir));
- EXPECT_TRUE(DeleteFile(new_dir, false));
+ EXPECT_TRUE(DeleteFile(new_dir));
}
#if defined(OS_POSIX) || defined(OS_FUCHSIA)
@@ -2836,7 +2872,7 @@ TEST_F(FileUtilTest, DetectDirectoryTest) {
CreateTextFile(test_path, L"test file");
EXPECT_TRUE(PathExists(test_path));
EXPECT_FALSE(DirectoryExists(test_path));
- EXPECT_TRUE(DeleteFile(test_path, false));
+ EXPECT_TRUE(DeleteFile(test_path));
EXPECT_TRUE(DeleteFileRecursively(test_root));
}
@@ -3105,7 +3141,7 @@ TEST_F(FileUtilTest, ReadFileToString) {
EXPECT_EQ(0u, data.length());
// Delete test file.
- EXPECT_TRUE(DeleteFile(file_path, false));
+ EXPECT_TRUE(DeleteFile(file_path));
data = "temp";
EXPECT_FALSE(ReadFileToString(file_path, &data));
@@ -4153,7 +4189,7 @@ TEST(FileUtilMultiThreadedTest, MAYBE_MultiThreadedTempFiles) {
EXPECT_EQ(content, output_file_contents);
- DeleteFile(output_filename, false);
+ DeleteFile(output_filename);
});
// Post tasks to each thread in a round-robin fashion to ensure as much
diff --git a/chromium/base/files/file_util_win.cc b/chromium/base/files/file_util_win.cc
index e0ac2598455..4399e1ae3bb 100644
--- a/chromium/base/files/file_util_win.cc
+++ b/chromium/base/files/file_util_win.cc
@@ -382,14 +382,18 @@ FilePath MakeAbsoluteFilePath(const FilePath& input) {
return FilePath(file_path);
}
-bool DeleteFile(const FilePath& path, bool recursive) {
- return DeleteFileAndRecordMetrics(path, recursive);
+bool DeleteFile(const FilePath& path) {
+ return DeleteFileAndRecordMetrics(path, /*recursive=*/false);
}
bool DeleteFileRecursively(const FilePath& path) {
return DeleteFileAndRecordMetrics(path, /*recursive=*/true);
}
+bool DeleteFile(const FilePath& path, bool recursive) {
+ return DeleteFileAndRecordMetrics(path, recursive);
+}
+
bool DeleteFileAfterReboot(const FilePath& path) {
ScopedBlockingCall scoped_blocking_call(FROM_HERE, BlockingType::MAY_BLOCK);
diff --git a/chromium/base/files/important_file_writer.cc b/chromium/base/files/important_file_writer.cc
index 79921df5c1f..c956fd351a1 100644
--- a/chromium/base/files/important_file_writer.cc
+++ b/chromium/base/files/important_file_writer.cc
@@ -30,6 +30,7 @@
#include "base/strings/string_util.h"
#include "base/task_runner.h"
#include "base/task_runner_util.h"
+#include "base/threading/platform_thread.h"
#include "base/threading/sequenced_task_runner_handle.h"
#include "base/threading/thread.h"
#include "base/time/time.h"
@@ -258,20 +259,32 @@ bool ImportantFileWriter::WriteFileAtomicallyImpl(const FilePath& path,
// The file must be closed for ReplaceFile to do its job, which opens up a
// race with other software that may open the temp file (e.g., an A/V scanner
- // doing its job without oplocks). Close as late as possible to improve the
- // chances that the other software will lose the race.
+ // doing its job without oplocks). Boost a background thread's priority on
+ // Windows and close as late as possible to improve the chances that the other
+ // software will lose the race.
+#if defined(OS_WIN)
+ const auto previous_priority = PlatformThread::GetCurrentThreadPriority();
+ const bool reset_priority = previous_priority <= ThreadPriority::NORMAL;
+ if (reset_priority)
+ PlatformThread::SetCurrentThreadPriority(ThreadPriority::DISPLAY);
+#endif // defined(OS_WIN)
tmp_file.Close();
- if (!ReplaceFile(tmp_file_path, path, &replace_file_error)) {
+ const bool result = ReplaceFile(tmp_file_path, path, &replace_file_error);
+#if defined(OS_WIN)
+ if (reset_priority)
+ PlatformThread::SetCurrentThreadPriority(previous_priority);
+#endif // defined(OS_WIN)
+
+ if (!result) {
UmaHistogramExactLinearWithSuffix("ImportantFile.FileRenameError",
histogram_suffix, -replace_file_error,
-File::FILE_ERROR_MAX);
LogFailure(path, histogram_suffix, FAILED_RENAMING,
"could not rename temporary file");
DeleteTmpFileWithRetry(File(), tmp_file_path, histogram_suffix);
- return false;
}
- return true;
+ return result;
}
ImportantFileWriter::ImportantFileWriter(
diff --git a/chromium/base/files/important_file_writer_cleaner_unittest.cc b/chromium/base/files/important_file_writer_cleaner_unittest.cc
index 6b0d6a5e746..d8121f44759 100644
--- a/chromium/base/files/important_file_writer_cleaner_unittest.cc
+++ b/chromium/base/files/important_file_writer_cleaner_unittest.cc
@@ -4,11 +4,11 @@
#include "base/files/important_file_writer_cleaner.h"
+#include "base/check.h"
#include "base/files/file.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/files/scoped_temp_dir.h"
-#include "base/logging.h"
#include "base/optional.h"
#include "base/process/process.h"
#include "base/strings/stringprintf.h"
diff --git a/chromium/base/files/memory_mapped_file.cc b/chromium/base/files/memory_mapped_file.cc
index 6c8a0d9d581..30aefa1e639 100644
--- a/chromium/base/files/memory_mapped_file.cc
+++ b/chromium/base/files/memory_mapped_file.cc
@@ -8,6 +8,7 @@
#include "base/files/file_path.h"
#include "base/logging.h"
+#include "base/notreached.h"
#include "base/numerics/safe_math.h"
#include "base/system/sys_info.h"
#include "build/build_config.h"
diff --git a/chromium/base/files/memory_mapped_file_win.cc b/chromium/base/files/memory_mapped_file_win.cc
index cb43a879769..d5fece0e510 100644
--- a/chromium/base/files/memory_mapped_file_win.cc
+++ b/chromium/base/files/memory_mapped_file_win.cc
@@ -10,6 +10,7 @@
#include <limits>
#include "base/files/file_path.h"
+#include "base/logging.h"
#include "base/strings/string16.h"
#include "base/threading/scoped_blocking_call.h"
#include "base/win/pe_image.h"
diff --git a/chromium/base/files/scoped_file.h b/chromium/base/files/scoped_file.h
index 78fee9b7944..ee49fd7e655 100644
--- a/chromium/base/files/scoped_file.h
+++ b/chromium/base/files/scoped_file.h
@@ -10,7 +10,6 @@
#include <memory>
#include "base/base_export.h"
-#include "base/logging.h"
#include "base/scoped_generic.h"
#include "build/build_config.h"
diff --git a/chromium/base/fuchsia/default_context.h b/chromium/base/fuchsia/default_context.h
index 4484f10f910..d45b68cc82b 100644
--- a/chromium/base/fuchsia/default_context.h
+++ b/chromium/base/fuchsia/default_context.h
@@ -16,8 +16,12 @@ class ComponentContext;
namespace base {
namespace fuchsia {
+
+// TODO(https://crbug.com/1090364): Remove this file when external dependencies
+// have been migrated to process_context.
// Returns default sys::ComponentContext for the current process.
BASE_EXPORT sys::ComponentContext* ComponentContextForCurrentProcess();
+
} // namespace fuchsia
// Replaces the default sys::ComponentContext for the current process, and
diff --git a/chromium/base/fuchsia/intl_profile_watcher.cc b/chromium/base/fuchsia/intl_profile_watcher.cc
index 7b16517f339..27247f87a2a 100644
--- a/chromium/base/fuchsia/intl_profile_watcher.cc
+++ b/chromium/base/fuchsia/intl_profile_watcher.cc
@@ -9,8 +9,8 @@
#include <string>
#include <vector>
-#include "base/fuchsia/default_context.h"
#include "base/fuchsia/fuchsia_logging.h"
+#include "base/fuchsia/process_context.h"
#include "base/strings/string_piece.h"
using ::fuchsia::intl::Profile;
@@ -19,7 +19,7 @@ namespace base {
namespace fuchsia {
IntlProfileWatcher::IntlProfileWatcher(ProfileChangeCallback on_profile_changed)
- : IntlProfileWatcher(ComponentContextForCurrentProcess()
+ : IntlProfileWatcher(ComponentContextForProcess()
->svc()
->Connect<::fuchsia::intl::PropertyProvider>(),
on_profile_changed) {}
@@ -66,7 +66,7 @@ std::string IntlProfileWatcher::GetPrimaryTimeZoneIdFromProfile(
// static
std::string IntlProfileWatcher::GetPrimaryTimeZoneIdForIcuInitialization() {
::fuchsia::intl::PropertyProviderSyncPtr provider;
- ComponentContextForCurrentProcess()->svc()->Connect(provider.NewRequest());
+ ComponentContextForProcess()->svc()->Connect(provider.NewRequest());
return GetPrimaryTimeZoneIdFromPropertyProvider(std::move(provider));
}
diff --git a/chromium/base/fuchsia/process_context.cc b/chromium/base/fuchsia/process_context.cc
new file mode 100644
index 00000000000..02dfa43be75
--- /dev/null
+++ b/chromium/base/fuchsia/process_context.cc
@@ -0,0 +1,41 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/fuchsia/process_context.h"
+
+#include <lib/sys/cpp/component_context.h>
+#include <lib/sys/inspect/cpp/component.h>
+#include <utility>
+
+#include "base/fuchsia/process_context.h"
+#include "base/no_destructor.h"
+
+namespace base {
+
+namespace {
+std::unique_ptr<sys::ComponentContext>* ProcessComponentContextPtr() {
+ static base::NoDestructor<std::unique_ptr<sys::ComponentContext>> value(
+ std::make_unique<sys::ComponentContext>(
+ sys::ServiceDirectory::CreateFromNamespace()));
+ return value.get();
+}
+} // namespace
+
+sys::ComponentInspector* ComponentInspectorForProcess() {
+ static base::NoDestructor<sys::ComponentInspector> value(
+ ComponentContextForProcess());
+ return value.get();
+}
+
+sys::ComponentContext* ComponentContextForProcess() {
+ return ProcessComponentContextPtr()->get();
+}
+
+std::unique_ptr<sys::ComponentContext> ReplaceComponentContextForProcessForTest(
+ std::unique_ptr<sys::ComponentContext> context) {
+ std::swap(*ProcessComponentContextPtr(), context);
+ return context;
+}
+
+} // namespace base
diff --git a/chromium/base/fuchsia/process_context.h b/chromium/base/fuchsia/process_context.h
new file mode 100644
index 00000000000..7808eacd08d
--- /dev/null
+++ b/chromium/base/fuchsia/process_context.h
@@ -0,0 +1,35 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FUCHSIA_PROCESS_CONTEXT_H_
+#define BASE_FUCHSIA_PROCESS_CONTEXT_H_
+
+#include <memory>
+
+#include "base/base_export.h"
+
+namespace sys {
+class ComponentContext;
+class ComponentInspector;
+} // namespace sys
+
+namespace base {
+
+// Returns sys::ComponentInspector for the current process.
+BASE_EXPORT sys::ComponentInspector* ComponentInspectorForProcess();
+
+// Returns default sys::ComponentContext for the current process.
+BASE_EXPORT sys::ComponentContext* ComponentContextForProcess();
+
+// Replaces the default sys::ComponentContext for the current process, and
+// returns the previously-active one.
+// Use the base::TestComponentContextForProcess rather than calling this
+// directly.
+BASE_EXPORT std::unique_ptr<sys::ComponentContext>
+ReplaceComponentContextForProcessForTest(
+ std::unique_ptr<sys::ComponentContext> context);
+
+} // namespace base
+
+#endif // BASE_FUCHSIA_PROCESS_CONTEXT_H_
diff --git a/chromium/base/fuchsia/scoped_service_binding.h b/chromium/base/fuchsia/scoped_service_binding.h
index 568a1ffe56b..9da1218510b 100644
--- a/chromium/base/fuchsia/scoped_service_binding.h
+++ b/chromium/base/fuchsia/scoped_service_binding.h
@@ -7,10 +7,13 @@
#include <lib/fidl/cpp/binding.h>
#include <lib/fidl/cpp/binding_set.h>
+#include <lib/fidl/cpp/interface_request.h>
#include <lib/zx/channel.h>
#include "base/base_export.h"
#include "base/callback.h"
+#include "base/fuchsia/scoped_service_publisher.h"
+#include "base/optional.h"
namespace sys {
class OutgoingDirectory;
@@ -23,128 +26,95 @@ class PseudoDir;
namespace base {
namespace fuchsia {
-namespace internal {
-
-class BASE_EXPORT ScopedServiceBindingBase {
- public:
- explicit ScopedServiceBindingBase(sys::OutgoingDirectory* outgoing_directory);
- explicit ScopedServiceBindingBase(vfs::PseudoDir* pseudo_dir);
-
- ~ScopedServiceBindingBase();
-
- protected:
- // Same type as vfs::Service::Connector, so the value can be passed directly
- // to vfs::Service.
- using Connector =
- fit::function<void(zx::channel channel, async_dispatcher_t* dispatcher)>;
-
- void RegisterService(const char* service_name, Connector connector);
- void UnregisterService(const char* service_name);
-
- private:
- vfs::PseudoDir* const pseudo_dir_ = nullptr;
-};
-
-} // namespace internal
-
template <typename Interface>
-class ScopedServiceBinding : public internal::ScopedServiceBindingBase {
+class BASE_EXPORT ScopedServiceBinding {
public:
// Published a public service in the specified |outgoing_directory|.
// |outgoing_directory| and |impl| must outlive the binding.
ScopedServiceBinding(sys::OutgoingDirectory* outgoing_directory,
Interface* impl)
- : ScopedServiceBindingBase(outgoing_directory), impl_(impl) {
- RegisterService(Interface::Name_,
- fit::bind_member(this, &ScopedServiceBinding::BindClient));
- }
+ : publisher_(outgoing_directory, bindings_.GetHandler(impl)) {}
// Publishes a service in the specified |pseudo_dir|. |pseudo_dir| and |impl|
// must outlive the binding.
ScopedServiceBinding(vfs::PseudoDir* pseudo_dir, Interface* impl)
- : ScopedServiceBindingBase(pseudo_dir), impl_(impl) {
- RegisterService(Interface::Name_,
- fit::bind_member(this, &ScopedServiceBinding::BindClient));
- }
+ : publisher_(pseudo_dir, bindings_.GetHandler(impl)) {}
- ~ScopedServiceBinding() { UnregisterService(Interface::Name_); }
+ ~ScopedServiceBinding() = default;
- void SetOnLastClientCallback(base::OnceClosure on_last_client_callback) {
- on_last_client_callback_ = std::move(on_last_client_callback);
+ // |on_last_client_callback| will be called every time the number of connected
+ // clients drops to 0.
+ void SetOnLastClientCallback(base::RepeatingClosure on_last_client_callback) {
bindings_.set_empty_set_handler(
- fit::bind_member(this, &ScopedServiceBinding::OnBindingSetEmpty));
+ [callback = std::move(on_last_client_callback)] { callback.Run(); });
}
bool has_clients() const { return bindings_.size() != 0; }
private:
- void BindClient(zx::channel channel, async_dispatcher_t* dispatcher) {
- bindings_.AddBinding(impl_,
- fidl::InterfaceRequest<Interface>(std::move(channel)),
- dispatcher);
- }
-
- void OnBindingSetEmpty() {
- bindings_.set_empty_set_handler(nullptr);
- std::move(on_last_client_callback_).Run();
- }
-
- sys::OutgoingDirectory* const directory_ = nullptr;
- vfs::PseudoDir* const pseudo_dir_ = nullptr;
- Interface* const impl_;
fidl::BindingSet<Interface> bindings_;
- base::OnceClosure on_last_client_callback_;
+ ScopedServicePublisher<Interface> publisher_;
DISALLOW_COPY_AND_ASSIGN(ScopedServiceBinding);
};
// Scoped service binding which allows only a single client to be connected
// at any time. By default a new connection will disconnect an existing client.
-enum class ScopedServiceBindingPolicy { kPreferNew, kPreferExisting };
+enum class ScopedServiceBindingPolicy {
+ kPreferNew,
+ kPreferExisting,
+ kConnectOnce
+};
template <typename Interface,
ScopedServiceBindingPolicy Policy =
ScopedServiceBindingPolicy::kPreferNew>
-class ScopedSingleClientServiceBinding
- : public internal::ScopedServiceBindingBase {
+class BASE_EXPORT ScopedSingleClientServiceBinding {
public:
// |outgoing_directory| and |impl| must outlive the binding.
ScopedSingleClientServiceBinding(sys::OutgoingDirectory* outgoing_directory,
Interface* impl)
- : ScopedServiceBindingBase(outgoing_directory), binding_(impl) {
- RegisterService(
- Interface::Name_,
+ : binding_(impl) {
+ publisher_.emplace(
+ outgoing_directory,
fit::bind_member(this, &ScopedSingleClientServiceBinding::BindClient));
+ binding_.set_error_handler(fit::bind_member(
+ this, &ScopedSingleClientServiceBinding::OnBindingEmpty));
}
- ~ScopedSingleClientServiceBinding() { UnregisterService(Interface::Name_); }
+ ~ScopedSingleClientServiceBinding() = default;
typename Interface::EventSender_& events() { return binding_.events(); }
+ // |on_last_client_callback| will be called the first time a client
+ // disconnects. It is still possible for a client to connect after that point
+  // if Policy is kPreferNew or kPreferExisting.
void SetOnLastClientCallback(base::OnceClosure on_last_client_callback) {
on_last_client_callback_ = std::move(on_last_client_callback);
- binding_.set_error_handler(fit::bind_member(
- this, &ScopedSingleClientServiceBinding::OnBindingEmpty));
}
bool has_clients() const { return binding_.is_bound(); }
private:
- void BindClient(zx::channel channel, async_dispatcher_t* dispatcher) {
+ void BindClient(fidl::InterfaceRequest<Interface> request) {
if (Policy == ScopedServiceBindingPolicy::kPreferExisting &&
binding_.is_bound()) {
return;
}
- binding_.Bind(fidl::InterfaceRequest<Interface>(std::move(channel)),
- dispatcher);
+ binding_.Bind(std::move(request));
+ if (Policy == ScopedServiceBindingPolicy::kConnectOnce) {
+ publisher_.reset();
+ }
}
void OnBindingEmpty(zx_status_t status) {
- binding_.set_error_handler(nullptr);
- std::move(on_last_client_callback_).Run();
+ if (on_last_client_callback_) {
+ std::move(on_last_client_callback_).Run();
+ }
}
fidl::Binding<Interface> binding_;
+ base::Optional<ScopedServicePublisher<Interface>> publisher_;
base::OnceClosure on_last_client_callback_;
DISALLOW_COPY_AND_ASSIGN(ScopedSingleClientServiceBinding);
diff --git a/chromium/base/fuchsia/scoped_service_binding_unittest.cc b/chromium/base/fuchsia/scoped_service_binding_unittest.cc
index 4368055cad4..a01bae3ea60 100644
--- a/chromium/base/fuchsia/scoped_service_binding_unittest.cc
+++ b/chromium/base/fuchsia/scoped_service_binding_unittest.cc
@@ -122,5 +122,73 @@ TEST_F(ScopedServiceBindingTest, SingleBindingSetOnLastClientCallback) {
run_loop.Run();
}
+// Test the kConnectOnce option for ScopedSingleClientServiceBinding properly
+// stops publishing the service after a first disconnect.
+TEST_F(ScopedServiceBindingTest, ConnectOnce_OnlyFirstConnectionSucceeds) {
+ // Teardown the default multi-client binding and create a connect-once one.
+ service_binding_ = nullptr;
+ ScopedSingleClientServiceBinding<testfidl::TestInterface,
+ ScopedServiceBindingPolicy::kConnectOnce>
+ binding(outgoing_directory_.get(), &test_service_);
+
+ // Connect the first client, and verify that it is functional.
+ auto existing_client =
+ public_service_directory_->Connect<testfidl::TestInterface>();
+ VerifyTestInterface(&existing_client, ZX_OK);
+
+ // Connect the second client, then verify that it gets closed and the existing
+ // one remains functional.
+ auto new_client =
+ public_service_directory_->Connect<testfidl::TestInterface>();
+ RunLoop().RunUntilIdle();
+ EXPECT_FALSE(new_client);
+ VerifyTestInterface(&existing_client, ZX_OK);
+
+ // Disconnect the first client.
+ existing_client.Unbind().TakeChannel().reset();
+ RunLoop().RunUntilIdle();
+
+ // Re-connect the second client, then verify that it gets closed.
+ new_client = public_service_directory_->Connect<testfidl::TestInterface>();
+ RunLoop().RunUntilIdle();
+ EXPECT_FALSE(new_client);
+}
+
+class MultiUseBindingTest : public ScopedServiceBindingTest {
+ public:
+ MultiUseBindingTest() {
+ service_binding_->SetOnLastClientCallback(
+ BindRepeating(&MultiUseBindingTest::OnLastClient, Unretained(this)));
+ }
+ ~MultiUseBindingTest() override = default;
+
+ protected:
+ void OnLastClient() { disconnect_count_++; }
+
+ int disconnect_count_ = 0;
+};
+
+// Test the last client callback is called every time the number of active
+// clients reaches 0.
+TEST_F(MultiUseBindingTest, MultipleLastClientCallback) {
+ // Connect a client, verify it is functional.
+ auto stub = public_service_directory_->Connect<testfidl::TestInterface>();
+ VerifyTestInterface(&stub, ZX_OK);
+
+ // Disconnect the client, the callback should have been called once.
+ stub.Unbind().TakeChannel().reset();
+ RunLoop().RunUntilIdle();
+ EXPECT_EQ(disconnect_count_, 1);
+
+ // Re-connect the client, verify it is functional.
+ stub = public_service_directory_->Connect<testfidl::TestInterface>();
+ VerifyTestInterface(&stub, ZX_OK);
+
+ // Disconnect the client, the callback should have been called a second time.
+ stub.Unbind().TakeChannel().reset();
+ RunLoop().RunUntilIdle();
+ EXPECT_EQ(disconnect_count_, 2);
+}
+
} // namespace fuchsia
} // namespace base
diff --git a/chromium/base/fuchsia/scoped_service_publisher.h b/chromium/base/fuchsia/scoped_service_publisher.h
new file mode 100644
index 00000000000..60eb1cf404f
--- /dev/null
+++ b/chromium/base/fuchsia/scoped_service_publisher.h
@@ -0,0 +1,51 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FUCHSIA_SCOPED_SERVICE_PUBLISHER_H_
+#define BASE_FUCHSIA_SCOPED_SERVICE_PUBLISHER_H_
+
+#include <lib/async/dispatcher.h>
+#include <lib/fidl/cpp/interface_request.h>
+#include <lib/sys/cpp/outgoing_directory.h>
+#include <lib/vfs/cpp/pseudo_dir.h>
+#include <lib/vfs/cpp/service.h>
+#include <lib/zx/channel.h>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+
+namespace base {
+namespace fuchsia {
+
+template <typename Interface>
+class BASE_EXPORT ScopedServicePublisher {
+ public:
+ // Publishes a public service in the specified |outgoing_directory|.
+ // |outgoing_directory| and |handler| must outlive the binding.
+ ScopedServicePublisher(sys::OutgoingDirectory* outgoing_directory,
+ fidl::InterfaceRequestHandler<Interface> handler)
+ : ScopedServicePublisher(outgoing_directory->GetOrCreateDirectory("svc"),
+ std::move(handler)) {}
+
+ // Publishes a service in the specified |pseudo_dir|. |pseudo_dir| and
+ // |handler| must outlive the binding.
+ ScopedServicePublisher(vfs::PseudoDir* pseudo_dir,
+ fidl::InterfaceRequestHandler<Interface> handler)
+ : pseudo_dir_(pseudo_dir) {
+ pseudo_dir_->AddEntry(Interface::Name_,
+ std::make_unique<vfs::Service>(std::move(handler)));
+ }
+
+ ~ScopedServicePublisher() { pseudo_dir_->RemoveEntry(Interface::Name_); }
+
+ private:
+ vfs::PseudoDir* const pseudo_dir_ = nullptr;
+
+ DISALLOW_COPY_AND_ASSIGN(ScopedServicePublisher);
+};
+
+} // namespace fuchsia
+} // namespace base
+
+#endif // BASE_FUCHSIA_SCOPED_SERVICE_PUBLISHER_H_
diff --git a/chromium/base/fuchsia/scoped_service_publisher_unittest.cc b/chromium/base/fuchsia/scoped_service_publisher_unittest.cc
new file mode 100644
index 00000000000..2d9f5e41e86
--- /dev/null
+++ b/chromium/base/fuchsia/scoped_service_publisher_unittest.cc
@@ -0,0 +1,46 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/fuchsia/scoped_service_publisher.h"
+
+#include <lib/fidl/cpp/binding_set.h>
+
+#include "base/fuchsia/service_directory_test_base.h"
+#include "base/run_loop.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace fuchsia {
+
+class ScopedServicePublisherTest : public ServiceDirectoryTestBase {};
+
+TEST_F(ScopedServicePublisherTest, ConstructorPublishesService) {
+ // Remove the default service binding.
+ service_binding_.reset();
+
+ // Create bindings and register using a publisher instance.
+ fidl::BindingSet<testfidl::TestInterface> bindings;
+ ScopedServicePublisher<testfidl::TestInterface> publisher(
+ outgoing_directory_.get(), bindings.GetHandler(&test_service_));
+ auto client = public_service_directory_->Connect<testfidl::TestInterface>();
+ VerifyTestInterface(&client, ZX_OK);
+}
+
+TEST_F(ScopedServicePublisherTest, DestructorRemovesService) {
+ // Remove the default service binding.
+ service_binding_.reset();
+
+ fidl::BindingSet<testfidl::TestInterface> bindings;
+ {
+ ScopedServicePublisher<testfidl::TestInterface> publisher(
+ outgoing_directory_.get(), bindings.GetHandler(&test_service_));
+ }
+ // Once the publisher leaves scope, the service shouldn't be available.
+ auto new_client =
+ public_service_directory_->Connect<testfidl::TestInterface>();
+ VerifyTestInterface(&new_client, ZX_ERR_PEER_CLOSED);
+}
+
+} // namespace fuchsia
+} // namespace base
diff --git a/chromium/base/fuchsia/service_provider_impl.h b/chromium/base/fuchsia/service_provider_impl.h
index f701684982c..a997a56fa65 100644
--- a/chromium/base/fuchsia/service_provider_impl.h
+++ b/chromium/base/fuchsia/service_provider_impl.h
@@ -15,7 +15,6 @@
#include "base/base_export.h"
#include "base/callback.h"
-#include "base/fuchsia/default_context.h"
#include "base/macros.h"
namespace sys {
diff --git a/chromium/base/fuchsia/test_component_context_for_process.cc b/chromium/base/fuchsia/test_component_context_for_process.cc
index 3f2f9ea2a39..f4ae62d12bc 100644
--- a/chromium/base/fuchsia/test_component_context_for_process.cc
+++ b/chromium/base/fuchsia/test_component_context_for_process.cc
@@ -10,9 +10,9 @@
#include <lib/sys/cpp/component_context.h>
#include "base/files/file_enumerator.h"
-#include "base/fuchsia/default_context.h"
#include "base/fuchsia/filtered_service_directory.h"
#include "base/fuchsia/fuchsia_logging.h"
+#include "base/fuchsia/process_context.h"
#include "base/run_loop.h"
namespace base {
@@ -26,7 +26,7 @@ TestComponentContextForProcess::TestComponentContextForProcess(
// Set up |incoming_services_| to use the ServiceDirectory from the current
// default ComponentContext to fetch services from.
context_services_ = std::make_unique<fuchsia::FilteredServiceDirectory>(
- base::fuchsia::ComponentContextForCurrentProcess()->svc().get());
+ base::ComponentContextForProcess()->svc().get());
// Push all services from /svc to the test context if requested.
if (initial_state == InitialState::kCloneAll) {
@@ -47,7 +47,7 @@ TestComponentContextForProcess::TestComponentContextForProcess(
// directory of |context_services_| published by the test, and with a request
// for the process' root outgoing directory.
fidl::InterfaceHandle<::fuchsia::io::Directory> published_root_directory;
- old_context_ = ReplaceComponentContextForCurrentProcessForTest(
+ old_context_ = ReplaceComponentContextForProcessForTest(
std::make_unique<sys::ComponentContext>(
std::move(incoming_services),
published_root_directory.NewRequest().TakeChannel()));
@@ -64,7 +64,7 @@ TestComponentContextForProcess::TestComponentContextForProcess(
}
TestComponentContextForProcess::~TestComponentContextForProcess() {
- ReplaceComponentContextForCurrentProcessForTest(std::move(old_context_));
+ ReplaceComponentContextForProcessForTest(std::move(old_context_));
}
sys::OutgoingDirectory* TestComponentContextForProcess::additional_services() {
diff --git a/chromium/base/fuchsia/test_component_context_for_process.h b/chromium/base/fuchsia/test_component_context_for_process.h
index 0c3544780ba..28cd7107243 100644
--- a/chromium/base/fuchsia/test_component_context_for_process.h
+++ b/chromium/base/fuchsia/test_component_context_for_process.h
@@ -24,9 +24,8 @@ class FilteredServiceDirectory;
} // namespace fuchsia
// Replaces the process-global sys::ComponentContext (as returned by the
-// base::fuchsia::ComponentContextForCurrentProcess() function) with an empty
-// instance which the calling test can configure, and restores the original
-// when deleted.
+// base::ComponentContextForProcess() function) with an empty instance which the
+// calling test can configure, and restores the original when deleted.
//
// The test ComponentContext runs on the test main thread, which means that:
// - Tests using TestComponentContextForProcess must instantiate a
diff --git a/chromium/base/fuchsia/test_component_context_for_process_unittest.cc b/chromium/base/fuchsia/test_component_context_for_process_unittest.cc
index 9531a1cc637..e83dea9ee55 100644
--- a/chromium/base/fuchsia/test_component_context_for_process_unittest.cc
+++ b/chromium/base/fuchsia/test_component_context_for_process_unittest.cc
@@ -7,8 +7,8 @@
#include <fuchsia/intl/cpp/fidl.h>
#include <lib/sys/cpp/component_context.h>
-#include "base/fuchsia/default_context.h"
#include "base/fuchsia/fuchsia_logging.h"
+#include "base/fuchsia/process_context.h"
#include "base/fuchsia/scoped_service_binding.h"
#include "base/fuchsia/testfidl/cpp/fidl.h"
#include "base/run_loop.h"
@@ -26,7 +26,7 @@ class TestComponentContextForProcessTest
bool HasTestInterface() {
return VerifyTestInterface(
- fuchsia::ComponentContextForCurrentProcess()
+ ComponentContextForProcess()
->svc()
->Connect<fuchsia::testfidl::TestInterface>());
}
@@ -85,8 +85,7 @@ TEST_F(TestComponentContextForProcessTest, InjectTestInterface) {
TEST_F(TestComponentContextForProcessTest, PublishTestInterface) {
// Publish TestInterface to the process' outgoing-directory.
base::fuchsia::ScopedServiceBinding<fuchsia::testfidl::TestInterface>
- service_binding(
- fuchsia::ComponentContextForCurrentProcess()->outgoing().get(), this);
+ service_binding(ComponentContextForProcess()->outgoing().get(), this);
// Attempt to use the TestInterface from the outgoing-directory.
EXPECT_TRUE(HasPublishedTestInterface());
@@ -100,7 +99,7 @@ TEST_F(TestComponentContextForProcessTest, ProvideSystemService) {
// Attempt to use the PropertyProvider via the process ComponentContext.
RunLoop wait_loop;
- auto property_provider = fuchsia::ComponentContextForCurrentProcess()
+ auto property_provider = ComponentContextForProcess()
->svc()
->Connect<::fuchsia::intl::PropertyProvider>();
property_provider.set_error_handler(
diff --git a/chromium/base/hash/hash.h b/chromium/base/hash/hash.h
index f55ae45b388..f94daa9f942 100644
--- a/chromium/base/hash/hash.h
+++ b/chromium/base/hash/hash.h
@@ -14,7 +14,6 @@
#include "base/base_export.h"
#include "base/containers/span.h"
-#include "base/logging.h"
#include "base/strings/string16.h"
#include "base/strings/string_piece.h"
diff --git a/chromium/base/hash/md5_constexpr_internal.h b/chromium/base/hash/md5_constexpr_internal.h
index c7bbe5b4761..b705bc8ad05 100644
--- a/chromium/base/hash/md5_constexpr_internal.h
+++ b/chromium/base/hash/md5_constexpr_internal.h
@@ -9,8 +9,8 @@
#include <cstddef>
#include <cstdint>
+#include "base/check.h"
#include "base/hash/md5.h"
-#include "base/logging.h"
namespace base {
namespace internal {
diff --git a/chromium/base/i18n/time_formatting.cc b/chromium/base/i18n/time_formatting.cc
index 1a6c1389ba0..69259dadae2 100644
--- a/chromium/base/i18n/time_formatting.cc
+++ b/chromium/base/i18n/time_formatting.cc
@@ -10,6 +10,7 @@
#include "base/i18n/unicodestring.h"
#include "base/logging.h"
+#include "base/notreached.h"
#include "base/strings/utf_string_conversions.h"
#include "base/time/time.h"
#include "third_party/icu/source/common/unicode/utypes.h"
diff --git a/chromium/base/ios/ios_util.h b/chromium/base/ios/ios_util.h
index 0a512301abe..de44a75ad7e 100644
--- a/chromium/base/ios/ios_util.h
+++ b/chromium/base/ios/ios_util.h
@@ -25,6 +25,9 @@ BASE_EXPORT bool IsRunningOnIOS12OrLater();
// Returns whether the operating system is iOS 13 or later.
BASE_EXPORT bool IsRunningOnIOS13OrLater();
+// Returns whether the operating system is iOS 14 or later.
+BASE_EXPORT bool IsRunningOnIOS14OrLater();
+
// Returns whether the operating system is at the given version or later.
BASE_EXPORT bool IsRunningOnOrLater(int32_t major,
int32_t minor,
diff --git a/chromium/base/ios/ios_util.mm b/chromium/base/ios/ios_util.mm
index a17d19b6826..553b7354331 100644
--- a/chromium/base/ios/ios_util.mm
+++ b/chromium/base/ios/ios_util.mm
@@ -48,6 +48,11 @@ bool IsRunningOnIOS13OrLater() {
return is_running_on_or_later;
}
+bool IsRunningOnIOS14OrLater() {
+ static const bool is_running_on_or_later = IsRunningOnOrLater(14, 0, 0);
+ return is_running_on_or_later;
+}
+
bool IsRunningOnOrLater(int32_t major, int32_t minor, int32_t bug_fix) {
static const int32_t* current_version = OSVersionAsArray();
int32_t version[] = {major, minor, bug_fix};
diff --git a/chromium/base/ios/weak_nsobject.h b/chromium/base/ios/weak_nsobject.h
index 498cdee28c5..e07c0c9b20b 100644
--- a/chromium/base/ios/weak_nsobject.h
+++ b/chromium/base/ios/weak_nsobject.h
@@ -8,8 +8,8 @@
#import <Foundation/Foundation.h>
#import <objc/runtime.h>
+#include "base/check.h"
#include "base/compiler_specific.h"
-#include "base/logging.h"
#include "base/memory/ref_counted.h"
#include "base/threading/thread_checker.h"
diff --git a/chromium/base/json/json_common.h b/chromium/base/json/json_common.h
index c0fd3eab82b..f98fb00455f 100644
--- a/chromium/base/json/json_common.h
+++ b/chromium/base/json/json_common.h
@@ -7,7 +7,7 @@
#include <stddef.h>
-#include "base/logging.h"
+#include "base/check_op.h"
#include "base/macros.h"
namespace base {
diff --git a/chromium/base/json/json_file_value_serializer.h b/chromium/base/json/json_file_value_serializer.h
index a93950a6080..f38cec24cf7 100644
--- a/chromium/base/json/json_file_value_serializer.h
+++ b/chromium/base/json/json_file_value_serializer.h
@@ -67,7 +67,7 @@ class BASE_EXPORT JSONFileValueDeserializer : public base::ValueDeserializer {
// This enum is designed to safely overlap with JSONReader::JsonParseError.
enum JsonFileError {
JSON_NO_ERROR = 0,
- JSON_ACCESS_DENIED = 1000,
+ JSON_ACCESS_DENIED = kErrorCodeFirstMetadataError,
JSON_CANNOT_READ_FILE,
JSON_FILE_LOCKED,
JSON_NO_SUCH_FILE
diff --git a/chromium/base/json/json_parser.cc b/chromium/base/json/json_parser.cc
index e1e44c9781f..fcb479ceabd 100644
--- a/chromium/base/json/json_parser.cc
+++ b/chromium/base/json/json_parser.cc
@@ -9,7 +9,9 @@
#include <vector>
#include "base/check_op.h"
+#include "base/json/json_reader.h"
#include "base/macros.h"
+#include "base/notreached.h"
#include "base/numerics/safe_conversions.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_piece.h"
@@ -18,13 +20,47 @@
#include "base/strings/utf_string_conversion_utils.h"
#include "base/strings/utf_string_conversions.h"
#include "base/third_party/icu/icu_utf.h"
-#include "base/values.h"
namespace base {
namespace internal {
namespace {
+// Values 1000 and above are used by JSONFileValueSerializer::JsonFileError.
+static_assert(JSONParser::JSON_PARSE_ERROR_COUNT < 1000,
+ "JSONParser error out of bounds");
+
+std::string ErrorCodeToString(JSONParser::JsonParseError error_code) {
+ switch (error_code) {
+ case JSONParser::JSON_NO_ERROR:
+ return std::string();
+ case JSONParser::JSON_SYNTAX_ERROR:
+ return JSONParser::kSyntaxError;
+ case JSONParser::JSON_INVALID_ESCAPE:
+ return JSONParser::kInvalidEscape;
+ case JSONParser::JSON_UNEXPECTED_TOKEN:
+ return JSONParser::kUnexpectedToken;
+ case JSONParser::JSON_TRAILING_COMMA:
+ return JSONParser::kTrailingComma;
+ case JSONParser::JSON_TOO_MUCH_NESTING:
+ return JSONParser::kTooMuchNesting;
+ case JSONParser::JSON_UNEXPECTED_DATA_AFTER_ROOT:
+ return JSONParser::kUnexpectedDataAfterRoot;
+ case JSONParser::JSON_UNSUPPORTED_ENCODING:
+ return JSONParser::kUnsupportedEncoding;
+ case JSONParser::JSON_UNQUOTED_DICTIONARY_KEY:
+ return JSONParser::kUnquotedDictionaryKey;
+ case JSONParser::JSON_TOO_LARGE:
+ return JSONParser::kInputTooLarge;
+ case JSONParser::JSON_UNREPRESENTABLE_NUMBER:
+ return JSONParser::kUnrepresentableNumber;
+ case JSONParser::JSON_PARSE_ERROR_COUNT:
+ break;
+ }
+ NOTREACHED();
+ return std::string();
+}
+
const int32_t kExtendedASCIIStart = 0x80;
constexpr uint32_t kUnicodeReplacementPoint = 0xFFFD;
@@ -45,6 +81,21 @@ bool UnprefixedHexStringToInt(StringPiece input, int* output) {
// This is U+FFFD.
const char kUnicodeReplacementString[] = "\xEF\xBF\xBD";
+const char JSONParser::kSyntaxError[] = "Syntax error.";
+const char JSONParser::kInvalidEscape[] = "Invalid escape sequence.";
+const char JSONParser::kUnexpectedToken[] = "Unexpected token.";
+const char JSONParser::kTrailingComma[] = "Trailing comma not allowed.";
+const char JSONParser::kTooMuchNesting[] = "Too much nesting.";
+const char JSONParser::kUnexpectedDataAfterRoot[] =
+ "Unexpected data after root element.";
+const char JSONParser::kUnsupportedEncoding[] =
+ "Unsupported encoding. JSON must be UTF-8.";
+const char JSONParser::kUnquotedDictionaryKey[] =
+ "Dictionary keys must be quoted.";
+const char JSONParser::kInputTooLarge[] = "Input string is too large (>2GB).";
+const char JSONParser::kUnrepresentableNumber[] =
+ "Number cannot be represented.";
+
JSONParser::JSONParser(int options, size_t max_depth)
: options_(options),
max_depth_(max_depth),
@@ -52,7 +103,7 @@ JSONParser::JSONParser(int options, size_t max_depth)
stack_depth_(0),
line_number_(0),
index_last_line_(0),
- error_code_(JSONReader::JSON_NO_ERROR),
+ error_code_(JSON_NO_ERROR),
error_line_(0),
error_column_(0) {
CHECK_LE(max_depth, kAbsoluteMaxDepth);
@@ -77,14 +128,14 @@ Optional<Value> JSONParser::Parse(StringPiece input) {
line_number_ = 1;
index_last_line_ = -1;
- error_code_ = JSONReader::JSON_NO_ERROR;
+ error_code_ = JSON_NO_ERROR;
error_line_ = 0;
error_column_ = 0;
// ICU and ReadUnicodeCharacter() use int32_t for lengths, so ensure
// that the index_ will not overflow when parsing.
if (!base::IsValueInRangeForNumericType<int32_t>(input.length())) {
- ReportError(JSONReader::JSON_TOO_LARGE, -1);
+ ReportError(JSON_TOO_LARGE, -1);
return nullopt;
}
@@ -100,20 +151,20 @@ Optional<Value> JSONParser::Parse(StringPiece input) {
// Make sure the input stream is at an end.
if (GetNextToken() != T_END_OF_INPUT) {
- ReportError(JSONReader::JSON_UNEXPECTED_DATA_AFTER_ROOT, 0);
+ ReportError(JSON_UNEXPECTED_DATA_AFTER_ROOT, 0);
return nullopt;
}
return root;
}
-JSONReader::JsonParseError JSONParser::error_code() const {
+JSONParser::JsonParseError JSONParser::error_code() const {
return error_code_;
}
std::string JSONParser::GetErrorMessage() const {
return FormatErrorMessage(error_line_, error_column_,
- JSONReader::ErrorCodeToString(error_code_));
+ ErrorCodeToString(error_code_));
}
int JSONParser::error_line() const {
@@ -323,20 +374,20 @@ Optional<Value> JSONParser::ParseToken(Token token) {
case T_NULL:
return ConsumeLiteral();
default:
- ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 0);
+ ReportError(JSON_UNEXPECTED_TOKEN, 0);
return nullopt;
}
}
Optional<Value> JSONParser::ConsumeDictionary() {
if (ConsumeChar() != '{') {
- ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 0);
+ ReportError(JSON_UNEXPECTED_TOKEN, 0);
return nullopt;
}
StackMarker depth_check(max_depth_, &stack_depth_);
if (depth_check.IsTooDeep()) {
- ReportError(JSONReader::JSON_TOO_MUCH_NESTING, -1);
+ ReportError(JSON_TOO_MUCH_NESTING, -1);
return nullopt;
}
@@ -345,7 +396,7 @@ Optional<Value> JSONParser::ConsumeDictionary() {
Token token = GetNextToken();
while (token != T_OBJECT_END) {
if (token != T_STRING) {
- ReportError(JSONReader::JSON_UNQUOTED_DICTIONARY_KEY, 0);
+ ReportError(JSON_UNQUOTED_DICTIONARY_KEY, 0);
return nullopt;
}
@@ -358,7 +409,7 @@ Optional<Value> JSONParser::ConsumeDictionary() {
// Read the separator.
token = GetNextToken();
if (token != T_OBJECT_PAIR_SEPARATOR) {
- ReportError(JSONReader::JSON_SYNTAX_ERROR, 0);
+ ReportError(JSON_SYNTAX_ERROR, 0);
return nullopt;
}
@@ -378,11 +429,11 @@ Optional<Value> JSONParser::ConsumeDictionary() {
ConsumeChar();
token = GetNextToken();
if (token == T_OBJECT_END && !(options_ & JSON_ALLOW_TRAILING_COMMAS)) {
- ReportError(JSONReader::JSON_TRAILING_COMMA, 0);
+ ReportError(JSON_TRAILING_COMMA, 0);
return nullopt;
}
} else if (token != T_OBJECT_END) {
- ReportError(JSONReader::JSON_SYNTAX_ERROR, 0);
+ ReportError(JSON_SYNTAX_ERROR, 0);
return nullopt;
}
}
@@ -396,13 +447,13 @@ Optional<Value> JSONParser::ConsumeDictionary() {
Optional<Value> JSONParser::ConsumeList() {
if (ConsumeChar() != '[') {
- ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 0);
+ ReportError(JSON_UNEXPECTED_TOKEN, 0);
return nullopt;
}
StackMarker depth_check(max_depth_, &stack_depth_);
if (depth_check.IsTooDeep()) {
- ReportError(JSONReader::JSON_TOO_MUCH_NESTING, -1);
+ ReportError(JSON_TOO_MUCH_NESTING, -1);
return nullopt;
}
@@ -423,11 +474,11 @@ Optional<Value> JSONParser::ConsumeList() {
ConsumeChar();
token = GetNextToken();
if (token == T_ARRAY_END && !(options_ & JSON_ALLOW_TRAILING_COMMAS)) {
- ReportError(JSONReader::JSON_TRAILING_COMMA, 0);
+ ReportError(JSON_TRAILING_COMMA, 0);
return nullopt;
}
} else if (token != T_ARRAY_END) {
- ReportError(JSONReader::JSON_SYNTAX_ERROR, 0);
+ ReportError(JSON_SYNTAX_ERROR, 0);
return nullopt;
}
}
@@ -446,7 +497,7 @@ Optional<Value> JSONParser::ConsumeString() {
bool JSONParser::ConsumeStringRaw(StringBuilder* out) {
if (ConsumeChar() != '"') {
- ReportError(JSONReader::JSON_UNEXPECTED_TOKEN, 0);
+ ReportError(JSON_UNEXPECTED_TOKEN, 0);
return false;
}
@@ -462,7 +513,7 @@ bool JSONParser::ConsumeStringRaw(StringBuilder* out) {
&next_char) ||
!IsValidCodepoint(next_char)) {
if ((options_ & JSON_REPLACE_INVALID_CHARACTERS) == 0) {
- ReportError(JSONReader::JSON_UNSUPPORTED_ENCODING, 0);
+ ReportError(JSON_UNSUPPORTED_ENCODING, 0);
return false;
}
ConsumeChar();
@@ -501,7 +552,7 @@ bool JSONParser::ConsumeStringRaw(StringBuilder* out) {
// Read past the escape '\' and ensure there's a character following.
Optional<StringPiece> escape_sequence = ConsumeChars(2);
if (!escape_sequence) {
- ReportError(JSONReader::JSON_INVALID_ESCAPE, -1);
+ ReportError(JSON_INVALID_ESCAPE, -1);
return false;
}
@@ -512,14 +563,14 @@ bool JSONParser::ConsumeStringRaw(StringBuilder* out) {
// are supported here for backwards-compatiblity with the old parser.
escape_sequence = ConsumeChars(2);
if (!escape_sequence) {
- ReportError(JSONReader::JSON_INVALID_ESCAPE, -3);
+ ReportError(JSON_INVALID_ESCAPE, -3);
return false;
}
int hex_digit = 0;
if (!UnprefixedHexStringToInt(*escape_sequence, &hex_digit) ||
!IsValidCharacter(hex_digit)) {
- ReportError(JSONReader::JSON_INVALID_ESCAPE, -3);
+ ReportError(JSON_INVALID_ESCAPE, -3);
return false;
}
@@ -530,7 +581,7 @@ bool JSONParser::ConsumeStringRaw(StringBuilder* out) {
// UTF units are of the form \uXXXX.
uint32_t code_point;
if (!DecodeUTF16(&code_point)) {
- ReportError(JSONReader::JSON_INVALID_ESCAPE, -1);
+ ReportError(JSON_INVALID_ESCAPE, -1);
return false;
}
string.Append(code_point);
@@ -565,13 +616,13 @@ bool JSONParser::ConsumeStringRaw(StringBuilder* out) {
break;
// All other escape squences are illegal.
default:
- ReportError(JSONReader::JSON_INVALID_ESCAPE, -1);
+ ReportError(JSON_INVALID_ESCAPE, -1);
return false;
}
}
}
- ReportError(JSONReader::JSON_SYNTAX_ERROR, -1);
+ ReportError(JSON_SYNTAX_ERROR, -1);
return false;
}
@@ -589,10 +640,13 @@ bool JSONParser::DecodeUTF16(uint32_t* out_code_point) {
// If this is a high surrogate, consume the next code unit to get the
// low surrogate.
if (CBU16_IS_SURROGATE(code_unit16_high)) {
- // Make sure this is the high surrogate. If not, it's an encoding
- // error.
- if (!CBU16_IS_SURROGATE_LEAD(code_unit16_high))
- return false;
+ // Make sure this is the high surrogate.
+ if (!CBU16_IS_SURROGATE_LEAD(code_unit16_high)) {
+ if ((options_ & JSON_REPLACE_INVALID_CHARACTERS) == 0)
+ return false;
+ *out_code_point = kUnicodeReplacementPoint;
+ return true;
+ }
// Make sure that the token has more characters to consume the
// lower surrogate.
@@ -641,7 +695,7 @@ Optional<Value> JSONParser::ConsumeNumber() {
ConsumeChar();
if (!ReadInt(false)) {
- ReportError(JSONReader::JSON_SYNTAX_ERROR, 0);
+ ReportError(JSON_SYNTAX_ERROR, 0);
return nullopt;
}
end_index = index_;
@@ -650,7 +704,7 @@ Optional<Value> JSONParser::ConsumeNumber() {
if (PeekChar() == '.') {
ConsumeChar();
if (!ReadInt(true)) {
- ReportError(JSONReader::JSON_SYNTAX_ERROR, 0);
+ ReportError(JSON_SYNTAX_ERROR, 0);
return nullopt;
}
end_index = index_;
@@ -664,7 +718,7 @@ Optional<Value> JSONParser::ConsumeNumber() {
ConsumeChar();
}
if (!ReadInt(true)) {
- ReportError(JSONReader::JSON_SYNTAX_ERROR, 0);
+ ReportError(JSON_SYNTAX_ERROR, 0);
return nullopt;
}
end_index = index_;
@@ -683,7 +737,7 @@ Optional<Value> JSONParser::ConsumeNumber() {
case T_END_OF_INPUT:
break;
default:
- ReportError(JSONReader::JSON_SYNTAX_ERROR, 0);
+ ReportError(JSON_SYNTAX_ERROR, 0);
return nullopt;
}
@@ -701,7 +755,7 @@ Optional<Value> JSONParser::ConsumeNumber() {
return Value(num_double);
}
- ReportError(JSONReader::JSON_UNREPRESENTABLE_NUMBER, 0);
+ ReportError(JSON_UNREPRESENTABLE_NUMBER, 0);
return nullopt;
}
@@ -736,7 +790,7 @@ Optional<Value> JSONParser::ConsumeLiteral() {
return Value(false);
if (ConsumeIfMatch("null"))
return Value(Value::Type::NONE);
- ReportError(JSONReader::JSON_SYNTAX_ERROR, 0);
+ ReportError(JSON_SYNTAX_ERROR, 0);
return nullopt;
}
@@ -748,8 +802,7 @@ bool JSONParser::ConsumeIfMatch(StringPiece match) {
return false;
}
-void JSONParser::ReportError(JSONReader::JsonParseError code,
- int column_adjust) {
+void JSONParser::ReportError(JsonParseError code, int column_adjust) {
error_code_ = code;
error_line_ = line_number_;
error_column_ = index_ - index_last_line_ + column_adjust;
diff --git a/chromium/base/json/json_parser.h b/chromium/base/json/json_parser.h
index 523062e6812..1481a8d2ba7 100644
--- a/chromium/base/json/json_parser.h
+++ b/chromium/base/json/json_parser.h
@@ -15,10 +15,10 @@
#include "base/compiler_specific.h"
#include "base/gtest_prod_util.h"
#include "base/json/json_common.h"
-#include "base/json/json_reader.h"
#include "base/macros.h"
#include "base/optional.h"
#include "base/strings/string_piece.h"
+#include "base/values.h"
namespace base {
@@ -44,7 +44,35 @@ class JSONParserTest;
// of the next token.
class BASE_EXPORT JSONParser {
public:
- JSONParser(int options, size_t max_depth = kAbsoluteMaxDepth);
+ // Error codes during parsing.
+ enum JsonParseError {
+ JSON_NO_ERROR = base::ValueDeserializer::kErrorCodeNoError,
+ JSON_SYNTAX_ERROR = base::ValueDeserializer::kErrorCodeInvalidFormat,
+ JSON_INVALID_ESCAPE,
+ JSON_UNEXPECTED_TOKEN,
+ JSON_TRAILING_COMMA,
+ JSON_TOO_MUCH_NESTING,
+ JSON_UNEXPECTED_DATA_AFTER_ROOT,
+ JSON_UNSUPPORTED_ENCODING,
+ JSON_UNQUOTED_DICTIONARY_KEY,
+ JSON_TOO_LARGE,
+ JSON_UNREPRESENTABLE_NUMBER,
+ JSON_PARSE_ERROR_COUNT
+ };
+
+ // String versions of parse error codes.
+ static const char kSyntaxError[];
+ static const char kInvalidEscape[];
+ static const char kUnexpectedToken[];
+ static const char kTrailingComma[];
+ static const char kTooMuchNesting[];
+ static const char kUnexpectedDataAfterRoot[];
+ static const char kUnsupportedEncoding[];
+ static const char kUnquotedDictionaryKey[];
+ static const char kInputTooLarge[];
+ static const char kUnrepresentableNumber[];
+
+ explicit JSONParser(int options, size_t max_depth = kAbsoluteMaxDepth);
~JSONParser();
// Parses the input string according to the set options and returns the
@@ -54,7 +82,7 @@ class BASE_EXPORT JSONParser {
Optional<Value> Parse(StringPiece input);
// Returns the error code.
- JSONReader::JsonParseError error_code() const;
+ JsonParseError error_code() const;
// Returns the human-friendly error message.
std::string GetErrorMessage() const;
@@ -205,7 +233,7 @@ class BASE_EXPORT JSONParser {
// Sets the error information to |code| at the current column, based on
// |index_| and |index_last_line_|, with an optional positive/negative
// adjustment by |column_adjust|.
- void ReportError(JSONReader::JsonParseError code, int column_adjust);
+ void ReportError(JsonParseError code, int column_adjust);
// Given the line and column number of an error, formats one of the error
// message contants from json_reader.h for human display.
@@ -234,7 +262,7 @@ class BASE_EXPORT JSONParser {
int index_last_line_;
// Error information.
- JSONReader::JsonParseError error_code_;
+ JsonParseError error_code_;
int error_line_;
int error_column_;
diff --git a/chromium/base/json/json_parser_unittest.cc b/chromium/base/json/json_parser_unittest.cc
index c4926423433..9e89afe3f7d 100644
--- a/chromium/base/json/json_parser_unittest.cc
+++ b/chromium/base/json/json_parser_unittest.cc
@@ -210,76 +210,103 @@ TEST_F(JSONParserTest, ConsumeNumbers) {
}
TEST_F(JSONParserTest, ErrorMessages) {
- JSONReader::ValueWithError root =
- JSONReader::ReadAndReturnValueWithError("[42]", JSON_PARSE_RFC);
- EXPECT_TRUE(root.error_message.empty());
- EXPECT_EQ(0, root.error_code);
+ {
+ JSONParser parser(JSON_PARSE_RFC);
+ Optional<Value> value = parser.Parse("[42]");
+ EXPECT_TRUE(value);
+ EXPECT_TRUE(parser.GetErrorMessage().empty());
+ EXPECT_EQ(0, parser.error_code());
+ }
// Test each of the error conditions
- root = JSONReader::ReadAndReturnValueWithError("{},{}", JSON_PARSE_RFC);
- EXPECT_FALSE(root.value);
- EXPECT_EQ(JSONParser::FormatErrorMessage(
- 1, 3, JSONReader::kUnexpectedDataAfterRoot),
- root.error_message);
- EXPECT_EQ(JSONReader::JSON_UNEXPECTED_DATA_AFTER_ROOT, root.error_code);
-
- std::string nested_json;
- for (int i = 0; i < 201; ++i) {
- nested_json.insert(nested_json.begin(), '[');
- nested_json.append(1, ']');
+ {
+ JSONParser parser(JSON_PARSE_RFC);
+ Optional<Value> value = parser.Parse("{},{}");
+ EXPECT_FALSE(value);
+ EXPECT_EQ(JSONParser::FormatErrorMessage(
+ 1, 3, JSONParser::kUnexpectedDataAfterRoot),
+ parser.GetErrorMessage());
+ EXPECT_EQ(JSONParser::JSON_UNEXPECTED_DATA_AFTER_ROOT, parser.error_code());
+ }
+
+ {
+ std::string nested_json;
+ for (int i = 0; i < 201; ++i) {
+ nested_json.insert(nested_json.begin(), '[');
+ nested_json.append(1, ']');
+ }
+ JSONParser parser(JSON_PARSE_RFC);
+ Optional<Value> value = parser.Parse(nested_json);
+ EXPECT_FALSE(value);
+ EXPECT_EQ(
+ JSONParser::FormatErrorMessage(1, 200, JSONParser::kTooMuchNesting),
+ parser.GetErrorMessage());
+ EXPECT_EQ(JSONParser::JSON_TOO_MUCH_NESTING, parser.error_code());
+ }
+
+ {
+ JSONParser parser(JSON_PARSE_RFC);
+ Optional<Value> value = parser.Parse("[1,]");
+ EXPECT_FALSE(value);
+ EXPECT_EQ(JSONParser::FormatErrorMessage(1, 4, JSONParser::kTrailingComma),
+ parser.GetErrorMessage());
+ EXPECT_EQ(JSONParser::JSON_TRAILING_COMMA, parser.error_code());
+ }
+
+ {
+ JSONParser parser(JSON_PARSE_RFC);
+ Optional<Value> value = parser.Parse("{foo:\"bar\"}");
+ EXPECT_FALSE(value);
+ EXPECT_EQ(JSONParser::FormatErrorMessage(
+ 1, 2, JSONParser::kUnquotedDictionaryKey),
+ parser.GetErrorMessage());
+ EXPECT_EQ(JSONParser::JSON_UNQUOTED_DICTIONARY_KEY, parser.error_code());
+ }
+
+ {
+ JSONParser parser(JSON_PARSE_RFC);
+ Optional<Value> value = parser.Parse("{\"foo\":\"bar\",}");
+ EXPECT_FALSE(value);
+ EXPECT_EQ(JSONParser::FormatErrorMessage(1, 14, JSONParser::kTrailingComma),
+ parser.GetErrorMessage());
+ EXPECT_EQ(JSONParser::JSON_TRAILING_COMMA, parser.error_code());
+ }
+
+ {
+ JSONParser parser(JSON_PARSE_RFC);
+ Optional<Value> value = parser.Parse("[nu]");
+ EXPECT_FALSE(value);
+ EXPECT_EQ(JSONParser::FormatErrorMessage(1, 2, JSONParser::kSyntaxError),
+ parser.GetErrorMessage());
+ EXPECT_EQ(JSONParser::JSON_SYNTAX_ERROR, parser.error_code());
+ }
+
+ {
+ JSONParser parser(JSON_PARSE_RFC);
+ Optional<Value> value = parser.Parse("[\"xxx\\xq\"]");
+ EXPECT_FALSE(value);
+ EXPECT_EQ(JSONParser::FormatErrorMessage(1, 7, JSONParser::kInvalidEscape),
+ parser.GetErrorMessage());
+ EXPECT_EQ(JSONParser::JSON_INVALID_ESCAPE, parser.error_code());
+ }
+
+ {
+ JSONParser parser(JSON_PARSE_RFC);
+ Optional<Value> value = parser.Parse("[\"xxx\\uq\"]");
+ EXPECT_FALSE(value);
+ EXPECT_EQ(JSONParser::FormatErrorMessage(1, 7, JSONParser::kInvalidEscape),
+ parser.GetErrorMessage());
+ EXPECT_EQ(JSONParser::JSON_INVALID_ESCAPE, parser.error_code());
+ }
+
+ {
+ JSONParser parser(JSON_PARSE_RFC);
+ Optional<Value> value = parser.Parse("[\"xxx\\q\"]");
+ EXPECT_FALSE(value);
+ EXPECT_EQ(JSONParser::FormatErrorMessage(1, 7, JSONParser::kInvalidEscape),
+ parser.GetErrorMessage());
+ EXPECT_EQ(JSONParser::JSON_INVALID_ESCAPE, parser.error_code());
}
- root = JSONReader::ReadAndReturnValueWithError(nested_json, JSON_PARSE_RFC);
- EXPECT_FALSE(root.value);
- EXPECT_EQ(JSONParser::FormatErrorMessage(1, 200, JSONReader::kTooMuchNesting),
- root.error_message);
- EXPECT_EQ(JSONReader::JSON_TOO_MUCH_NESTING, root.error_code);
-
- root = JSONReader::ReadAndReturnValueWithError("[1,]", JSON_PARSE_RFC);
- EXPECT_FALSE(root.value);
- EXPECT_EQ(JSONParser::FormatErrorMessage(1, 4, JSONReader::kTrailingComma),
- root.error_message);
- EXPECT_EQ(JSONReader::JSON_TRAILING_COMMA, root.error_code);
-
- root =
- JSONReader::ReadAndReturnValueWithError("{foo:\"bar\"}", JSON_PARSE_RFC);
- EXPECT_FALSE(root.value);
- EXPECT_EQ(
- JSONParser::FormatErrorMessage(1, 2, JSONReader::kUnquotedDictionaryKey),
- root.error_message);
- EXPECT_EQ(JSONReader::JSON_UNQUOTED_DICTIONARY_KEY, root.error_code);
-
- root = JSONReader::ReadAndReturnValueWithError("{\"foo\":\"bar\",}",
- JSON_PARSE_RFC);
- EXPECT_FALSE(root.value);
- EXPECT_EQ(JSONParser::FormatErrorMessage(1, 14, JSONReader::kTrailingComma),
- root.error_message);
-
- root = JSONReader::ReadAndReturnValueWithError("[nu]", JSON_PARSE_RFC);
- EXPECT_FALSE(root.value);
- EXPECT_EQ(JSONParser::FormatErrorMessage(1, 2, JSONReader::kSyntaxError),
- root.error_message);
- EXPECT_EQ(JSONReader::JSON_SYNTAX_ERROR, root.error_code);
-
- root =
- JSONReader::ReadAndReturnValueWithError("[\"xxx\\xq\"]", JSON_PARSE_RFC);
- EXPECT_FALSE(root.value);
- EXPECT_EQ(JSONParser::FormatErrorMessage(1, 7, JSONReader::kInvalidEscape),
- root.error_message);
- EXPECT_EQ(JSONReader::JSON_INVALID_ESCAPE, root.error_code);
-
- root =
- JSONReader::ReadAndReturnValueWithError("[\"xxx\\uq\"]", JSON_PARSE_RFC);
- EXPECT_FALSE(root.value);
- EXPECT_EQ(JSONParser::FormatErrorMessage(1, 7, JSONReader::kInvalidEscape),
- root.error_message);
- EXPECT_EQ(JSONReader::JSON_INVALID_ESCAPE, root.error_code);
-
- root =
- JSONReader::ReadAndReturnValueWithError("[\"xxx\\q\"]", JSON_PARSE_RFC);
- EXPECT_FALSE(root.value);
- EXPECT_EQ(JSONParser::FormatErrorMessage(1, 7, JSONReader::kInvalidEscape),
- root.error_message);
- EXPECT_EQ(JSONReader::JSON_INVALID_ESCAPE, root.error_code);
}
} // namespace internal
diff --git a/chromium/base/json/json_perftest_decodebench.cc b/chromium/base/json/json_perftest_decodebench.cc
index e0f265dde5f..47a659623cb 100644
--- a/chromium/base/json/json_perftest_decodebench.cc
+++ b/chromium/base/json/json_perftest_decodebench.cc
@@ -24,6 +24,7 @@
#include "base/command_line.h"
#include "base/files/file_util.h"
#include "base/json/json_reader.h"
+#include "base/logging.h"
#include "base/time/time.h"
int main(int argc, char* argv[]) {
diff --git a/chromium/base/json/json_reader.cc b/chromium/base/json/json_reader.cc
index 53e3df9fdf3..9456cfde1aa 100644
--- a/chromium/base/json/json_reader.cc
+++ b/chromium/base/json/json_reader.cc
@@ -8,30 +8,10 @@
#include <vector>
#include "base/json/json_parser.h"
-#include "base/notreached.h"
#include "base/optional.h"
namespace base {
-// Values 1000 and above are used by JSONFileValueSerializer::JsonFileError.
-static_assert(JSONReader::JSON_PARSE_ERROR_COUNT < 1000,
- "JSONReader error out of bounds");
-
-const char JSONReader::kInvalidEscape[] = "Invalid escape sequence.";
-const char JSONReader::kSyntaxError[] = "Syntax error.";
-const char JSONReader::kUnexpectedToken[] = "Unexpected token.";
-const char JSONReader::kTrailingComma[] = "Trailing comma not allowed.";
-const char JSONReader::kTooMuchNesting[] = "Too much nesting.";
-const char JSONReader::kUnexpectedDataAfterRoot[] =
- "Unexpected data after root element.";
-const char JSONReader::kUnsupportedEncoding[] =
- "Unsupported encoding. JSON must be UTF-8.";
-const char JSONReader::kUnquotedDictionaryKey[] =
- "Dictionary keys must be quoted.";
-const char JSONReader::kInputTooLarge[] = "Input string is too large (>2GB).";
-const char JSONReader::kUnrepresentableNumber[] =
- "Number cannot be represented.";
-
JSONReader::ValueWithError::ValueWithError() = default;
JSONReader::ValueWithError::ValueWithError(ValueWithError&& other) = default;
@@ -41,11 +21,6 @@ JSONReader::ValueWithError::~ValueWithError() = default;
JSONReader::ValueWithError& JSONReader::ValueWithError::operator=(
ValueWithError&& other) = default;
-JSONReader::JSONReader(int options, size_t max_depth)
- : parser_(new internal::JSONParser(options, max_depth)) {}
-
-JSONReader::~JSONReader() = default;
-
// static
Optional<Value> JSONReader::Read(StringPiece json,
int options,
@@ -54,6 +29,7 @@ Optional<Value> JSONReader::Read(StringPiece json,
return parser.Parse(json);
}
+// static
std::unique_ptr<Value> JSONReader::ReadDeprecated(StringPiece json,
int options,
size_t max_depth) {
@@ -77,72 +53,4 @@ JSONReader::ValueWithError JSONReader::ReadAndReturnValueWithError(
return ret;
}
-// static
-std::unique_ptr<Value> JSONReader::ReadAndReturnErrorDeprecated(
- StringPiece json,
- int options,
- int* error_code_out,
- std::string* error_msg_out,
- int* error_line_out,
- int* error_column_out) {
- ValueWithError ret = ReadAndReturnValueWithError(json, options);
- if (ret.value)
- return Value::ToUniquePtrValue(std::move(*ret.value));
-
- if (error_code_out)
- *error_code_out = ret.error_code;
- if (error_msg_out)
- *error_msg_out = ret.error_message;
- if (error_line_out)
- *error_line_out = ret.error_line;
- if (error_column_out)
- *error_column_out = ret.error_column;
- return nullptr;
-}
-
-// static
-std::string JSONReader::ErrorCodeToString(JsonParseError error_code) {
- switch (error_code) {
- case JSON_NO_ERROR:
- return std::string();
- case JSON_INVALID_ESCAPE:
- return kInvalidEscape;
- case JSON_SYNTAX_ERROR:
- return kSyntaxError;
- case JSON_UNEXPECTED_TOKEN:
- return kUnexpectedToken;
- case JSON_TRAILING_COMMA:
- return kTrailingComma;
- case JSON_TOO_MUCH_NESTING:
- return kTooMuchNesting;
- case JSON_UNEXPECTED_DATA_AFTER_ROOT:
- return kUnexpectedDataAfterRoot;
- case JSON_UNSUPPORTED_ENCODING:
- return kUnsupportedEncoding;
- case JSON_UNQUOTED_DICTIONARY_KEY:
- return kUnquotedDictionaryKey;
- case JSON_TOO_LARGE:
- return kInputTooLarge;
- case JSON_UNREPRESENTABLE_NUMBER:
- return kUnrepresentableNumber;
- case JSON_PARSE_ERROR_COUNT:
- break;
- }
- NOTREACHED();
- return std::string();
-}
-
-Optional<Value> JSONReader::ReadToValue(StringPiece json) {
- return parser_->Parse(json);
-}
-
-std::unique_ptr<Value> JSONReader::ReadToValueDeprecated(StringPiece json) {
- Optional<Value> value = parser_->Parse(json);
- return value ? std::make_unique<Value>(std::move(*value)) : nullptr;
-}
-
-std::string JSONReader::GetErrorMessage() const {
- return parser_->GetErrorMessage();
-}
-
} // namespace base
diff --git a/chromium/base/json/json_reader.h b/chromium/base/json/json_reader.h
index 511fffe32e7..007f843d736 100644
--- a/chromium/base/json/json_reader.h
+++ b/chromium/base/json/json_reader.h
@@ -49,10 +49,6 @@
namespace base {
-namespace internal {
-class JSONParser;
-}
-
enum JSONParserOptions {
// Parses the input strictly according to RFC 8259, except for where noted
// above.
@@ -70,22 +66,6 @@ enum JSONParserOptions {
class BASE_EXPORT JSONReader {
public:
- // Error codes during parsing.
- enum JsonParseError {
- JSON_NO_ERROR = 0,
- JSON_INVALID_ESCAPE,
- JSON_SYNTAX_ERROR,
- JSON_UNEXPECTED_TOKEN,
- JSON_TRAILING_COMMA,
- JSON_TOO_MUCH_NESTING,
- JSON_UNEXPECTED_DATA_AFTER_ROOT,
- JSON_UNSUPPORTED_ENCODING,
- JSON_UNQUOTED_DICTIONARY_KEY,
- JSON_TOO_LARGE,
- JSON_UNREPRESENTABLE_NUMBER,
- JSON_PARSE_ERROR_COUNT
- };
-
struct BASE_EXPORT ValueWithError {
ValueWithError();
ValueWithError(ValueWithError&& other);
@@ -96,7 +76,7 @@ class BASE_EXPORT JSONReader {
// Contains default values if |value| exists, or the error status if |value|
// is base::nullopt.
- JsonParseError error_code = JSON_NO_ERROR;
+ int error_code = ValueDeserializer::kErrorCodeNoError;
std::string error_message;
int error_line = 0;
int error_column = 0;
@@ -104,24 +84,6 @@ class BASE_EXPORT JSONReader {
DISALLOW_COPY_AND_ASSIGN(ValueWithError);
};
- // String versions of parse error codes.
- static const char kInvalidEscape[];
- static const char kSyntaxError[];
- static const char kUnexpectedToken[];
- static const char kTrailingComma[];
- static const char kTooMuchNesting[];
- static const char kUnexpectedDataAfterRoot[];
- static const char kUnsupportedEncoding[];
- static const char kUnquotedDictionaryKey[];
- static const char kInputTooLarge[];
- static const char kUnrepresentableNumber[];
-
- // Constructs a reader.
- JSONReader(int options = JSON_PARSE_RFC,
- size_t max_depth = internal::kAbsoluteMaxDepth);
-
- ~JSONReader();
-
// Reads and parses |json|, returning a Value.
// If |json| is not a properly formed JSON string, returns base::nullopt.
static Optional<Value> Read(StringPiece json,
@@ -145,36 +107,9 @@ class BASE_EXPORT JSONReader {
StringPiece json,
int options = JSON_PARSE_RFC);
- // Deprecated. Use the ReadAndReturnValueWithError() method above.
- // Reads and parses |json| like Read(). |error_code_out| and |error_msg_out|
- // are optional. If specified and nullptr is returned, they will be populated
- // an error code and a formatted error message (including error location if
- // appropriate). Otherwise, they will be unmodified.
- static std::unique_ptr<Value> ReadAndReturnErrorDeprecated(
- StringPiece json,
- int options, // JSONParserOptions
- int* error_code_out,
- std::string* error_msg_out,
- int* error_line_out = nullptr,
- int* error_column_out = nullptr);
-
- // Converts a JSON parse error code into a human readable message.
- // Returns an empty string if error_code is JSON_NO_ERROR.
- static std::string ErrorCodeToString(JsonParseError error_code);
-
- // Non-static version of Read() above.
- Optional<Value> ReadToValue(StringPiece json);
-
- // Deprecated. Use the ReadToValue() method above.
- // Non-static version of Read() above.
- std::unique_ptr<Value> ReadToValueDeprecated(StringPiece json);
-
- // Converts error_code_ to a human-readable string, including line and column
- // numbers if appropriate.
- std::string GetErrorMessage() const;
-
- private:
- std::unique_ptr<internal::JSONParser> parser_;
+ // This class contains only static methods.
+ JSONReader() = delete;
+ DISALLOW_COPY_AND_ASSIGN(JSONReader);
};
} // namespace base
diff --git a/chromium/base/json/json_reader_fuzzer.cc b/chromium/base/json/json_reader_fuzzer.cc
index 68b4dc0231a..5f27ac23b17 100644
--- a/chromium/base/json/json_reader_fuzzer.cc
+++ b/chromium/base/json/json_reader_fuzzer.cc
@@ -24,7 +24,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
JSONReader::ValueWithError json_val =
JSONReader::ReadAndReturnValueWithError(input_string, options);
- CHECK((json_val.error_code == JSONReader::JSON_NO_ERROR) ==
+ CHECK((json_val.error_code == base::ValueDeserializer::kErrorCodeNoError) ==
json_val.value.has_value());
if (json_val.value) {
diff --git a/chromium/base/json/json_reader_unittest.cc b/chromium/base/json/json_reader_unittest.cc
index 33570ad4b48..519a8845ea5 100644
--- a/chromium/base/json/json_reader_unittest.cc
+++ b/chromium/base/json/json_reader_unittest.cc
@@ -189,7 +189,8 @@ TEST(JSONReaderTest, Doubles) {
auto value_with_error =
JSONReader::ReadAndReturnValueWithError("1e1000", JSON_PARSE_RFC);
ASSERT_FALSE(value_with_error.value);
- ASSERT_NE(value_with_error.error_code, JSONReader::JSON_NO_ERROR);
+ ASSERT_NE(base::ValueDeserializer::kErrorCodeNoError,
+ value_with_error.error_code);
}
TEST(JSONReaderTest, FractionalNumbers) {
@@ -647,10 +648,10 @@ TEST(JSONReaderTest, ReadFromFile) {
std::string input;
ASSERT_TRUE(ReadFileToString(path.AppendASCII("bom_feff.json"), &input));
- JSONReader reader;
- Optional<Value> root(reader.ReadToValue(input));
- ASSERT_TRUE(root) << reader.GetErrorMessage();
- EXPECT_TRUE(root->is_dict());
+ JSONReader::ValueWithError root =
+ JSONReader::ReadAndReturnValueWithError(input);
+ ASSERT_TRUE(root.value) << root.error_message;
+ EXPECT_TRUE(root.value->is_dict());
}
// Tests that the root of a JSON object can be deleted safely while its
@@ -736,19 +737,21 @@ TEST(JSONReaderTest, InvalidSanity) {
};
for (size_t i = 0; i < base::size(kInvalidJson); ++i) {
- JSONReader reader;
LOG(INFO) << "Sanity test " << i << ": <" << kInvalidJson[i] << ">";
- EXPECT_FALSE(reader.ReadToValue(kInvalidJson[i]));
- EXPECT_NE("", reader.GetErrorMessage());
+ JSONReader::ValueWithError root =
+ JSONReader::ReadAndReturnValueWithError(kInvalidJson[i]);
+ EXPECT_FALSE(root.value);
+ EXPECT_NE("", root.error_message);
}
}
TEST(JSONReaderTest, IllegalTrailingNull) {
const char json[] = {'"', 'n', 'u', 'l', 'l', '"', '\0'};
std::string json_string(json, sizeof(json));
- JSONReader reader;
- EXPECT_FALSE(reader.ReadToValue(json_string));
- EXPECT_NE("", reader.GetErrorMessage());
+ JSONReader::ValueWithError root =
+ JSONReader::ReadAndReturnValueWithError(json_string);
+ EXPECT_FALSE(root.value);
+ EXPECT_NE("", root.error_message);
}
TEST(JSONReaderTest, ASCIIControlCodes) {
@@ -833,10 +836,18 @@ TEST(JSONReaderTest, DecodeNegativeEscapeSequence) {
// Verifies invalid code points are replaced.
TEST(JSONReaderTest, ReplaceInvalidCharacters) {
- // U+D800 is a lone surrogate.
- const std::string invalid = "\"\xED\xA0\x80\"";
+ // U+D800 is a lone high surrogate.
+ const std::string invalid_high = "\"\xED\xA0\x80\"";
Optional<Value> value =
- JSONReader::Read(invalid, JSON_REPLACE_INVALID_CHARACTERS);
+ JSONReader::Read(invalid_high, JSON_REPLACE_INVALID_CHARACTERS);
+ ASSERT_TRUE(value);
+ ASSERT_TRUE(value->is_string());
+ // Expect three U+FFFD (one for each UTF-8 byte in the invalid code point).
+ EXPECT_EQ("\xEF\xBF\xBD\xEF\xBF\xBD\xEF\xBF\xBD", value->GetString());
+
+ // U+DFFF is a lone low surrogate.
+ const std::string invalid_low = "\"\xED\xBF\xBF\"";
+ value = JSONReader::Read(invalid_low, JSON_REPLACE_INVALID_CHARACTERS);
ASSERT_TRUE(value);
ASSERT_TRUE(value->is_string());
// Expect three U+FFFD (one for each UTF-8 byte in the invalid code point).
@@ -844,10 +855,17 @@ TEST(JSONReaderTest, ReplaceInvalidCharacters) {
}
TEST(JSONReaderTest, ReplaceInvalidUTF16EscapeSequence) {
- // U+D800 is a lone surrogate.
- const std::string invalid = "\"_\\uD800_\"";
+ // U+D800 is a lone high surrogate.
+ const std::string invalid_high = "\"_\\uD800_\"";
Optional<Value> value =
- JSONReader::Read(invalid, JSON_REPLACE_INVALID_CHARACTERS);
+ JSONReader::Read(invalid_high, JSON_REPLACE_INVALID_CHARACTERS);
+ ASSERT_TRUE(value);
+ ASSERT_TRUE(value->is_string());
+ EXPECT_EQ("_\xEF\xBF\xBD_", value->GetString());
+
+ // U+DFFF is a lone low surrogate.
+ const std::string invalid_low = "\"_\\uDFFF_\"";
+ value = JSONReader::Read(invalid_low, JSON_REPLACE_INVALID_CHARACTERS);
ASSERT_TRUE(value);
ASSERT_TRUE(value->is_string());
EXPECT_EQ("_\xEF\xBF\xBD_", value->GetString());
diff --git a/chromium/base/json/json_string_value_serializer.cc b/chromium/base/json/json_string_value_serializer.cc
index 0ec48ca57fe..d98a62e37f9 100644
--- a/chromium/base/json/json_string_value_serializer.cc
+++ b/chromium/base/json/json_string_value_serializer.cc
@@ -49,6 +49,14 @@ JSONStringValueDeserializer::~JSONStringValueDeserializer() = default;
std::unique_ptr<Value> JSONStringValueDeserializer::Deserialize(
int* error_code,
std::string* error_str) {
- return base::JSONReader::ReadAndReturnErrorDeprecated(json_string_, options_,
- error_code, error_str);
+ base::JSONReader::ValueWithError ret =
+ base::JSONReader::ReadAndReturnValueWithError(json_string_, options_);
+ if (ret.value)
+ return base::Value::ToUniquePtrValue(std::move(*ret.value));
+
+ if (error_code)
+ *error_code = ret.error_code;
+ if (error_str)
+ *error_str = std::move(ret.error_message);
+ return nullptr;
}
diff --git a/chromium/base/json/json_value_serializer_unittest.cc b/chromium/base/json/json_value_serializer_unittest.cc
index 5722cb0f0dd..3b102a12274 100644
--- a/chromium/base/json/json_value_serializer_unittest.cc
+++ b/chromium/base/json/json_value_serializer_unittest.cc
@@ -133,12 +133,15 @@ TEST(JSONValueDeserializerTest, ReadJSONWithTrailingCommasFromString) {
ASSERT_FALSE(value);
ASSERT_NE(0, error_code);
ASSERT_FALSE(error_message.empty());
- // Repeat with commas allowed.
+ // Repeat with commas allowed. The Deserialize call shouldn't change the
+ // value of error_code. To test that, we first set it to a nonsense value
+ // (-789) and ASSERT_EQ that it remains that nonsense value.
+ error_code = -789;
JSONStringValueDeserializer str_deserializer2(kProperJSONWithCommas,
JSON_ALLOW_TRAILING_COMMAS);
value = str_deserializer2.Deserialize(&error_code, &error_message);
ASSERT_TRUE(value);
- ASSERT_EQ(JSONReader::JSON_TRAILING_COMMA, error_code);
+ ASSERT_EQ(-789, error_code);
// Verify if the same JSON is still there.
CheckJSONIsStillTheSame(*value);
}
@@ -184,12 +187,15 @@ TEST(JSONValueDeserializerTest, ReadJSONWithCommasFromFile) {
ASSERT_FALSE(value);
ASSERT_NE(0, error_code);
ASSERT_FALSE(error_message.empty());
- // Repeat with commas allowed.
+ // Repeat with commas allowed. The Deserialize call shouldn't change the
+ // value of error_code. To test that, we first set it to a nonsense value
+ // (-789) and ASSERT_EQ that it remains that nonsense value.
+ error_code = -789;
JSONFileValueDeserializer file_deserializer2(temp_file,
JSON_ALLOW_TRAILING_COMMAS);
value = file_deserializer2.Deserialize(&error_code, &error_message);
ASSERT_TRUE(value);
- ASSERT_EQ(JSONReader::JSON_TRAILING_COMMA, error_code);
+ ASSERT_EQ(-789, error_code);
// Verify if the same JSON is still there.
CheckJSONIsStillTheSame(*value);
}
diff --git a/chromium/base/lazy_instance.h b/chromium/base/lazy_instance.h
index 4449373ead2..94e91e80413 100644
--- a/chromium/base/lazy_instance.h
+++ b/chromium/base/lazy_instance.h
@@ -48,9 +48,9 @@
#include <new> // For placement new.
#include "base/atomicops.h"
+#include "base/check_op.h"
#include "base/debug/leak_annotations.h"
#include "base/lazy_instance_helpers.h"
-#include "base/logging.h"
#include "base/threading/thread_restrictions.h"
// LazyInstance uses its own struct initializer-list style static
diff --git a/chromium/base/lazy_instance_helpers.h b/chromium/base/lazy_instance_helpers.h
index 5a43d8b1f26..99b5e055027 100644
--- a/chromium/base/lazy_instance_helpers.h
+++ b/chromium/base/lazy_instance_helpers.h
@@ -7,7 +7,7 @@
#include "base/atomicops.h"
#include "base/base_export.h"
-#include "base/logging.h"
+#include "base/check.h"
// Helper methods used by LazyInstance and a few other base APIs for thread-safe
// lazy construction.
diff --git a/chromium/base/lazy_instance_unittest.cc b/chromium/base/lazy_instance_unittest.cc
index d1012ef6984..ad1f0ebf44c 100644
--- a/chromium/base/lazy_instance_unittest.cc
+++ b/chromium/base/lazy_instance_unittest.cc
@@ -5,6 +5,7 @@
#include <stddef.h>
#include <memory>
+#include <utility>
#include <vector>
#include "base/at_exit.h"
@@ -13,6 +14,7 @@
#include "base/barrier_closure.h"
#include "base/bind.h"
#include "base/lazy_instance.h"
+#include "base/memory/aligned_memory.h"
#include "base/system/sys_info.h"
#include "base/threading/platform_thread.h"
#include "base/threading/simple_thread.h"
@@ -178,9 +180,6 @@ class AlignedData {
} // namespace
-#define EXPECT_ALIGNED(ptr, align) \
- EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(ptr) & (align - 1))
-
TEST(LazyInstanceTest, Alignment) {
using base::LazyInstance;
@@ -194,9 +193,9 @@ TEST(LazyInstanceTest, Alignment) {
static LazyInstance<AlignedData<4096>>::DestructorAtExit align4096 =
LAZY_INSTANCE_INITIALIZER;
- EXPECT_ALIGNED(align4.Pointer(), 4);
- EXPECT_ALIGNED(align32.Pointer(), 32);
- EXPECT_ALIGNED(align4096.Pointer(), 4096);
+ EXPECT_TRUE(base::IsAligned(align4.Pointer(), 4));
+ EXPECT_TRUE(base::IsAligned(align32.Pointer(), 32));
+ EXPECT_TRUE(base::IsAligned(align4096.Pointer(), 4096));
}
namespace {
diff --git a/chromium/base/logging.cc b/chromium/base/logging.cc
index fdd86299aa4..2da71c73819 100644
--- a/chromium/base/logging.cc
+++ b/chromium/base/logging.cc
@@ -8,16 +8,20 @@
// build time. Try not to raise this limit unless absolutely necessary. See
// https://chromium.googlesource.com/chromium/src/+/HEAD/docs/wmax_tokens.md
#ifndef NACL_TC_REV
-#pragma clang max_tokens_here 370000
+#pragma clang max_tokens_here 350000
#endif // NACL_TC_REV
+#ifdef BASE_CHECK_H_
+#error "logging.h should not include check.h"
+#endif
+
#include <limits.h>
#include <stdint.h>
#include "base/pending_task.h"
#include "base/stl_util.h"
#include "base/task/common/task_annotator.h"
-#include "base/trace_event/trace_event.h"
+#include "base/trace_event/base_tracing.h"
#include "build/build_config.h"
#if defined(OS_WIN)
diff --git a/chromium/base/logging.h b/chromium/base/logging.h
index 4bc8b00ef88..0adda2c2910 100644
--- a/chromium/base/logging.h
+++ b/chromium/base/logging.h
@@ -14,11 +14,9 @@
#include "base/base_export.h"
#include "base/callback_forward.h"
-#include "base/check.h"
-#include "base/check_op.h"
#include "base/compiler_specific.h"
+#include "base/dcheck_is_on.h"
#include "base/macros.h"
-#include "base/notreached.h"
#include "base/scoped_clear_last_error.h"
#include "base/strings/string_piece_forward.h"
diff --git a/chromium/base/logging_unittest.cc b/chromium/base/logging_unittest.cc
index 90378f09db7..c14f3519614 100644
--- a/chromium/base/logging_unittest.cc
+++ b/chromium/base/logging_unittest.cc
@@ -53,8 +53,8 @@
#include <zircon/syscalls/exception.h>
#include <zircon/types.h>
-#include "base/fuchsia/default_context.h"
#include "base/fuchsia/fuchsia_logging.h"
+#include "base/fuchsia/process_context.h"
#endif // OS_FUCHSIA
namespace logging {
@@ -776,10 +776,9 @@ TEST_F(LoggingTest, FuchsiaSystemLogging) {
std::unique_ptr<fuchsia::logger::LogFilterOptions> options =
std::make_unique<fuchsia::logger::LogFilterOptions>();
options->tags = {"base_unittests__exec"};
- fuchsia::logger::LogPtr logger =
- base::fuchsia::ComponentContextForCurrentProcess()
- ->svc()
- ->Connect<fuchsia::logger::Log>();
+ fuchsia::logger::LogPtr logger = base::ComponentContextForProcess()
+ ->svc()
+ ->Connect<fuchsia::logger::Log>();
listener.set_on_dump_logs_done(dump_logs);
logger->DumpLogsSafe(binding.NewBinding(), std::move(options));
});
diff --git a/chromium/base/mac/foundation_util.mm b/chromium/base/mac/foundation_util.mm
index f37884f6f70..be12912dcd0 100644
--- a/chromium/base/mac/foundation_util.mm
+++ b/chromium/base/mac/foundation_util.mm
@@ -12,6 +12,7 @@
#include "base/logging.h"
#include "base/mac/bundle_locations.h"
#include "base/mac/mac_logging.h"
+#include "base/notreached.h"
#include "base/numerics/safe_conversions.h"
#include "base/stl_util.h"
#include "base/strings/sys_string_conversions.h"
diff --git a/chromium/base/mac/mach_port_rendezvous.cc b/chromium/base/mac/mach_port_rendezvous.cc
index 43e5806bec6..7510a49d8a3 100644
--- a/chromium/base/mac/mach_port_rendezvous.cc
+++ b/chromium/base/mac/mach_port_rendezvous.cc
@@ -16,6 +16,7 @@
#include "base/mac/foundation_util.h"
#include "base/mac/mach_logging.h"
#include "base/mac/scoped_mach_msg_destroy.h"
+#include "base/notreached.h"
#include "base/strings/stringprintf.h"
namespace base {
diff --git a/chromium/base/mac/scoped_mach_vm.h b/chromium/base/mac/scoped_mach_vm.h
index 3d4cc022b4b..496a1fa41df 100644
--- a/chromium/base/mac/scoped_mach_vm.h
+++ b/chromium/base/mac/scoped_mach_vm.h
@@ -11,7 +11,7 @@
#include <algorithm>
#include "base/base_export.h"
-#include "base/logging.h"
+#include "base/check_op.h"
#include "base/macros.h"
// Use ScopedMachVM to supervise ownership of pages in the current process
diff --git a/chromium/base/mac/scoped_typeref.h b/chromium/base/mac/scoped_typeref.h
index 3bec05f9ddd..fcfc22d402e 100644
--- a/chromium/base/mac/scoped_typeref.h
+++ b/chromium/base/mac/scoped_typeref.h
@@ -5,8 +5,8 @@
#ifndef BASE_MAC_SCOPED_TYPEREF_H_
#define BASE_MAC_SCOPED_TYPEREF_H_
+#include "base/check.h"
#include "base/compiler_specific.h"
-#include "base/logging.h"
#include "base/memory/scoped_policy.h"
namespace base {
@@ -51,10 +51,10 @@ struct ScopedTypeRefTraits;
template<typename T, typename Traits = ScopedTypeRefTraits<T>>
class ScopedTypeRef {
public:
- typedef T element_type;
+ using element_type = T;
explicit constexpr ScopedTypeRef(
- T object = Traits::InvalidValue(),
+ element_type object = Traits::InvalidValue(),
base::scoped_policy::OwnershipPolicy policy = base::scoped_policy::ASSUME)
: object_(object) {
if (object_ && policy == base::scoped_policy::RETAIN)
@@ -92,12 +92,12 @@ class ScopedTypeRef {
// This is to be used only to take ownership of objects that are created
// by pass-by-pointer create functions. To enforce this, require that the
// object be reset to NULL before this may be used.
- T* InitializeInto() WARN_UNUSED_RESULT {
+ element_type* InitializeInto() WARN_UNUSED_RESULT {
DCHECK(!object_);
return &object_;
}
- void reset(T object = Traits::InvalidValue(),
+ void reset(element_type object = Traits::InvalidValue(),
base::scoped_policy::OwnershipPolicy policy =
base::scoped_policy::ASSUME) {
if (object && policy == base::scoped_policy::RETAIN)
@@ -107,16 +107,16 @@ class ScopedTypeRef {
object_ = object;
}
- bool operator==(T that) const { return object_ == that; }
+ bool operator==(const element_type& that) const { return object_ == that; }
- bool operator!=(T that) const { return object_ != that; }
+ bool operator!=(const element_type& that) const { return object_ != that; }
- operator T() const { return object_; }
+ operator element_type() const { return object_; }
- T get() const { return object_; }
+ element_type get() const { return object_; }
void swap(ScopedTypeRef& that) {
- T temp = that.object_;
+ element_type temp = that.object_;
that.object_ = object_;
object_ = temp;
}
@@ -124,14 +124,14 @@ class ScopedTypeRef {
// ScopedTypeRef<>::release() is like std::unique_ptr<>::release. It is NOT
// a wrapper for Release(). To force a ScopedTypeRef<> object to call
// Release(), use ScopedTypeRef<>::reset().
- T release() WARN_UNUSED_RESULT {
- T temp = object_;
+ element_type release() WARN_UNUSED_RESULT {
+ element_type temp = object_;
object_ = Traits::InvalidValue();
return temp;
}
private:
- T object_;
+ element_type object_;
};
} // namespace base
diff --git a/chromium/base/memory/aligned_memory.cc b/chromium/base/memory/aligned_memory.cc
index 97b49248373..7017e316af2 100644
--- a/chromium/base/memory/aligned_memory.cc
+++ b/chromium/base/memory/aligned_memory.cc
@@ -15,24 +15,26 @@ namespace base {
void* AlignedAlloc(size_t size, size_t alignment) {
DCHECK_GT(size, 0U);
- DCHECK_EQ(alignment & (alignment - 1), 0U);
+ DCHECK(bits::IsPowerOfTwo(alignment));
DCHECK_EQ(alignment % sizeof(void*), 0U);
void* ptr = nullptr;
#if defined(COMPILER_MSVC)
ptr = _aligned_malloc(size, alignment);
-// Android technically supports posix_memalign(), but does not expose it in
-// the current version of the library headers used by Chrome. Luckily,
-// memalign() on Android returns pointers which can safely be used with
-// free(), so we can use it instead. Issue filed to document this:
-// http://code.google.com/p/android/issues/detail?id=35391
#elif defined(OS_ANDROID)
+ // Android technically supports posix_memalign(), but does not expose it in
+ // the current version of the library headers used by Chromium. Luckily,
+ // memalign() on Android returns pointers which can safely be used with
+ // free(), so we can use it instead. Issue filed to document this:
+ // http://code.google.com/p/android/issues/detail?id=35391
ptr = memalign(alignment, size);
#else
- if (int ret = posix_memalign(&ptr, alignment, size)) {
+ int ret = posix_memalign(&ptr, alignment, size);
+ if (ret != 0) {
DLOG(ERROR) << "posix_memalign() returned with error " << ret;
ptr = nullptr;
}
#endif
+
// Since aligned allocations may fail for non-memory related reasons, force a
// crash if we encounter a failed allocation; maintaining consistent behavior
// with a normal allocation failure in Chrome.
@@ -42,7 +44,7 @@ void* AlignedAlloc(size_t size, size_t alignment) {
CHECK(false);
}
// Sanity check alignment just to be safe.
- DCHECK_EQ(reinterpret_cast<uintptr_t>(ptr) & (alignment - 1), 0U);
+ DCHECK(IsAligned(ptr, alignment));
return ptr;
}
diff --git a/chromium/base/memory/aligned_memory.h b/chromium/base/memory/aligned_memory.h
index d1cba0c7bb1..39a823a7d14 100644
--- a/chromium/base/memory/aligned_memory.h
+++ b/chromium/base/memory/aligned_memory.h
@@ -11,8 +11,8 @@
#include <type_traits>
#include "base/base_export.h"
-#include "base/compiler_specific.h"
-#include "base/logging.h"
+#include "base/bits.h"
+#include "base/check.h"
#include "base/process/process_metrics.h"
#include "build/build_config.h"
@@ -57,22 +57,25 @@ struct AlignedFreeDeleter {
}
};
-#ifndef __has_builtin
-#define __has_builtin(x) 0 // Compatibility with non-clang compilers.
+#ifdef __has_builtin
+#define SUPPORTS_BUILTIN_IS_ALIGNED (__has_builtin(__builtin_is_aligned))
+#else
+#define SUPPORTS_BUILTIN_IS_ALIGNED 0
#endif
inline bool IsAligned(uintptr_t val, size_t alignment) {
// If the compiler supports builtin alignment checks prefer them.
-#if __has_builtin(__builtin_is_aligned)
+#if SUPPORTS_BUILTIN_IS_ALIGNED
return __builtin_is_aligned(val, alignment);
#else
- DCHECK(!((alignment - 1) & alignment))
- << alignment << " is not a power of two";
+ DCHECK(bits::IsPowerOfTwo(alignment)) << alignment << " is not a power of 2";
return (val & (alignment - 1)) == 0;
#endif
}
-inline bool IsAligned(void* val, size_t alignment) {
+#undef SUPPORTS_BUILTIN_IS_ALIGNED
+
+inline bool IsAligned(const void* val, size_t alignment) {
return IsAligned(reinterpret_cast<uintptr_t>(val), alignment);
}
@@ -80,7 +83,7 @@ template <typename Type>
inline bool IsPageAligned(Type val) {
static_assert(std::is_integral<Type>::value || std::is_pointer<Type>::value,
"Integral or pointer type required");
- return base::IsAligned(val, base::GetPageSize());
+ return IsAligned(val, GetPageSize());
}
} // namespace base
diff --git a/chromium/base/memory/aligned_memory_unittest.cc b/chromium/base/memory/aligned_memory_unittest.cc
index e067b4cbbc2..810f2e46ca7 100644
--- a/chromium/base/memory/aligned_memory_unittest.cc
+++ b/chromium/base/memory/aligned_memory_unittest.cc
@@ -38,6 +38,10 @@ TEST(AlignedMemoryTest, ScopedDynamicAllocation) {
static_cast<float*>(AlignedAlloc(8, 8)));
EXPECT_TRUE(p.get());
EXPECT_TRUE(IsAligned(p.get(), 8));
+
+ // Make sure IsAligned() can check const pointers as well.
+ const float* const_p = p.get();
+ EXPECT_TRUE(IsAligned(const_p, 8));
}
TEST(AlignedMemoryTest, IsAligned) {
diff --git a/chromium/base/memory/checked_ptr.cc b/chromium/base/memory/checked_ptr.cc
new file mode 100644
index 00000000000..99f12a34b06
--- /dev/null
+++ b/chromium/base/memory/checked_ptr.cc
@@ -0,0 +1,29 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/checked_ptr.h"
+
+#include "base/allocator/partition_allocator/partition_alloc.h"
+
+namespace base {
+namespace internal {
+
+#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+
+BASE_EXPORT bool CheckedPtr2ImplPartitionAllocSupport::EnabledForPtr(
+ void* ptr) {
+ // CheckedPtr2Impl works only when memory is allocated by PartitionAlloc and
+  // only if the pointer points to the beginning of the allocated slot.
+ //
+ // TODO(bartekn): Add |&& PartitionAllocGetSlotOffset(ptr) == 0|
+ // CheckedPtr2Impl uses a fake implementation at the moment, which happens to
+  // work even for non-0 offsets, so skip this check for now to get better
+  // coverage.
+ return IsManagedByPartitionAlloc(ptr);
+}
+
+#endif
+
+} // namespace internal
+} // namespace base
diff --git a/chromium/base/memory/checked_ptr.h b/chromium/base/memory/checked_ptr.h
index dc81e98130f..58cd9ba0528 100644
--- a/chromium/base/memory/checked_ptr.h
+++ b/chromium/base/memory/checked_ptr.h
@@ -5,11 +5,28 @@
#ifndef BASE_MEMORY_CHECKED_PTR_H_
#define BASE_MEMORY_CHECKED_PTR_H_
-#include <cstddef>
-#include <cstdint>
+#include <stddef.h>
+#include <stdint.h>
+
#include <utility>
+#include "base/check_op.h"
#include "base/compiler_specific.h"
+#include "build/build_config.h"
+
+// TEST: We can't use protection in the real code (yet) because it may lead to
+// crashes in absence of PartitionAlloc support. Setting it to 0 will disable
+// the protection, while preserving all calculations.
+#define CHECKED_PTR2_PROTECTION_ENABLED 0
+
+#define CHECKED_PTR2_USE_NO_OP_WRAPPER 0
+
+// Set it to 1 to avoid branches when checking if per-pointer protection is
+// enabled.
+#define CHECKED_PTR2_AVOID_BRANCH_WHEN_CHECKING_ENABLED 0
+// Set it to 1 to avoid branches when dereferencing the pointer.
+// Must be 1 if the above is 1.
+#define CHECKED_PTR2_AVOID_BRANCH_WHEN_DEREFERENCING 0
namespace base {
@@ -22,8 +39,10 @@ namespace internal {
struct CheckedPtrNoOpImpl {
// Wraps a pointer, and returns its uintptr_t representation.
- static ALWAYS_INLINE uintptr_t WrapRawPtr(const void* const_ptr) {
- return reinterpret_cast<uintptr_t>(const_ptr);
+ // Use |const volatile| to prevent compiler error. These will be dropped
+ // anyway when casting to uintptr_t and brought back upon pointer extraction.
+ static ALWAYS_INLINE uintptr_t WrapRawPtr(const volatile void* cv_ptr) {
+ return reinterpret_cast<uintptr_t>(cv_ptr);
}
// Returns equivalent of |WrapRawPtr(nullptr)|. Separated out to make it a
@@ -64,6 +83,272 @@ struct CheckedPtrNoOpImpl {
static ALWAYS_INLINE void IncrementSwapCountForTest() {}
};
+#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+
+constexpr int kValidAddressBits = 48;
+constexpr uintptr_t kAddressMask = (1ull << kValidAddressBits) - 1;
+constexpr int kGenerationBits = sizeof(uintptr_t) * 8 - kValidAddressBits;
+constexpr uintptr_t kGenerationMask = ~kAddressMask;
+constexpr int kTopBitShift = 63;
+constexpr uintptr_t kTopBit = 1ull << kTopBitShift;
+static_assert(kTopBit << 1 == 0, "kTopBit should really be the top bit");
+static_assert((kTopBit & kGenerationMask) > 0,
+ "kTopBit bit must be inside the generation region");
+
+// This functionality is outside of CheckedPtr2Impl, so that it can be
+// overridden by tests. The implementation is in the .cc file, because including
+// partition_alloc.h here could lead to cyclic includes.
+struct CheckedPtr2ImplPartitionAllocSupport {
+ // Checks if CheckedPtr2 support is enabled in PartitionAlloc for |ptr|.
+ // TODO(bartekn): Check if this function gets inlined.
+ BASE_EXPORT static bool EnabledForPtr(void* ptr);
+};
+
+template <typename PartitionAllocSupport = CheckedPtr2ImplPartitionAllocSupport>
+struct CheckedPtr2Impl {
+ // This implementation assumes that pointers are 64 bits long and at least 16
+ // top bits are unused. The latter is harder to verify statically, but this is
+ // true for all currently supported 64-bit architectures (DCHECK when wrapping
+ // will verify that).
+ static_assert(sizeof(void*) >= 8, "Need 64-bit pointers");
+
+ // Wraps a pointer, and returns its uintptr_t representation.
+ static ALWAYS_INLINE uintptr_t WrapRawPtr(const volatile void* cv_ptr) {
+ void* ptr = const_cast<void*>(cv_ptr);
+ uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
+#if CHECKED_PTR2_USE_NO_OP_WRAPPER
+ static_assert(!CHECKED_PTR2_PROTECTION_ENABLED, "");
+#else
+ // Make sure that the address bits that will be used for generation are 0.
+ // If they aren't, they'd fool the unwrapper into thinking that the
+ // protection is enabled, making it try to read and compare the generation.
+ DCHECK_EQ(ExtractGeneration(addr), 0ull);
+
+ // Return a not-wrapped |addr|, if it's either nullptr or if the protection
+ // for this pointer is disabled.
+ if (!PartitionAllocSupport::EnabledForPtr(ptr)) {
+ return addr;
+ }
+
+ // Read the generation from 16 bits before the allocation. Then place it in
+ // the top bits of the address.
+ static_assert(sizeof(uint16_t) * 8 == kGenerationBits, "");
+#if CHECKED_PTR2_PROTECTION_ENABLED
+ uintptr_t generation = *(static_cast<volatile uint16_t*>(ptr) - 1);
+#else
+ // TEST: Reading from offset -1 may crash without full PA support.
+ // Just read from offset 0 to attain the same perf characteristics as the
+ // expected production solution.
+ // This generation will be ignored anyway either when unwrapping or below
+ // (depending on the algorithm variant), on the
+ // !CHECKED_PTR2_PROTECTION_ENABLED path.
+ uintptr_t generation = *(static_cast<volatile uint16_t*>(ptr));
+#endif // CHECKED_PTR2_PROTECTION_ENABLED
+ generation <<= kValidAddressBits;
+ addr |= generation;
+#if CHECKED_PTR2_AVOID_BRANCH_WHEN_CHECKING_ENABLED
+    // Always set top bit to 1, to indicate that the protection is enabled.
+ addr |= kTopBit;
+#if !CHECKED_PTR2_PROTECTION_ENABLED
+ // TEST: Clear the generation, or else it could crash without PA support.
+    // If the top bit was set, the unwrapper would read from before the
+    // address, but with it cleared, it'll read from the address itself.
+ addr &= kAddressMask;
+#endif // !CHECKED_PTR2_PROTECTION_ENABLED
+#endif // CHECKED_PTR2_AVOID_BRANCH_WHEN_CHECKING_ENABLED
+#endif // CHECKED_PTR2_USE_NO_OP_WRAPPER
+ return addr;
+ }
+
+ // Returns equivalent of |WrapRawPtr(nullptr)|. Separated out to make it a
+ // constexpr.
+ static constexpr ALWAYS_INLINE uintptr_t GetWrappedNullPtr() {
+ return kWrappedNullPtr;
+ }
+
+ static ALWAYS_INLINE uintptr_t
+ SafelyUnwrapPtrInternal(uintptr_t wrapped_ptr) {
+#if CHECKED_PTR2_AVOID_BRANCH_WHEN_CHECKING_ENABLED
+ // Top bit tells if the protection is enabled. Use it to decide whether to
+ // read the word before the allocation, which exists only if the protection
+ // is enabled. Otherwise it may crash, in which case read the data from the
+ // beginning of the allocation instead and ignore it later. All this magic
+ // is to avoid a branch, for performance reasons.
+ //
+ // A couple examples, assuming 64-bit system (continued below):
+ // Ex.1: wrapped_ptr=0x8442000012345678
+ // => enabled=0x8000000000000000
+ // => offset=1
+ // Ex.2: wrapped_ptr=0x0000000012345678
+ // => enabled=0x0000000000000000
+ // => offset=0
+ uintptr_t enabled = wrapped_ptr & kTopBit;
+    // We can't have protection disabled and generation set at the same time.
+ DCHECK(!(enabled == 0 && (ExtractGeneration(wrapped_ptr)) != 0));
+ uintptr_t offset = enabled >> kTopBitShift; // 0 or 1
+ // Use offset to decide if the generation should be read at the beginning or
+ // before the allocation.
+ // TODO(bartekn): Do something about 1-byte allocations. Reading 2-byte
+ // generation at the allocation could crash. This case is executed
+ // specifically for non-PartitionAlloc pointers, so we can't make
+ // assumptions about alignment.
+ //
+ // Cast to volatile to ensure memory is read. E.g. in a tight loop, the
+ // compiler could cache the value in a register and thus could miss that
+ // another thread freed memory and cleared generation.
+ //
+ // Examples (continued):
+ // Ex.1: generation_ptr=0x0000000012345676
+ // a) if pointee wasn't freed, read e.g. generation=0x0442 (could be
+ // also 0x8442, the top bit is overwritten later)
+ // b) if pointee was freed, read e.g. generation=0x1234 (could be
+ // anything)
+ // Ex.2: generation_ptr=0x0000000012345678, read e.g. 0x2345 (doesn't
+ // matter what we read, as long as this read doesn't crash)
+ volatile uint16_t* generation_ptr =
+ reinterpret_cast<volatile uint16_t*>(ExtractAddress(wrapped_ptr)) -
+ offset;
+ uintptr_t generation = *generation_ptr;
+ // Shift generation into the right place and add back the enabled bit.
+ //
+ // Examples (continued):
+ // Ex.1:
+ // a) generation=0x8442000000000000
+    //   b) generation=0x9234000000000000
+ // Ex.2: generation=0x2345000000000000
+ generation <<= kValidAddressBits;
+ generation |= enabled;
+
+ // If the protection isn't enabled, clear top bits. Casting to a signed
+ // type makes >> sign extend the last bit.
+ //
+ // Examples (continued):
+ // Ex.1: mask=0xffff000000000000
+ // a) generation=0x8442000000000000
+ // b) generation=0x9234000000000000
+ // Ex.2: mask=0x0000000000000000 => generation=0x0000000000000000
+ uintptr_t mask = static_cast<intptr_t>(enabled) >> (kGenerationBits - 1);
+ generation &= mask;
+
+ // Use hardware to detect generation mismatch. CPU will crash if top bits
+ // aren't all 0 (technically it won't if all bits are 1, but that's a kernel
+ // mode address, which isn't allowed either... also, top bit will be always
+ // zeroed out).
+ //
+ // Examples (continued):
+ // Ex.1:
+ // a) returning 0x0000000012345678
+ // b) returning 0x1676000012345678 (this will generate a desired crash)
+ // Ex.2: returning 0x0000000012345678
+ static_assert(CHECKED_PTR2_AVOID_BRANCH_WHEN_DEREFERENCING, "");
+ return generation ^ wrapped_ptr;
+#else // CHECKED_PTR2_AVOID_BRANCH_WHEN_CHECKING_ENABLED
+ uintptr_t ptr_generation = wrapped_ptr >> kValidAddressBits;
+ if (ptr_generation > 0) {
+ // Read generation from before the allocation.
+ //
+ // Cast to volatile to ensure memory is read. E.g. in a tight loop, the
+ // compiler could cache the value in a register and thus could miss that
+ // another thread freed memory and cleared generation.
+#if CHECKED_PTR2_PROTECTION_ENABLED
+ uintptr_t read_generation =
+ *(reinterpret_cast<volatile uint16_t*>(ExtractAddress(wrapped_ptr)) -
+ 1);
+#else
+ // TEST: Reading from before the pointer may crash. See more above...
+ uintptr_t read_generation =
+ *(reinterpret_cast<volatile uint16_t*>(ExtractAddress(wrapped_ptr)));
+#endif
+#if CHECKED_PTR2_AVOID_BRANCH_WHEN_DEREFERENCING
+ // Use hardware to detect generation mismatch. CPU will crash if top bits
+ // aren't all 0 (technically it won't if all bits are 1, but that's a
+ // kernel mode address, which isn't allowed either).
+ read_generation <<= kValidAddressBits;
+ return read_generation ^ wrapped_ptr;
+#else
+#if CHECKED_PTR2_PROTECTION_ENABLED
+ if (UNLIKELY(ptr_generation != read_generation))
+ IMMEDIATE_CRASH();
+#else
+ // TEST: Use volatile to prevent optimizing out the calculations leading
+ // to this point.
+ volatile bool x = false;
+ if (ptr_generation != read_generation)
+ x = true;
+#endif // CHECKED_PTR2_PROTECTION_ENABLED
+ return wrapped_ptr & kAddressMask;
+#endif // CHECKED_PTR2_AVOID_BRANCH_WHEN_DEREFERENCING
+ }
+ return wrapped_ptr;
+#endif // CHECKED_PTR2_AVOID_BRANCH_WHEN_CHECKING_ENABLED
+ }
+
+ // Unwraps the pointer's uintptr_t representation, while asserting that memory
+ // hasn't been freed. The function is allowed to crash on nullptr.
+ static ALWAYS_INLINE void* SafelyUnwrapPtrForDereference(
+ uintptr_t wrapped_ptr) {
+#if CHECKED_PTR2_PROTECTION_ENABLED
+ return reinterpret_cast<void*>(SafelyUnwrapPtrInternal(wrapped_ptr));
+#else
+ // TEST: Use volatile to prevent optimizing out the calculations leading to
+ // this point.
+ // |SafelyUnwrapPtrInternal| was separated out solely for this purpose.
+ volatile uintptr_t addr = SafelyUnwrapPtrInternal(wrapped_ptr);
+ return reinterpret_cast<void*>(addr);
+#endif
+ }
+
+ // Unwraps the pointer's uintptr_t representation, while asserting that memory
+ // hasn't been freed. The function must handle nullptr gracefully.
+ static ALWAYS_INLINE void* SafelyUnwrapPtrForExtraction(
+ uintptr_t wrapped_ptr) {
+#if CHECKED_PTR2_AVOID_BRANCH_WHEN_CHECKING_ENABLED
+ // In this implementation SafelyUnwrapPtrForDereference doesn't tolerate
+ // nullptr, because it reads unconditionally to avoid branches. Handle the
+ // nullptr case here.
+ if (wrapped_ptr == kWrappedNullPtr)
+ return nullptr;
+ return reinterpret_cast<void*>(SafelyUnwrapPtrForDereference(wrapped_ptr));
+#else
+ // In this implementation SafelyUnwrapPtrForDereference handles nullptr case
+ // well.
+ return reinterpret_cast<void*>(SafelyUnwrapPtrForDereference(wrapped_ptr));
+#endif
+ }
+
+ // Unwraps the pointer's uintptr_t representation, without making an assertion
+ // on whether memory was freed or not.
+ static ALWAYS_INLINE void* UnsafelyUnwrapPtrForComparison(
+ uintptr_t wrapped_ptr) {
+ return reinterpret_cast<void*>(ExtractAddress(wrapped_ptr));
+ }
+
+ // Advance the wrapped pointer by |delta| bytes.
+ static ALWAYS_INLINE uintptr_t Advance(uintptr_t wrapped_ptr, size_t delta) {
+ // Mask out the generation to disable the protection. It's not supported for
+ // pointers inside an allocation.
+ return ExtractAddress(wrapped_ptr) + delta;
+ }
+
+ // This is for accounting only, used by unit tests.
+ static ALWAYS_INLINE void IncrementSwapCountForTest() {}
+
+ private:
+ static ALWAYS_INLINE uintptr_t ExtractAddress(uintptr_t wrapped_ptr) {
+ return wrapped_ptr & kAddressMask;
+ }
+
+ static ALWAYS_INLINE uintptr_t ExtractGeneration(uintptr_t wrapped_ptr) {
+ return wrapped_ptr & kGenerationMask;
+ }
+
+ // This relies on nullptr and 0 being equal in the eyes of reinterpret_cast,
+ // which apparently isn't true in some rare environments.
+ static constexpr uintptr_t kWrappedNullPtr = 0;
+};
+
+#endif // defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+
template <typename T>
struct DereferencedPointerType {
using Type = decltype(*std::declval<T*>());
@@ -89,7 +374,12 @@ struct DereferencedPointerType<void> {};
// 2. Keep this class as small as possible, while still satisfying goal #1 (i.e.
// we aren't striving to maximize compatibility with raw pointers, merely
// adding support for cases encountered so far).
-template <typename T, typename Impl = internal::CheckedPtrNoOpImpl>
+template <typename T,
+#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+ typename Impl = internal::CheckedPtr2Impl<>>
+#else
+ typename Impl = internal::CheckedPtrNoOpImpl>
+#endif
class CheckedPtr {
public:
// CheckedPtr can be trivially default constructed (leaving |wrapped_ptr_|
@@ -122,6 +412,10 @@ class CheckedPtr {
wrapped_ptr_ = Impl::WrapRawPtr(p);
return *this;
}
+ ALWAYS_INLINE CheckedPtr& operator=(std::nullptr_t) noexcept {
+ wrapped_ptr_ = Impl::GetWrappedNullPtr();
+ return *this;
+ }
~CheckedPtr() = default;
@@ -153,64 +447,103 @@ class CheckedPtr {
wrapped_ptr_ = Impl::Advance(wrapped_ptr_, sizeof(T));
return *this;
}
-
ALWAYS_INLINE CheckedPtr& operator--() {
wrapped_ptr_ = Impl::Advance(wrapped_ptr_, -sizeof(T));
return *this;
}
-
+ ALWAYS_INLINE CheckedPtr operator++(int /* post_increment */) {
+ CheckedPtr result = *this;
+ ++(*this);
+ return result;
+ }
+ ALWAYS_INLINE CheckedPtr operator--(int /* post_decrement */) {
+ CheckedPtr result = *this;
+ --(*this);
+ return result;
+ }
ALWAYS_INLINE CheckedPtr& operator+=(ptrdiff_t delta_elems) {
wrapped_ptr_ = Impl::Advance(wrapped_ptr_, delta_elems * sizeof(T));
return *this;
}
-
ALWAYS_INLINE CheckedPtr& operator-=(ptrdiff_t delta_elems) {
return *this += -delta_elems;
}
- ALWAYS_INLINE bool operator==(T* p) const { return GetForComparison() == p; }
- ALWAYS_INLINE bool operator!=(T* p) const { return !operator==(p); }
-
- // Useful for cases like this:
- // class Base {};
- // class Derived : public Base {};
- // Derived d;
- // CheckedPtr<Derived> derived_ptr = &d;
- // Base* base_ptr = &d;
- // if (derived_ptr == base_ptr) {...}
- // Without these, such comparisons would end up calling |operator T*()|.
+ // Be careful to cover all cases with CheckedPtr being on both sides, left
+ // side only and right side only. If any case is missed, a more costly
+ // |operator T*()| will get called, instead of |operator==|.
+ friend ALWAYS_INLINE bool operator==(const CheckedPtr& lhs,
+ const CheckedPtr& rhs) {
+ return lhs.GetForComparison() == rhs.GetForComparison();
+ }
+ friend ALWAYS_INLINE bool operator!=(const CheckedPtr& lhs,
+ const CheckedPtr& rhs) {
+ return !(lhs == rhs);
+ }
+ friend ALWAYS_INLINE bool operator==(const CheckedPtr& lhs, T* rhs) {
+ return lhs.GetForComparison() == rhs;
+ }
+ friend ALWAYS_INLINE bool operator!=(const CheckedPtr& lhs, T* rhs) {
+ return !(lhs == rhs);
+ }
+ friend ALWAYS_INLINE bool operator==(T* lhs, const CheckedPtr& rhs) {
+ return rhs == lhs; // Reverse order to call the operator above.
+ }
+ friend ALWAYS_INLINE bool operator!=(T* lhs, const CheckedPtr& rhs) {
+ return rhs != lhs; // Reverse order to call the operator above.
+ }
+ // Needed for cases like |derived_ptr == base_ptr|. Without these, a more
+ // costly |operator T*()| will get called, instead of |operator==|.
template <typename U>
- ALWAYS_INLINE bool operator==(U* p) const {
- // Add |const| when casting, because |U| may have |const| in it. Even if |T|
- // doesn't, comparison between |T*| and |const T*| is fine.
- return GetForComparison() == static_cast<std::add_const_t<T>*>(p);
+ friend ALWAYS_INLINE bool operator==(const CheckedPtr& lhs,
+ const CheckedPtr<U, Impl>& rhs) {
+ // Add |const volatile| when casting, in case |U| has any. Even if |T|
+ // doesn't, comparison between |T*| and |const volatile T*| is fine.
+ return lhs.GetForComparison() ==
+ static_cast<std::add_cv_t<T>*>(rhs.GetForComparison());
}
template <typename U>
- ALWAYS_INLINE bool operator!=(U* p) const {
- return !operator==(p);
+ friend ALWAYS_INLINE bool operator!=(const CheckedPtr& lhs,
+ const CheckedPtr<U, Impl>& rhs) {
+ return !(lhs == rhs);
}
-
- ALWAYS_INLINE bool operator==(const CheckedPtr& other) const {
- return GetForComparison() == other.GetForComparison();
+ template <typename U>
+ friend ALWAYS_INLINE bool operator==(const CheckedPtr& lhs, U* rhs) {
+ // Add |const volatile| when casting, in case |U| has any. Even if |T|
+ // doesn't, comparison between |T*| and |const volatile T*| is fine.
+ return lhs.GetForComparison() == static_cast<std::add_cv_t<T>*>(rhs);
}
- ALWAYS_INLINE bool operator!=(const CheckedPtr& other) const {
- return !operator==(other);
+ template <typename U>
+ friend ALWAYS_INLINE bool operator!=(const CheckedPtr& lhs, U* rhs) {
+ return !(lhs == rhs);
}
- template <typename U, typename I>
- ALWAYS_INLINE bool operator==(const CheckedPtr<U, I>& other) const {
- // Add |const| when casting, because |U| may have |const| in it. Even if |T|
- // doesn't, comparison between |T*| and |const T*| is fine.
- return GetForComparison() ==
- static_cast<std::add_const_t<T>*>(other.GetForComparison());
+ template <typename U>
+ friend ALWAYS_INLINE bool operator==(U* lhs, const CheckedPtr& rhs) {
+ return rhs == lhs; // Reverse order to call the operator above.
+ }
+ template <typename U>
+ friend ALWAYS_INLINE bool operator!=(U* lhs, const CheckedPtr& rhs) {
+ return rhs != lhs; // Reverse order to call the operator above.
}
- template <typename U, typename I>
- ALWAYS_INLINE bool operator!=(const CheckedPtr<U, I>& other) const {
- return !operator==(other);
+ // Needed for comparisons against nullptr. Without these, a slightly more
+ // costly version would be called that extracts wrapped pointer, as opposed
+ // to plain comparison against 0.
+ friend ALWAYS_INLINE bool operator==(const CheckedPtr& lhs, std::nullptr_t) {
+ return !lhs;
+ }
+ friend ALWAYS_INLINE bool operator!=(const CheckedPtr& lhs, std::nullptr_t) {
+ return !!lhs; // Use !! otherwise the costly implicit cast will be used.
+ }
+ friend ALWAYS_INLINE bool operator==(std::nullptr_t, const CheckedPtr& rhs) {
+ return !rhs;
+ }
+ friend ALWAYS_INLINE bool operator!=(std::nullptr_t, const CheckedPtr& rhs) {
+ return !!rhs; // Use !! otherwise the costly implicit cast will be used.
}
- ALWAYS_INLINE void swap(CheckedPtr& other) noexcept {
+ friend ALWAYS_INLINE void swap(CheckedPtr& lhs, CheckedPtr& rhs) noexcept {
Impl::IncrementSwapCountForTest();
- std::swap(wrapped_ptr_, other.wrapped_ptr_);
+ std::swap(lhs.wrapped_ptr_, rhs.wrapped_ptr_);
}
private:
@@ -241,32 +574,6 @@ class CheckedPtr {
friend class CheckedPtr;
};
-// These are for cases where a raw pointer is on the left hand side. Reverse
-// order, so that |CheckedPtr::operator==()| kicks in, which will compare more
-// efficiently. Otherwise the CheckedPtr operand would have to be cast to raw
-// pointer, which may be more costly.
-template <typename T, typename I>
-ALWAYS_INLINE bool operator==(T* lhs, const CheckedPtr<T, I>& rhs) {
- return rhs == lhs;
-}
-template <typename T, typename I>
-ALWAYS_INLINE bool operator!=(T* lhs, const CheckedPtr<T, I>& rhs) {
- return !operator==(lhs, rhs);
-}
-template <typename T, typename I, typename U>
-ALWAYS_INLINE bool operator==(U* lhs, const CheckedPtr<T, I>& rhs) {
- return rhs == lhs;
-}
-template <typename T, typename I, typename U>
-ALWAYS_INLINE bool operator!=(U* lhs, const CheckedPtr<T, I>& rhs) {
- return !operator==(lhs, rhs);
-}
-
-template <typename T, typename I>
-ALWAYS_INLINE void swap(CheckedPtr<T, I>& lhs, CheckedPtr<T, I>& rhs) noexcept {
- lhs.swap(rhs);
-}
-
} // namespace base
using base::CheckedPtr;
diff --git a/chromium/base/memory/checked_ptr_unittest.cc b/chromium/base/memory/checked_ptr_unittest.cc
index 32fa63964ec..e1eedb2ff06 100644
--- a/chromium/base/memory/checked_ptr_unittest.cc
+++ b/chromium/base/memory/checked_ptr_unittest.cc
@@ -4,13 +4,17 @@
#include "base/memory/checked_ptr.h"
+#include <climits>
#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
+#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
+using testing::Test;
+
static_assert(sizeof(CheckedPtr<void>) == sizeof(void*),
"CheckedPtr shouldn't add memory overhead");
static_assert(sizeof(CheckedPtr<int>) == sizeof(int*),
@@ -49,12 +53,14 @@ static_assert(
namespace {
+static int g_wrap_raw_ptr_cnt = INT_MIN;
static int g_get_for_dereference_cnt = INT_MIN;
static int g_get_for_extraction_cnt = INT_MIN;
static int g_get_for_comparison_cnt = INT_MIN;
static int g_checked_ptr_swap_cnt = INT_MIN;
static void ClearCounters() {
+ g_wrap_raw_ptr_cnt = 0;
g_get_for_dereference_cnt = 0;
g_get_for_extraction_cnt = 0;
g_get_for_comparison_cnt = 0;
@@ -64,6 +70,11 @@ static void ClearCounters() {
struct CheckedPtrCountingNoOpImpl : base::internal::CheckedPtrNoOpImpl {
using Super = base::internal::CheckedPtrNoOpImpl;
+ static ALWAYS_INLINE uintptr_t WrapRawPtr(const volatile void* cv_ptr) {
+ ++g_wrap_raw_ptr_cnt;
+ return Super::WrapRawPtr(cv_ptr);
+ }
+
static ALWAYS_INLINE void* SafelyUnwrapPtrForDereference(
uintptr_t wrapped_ptr) {
++g_get_for_dereference_cnt;
@@ -109,55 +120,156 @@ struct Derived : Base1, Base2 {
int d;
};
-TEST(CheckedPtr, NullStarDereference) {
+class CheckedPtrTest : public Test {
+ protected:
+ void SetUp() override { ClearCounters(); }
+};
+
+TEST_F(CheckedPtrTest, NullStarDereference) {
CheckedPtr<int> ptr = nullptr;
EXPECT_DEATH_IF_SUPPORTED(if (*ptr == 42) return, "");
}
-TEST(CheckedPtr, NullArrowDereference) {
+TEST_F(CheckedPtrTest, NullArrowDereference) {
CheckedPtr<MyStruct> ptr = nullptr;
EXPECT_DEATH_IF_SUPPORTED(if (ptr->x == 42) return, "");
}
-TEST(CheckedPtr, NullExtractNoDereference) {
- CheckedPtr<int> ptr = nullptr;
+TEST_F(CheckedPtrTest, NullExtractNoDereference) {
+ CountingCheckedPtr<int> ptr = nullptr;
+ // No dereference hence shouldn't crash.
int* raw = ptr;
std::ignore = raw;
+ EXPECT_EQ(g_get_for_comparison_cnt, 0);
+ EXPECT_EQ(g_get_for_extraction_cnt, 1);
+ EXPECT_EQ(g_get_for_dereference_cnt, 0);
+}
+
+TEST_F(CheckedPtrTest, NullCmpExplicit) {
+ CountingCheckedPtr<int> ptr = nullptr;
+ EXPECT_TRUE(ptr == nullptr);
+ EXPECT_TRUE(nullptr == ptr);
+ EXPECT_FALSE(ptr != nullptr);
+ EXPECT_FALSE(nullptr != ptr);
+ // No need to unwrap pointer, just compare against 0.
+ EXPECT_EQ(g_get_for_comparison_cnt, 0);
+ EXPECT_EQ(g_get_for_extraction_cnt, 0);
+ EXPECT_EQ(g_get_for_dereference_cnt, 0);
+}
+
+TEST_F(CheckedPtrTest, NullCmpBool) {
+ CountingCheckedPtr<int> ptr = nullptr;
+ EXPECT_FALSE(ptr);
+ EXPECT_TRUE(!ptr);
+ // No need to unwrap pointer, just compare against 0.
+ EXPECT_EQ(g_get_for_comparison_cnt, 0);
+ EXPECT_EQ(g_get_for_extraction_cnt, 0);
+ EXPECT_EQ(g_get_for_dereference_cnt, 0);
+}
+
+void FuncThatAcceptsBool(bool b) {}
+
+bool IsValidNoCast(CountingCheckedPtr<int> ptr) {
+ return !!ptr; // !! to avoid implicit cast
+}
+bool IsValidNoCast2(CountingCheckedPtr<int> ptr) {
+ return ptr && true;
+}
+
+TEST_F(CheckedPtrTest, BoolOpNotCast) {
+ CountingCheckedPtr<int> ptr = nullptr;
+ volatile bool is_valid = !!ptr; // !! to avoid implicit cast
+ is_valid = ptr || is_valid; // volatile, so won't be optimized
+ if (ptr)
+ is_valid = true;
+ bool is_not_valid = !ptr;
+ if (!ptr)
+ is_not_valid = true;
+ std::ignore = IsValidNoCast(ptr);
+ std::ignore = IsValidNoCast2(ptr);
+ FuncThatAcceptsBool(!ptr);
+ // No need to unwrap pointer, just compare against 0.
+ EXPECT_EQ(g_get_for_comparison_cnt, 0);
+ EXPECT_EQ(g_get_for_extraction_cnt, 0);
+ EXPECT_EQ(g_get_for_dereference_cnt, 0);
}
-TEST(CheckedPtr, StarDereference) {
+bool IsValidWithCast(CountingCheckedPtr<int> ptr) {
+ return ptr;
+}
+
+// This test is mostly for documentation purposes. It demonstrates cases where
+// |operator T*| is called first and then the pointer is converted to bool,
+// as opposed to calling |operator bool| directly. The former may be more
+// costly, so the caller has to be careful not to trigger this path.
+TEST_F(CheckedPtrTest, CastNotBoolOp) {
+ CountingCheckedPtr<int> ptr = nullptr;
+ bool is_valid = ptr;
+ is_valid = IsValidWithCast(ptr);
+ FuncThatAcceptsBool(ptr);
+ EXPECT_EQ(g_get_for_comparison_cnt, 0);
+ EXPECT_EQ(g_get_for_extraction_cnt, 3);
+ EXPECT_EQ(g_get_for_dereference_cnt, 0);
+}
+
+TEST_F(CheckedPtrTest, StarDereference) {
int foo = 42;
- CheckedPtr<int> ptr = &foo;
+ CountingCheckedPtr<int> ptr = &foo;
EXPECT_EQ(*ptr, 42);
+ EXPECT_EQ(g_get_for_comparison_cnt, 0);
+ EXPECT_EQ(g_get_for_extraction_cnt, 0);
+ EXPECT_EQ(g_get_for_dereference_cnt, 1);
}
-TEST(CheckedPtr, ArrowDereference) {
+TEST_F(CheckedPtrTest, ArrowDereference) {
MyStruct foo = {42};
- CheckedPtr<MyStruct> ptr = &foo;
+ CountingCheckedPtr<MyStruct> ptr = &foo;
EXPECT_EQ(ptr->x, 42);
+ EXPECT_EQ(g_get_for_comparison_cnt, 0);
+ EXPECT_EQ(g_get_for_extraction_cnt, 0);
+ EXPECT_EQ(g_get_for_dereference_cnt, 1);
}
-TEST(CheckedPtr, ConstVoidPtr) {
+TEST_F(CheckedPtrTest, Delete) {
+ CountingCheckedPtr<int> ptr = new int(42);
+ delete ptr;
+ // The pointer was extracted using implicit cast before passing to |delete|.
+ EXPECT_EQ(g_get_for_comparison_cnt, 0);
+ EXPECT_EQ(g_get_for_extraction_cnt, 1);
+ EXPECT_EQ(g_get_for_dereference_cnt, 0);
+}
+
+TEST_F(CheckedPtrTest, ConstVolatileVoidPtr) {
int32_t foo[] = {1234567890};
- CheckedPtr<const void> ptr = foo;
- EXPECT_EQ(*static_cast<const int32_t*>(ptr), 1234567890);
+ CountingCheckedPtr<const volatile void> ptr = foo;
+ EXPECT_EQ(*static_cast<const volatile int32_t*>(ptr), 1234567890);
+ // Because we're using a cast, the extraction API kicks in, which doesn't
+ // know if the extracted pointer will be dereferenced or not.
+ EXPECT_EQ(g_get_for_comparison_cnt, 0);
+ EXPECT_EQ(g_get_for_extraction_cnt, 1);
+ EXPECT_EQ(g_get_for_dereference_cnt, 0);
}
-TEST(CheckedPtr, VoidPtr) {
+TEST_F(CheckedPtrTest, VoidPtr) {
int32_t foo[] = {1234567890};
- CheckedPtr<void> ptr = foo;
+ CountingCheckedPtr<void> ptr = foo;
EXPECT_EQ(*static_cast<int32_t*>(ptr), 1234567890);
+ // Because we're using a cast, the extraction API kicks in, which doesn't
+ // know if the extracted pointer will be dereferenced or not.
+ EXPECT_EQ(g_get_for_comparison_cnt, 0);
+ EXPECT_EQ(g_get_for_extraction_cnt, 1);
+ EXPECT_EQ(g_get_for_dereference_cnt, 0);
}
-TEST(CheckedPtr, OperatorEQ) {
+TEST_F(CheckedPtrTest, OperatorEQ) {
int foo;
- CheckedPtr<int> ptr1 = nullptr;
+ CountingCheckedPtr<int> ptr1 = nullptr;
EXPECT_TRUE(ptr1 == ptr1);
- CheckedPtr<int> ptr2 = nullptr;
+ CountingCheckedPtr<int> ptr2 = nullptr;
EXPECT_TRUE(ptr1 == ptr2);
- CheckedPtr<int> ptr3 = &foo;
+ CountingCheckedPtr<int> ptr3 = &foo;
EXPECT_TRUE(&foo == ptr3);
EXPECT_TRUE(ptr3 == &foo);
EXPECT_FALSE(ptr1 == ptr3);
@@ -165,17 +277,21 @@ TEST(CheckedPtr, OperatorEQ) {
ptr1 = &foo;
EXPECT_TRUE(ptr1 == ptr3);
EXPECT_TRUE(ptr3 == ptr1);
+
+ EXPECT_EQ(g_get_for_comparison_cnt, 12);
+ EXPECT_EQ(g_get_for_extraction_cnt, 0);
+ EXPECT_EQ(g_get_for_dereference_cnt, 0);
}
-TEST(CheckedPtr, OperatorNE) {
+TEST_F(CheckedPtrTest, OperatorNE) {
int foo;
- CheckedPtr<int> ptr1 = nullptr;
+ CountingCheckedPtr<int> ptr1 = nullptr;
EXPECT_FALSE(ptr1 != ptr1);
- CheckedPtr<int> ptr2 = nullptr;
+ CountingCheckedPtr<int> ptr2 = nullptr;
EXPECT_FALSE(ptr1 != ptr2);
- CheckedPtr<int> ptr3 = &foo;
+ CountingCheckedPtr<int> ptr3 = &foo;
EXPECT_FALSE(&foo != ptr3);
EXPECT_FALSE(ptr3 != &foo);
EXPECT_TRUE(ptr1 != ptr3);
@@ -183,14 +299,17 @@ TEST(CheckedPtr, OperatorNE) {
ptr1 = &foo;
EXPECT_FALSE(ptr1 != ptr3);
EXPECT_FALSE(ptr3 != ptr1);
+
+ EXPECT_EQ(g_get_for_comparison_cnt, 12);
+ EXPECT_EQ(g_get_for_extraction_cnt, 0);
+ EXPECT_EQ(g_get_for_dereference_cnt, 0);
}
-TEST(CheckedPtr, OperatorEQCast) {
- ClearCounters();
+TEST_F(CheckedPtrTest, OperatorEQCast) {
int foo = 42;
const int* raw_int_ptr = &foo;
- void* raw_void_ptr = &foo;
- CountingCheckedPtr<int> checked_int_ptr = &foo;
+ volatile void* raw_void_ptr = &foo;
+ CountingCheckedPtr<volatile int> checked_int_ptr = &foo;
CountingCheckedPtr<const void> checked_void_ptr = &foo;
EXPECT_TRUE(checked_int_ptr == checked_int_ptr);
EXPECT_TRUE(checked_int_ptr == raw_int_ptr);
@@ -209,14 +328,15 @@ TEST(CheckedPtr, OperatorEQCast) {
EXPECT_EQ(g_get_for_comparison_cnt, 16);
EXPECT_EQ(g_get_for_extraction_cnt, 0);
EXPECT_EQ(g_get_for_dereference_cnt, 0);
+}
- ClearCounters();
+TEST_F(CheckedPtrTest, OperatorEQCastHierarchy) {
Derived derived_val(42, 84, 1024);
Derived* raw_derived_ptr = &derived_val;
const Base1* raw_base1_ptr = &derived_val;
- Base2* raw_base2_ptr = &derived_val;
- CountingCheckedPtr<const Derived> checked_derived_ptr = &derived_val;
- CountingCheckedPtr<Base1> checked_base1_ptr = &derived_val;
+ volatile Base2* raw_base2_ptr = &derived_val;
+ CountingCheckedPtr<const volatile Derived> checked_derived_ptr = &derived_val;
+ CountingCheckedPtr<volatile Base1> checked_base1_ptr = &derived_val;
CountingCheckedPtr<const Base2> checked_base2_ptr = &derived_val;
EXPECT_TRUE(checked_derived_ptr == checked_derived_ptr);
EXPECT_TRUE(checked_derived_ptr == raw_derived_ptr);
@@ -251,13 +371,12 @@ TEST(CheckedPtr, OperatorEQCast) {
EXPECT_EQ(g_get_for_dereference_cnt, 0);
}
-TEST(CheckedPtr, OperatorNECast) {
- ClearCounters();
+TEST_F(CheckedPtrTest, OperatorNECast) {
int foo = 42;
- int* raw_int_ptr = &foo;
+ volatile int* raw_int_ptr = &foo;
const void* raw_void_ptr = &foo;
CountingCheckedPtr<const int> checked_int_ptr = &foo;
- CountingCheckedPtr<void> checked_void_ptr = &foo;
+ CountingCheckedPtr<volatile void> checked_void_ptr = &foo;
EXPECT_FALSE(checked_int_ptr != checked_int_ptr);
EXPECT_FALSE(checked_int_ptr != raw_int_ptr);
EXPECT_FALSE(raw_int_ptr != checked_int_ptr);
@@ -275,15 +394,16 @@ TEST(CheckedPtr, OperatorNECast) {
EXPECT_EQ(g_get_for_comparison_cnt, 16);
EXPECT_EQ(g_get_for_extraction_cnt, 0);
EXPECT_EQ(g_get_for_dereference_cnt, 0);
+}
- ClearCounters();
+TEST_F(CheckedPtrTest, OperatorNECastHierarchy) {
Derived derived_val(42, 84, 1024);
const Derived* raw_derived_ptr = &derived_val;
- Base1* raw_base1_ptr = &derived_val;
+ volatile Base1* raw_base1_ptr = &derived_val;
const Base2* raw_base2_ptr = &derived_val;
- CountingCheckedPtr<Derived> checked_derived_ptr = &derived_val;
+ CountingCheckedPtr<volatile Derived> checked_derived_ptr = &derived_val;
CountingCheckedPtr<const Base1> checked_base1_ptr = &derived_val;
- CountingCheckedPtr<Base2> checked_base2_ptr = &derived_val;
+ CountingCheckedPtr<const volatile Base2> checked_base2_ptr = &derived_val;
EXPECT_FALSE(checked_derived_ptr != checked_derived_ptr);
EXPECT_FALSE(checked_derived_ptr != raw_derived_ptr);
EXPECT_FALSE(raw_derived_ptr != checked_derived_ptr);
@@ -317,7 +437,7 @@ TEST(CheckedPtr, OperatorNECast) {
EXPECT_EQ(g_get_for_dereference_cnt, 0);
}
-TEST(CheckedPtr, Cast) {
+TEST_F(CheckedPtrTest, Cast) {
Derived derived_val(42, 84, 1024);
CheckedPtr<Derived> checked_derived_ptr = &derived_val;
Base1* raw_base1_ptr = checked_derived_ptr;
@@ -359,6 +479,17 @@ TEST(CheckedPtr, Cast) {
EXPECT_EQ(checked_const_derived_ptr->b2, 84);
EXPECT_EQ(checked_const_derived_ptr->d, 1024);
+ volatile Derived* raw_volatile_derived_ptr = checked_derived_ptr2;
+ EXPECT_EQ(raw_volatile_derived_ptr->b1, 42);
+ EXPECT_EQ(raw_volatile_derived_ptr->b2, 84);
+ EXPECT_EQ(raw_volatile_derived_ptr->d, 1024);
+
+ CheckedPtr<volatile Derived> checked_volatile_derived_ptr =
+ raw_volatile_derived_ptr;
+ EXPECT_EQ(checked_volatile_derived_ptr->b1, 42);
+ EXPECT_EQ(checked_volatile_derived_ptr->b2, 84);
+ EXPECT_EQ(checked_volatile_derived_ptr->d, 1024);
+
void* raw_void_ptr = checked_derived_ptr;
CheckedPtr<void> checked_void_ptr = raw_derived_ptr;
CheckedPtr<Derived> checked_derived_ptr3 =
@@ -373,8 +504,7 @@ TEST(CheckedPtr, Cast) {
EXPECT_EQ(checked_derived_ptr4->d, 1024);
}
-TEST(CheckedPtr, CustomSwap) {
- ClearCounters();
+TEST_F(CheckedPtrTest, CustomSwap) {
int foo1, foo2;
CountingCheckedPtr<int> ptr1(&foo1);
CountingCheckedPtr<int> ptr2(&foo2);
@@ -386,8 +516,7 @@ TEST(CheckedPtr, CustomSwap) {
EXPECT_EQ(g_checked_ptr_swap_cnt, 1);
}
-TEST(CheckedPtr, StdSwap) {
- ClearCounters();
+TEST_F(CheckedPtrTest, StdSwap) {
int foo1, foo2;
CountingCheckedPtr<int> ptr1(&foo1);
CountingCheckedPtr<int> ptr2(&foo2);
@@ -397,44 +526,188 @@ TEST(CheckedPtr, StdSwap) {
EXPECT_EQ(g_checked_ptr_swap_cnt, 0);
}
-TEST(CheckedPtr, AdvanceIntArray) {
- // operator++
+TEST_F(CheckedPtrTest, PostIncrementOperator) {
int foo[] = {42, 43, 44, 45};
- CheckedPtr<int> ptr = foo;
- for (int i = 0; i < 4; ++i, ++ptr) {
- ASSERT_EQ(*ptr, 42 + i);
+ CountingCheckedPtr<int> ptr = foo;
+ for (int i = 0; i < 4; ++i) {
+ ASSERT_EQ(*ptr++, 42 + i);
}
- ptr = &foo[1];
- for (int i = 1; i < 4; ++i, ++ptr) {
+ EXPECT_EQ(g_get_for_comparison_cnt, 0);
+ EXPECT_EQ(g_get_for_extraction_cnt, 0);
+ EXPECT_EQ(g_get_for_dereference_cnt, 4);
+}
+
+TEST_F(CheckedPtrTest, PostDecrementOperator) {
+ int foo[] = {42, 43, 44, 45};
+ CountingCheckedPtr<int> ptr = &foo[3];
+ for (int i = 3; i >= 0; --i) {
+ ASSERT_EQ(*ptr--, 42 + i);
+ }
+ EXPECT_EQ(g_get_for_comparison_cnt, 0);
+ EXPECT_EQ(g_get_for_extraction_cnt, 0);
+ EXPECT_EQ(g_get_for_dereference_cnt, 4);
+}
+
+TEST_F(CheckedPtrTest, PreIncrementOperator) {
+ int foo[] = {42, 43, 44, 45};
+ CountingCheckedPtr<int> ptr = foo;
+ for (int i = 0; i < 4; ++i, ++ptr) {
ASSERT_EQ(*ptr, 42 + i);
}
+ EXPECT_EQ(g_get_for_comparison_cnt, 0);
+ EXPECT_EQ(g_get_for_extraction_cnt, 0);
+ EXPECT_EQ(g_get_for_dereference_cnt, 4);
+}
- // operator--
- ptr = &foo[3];
+TEST_F(CheckedPtrTest, PreDecrementOperator) {
+ int foo[] = {42, 43, 44, 45};
+ CountingCheckedPtr<int> ptr = &foo[3];
for (int i = 3; i >= 0; --i, --ptr) {
ASSERT_EQ(*ptr, 42 + i);
}
+ EXPECT_EQ(g_get_for_comparison_cnt, 0);
+ EXPECT_EQ(g_get_for_extraction_cnt, 0);
+ EXPECT_EQ(g_get_for_dereference_cnt, 4);
+}
- // operator+=
- ptr = foo;
+TEST_F(CheckedPtrTest, PlusEqualOperator) {
+ int foo[] = {42, 43, 44, 45};
+ CountingCheckedPtr<int> ptr = foo;
for (int i = 0; i < 4; i += 2, ptr += 2) {
ASSERT_EQ(*ptr, 42 + i);
}
+ EXPECT_EQ(g_get_for_comparison_cnt, 0);
+ EXPECT_EQ(g_get_for_extraction_cnt, 0);
+ EXPECT_EQ(g_get_for_dereference_cnt, 2);
+}
- // operator-=
- ptr = &foo[3];
+TEST_F(CheckedPtrTest, MinusEqualOperator) {
+ int foo[] = {42, 43, 44, 45};
+ CountingCheckedPtr<int> ptr = &foo[3];
for (int i = 3; i >= 0; i -= 2, ptr -= 2) {
ASSERT_EQ(*ptr, 42 + i);
}
+ EXPECT_EQ(g_get_for_comparison_cnt, 0);
+ EXPECT_EQ(g_get_for_extraction_cnt, 0);
+ EXPECT_EQ(g_get_for_dereference_cnt, 2);
}
-TEST(CheckedPtr, AdvanceString) {
+TEST_F(CheckedPtrTest, AdvanceString) {
const char kChars[] = "Hello";
std::string str = kChars;
- CheckedPtr<const char> ptr = str.c_str();
+ CountingCheckedPtr<const char> ptr = str.c_str();
for (size_t i = 0; i < str.size(); ++i, ++ptr) {
ASSERT_EQ(*ptr, kChars[i]);
}
+ EXPECT_EQ(g_get_for_comparison_cnt, 0);
+ EXPECT_EQ(g_get_for_extraction_cnt, 0);
+ EXPECT_EQ(g_get_for_dereference_cnt, 5);
+}
+
+TEST_F(CheckedPtrTest, AssignmentFromNullptr) {
+ CountingCheckedPtr<int> checked_ptr;
+ checked_ptr = nullptr;
+ EXPECT_EQ(g_wrap_raw_ptr_cnt, 0);
+ EXPECT_EQ(g_get_for_comparison_cnt, 0);
+ EXPECT_EQ(g_get_for_extraction_cnt, 0);
+ EXPECT_EQ(g_get_for_dereference_cnt, 0);
+}
+
+#if defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+
+namespace {
+
+struct CheckedPtr2ImplPartitionAllocSupportEnabled
+ : base::internal::CheckedPtr2ImplPartitionAllocSupport {
+ static bool EnabledForPtr(void* ptr) { return true; }
+};
+
+using CheckedPtr2ImplEnabled = base::internal::CheckedPtr2Impl<
+ CheckedPtr2ImplPartitionAllocSupportEnabled>;
+
+} // namespace
+
+TEST(CheckedPtr2Impl, WrapNull) {
+ ASSERT_EQ(base::internal::CheckedPtr2Impl<>::GetWrappedNullPtr(), 0u);
+ ASSERT_EQ(base::internal::CheckedPtr2Impl<>::WrapRawPtr(nullptr), 0u);
+}
+
+TEST(CheckedPtr2Impl, SafelyUnwrapNull) {
+ ASSERT_EQ(CheckedPtr2ImplEnabled::SafelyUnwrapPtrForExtraction(0), nullptr);
}
+TEST(CheckedPtr2Impl, WrapAndSafelyUnwrap) {
+ char bytes[] = {0x12, 0x23, 0x34, 0x45, 0x56, 0x67, 0xBA, 0x42, 0x78, 0x89};
+#if !CHECKED_PTR2_PROTECTION_ENABLED
+ // If protection is disabled, wrap & unwrap will read at the pointer, not
+ // before it.
+ bytes[8] = bytes[6];
+ bytes[9] = bytes[7];
+#endif
+ void* ptr = bytes + sizeof(uintptr_t);
+ uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
+
+ uintptr_t set_top_bit = 0x0000000000000000;
+ uintptr_t mask = 0xFFFFFFFFFFFFFFFF;
+#if CHECKED_PTR2_AVOID_BRANCH_WHEN_CHECKING_ENABLED
+ set_top_bit = 0x8000000000000000;
+#if !CHECKED_PTR2_PROTECTION_ENABLED
+ mask = 0x0000FFFFFFFFFFFF;
+#endif
+#endif
+
+ uintptr_t wrapped = CheckedPtr2ImplEnabled::WrapRawPtr(ptr);
+ // First 2 bytes in the preceding word will be used as generation (in reverse
+ // order due to little-endianness).
+#if CHECKED_PTR2_USE_NO_OP_WRAPPER
+ ASSERT_EQ(wrapped, addr);
+ std::ignore = set_top_bit;
+ std::ignore = mask;
+#else
+ ASSERT_EQ(wrapped, (addr | 0x42BA000000000000 | set_top_bit) & mask);
+#endif
+ ASSERT_EQ(CheckedPtr2ImplEnabled::SafelyUnwrapPtrInternal(wrapped), addr);
+
+ bytes[7] |= 0x80;
+#if !CHECKED_PTR2_PROTECTION_ENABLED
+ bytes[9] = bytes[7];
+#endif
+ wrapped = CheckedPtr2ImplEnabled::WrapRawPtr(ptr);
+#if CHECKED_PTR2_USE_NO_OP_WRAPPER
+ ASSERT_EQ(wrapped, addr);
+#else
+ ASSERT_EQ(wrapped, (addr | 0xC2BA000000000000 | set_top_bit) & mask);
+#endif
+ ASSERT_EQ(CheckedPtr2ImplEnabled::SafelyUnwrapPtrInternal(wrapped), addr);
+
+#if CHECKED_PTR2_AVOID_BRANCH_WHEN_DEREFERENCING
+ bytes[6] = 0;
+ bytes[7] = 0;
+#if !CHECKED_PTR2_PROTECTION_ENABLED
+ bytes[8] = bytes[6];
+ bytes[9] = bytes[7];
+#endif
+ mask = 0xFFFFFFFFFFFFFFFF;
+#if CHECKED_PTR2_AVOID_BRANCH_WHEN_CHECKING_ENABLED
+ mask = 0x7FFFFFFFFFFFFFFF;
+#if !CHECKED_PTR2_PROTECTION_ENABLED
+ mask = 0x0000FFFFFFFFFFFF;
+#endif
+#endif
+
+ // Mask out the top bit, because in some cases (not all), it may differ.
+ ASSERT_EQ(CheckedPtr2ImplEnabled::SafelyUnwrapPtrInternal(wrapped) & mask,
+ wrapped & mask);
+#endif
+}
+
+TEST(CheckedPtr2Impl, SafelyUnwrapDisabled) {
+ char bytes[] = {0x12, 0x23, 0x34, 0x45, 0x56, 0x67, 0xBA, 0x42, 0x78, 0x89};
+ void* ptr = bytes + sizeof(uintptr_t);
+ uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
+ ASSERT_EQ(CheckedPtr2ImplEnabled::SafelyUnwrapPtrInternal(addr), addr);
+}
+
+#endif // defined(ARCH_CPU_64_BITS) && !defined(OS_NACL)
+
} // namespace
diff --git a/chromium/base/memory/discardable_shared_memory.cc b/chromium/base/memory/discardable_shared_memory.cc
index bee394a6e63..7214a801a61 100644
--- a/chromium/base/memory/discardable_shared_memory.cc
+++ b/chromium/base/memory/discardable_shared_memory.cc
@@ -17,8 +17,7 @@
#include "base/memory/shared_memory_tracker.h"
#include "base/numerics/safe_math.h"
#include "base/process/process_metrics.h"
-#include "base/trace_event/memory_allocator_dump.h"
-#include "base/trace_event/process_memory_dump.h"
+#include "base/tracing_buildflags.h"
#include "build/build_config.h"
#if defined(OS_POSIX) && !defined(OS_NACL)
@@ -41,6 +40,11 @@
#include "base/fuchsia/fuchsia_logging.h"
#endif
+#if BUILDFLAG(ENABLE_BASE_TRACING)
+#include "base/trace_event/memory_allocator_dump.h"
+#include "base/trace_event/process_memory_dump.h"
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
+
namespace base {
namespace {
@@ -483,6 +487,8 @@ void DiscardableSharedMemory::CreateSharedMemoryOwnershipEdge(
trace_event::MemoryAllocatorDump* local_segment_dump,
trace_event::ProcessMemoryDump* pmd,
bool is_owned) const {
+// Memory dumps are only supported when tracing support is enabled,.
+#if BUILDFLAG(ENABLE_BASE_TRACING)
auto* shared_memory_dump = SharedMemoryTracker::GetOrCreateSharedMemoryDump(
shared_memory_mapping_, pmd);
// TODO(ssid): Clean this by a new api to inherit size of parent dump once the
@@ -512,6 +518,7 @@ void DiscardableSharedMemory::CreateSharedMemoryOwnershipEdge(
pmd->CreateSharedMemoryOwnershipEdge(local_segment_dump->guid(),
shared_memory_guid, kImportance);
}
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
}
// static
diff --git a/chromium/base/memory/discardable_shared_memory.h b/chromium/base/memory/discardable_shared_memory.h
index 44d4cab02a5..af06d5d5190 100644
--- a/chromium/base/memory/discardable_shared_memory.h
+++ b/chromium/base/memory/discardable_shared_memory.h
@@ -8,7 +8,7 @@
#include <stddef.h>
#include "base/base_export.h"
-#include "base/logging.h"
+#include "base/check_op.h"
#include "base/macros.h"
#include "base/memory/shared_memory_mapping.h"
#include "base/memory/unsafe_shared_memory_region.h"
diff --git a/chromium/base/memory/discardable_shared_memory_unittest.cc b/chromium/base/memory/discardable_shared_memory_unittest.cc
index 817e105f8de..d567ee973e1 100644
--- a/chromium/base/memory/discardable_shared_memory_unittest.cc
+++ b/chromium/base/memory/discardable_shared_memory_unittest.cc
@@ -9,11 +9,15 @@
#include "base/memory/discardable_shared_memory.h"
#include "base/memory/shared_memory_tracker.h"
#include "base/process/process_metrics.h"
-#include "base/trace_event/memory_allocator_dump.h"
-#include "base/trace_event/process_memory_dump.h"
+#include "base/tracing_buildflags.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
+#if BUILDFLAG(ENABLE_BASE_TRACING)
+#include "base/trace_event/memory_allocator_dump.h"
+#include "base/trace_event/process_memory_dump.h"
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
+
namespace base {
class TestDiscardableSharedMemory : public DiscardableSharedMemory {
@@ -450,6 +454,7 @@ TEST(DiscardableSharedMemoryTest, ZeroFilledPagesAfterPurge) {
}
#endif
+#if BUILDFLAG(ENABLE_BASE_TRACING)
TEST(DiscardableSharedMemoryTest, TracingOwnershipEdges) {
const uint32_t kDataSize = 1024;
TestDiscardableSharedMemory memory1;
@@ -474,5 +479,6 @@ TEST(DiscardableSharedMemoryTest, TracingOwnershipEdges) {
// TODO(ssid): test for weak global dump once the
// CreateWeakSharedMemoryOwnershipEdge() is fixed, crbug.com/661257.
}
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
} // namespace base
diff --git a/chromium/base/memory/madv_free_discardable_memory_allocator_posix.cc b/chromium/base/memory/madv_free_discardable_memory_allocator_posix.cc
index 3587db93627..a98057cf2d4 100644
--- a/chromium/base/memory/madv_free_discardable_memory_allocator_posix.cc
+++ b/chromium/base/memory/madv_free_discardable_memory_allocator_posix.cc
@@ -10,12 +10,17 @@
#include "base/strings/string_number_conversions.h"
#include "base/strings/stringprintf.h"
#include "base/threading/thread_task_runner_handle.h"
+#include "base/tracing_buildflags.h"
+
+#if BUILDFLAG(ENABLE_BASE_TRACING)
#include "base/trace_event/memory_dump_manager.h"
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
namespace base {
MadvFreeDiscardableMemoryAllocatorPosix::
MadvFreeDiscardableMemoryAllocatorPosix() {
+#if BUILDFLAG(ENABLE_BASE_TRACING)
// Don't register dump provider if ThreadTaskRunnerHandle is not set, such as
// in tests and Android Webview.
if (base::ThreadTaskRunnerHandle::IsSet()) {
@@ -23,11 +28,14 @@ MadvFreeDiscardableMemoryAllocatorPosix::
this, "MadvFreeDiscardableMemoryAllocator",
ThreadTaskRunnerHandle::Get());
}
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
}
MadvFreeDiscardableMemoryAllocatorPosix::
~MadvFreeDiscardableMemoryAllocatorPosix() {
+#if BUILDFLAG(ENABLE_BASE_TRACING)
trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(this);
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
}
std::unique_ptr<DiscardableMemory>
@@ -44,6 +52,7 @@ size_t MadvFreeDiscardableMemoryAllocatorPosix::GetBytesAllocated() const {
bool MadvFreeDiscardableMemoryAllocatorPosix::OnMemoryDump(
const trace_event::MemoryDumpArgs& args,
trace_event::ProcessMemoryDump* pmd) {
+#if BUILDFLAG(ENABLE_BASE_TRACING)
if (args.level_of_detail !=
base::trace_event::MemoryDumpLevelOfDetail::BACKGROUND) {
return true;
@@ -55,6 +64,9 @@ bool MadvFreeDiscardableMemoryAllocatorPosix::OnMemoryDump(
base::trace_event::MemoryAllocatorDump::kUnitsBytes,
GetBytesAllocated());
return true;
+#else // BUILDFLAG(ENABLE_BASE_TRACING)
+ return false;
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
}
} // namespace base
diff --git a/chromium/base/memory/madv_free_discardable_memory_allocator_posix.h b/chromium/base/memory/madv_free_discardable_memory_allocator_posix.h
index f4c33c59270..c569ca7a835 100644
--- a/chromium/base/memory/madv_free_discardable_memory_allocator_posix.h
+++ b/chromium/base/memory/madv_free_discardable_memory_allocator_posix.h
@@ -13,12 +13,11 @@
#include "base/base_export.h"
#include "base/bind.h"
#include "base/callback.h"
-#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/discardable_memory.h"
#include "base/memory/discardable_memory_allocator.h"
#include "base/memory/madv_free_discardable_memory_posix.h"
-#include "base/trace_event/memory_dump_provider.h"
+#include "base/trace_event/base_tracing.h"
#include "build/build_config.h"
namespace base {
diff --git a/chromium/base/memory/madv_free_discardable_memory_allocator_posix_unittest.cc b/chromium/base/memory/madv_free_discardable_memory_allocator_posix_unittest.cc
index 50c30e67bee..0c7a53e1760 100644
--- a/chromium/base/memory/madv_free_discardable_memory_allocator_posix_unittest.cc
+++ b/chromium/base/memory/madv_free_discardable_memory_allocator_posix_unittest.cc
@@ -11,11 +11,15 @@
#include "base/memory/madv_free_discardable_memory_allocator_posix.h"
#include "base/memory/madv_free_discardable_memory_posix.h"
#include "base/process/process_metrics.h"
-#include "base/trace_event/memory_allocator_dump.h"
-#include "base/trace_event/process_memory_dump.h"
+#include "base/tracing_buildflags.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
+#if BUILDFLAG(ENABLE_BASE_TRACING)
+#include "base/trace_event/memory_allocator_dump.h"
+#include "base/trace_event/process_memory_dump.h"
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
+
#define SUCCEED_IF_MADV_FREE_UNSUPPORTED() \
do { \
if (GetMadvFreeSupport() != base::MadvFreeSupport::kSupported) { \
@@ -31,9 +35,11 @@ namespace base {
class MadvFreeDiscardableMemoryAllocatorPosixTest : public ::testing::Test {
protected:
MadvFreeDiscardableMemoryAllocatorPosixTest() {
+#if BUILDFLAG(ENABLE_BASE_TRACING)
base::trace_event::MemoryDumpArgs dump_args = {
base::trace_event::MemoryDumpLevelOfDetail::DETAILED};
pmd_ = std::make_unique<base::trace_event::ProcessMemoryDump>(dump_args);
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
}
std::unique_ptr<MadvFreeDiscardableMemoryPosix>
@@ -43,15 +49,19 @@ class MadvFreeDiscardableMemoryAllocatorPosixTest : public ::testing::Test {
allocator_.AllocateLockedDiscardableMemory(size).release()));
}
+#if BUILDFLAG(ENABLE_BASE_TRACING)
size_t GetDiscardableMemorySizeFromDump(const DiscardableMemory& mem,
const std::string& dump_id) {
return mem.CreateMemoryAllocatorDump(dump_id.c_str(), pmd_.get())
->GetSizeInternal();
}
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
MadvFreeDiscardableMemoryAllocatorPosix allocator_;
- std::unique_ptr<base::trace_event::ProcessMemoryDump> pmd_;
const size_t kPageSize = base::GetPageSize();
+#if BUILDFLAG(ENABLE_BASE_TRACING)
+ std::unique_ptr<base::trace_event::ProcessMemoryDump> pmd_;
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
};
TEST_F(MadvFreeDiscardableMemoryAllocatorPosixTest, AllocateAndUseMemory) {
@@ -61,8 +71,10 @@ TEST_F(MadvFreeDiscardableMemoryAllocatorPosixTest, AllocateAndUseMemory) {
auto mem1 = AllocateLockedMadvFreeDiscardableMemory(kPageSize * 3 + 1);
EXPECT_TRUE(mem1->IsLockedForTesting());
+#if BUILDFLAG(ENABLE_BASE_TRACING)
EXPECT_EQ(GetDiscardableMemorySizeFromDump(*mem1, "dummy_dump_1"),
kPageSize * 3 + 1);
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
EXPECT_EQ(allocator_.GetBytesAllocated(), kPageSize * 3 + 1);
// Allocate 3 pages of discardable memory, and free the previously allocated
@@ -70,8 +82,10 @@ TEST_F(MadvFreeDiscardableMemoryAllocatorPosixTest, AllocateAndUseMemory) {
auto mem2 = AllocateLockedMadvFreeDiscardableMemory(kPageSize * 3);
EXPECT_TRUE(mem2->IsLockedForTesting());
+#if BUILDFLAG(ENABLE_BASE_TRACING)
EXPECT_EQ(GetDiscardableMemorySizeFromDump(*mem2, "dummy_dump_2"),
kPageSize * 3);
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
EXPECT_EQ(allocator_.GetBytesAllocated(), kPageSize * 6 + 1);
mem1.reset();
diff --git a/chromium/base/memory/madv_free_discardable_memory_posix.cc b/chromium/base/memory/madv_free_discardable_memory_posix.cc
index 0950964ed1a..ed89d6ea773 100644
--- a/chromium/base/memory/madv_free_discardable_memory_posix.cc
+++ b/chromium/base/memory/madv_free_discardable_memory_posix.cc
@@ -13,13 +13,18 @@
#include "base/atomicops.h"
#include "base/bits.h"
#include "base/callback.h"
+#include "base/logging.h"
#include "base/memory/madv_free_discardable_memory_allocator_posix.h"
#include "base/memory/madv_free_discardable_memory_posix.h"
#include "base/process/process_metrics.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/stringprintf.h"
+#include "base/tracing_buildflags.h"
+
+#if BUILDFLAG(ENABLE_BASE_TRACING)
#include "base/trace_event/memory_allocator_dump.h"
#include "base/trace_event/memory_dump_manager.h"
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
#if defined(ADDRESS_SANITIZER)
#include <sanitizer/asan_interface.h>
@@ -223,6 +228,7 @@ trace_event::MemoryAllocatorDump*
MadvFreeDiscardableMemoryPosix::CreateMemoryAllocatorDump(
const char* name,
trace_event::ProcessMemoryDump* pmd) const {
+#if BUILDFLAG(ENABLE_BASE_TRACING)
DFAKE_SCOPED_LOCK(thread_collision_warner_);
using base::trace_event::MemoryAllocatorDump;
@@ -267,6 +273,10 @@ MadvFreeDiscardableMemoryPosix::CreateMemoryAllocatorDump(
pmd->AddSuballocation(dump->guid(), allocator_dump_name);
return dump;
+#else // BUILDFLAG(ENABLE_BASE_TRACING)
+ NOTREACHED();
+ return nullptr;
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
}
bool MadvFreeDiscardableMemoryPosix::IsValid() const {
diff --git a/chromium/base/memory/madv_free_discardable_memory_posix.h b/chromium/base/memory/madv_free_discardable_memory_posix.h
index c482a9866b6..e7875188822 100644
--- a/chromium/base/memory/madv_free_discardable_memory_posix.h
+++ b/chromium/base/memory/madv_free_discardable_memory_posix.h
@@ -12,7 +12,7 @@
#include "base/base_export.h"
#include "base/callback.h"
-#include "base/logging.h"
+#include "base/check_op.h"
#include "base/macros.h"
#include "base/memory/discardable_memory.h"
#include "base/sequence_checker.h"
diff --git a/chromium/base/memory/madv_free_discardable_memory_posix_unittest.cc b/chromium/base/memory/madv_free_discardable_memory_posix_unittest.cc
index 731411d4980..a5507b86fc9 100644
--- a/chromium/base/memory/madv_free_discardable_memory_posix_unittest.cc
+++ b/chromium/base/memory/madv_free_discardable_memory_posix_unittest.cc
@@ -15,8 +15,6 @@
#include "base/memory/madv_free_discardable_memory_allocator_posix.h"
#include "base/memory/madv_free_discardable_memory_posix.h"
#include "base/process/process_metrics.h"
-#include "base/trace_event/memory_allocator_dump.h"
-#include "base/trace_event/process_memory_dump.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/chromium/base/memory/memory_pressure_listener.cc b/chromium/base/memory/memory_pressure_listener.cc
index 1b97fd8cac6..87cdd8c962a 100644
--- a/chromium/base/memory/memory_pressure_listener.cc
+++ b/chromium/base/memory/memory_pressure_listener.cc
@@ -5,7 +5,7 @@
#include "base/memory/memory_pressure_listener.h"
#include "base/observer_list_threadsafe.h"
-#include "base/trace_event/trace_event.h"
+#include "base/trace_event/base_tracing.h"
namespace base {
@@ -60,17 +60,20 @@ subtle::Atomic32 g_notifications_suppressed = 0;
} // namespace
MemoryPressureListener::MemoryPressureListener(
+ const base::Location& creation_location,
const MemoryPressureListener::MemoryPressureCallback& callback)
- : callback_(callback) {
+ : callback_(callback), creation_location_(creation_location) {
GetMemoryPressureObserver()->AddObserver(this, false);
}
MemoryPressureListener::MemoryPressureListener(
+ const base::Location& creation_location,
const MemoryPressureListener::MemoryPressureCallback& callback,
const MemoryPressureListener::SyncMemoryPressureCallback&
sync_memory_pressure_callback)
: callback_(callback),
- sync_memory_pressure_callback_(sync_memory_pressure_callback) {
+ sync_memory_pressure_callback_(sync_memory_pressure_callback),
+ creation_location_(creation_location) {
GetMemoryPressureObserver()->AddObserver(this, true);
}
@@ -79,6 +82,9 @@ MemoryPressureListener::~MemoryPressureListener() {
}
void MemoryPressureListener::Notify(MemoryPressureLevel memory_pressure_level) {
+ TRACE_EVENT2("base", "MemoryPressureListener::Notify",
+ "listener_creation_info", creation_location_.ToString(), "level",
+ memory_pressure_level);
callback_.Run(memory_pressure_level);
}
diff --git a/chromium/base/memory/memory_pressure_listener.h b/chromium/base/memory/memory_pressure_listener.h
index 084ddd54208..bfa374719f2 100644
--- a/chromium/base/memory/memory_pressure_listener.h
+++ b/chromium/base/memory/memory_pressure_listener.h
@@ -12,6 +12,7 @@
#include "base/base_export.h"
#include "base/callback.h"
+#include "base/location.h"
#include "base/macros.h"
namespace base {
@@ -67,9 +68,11 @@ class BASE_EXPORT MemoryPressureListener {
using SyncMemoryPressureCallback =
RepeatingCallback<void(MemoryPressureLevel)>;
- explicit MemoryPressureListener(
+ MemoryPressureListener(
+ const base::Location& creation_location,
const MemoryPressureCallback& memory_pressure_callback);
MemoryPressureListener(
+ const base::Location& creation_location,
const MemoryPressureCallback& memory_pressure_callback,
const SyncMemoryPressureCallback& sync_memory_pressure_callback);
@@ -95,6 +98,8 @@ class BASE_EXPORT MemoryPressureListener {
MemoryPressureCallback callback_;
SyncMemoryPressureCallback sync_memory_pressure_callback_;
+ const base::Location creation_location_;
+
DISALLOW_COPY_AND_ASSIGN(MemoryPressureListener);
};
diff --git a/chromium/base/memory/memory_pressure_listener_unittest.cc b/chromium/base/memory/memory_pressure_listener_unittest.cc
index f1c0006ab40..3df98487a52 100644
--- a/chromium/base/memory/memory_pressure_listener_unittest.cc
+++ b/chromium/base/memory/memory_pressure_listener_unittest.cc
@@ -19,8 +19,9 @@ class MemoryPressureListenerTest : public testing::Test {
: task_environment_(test::TaskEnvironment::MainThreadType::UI) {}
void SetUp() override {
- listener_ = std::make_unique<MemoryPressureListener>(BindRepeating(
- &MemoryPressureListenerTest::OnMemoryPressure, Unretained(this)));
+ listener_ = std::make_unique<MemoryPressureListener>(
+ FROM_HERE, BindRepeating(&MemoryPressureListenerTest::OnMemoryPressure,
+ Unretained(this)));
}
void TearDown() override {
diff --git a/chromium/base/memory/platform_shared_memory_region.cc b/chromium/base/memory/platform_shared_memory_region.cc
index 944b12cb297..964844adff6 100644
--- a/chromium/base/memory/platform_shared_memory_region.cc
+++ b/chromium/base/memory/platform_shared_memory_region.cc
@@ -4,6 +4,7 @@
#include "base/memory/platform_shared_memory_region.h"
+#include "base/memory/aligned_memory.h"
#include "base/memory/shared_memory_mapping.h"
#include "base/memory/shared_memory_security_policy.h"
#include "base/metrics/histogram_functions.h"
@@ -15,7 +16,7 @@ namespace subtle {
namespace {
void RecordMappingWasBlockedHistogram(bool blocked) {
- base::UmaHistogramBoolean("SharedMemory.MapBlockedForSecurity", blocked);
+ UmaHistogramBoolean("SharedMemory.MapBlockedForSecurity", blocked);
}
} // namespace
@@ -62,14 +63,13 @@ bool PlatformSharedMemoryRegion::MapAt(off_t offset,
if (!SharedMemorySecurityPolicy::AcquireReservationForMapping(size)) {
RecordMappingWasBlockedHistogram(/*blocked=*/true);
return false;
- } else {
- RecordMappingWasBlockedHistogram(/*blocked=*/false);
}
+ RecordMappingWasBlockedHistogram(/*blocked=*/false);
+
bool success = MapAtInternal(offset, size, memory, mapped_size);
if (success) {
- DCHECK_EQ(
- 0U, reinterpret_cast<uintptr_t>(*memory) & (kMapMinimumAlignment - 1));
+ DCHECK(IsAligned(*memory, kMapMinimumAlignment));
} else {
SharedMemorySecurityPolicy::ReleaseReservationForMapping(size);
}
diff --git a/chromium/base/memory/platform_shared_memory_region_android.cc b/chromium/base/memory/platform_shared_memory_region_android.cc
index 812ad67c85f..b862a115bc1 100644
--- a/chromium/base/memory/platform_shared_memory_region_android.cc
+++ b/chromium/base/memory/platform_shared_memory_region_android.cc
@@ -7,6 +7,7 @@
#include <sys/mman.h>
#include "base/bits.h"
+#include "base/logging.h"
#include "base/memory/shared_memory_tracker.h"
#include "base/metrics/histogram_macros.h"
#include "base/posix/eintr_wrapper.h"
diff --git a/chromium/base/memory/platform_shared_memory_region_unittest.cc b/chromium/base/memory/platform_shared_memory_region_unittest.cc
index 10e8fe0db65..6f099f6d522 100644
--- a/chromium/base/memory/platform_shared_memory_region_unittest.cc
+++ b/chromium/base/memory/platform_shared_memory_region_unittest.cc
@@ -21,6 +21,7 @@
#include "base/debug/proc_maps_linux.h"
#elif defined(OS_WIN)
#include <windows.h>
+#include "base/logging.h"
#elif defined(OS_FUCHSIA)
#include <lib/zx/object.h>
#include <lib/zx/process.h>
diff --git a/chromium/base/memory/ref_counted.h b/chromium/base/memory/ref_counted.h
index 4ef63e85794..c9cad910f49 100644
--- a/chromium/base/memory/ref_counted.h
+++ b/chromium/base/memory/ref_counted.h
@@ -11,9 +11,9 @@
#include "base/atomic_ref_count.h"
#include "base/base_export.h"
+#include "base/check_op.h"
#include "base/compiler_specific.h"
#include "base/gtest_prod_util.h"
-#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/scoped_refptr.h"
#include "base/sequence_checker.h"
diff --git a/chromium/base/memory/ref_counted_delete_on_sequence.h b/chromium/base/memory/ref_counted_delete_on_sequence.h
index 4a8ac744a48..f5f53c022b9 100644
--- a/chromium/base/memory/ref_counted_delete_on_sequence.h
+++ b/chromium/base/memory/ref_counted_delete_on_sequence.h
@@ -7,8 +7,8 @@
#include <utility>
+#include "base/check.h"
#include "base/location.h"
-#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/sequenced_task_runner.h"
diff --git a/chromium/base/memory/scoped_refptr.h b/chromium/base/memory/scoped_refptr.h
index 238b61a7363..a746f95c010 100644
--- a/chromium/base/memory/scoped_refptr.h
+++ b/chromium/base/memory/scoped_refptr.h
@@ -11,8 +11,8 @@
#include <type_traits>
#include <utility>
+#include "base/check.h"
#include "base/compiler_specific.h"
-#include "base/logging.h"
#include "base/macros.h"
template <class T>
diff --git a/chromium/base/memory/shared_memory_tracker.cc b/chromium/base/memory/shared_memory_tracker.cc
index 8d1ac504a74..79383838d80 100644
--- a/chromium/base/memory/shared_memory_tracker.cc
+++ b/chromium/base/memory/shared_memory_tracker.cc
@@ -4,10 +4,16 @@
#include "base/memory/shared_memory_tracker.h"
+#include "base/check.h"
+#include "base/notreached.h"
#include "base/strings/string_number_conversions.h"
-#include "base/trace_event/memory_allocator_dump_guid.h"
+#include "base/trace_event/base_tracing.h"
+#include "base/tracing_buildflags.h"
+
+#if BUILDFLAG(ENABLE_BASE_TRACING)
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/process_memory_dump.h"
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
namespace base {
@@ -58,8 +64,10 @@ void SharedMemoryTracker::DecrementMemoryUsage(
}
SharedMemoryTracker::SharedMemoryTracker() {
+#if BUILDFLAG(ENABLE_BASE_TRACING)
trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
this, "SharedMemoryTracker", nullptr);
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
}
SharedMemoryTracker::~SharedMemoryTracker() = default;
@@ -83,6 +91,7 @@ SharedMemoryTracker::GetOrCreateSharedMemoryDumpInternal(
size_t mapped_size,
const UnguessableToken& mapped_id,
trace_event::ProcessMemoryDump* pmd) {
+#if BUILDFLAG(ENABLE_BASE_TRACING)
const std::string dump_name = GetDumpNameForTracing(mapped_id);
trace_event::MemoryAllocatorDump* local_dump =
pmd->GetAllocatorDump(dump_name);
@@ -116,6 +125,10 @@ SharedMemoryTracker::GetOrCreateSharedMemoryDumpInternal(
pmd->AddOverridableOwnershipEdge(local_dump->guid(), global_dump->guid(),
0 /* importance */);
return local_dump;
+#else // BUILDFLAG(ENABLE_BASE_TRACING)
+ NOTREACHED();
+ return nullptr;
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
}
} // namespace
diff --git a/chromium/base/memory/shared_memory_tracker.h b/chromium/base/memory/shared_memory_tracker.h
index 9df089edfbd..e0ae3a9b13d 100644
--- a/chromium/base/memory/shared_memory_tracker.h
+++ b/chromium/base/memory/shared_memory_tracker.h
@@ -10,7 +10,7 @@
#include "base/memory/shared_memory_mapping.h"
#include "base/synchronization/lock.h"
-#include "base/trace_event/memory_dump_provider.h"
+#include "base/trace_event/base_tracing.h"
namespace base {
diff --git a/chromium/base/memory/singleton.h b/chromium/base/memory/singleton.h
index 87b57919c07..cd39b21f243 100644
--- a/chromium/base/memory/singleton.h
+++ b/chromium/base/memory/singleton.h
@@ -30,8 +30,8 @@
#include "base/at_exit.h"
#include "base/atomicops.h"
#include "base/base_export.h"
+#include "base/check_op.h"
#include "base/lazy_instance_helpers.h"
-#include "base/logging.h"
#include "base/macros.h"
#include "base/threading/thread_restrictions.h"
diff --git a/chromium/base/memory/singleton_unittest.cc b/chromium/base/memory/singleton_unittest.cc
index 06e53b24cd8..be2253f27f0 100644
--- a/chromium/base/memory/singleton_unittest.cc
+++ b/chromium/base/memory/singleton_unittest.cc
@@ -5,6 +5,7 @@
#include <stdint.h>
#include "base/at_exit.h"
+#include "base/memory/aligned_memory.h"
#include "base/memory/singleton.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -156,10 +157,15 @@ void SingletonStatic(CallbackFunc CallOnQuit) {
}
CallbackFunc* GetStaticSingleton() {
- return &CallbackSingletonWithStaticTrait::GetInstance()->callback_;
+ CallbackSingletonWithStaticTrait* instance =
+ CallbackSingletonWithStaticTrait::GetInstance();
+ if (instance == nullptr) {
+ return nullptr;
+ } else {
+ return &instance->callback_;
+ }
}
-
class SingletonTest : public testing::Test {
public:
SingletonTest() = default;
@@ -273,9 +279,6 @@ TEST_F(SingletonTest, Basic) {
VerifiesCallbacksNotCalled();
}
-#define EXPECT_ALIGNED(ptr, align) \
- EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(ptr) & (align - 1))
-
TEST_F(SingletonTest, Alignment) {
// Create some static singletons with increasing sizes and alignment
// requirements. By ordering this way, the linker will need to do some work to
@@ -289,10 +292,10 @@ TEST_F(SingletonTest, Alignment) {
AlignedTestSingleton<AlignedData<4096>>* align4096 =
AlignedTestSingleton<AlignedData<4096>>::GetInstance();
- EXPECT_ALIGNED(align4, 4);
- EXPECT_ALIGNED(align32, 32);
- EXPECT_ALIGNED(align128, 128);
- EXPECT_ALIGNED(align4096, 4096);
+ EXPECT_TRUE(IsAligned(align4, 4));
+ EXPECT_TRUE(IsAligned(align32, 32));
+ EXPECT_TRUE(IsAligned(align128, 128));
+ EXPECT_TRUE(IsAligned(align4096, 4096));
}
} // namespace
diff --git a/chromium/base/memory/weak_ptr.h b/chromium/base/memory/weak_ptr.h
index d2749871681..42aa3412c5e 100644
--- a/chromium/base/memory/weak_ptr.h
+++ b/chromium/base/memory/weak_ptr.h
@@ -73,7 +73,7 @@
#include <type_traits>
#include "base/base_export.h"
-#include "base/logging.h"
+#include "base/check.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/sequence_checker.h"
diff --git a/chromium/base/message_loop/message_loop.cc b/chromium/base/message_loop/message_loop.cc
deleted file mode 100644
index d4b68bed9fb..00000000000
--- a/chromium/base/message_loop/message_loop.cc
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/message_loop/message_loop.h"
-
-#include <utility>
-
-#include "base/bind.h"
-#include "base/check_op.h"
-#include "base/memory/ptr_util.h"
-#include "base/message_loop/message_pump_default.h"
-#include "base/message_loop/message_pump_for_io.h"
-#include "base/message_loop/message_pump_for_ui.h"
-#include "base/optional.h"
-#include "base/run_loop.h"
-#include "base/task/sequence_manager/sequence_manager.h"
-#include "base/task/sequence_manager/sequence_manager_impl.h"
-#include "base/task/sequence_manager/task_queue.h"
-#include "build/build_config.h"
-
-#if defined(OS_MACOSX)
-#include "base/message_loop/message_pump_mac.h"
-#endif
-
-namespace base {
-
-MessageLoop::MessageLoop(MessagePumpType type) : MessageLoop(type, nullptr) {
- // For TYPE_CUSTOM you must either use
- // MessageLoop(std::unique_ptr<MessagePump> pump) or
- // MessageLoop::CreateUnbound()
- DCHECK_NE(type_, MessagePumpType::CUSTOM);
- BindToCurrentThread();
-}
-
-MessageLoop::MessageLoop(std::unique_ptr<MessagePump> pump)
- : MessageLoop(MessagePumpType::CUSTOM, std::move(pump)) {
- BindToCurrentThread();
-}
-
-MessageLoop::~MessageLoop() {
- // Clean up any unprocessed tasks, but take care: deleting a task could
- // result in the addition of more tasks (e.g., via DeleteSoon). This is taken
- // care by the queue as it will prevent further tasks from being posted to its
- // associated TaskRunner instances.
- default_task_queue_->ShutdownTaskQueue();
-
- // If |pump_| is non-null, this message loop has been bound and should be the
- // current one on this thread. Otherwise, this loop is being destructed before
- // it was bound to a thread, so a different message loop (or no loop at all)
- // may be current.
- DCHECK((pump_ && IsBoundToCurrentThread()) ||
- (!pump_ && !IsBoundToCurrentThread()));
-
-// iOS just attaches to the loop, it doesn't Run it.
-// TODO(stuartmorgan): Consider wiring up a Detach().
-#if !defined(OS_IOS)
- // There should be no active RunLoops on this thread, unless this MessageLoop
- // isn't bound to the current thread (see other condition at the top of this
- // method).
- DCHECK((!pump_ && !IsBoundToCurrentThread()) ||
- !RunLoop::IsRunningOnCurrentThread());
-#endif // !defined(OS_IOS)
-}
-
-bool MessageLoop::IsType(MessagePumpType type) const {
- return type_ == type;
-}
-
-// TODO(gab): Migrate TaskObservers to RunLoop as part of separating concerns
-// between MessageLoop and RunLoop and making MessageLoop a swappable
-// implementation detail. http://crbug.com/703346
-void MessageLoop::AddTaskObserver(TaskObserver* task_observer) {
- DCHECK_CALLED_ON_VALID_THREAD(bound_thread_checker_);
- sequence_manager_->AddTaskObserver(task_observer);
-}
-
-void MessageLoop::RemoveTaskObserver(TaskObserver* task_observer) {
- DCHECK_CALLED_ON_VALID_THREAD(bound_thread_checker_);
- sequence_manager_->RemoveTaskObserver(task_observer);
-}
-
-bool MessageLoop::IsBoundToCurrentThread() const {
- return sequence_manager_->IsBoundToCurrentThread();
-}
-
-bool MessageLoop::IsIdleForTesting() {
- return sequence_manager_->IsIdleForTesting();
-}
-
-//------------------------------------------------------------------------------
-
-// static
-std::unique_ptr<MessageLoop> MessageLoop::CreateUnbound(MessagePumpType type) {
- return WrapUnique(new MessageLoop(type, nullptr));
-}
-
-// static
-std::unique_ptr<MessageLoop> MessageLoop::CreateUnbound(
- std::unique_ptr<MessagePump> custom_pump) {
- return WrapUnique(
- new MessageLoop(MessagePumpType::CUSTOM, std::move(custom_pump)));
-}
-
-MessageLoop::MessageLoop(MessagePumpType type,
- std::unique_ptr<MessagePump> custom_pump)
- : sequence_manager_(
- sequence_manager::internal::SequenceManagerImpl::CreateUnbound(
- sequence_manager::SequenceManager::Settings::Builder()
- .SetMessagePumpType(type)
- .Build())),
- default_task_queue_(CreateDefaultTaskQueue()),
- type_(type),
- custom_pump_(std::move(custom_pump)) {
- // Bound in BindToCurrentThread();
- DETACH_FROM_THREAD(bound_thread_checker_);
-}
-
-scoped_refptr<sequence_manager::TaskQueue>
-MessageLoop::CreateDefaultTaskQueue() {
- auto default_task_queue = sequence_manager_->CreateTaskQueue(
- sequence_manager::TaskQueue::Spec("default_tq"));
- sequence_manager_->SetTaskRunner(default_task_queue->task_runner());
- return default_task_queue;
-}
-
-void MessageLoop::BindToCurrentThread() {
- DCHECK_CALLED_ON_VALID_THREAD(bound_thread_checker_);
- thread_id_ = PlatformThread::CurrentId();
-
- DCHECK(!pump_);
-
- std::unique_ptr<MessagePump> pump = CreateMessagePump();
- pump_ = pump.get();
-
- DCHECK(!MessageLoopCurrent::IsSet())
- << "should only have one message loop per thread";
-
- sequence_manager_->BindToCurrentThread(std::move(pump));
-}
-
-std::unique_ptr<MessagePump> MessageLoop::CreateMessagePump() {
- if (custom_pump_) {
- return std::move(custom_pump_);
- } else {
- return MessagePump::Create(type_);
- }
-}
-
-void MessageLoop::SetTimerSlack(TimerSlack timer_slack) {
- sequence_manager_->SetTimerSlack(timer_slack);
-}
-
-scoped_refptr<SingleThreadTaskRunner> MessageLoop::task_runner() const {
- return sequence_manager_->GetTaskRunner();
-}
-
-void MessageLoop::SetTaskRunner(
- scoped_refptr<SingleThreadTaskRunner> task_runner) {
- DCHECK(task_runner);
- sequence_manager_->SetTaskRunner(task_runner);
-}
-
-} // namespace base
diff --git a/chromium/base/message_loop/message_loop.h b/chromium/base/message_loop/message_loop.h
deleted file mode 100644
index 84e4c44500f..00000000000
--- a/chromium/base/message_loop/message_loop.h
+++ /dev/null
@@ -1,201 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MESSAGE_LOOP_MESSAGE_LOOP_H_
-#define BASE_MESSAGE_LOOP_MESSAGE_LOOP_H_
-
-#include <memory>
-#include <string>
-
-#include "base/base_export.h"
-#include "base/callback_forward.h"
-#include "base/gtest_prod_util.h"
-#include "base/macros.h"
-#include "base/memory/scoped_refptr.h"
-#include "base/message_loop/message_pump_type.h"
-#include "base/message_loop/timer_slack.h"
-#include "base/pending_task.h"
-#include "base/run_loop.h"
-#include "base/threading/thread_checker.h"
-#include "base/time/time.h"
-#include "build/build_config.h"
-
-namespace base {
-
-class MessagePump;
-class TaskObserver;
-
-namespace sequence_manager {
-class TaskQueue;
-namespace internal {
-class SequenceManagerImpl;
-} // namespace internal
-} // namespace sequence_manager
-
-// A MessageLoop is used to process events for a particular thread. There is
-// at most one MessageLoop instance per thread.
-//
-// Events include at a minimum Task instances submitted to the MessageLoop's
-// TaskRunner. Depending on the Type of message pump used by the MessageLoop
-// other events such as UI messages may be processed. On Windows APC calls (as
-// time permits) and signals sent to a registered set of HANDLEs may also be
-// processed.
-//
-// The MessageLoop's API should only be used directly by its owner (and users
-// which the owner opts to share a MessageLoop* with). Other ways to access
-// subsets of the MessageLoop API:
-// - base::RunLoop : Drive the MessageLoop from the thread it's bound to.
-// - base::Thread/SequencedTaskRunnerHandle : Post back to the MessageLoop
-// from a task running on it.
-// - SequenceLocalStorageSlot : Bind external state to this MessageLoop.
-// - base::MessageLoopCurrent : Access statically exposed APIs of this
-// MessageLoop.
-// - Embedders may provide their own static accessors to post tasks on
-// specific loops (e.g. content::BrowserThreads).
-//
-// NOTE: Unless otherwise specified, a MessageLoop's methods may only be called
-// on the thread where the MessageLoop's Run method executes.
-//
-// NOTE: MessageLoop has task reentrancy protection. This means that if a
-// task is being processed, a second task cannot start until the first task is
-// finished. Reentrancy can happen when processing a task, and an inner
-// message pump is created. That inner pump then processes native messages
-// which could implicitly start an inner task. Inner message pumps are created
-// with dialogs (DialogBox), common dialogs (GetOpenFileName), OLE functions
-// (DoDragDrop), printer functions (StartDoc) and *many* others.
-//
-// Sample workaround when inner task processing is needed:
-// HRESULT hr;
-// {
-// MessageLoopCurrent::ScopedNestableTaskAllower allow;
-// hr = DoDragDrop(...); // Implicitly runs a modal message loop.
-// }
-// // Process |hr| (the result returned by DoDragDrop()).
-//
-// Please be SURE your task is reentrant (nestable) and all global variables
-// are stable and accessible before calling SetNestableTasksAllowed(true).
-//
-// DEPRECATED: Use a SingleThreadTaskExecutor instead or TaskEnvironment
-// for tests. TODO(https://crbug.com/891670/) remove this class.
-class BASE_EXPORT MessageLoop {
- public:
- // Normally, it is not necessary to instantiate a MessageLoop. Instead, it
- // is typical to make use of the current thread's MessageLoop instance.
- explicit MessageLoop(MessagePumpType type = MessagePumpType::DEFAULT);
- // Creates a MessageLoop with the supplied MessagePump, which must be
- // non-null.
- explicit MessageLoop(std::unique_ptr<MessagePump> custom_pump);
-
- virtual ~MessageLoop();
-
- // Set the timer slack for this message loop.
- void SetTimerSlack(TimerSlack timer_slack);
-
- // Returns true if this loop's pump is |type|. This allows subclasses
- // (especially those in tests) to specialize how they are identified.
- virtual bool IsType(MessagePumpType type) const;
-
- // Returns the type passed to the constructor.
- MessagePumpType type() const { return type_; }
-
- // Sets a new TaskRunner for this message loop. If the message loop was
- // already bound, this must be called on the thread to which it is bound.
- void SetTaskRunner(scoped_refptr<SingleThreadTaskRunner> task_runner);
-
- // Gets the TaskRunner associated with this message loop.
- scoped_refptr<SingleThreadTaskRunner> task_runner() const;
-
- // These functions can only be called on the same thread that |this| is
- // running on.
- // These functions must not be called from a TaskObserver callback.
- void AddTaskObserver(TaskObserver* task_observer);
- void RemoveTaskObserver(TaskObserver* task_observer);
-
- // Returns true if the message loop is idle (ignoring delayed tasks). This is
- // the same condition which triggers DoWork() to return false: i.e.
- // out of tasks which can be processed at the current run-level -- there might
- // be deferred non-nestable tasks remaining if currently in a nested run
- // level.
- // TODO(alexclarke): Make this const when MessageLoopImpl goes away.
- bool IsIdleForTesting();
-
- //----------------------------------------------------------------------------
- protected:
- // Returns true if this is the active MessageLoop for the current thread.
- bool IsBoundToCurrentThread() const;
-
- using MessagePumpFactoryCallback =
- OnceCallback<std::unique_ptr<MessagePump>()>;
-
- // Common protected constructor. Other constructors delegate the
- // initialization to this constructor.
- // A subclass can invoke this constructor to create a message_loop of a
- // specific type with a custom loop. The implementation does not call
- // BindToCurrentThread. If this constructor is invoked directly by a subclass,
- // then the subclass must subsequently bind the message loop.
- MessageLoop(MessagePumpType type, std::unique_ptr<MessagePump> pump);
-
- // Configure various members and bind this message loop to the current thread.
- void BindToCurrentThread();
-
- // A raw pointer to the MessagePump handed-off to |sequence_manager_|.
- // Valid for the lifetime of |sequence_manager_|.
- MessagePump* pump_ = nullptr;
-
- // TODO(crbug.com/891670): We shouldn't publicly expose all of
- // SequenceManagerImpl.
- const std::unique_ptr<sequence_manager::internal::SequenceManagerImpl>
- sequence_manager_;
- // SequenceManager requires an explicit initialisation of the default task
- // queue.
- const scoped_refptr<sequence_manager::TaskQueue> default_task_queue_;
-
- private:
- friend class MessageLoopTypedTest;
- friend class ScheduleWorkTest;
- friend class Thread;
- friend class sequence_manager::internal::SequenceManagerImpl;
- FRIEND_TEST_ALL_PREFIXES(MessageLoopTest, DeleteUnboundLoop);
-
- // Creates a MessageLoop without binding to a thread.
- //
- // It is valid to call this to create a new message loop on one thread,
- // and then pass it to the thread where the message loop actually runs.
- // The message loop's BindToCurrentThread() method must be called on the
- // thread the message loop runs on, before calling Run().
- // Before BindToCurrentThread() is called, only Post*Task() functions can
- // be called on the message loop.
- static std::unique_ptr<MessageLoop> CreateUnbound(MessagePumpType type);
- static std::unique_ptr<MessageLoop> CreateUnbound(
- std::unique_ptr<MessagePump> pump);
-
- scoped_refptr<sequence_manager::TaskQueue> CreateDefaultTaskQueue();
-
- std::unique_ptr<MessagePump> CreateMessagePump();
-
- sequence_manager::internal::SequenceManagerImpl* GetSequenceManagerImpl()
- const {
- return sequence_manager_.get();
- }
-
- const MessagePumpType type_;
-
- // If set this will be returned by the next call to CreateMessagePump().
- // This is only set if |type_| is TYPE_CUSTOM and |pump_| is null.
- std::unique_ptr<MessagePump> custom_pump_;
-
- // Id of the thread this message loop is bound to. Initialized once when the
- // MessageLoop is bound to its thread and constant forever after.
- PlatformThreadId thread_id_ = kInvalidThreadId;
-
- // Verifies that calls are made on the thread on which BindToCurrentThread()
- // was invoked.
- THREAD_CHECKER(bound_thread_checker_);
-
- DISALLOW_COPY_AND_ASSIGN(MessageLoop);
-};
-
-} // namespace base
-
-#endif // BASE_MESSAGE_LOOP_MESSAGE_LOOP_H_
diff --git a/chromium/base/message_loop/message_loop_current.cc b/chromium/base/message_loop/message_loop_current.cc
index 7688ba3d7b0..14d7b8fbb59 100644
--- a/chromium/base/message_loop/message_loop_current.cc
+++ b/chromium/base/message_loop/message_loop_current.cc
@@ -81,23 +81,22 @@ void MessageLoopCurrent::SetAddQueueTimeToTasks(bool enable) {
current_->SetAddQueueTimeToTasks(enable);
}
-void MessageLoopCurrent::SetNestableTasksAllowed(bool allowed) {
- DCHECK(current_->IsBoundToCurrentThread());
- current_->SetTaskExecutionAllowed(allowed);
-}
-
-bool MessageLoopCurrent::NestableTasksAllowed() const {
- return current_->IsTaskExecutionAllowed();
-}
-
-MessageLoopCurrent::ScopedNestableTaskAllower::ScopedNestableTaskAllower()
+MessageLoopCurrent::ScopedAllowApplicationTasksInNativeNestedLoop::
+ ScopedAllowApplicationTasksInNativeNestedLoop()
: sequence_manager_(GetCurrentSequenceManagerImpl()),
- old_state_(sequence_manager_->IsTaskExecutionAllowed()) {
+ previous_state_(sequence_manager_->IsTaskExecutionAllowed()) {
+ TRACE_EVENT_BEGIN0("base", "ScopedNestableTaskAllower");
sequence_manager_->SetTaskExecutionAllowed(true);
}
-MessageLoopCurrent::ScopedNestableTaskAllower::~ScopedNestableTaskAllower() {
- sequence_manager_->SetTaskExecutionAllowed(old_state_);
+MessageLoopCurrent::ScopedAllowApplicationTasksInNativeNestedLoop::
+ ~ScopedAllowApplicationTasksInNativeNestedLoop() {
+ sequence_manager_->SetTaskExecutionAllowed(previous_state_);
+ TRACE_EVENT_END0("base", "ScopedNestableTaskAllower");
+}
+
+bool MessageLoopCurrent::NestableTasksAllowed() const {
+ return current_->IsTaskExecutionAllowed();
}
bool MessageLoopCurrent::operator==(const MessageLoopCurrent& other) const {
diff --git a/chromium/base/message_loop/message_loop_current.h b/chromium/base/message_loop/message_loop_current.h
index 61b8c6fcc42..462098e2522 100644
--- a/chromium/base/message_loop/message_loop_current.h
+++ b/chromium/base/message_loop/message_loop_current.h
@@ -8,7 +8,7 @@
#include <ostream>
#include "base/base_export.h"
-#include "base/logging.h"
+#include "base/check.h"
#include "base/memory/scoped_refptr.h"
#include "base/message_loop/message_pump_for_io.h"
#include "base/message_loop/message_pump_for_ui.h"
@@ -117,49 +117,46 @@ class BASE_EXPORT MessageLoopCurrent {
// posted tasks.
void SetAddQueueTimeToTasks(bool enable);
- // Enables or disables the recursive task processing. This happens in the case
- // of recursive message loops. Some unwanted message loops may occur when
- // using common controls or printer functions. By default, recursive task
- // processing is disabled.
+ // Enables nested task processing in scope of an upcoming native message loop.
+ // Some unwanted message loops may occur when using common controls or printer
+ // functions. Hence, nested task processing is disabled by default to avoid
+ // unplanned reentrancy. This re-enables it in cases where the stack is
+ // reentrancy safe and processing nestable tasks is explicitly safe.
//
- // Please use |ScopedNestableTaskAllower| instead of calling these methods
- // directly. In general, nestable message loops are to be avoided. They are
- // dangerous and difficult to get right, so please use with extreme caution.
- //
- // The specific case where tasks get queued is:
- // - The thread is running a message loop.
+ // For instance,
+ // - The current thread is running a message loop.
// - It receives a task #1 and executes it.
- // - The task #1 implicitly starts a message loop, like a MessageBox in the
- // unit test. This can also be StartDoc or GetSaveFileName.
+ // - The task #1 implicitly starts a nested message loop, like a MessageBox in
+ // the unit test. This can also be StartDoc or GetSaveFileName.
// - The thread receives a task #2 before or while in this second message
// loop.
// - With NestableTasksAllowed set to true, the task #2 will run right away.
// Otherwise, it will get executed right after task #1 completes at "thread
// message loop level".
//
- // DEPRECATED(https://crbug.com/750779): Use RunLoop::Type on the relevant
- // RunLoop instead of these methods.
- // TODO(gab): Migrate usage and delete these methods.
- void SetNestableTasksAllowed(bool allowed);
- bool NestableTasksAllowed() const;
-
- // Enables nestable tasks on the current MessageLoop while in scope.
- // DEPRECATED(https://crbug.com/750779): This should not be used when the
- // nested loop is driven by RunLoop (use RunLoop::Type::kNestableTasksAllowed
- // instead). It can however still be useful in a few scenarios where re-
- // entrancy is caused by a native message loop.
- // TODO(gab): Remove usage of this class alongside RunLoop and rename it to
- // ScopedApplicationTasksAllowedInNativeNestedLoop(?) for remaining use cases.
- class BASE_EXPORT ScopedNestableTaskAllower {
+ // Use RunLoop::Type::kNestableTasksAllowed when nesting is triggered by the
+ // application RunLoop rather than by native code.
+ class BASE_EXPORT ScopedAllowApplicationTasksInNativeNestedLoop {
public:
- ScopedNestableTaskAllower();
- ~ScopedNestableTaskAllower();
+ ScopedAllowApplicationTasksInNativeNestedLoop();
+ ~ScopedAllowApplicationTasksInNativeNestedLoop();
private:
sequence_manager::internal::SequenceManagerImpl* const sequence_manager_;
- const bool old_state_;
+ const bool previous_state_;
};
+ // TODO(https://crbug.com/781352): Remove usage of this old class. Either
+ // renaming it to ScopedAllowApplicationTasksInNativeNestedLoop when truly
+ // native or migrating it to RunLoop::Type::kNestableTasksAllowed otherwise.
+ using ScopedNestableTaskAllower =
+ ScopedAllowApplicationTasksInNativeNestedLoop;
+
+ // Returns true if nestable tasks are allowed on the current loop at this time
+ // (i.e. if a nested loop would start from the callee's point in the stack,
+ // would it be allowed to run application tasks).
+ bool NestableTasksAllowed() const;
+
// Returns true if this is the active MessageLoop for the current thread.
bool IsBoundToCurrentThread() const;
diff --git a/chromium/base/message_loop/message_loop_unittest.cc b/chromium/base/message_loop/message_loop_unittest.cc
deleted file mode 100644
index 1d0ed42b239..00000000000
--- a/chromium/base/message_loop/message_loop_unittest.cc
+++ /dev/null
@@ -1,2270 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/message_loop/message_loop.h"
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include <vector>
-
-#include "base/bind.h"
-#include "base/bind_helpers.h"
-#include "base/compiler_specific.h"
-#include "base/logging.h"
-#include "base/macros.h"
-#include "base/memory/ptr_util.h"
-#include "base/memory/ref_counted.h"
-#include "base/message_loop/message_loop_current.h"
-#include "base/message_loop/message_pump_for_io.h"
-#include "base/message_loop/message_pump_type.h"
-#include "base/pending_task.h"
-#include "base/posix/eintr_wrapper.h"
-#include "base/run_loop.h"
-#include "base/single_thread_task_runner.h"
-#include "base/synchronization/waitable_event.h"
-#include "base/task/task_observer.h"
-#include "base/task/thread_pool/thread_pool_instance.h"
-#include "base/test/bind_test_util.h"
-#include "base/test/gtest_util.h"
-#include "base/test/metrics/histogram_tester.h"
-#include "base/test/test_simple_task_runner.h"
-#include "base/test/test_timeouts.h"
-#include "base/threading/platform_thread.h"
-#include "base/threading/sequence_local_storage_slot.h"
-#include "base/threading/thread.h"
-#include "base/threading/thread_task_runner_handle.h"
-#include "build/build_config.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-#if defined(OS_ANDROID)
-#include "base/android/java_handler_thread.h"
-#include "base/android/jni_android.h"
-#include "base/test/android/java_handler_thread_helpers.h"
-#endif
-
-#if defined(OS_WIN)
-#include "base/message_loop/message_pump_win.h"
-#include "base/process/memory.h"
-#include "base/strings/string16.h"
-#include "base/win/current_module.h"
-#include "base/win/message_window.h"
-#include "base/win/scoped_handle.h"
-#endif
-
-namespace base {
-
-// TODO(darin): Platform-specific MessageLoop tests should be grouped together
-// to avoid chopping this file up with so many #ifdefs.
-
-namespace {
-
-class Foo : public RefCounted<Foo> {
- public:
- Foo() : test_count_(0) {}
-
- void Test0() { ++test_count_; }
-
- void Test1ConstRef(const std::string& a) {
- ++test_count_;
- result_.append(a);
- }
-
- void Test1Ptr(std::string* a) {
- ++test_count_;
- result_.append(*a);
- }
-
- void Test1Int(int a) { test_count_ += a; }
-
- void Test2Ptr(std::string* a, std::string* b) {
- ++test_count_;
- result_.append(*a);
- result_.append(*b);
- }
-
- void Test2Mixed(const std::string& a, std::string* b) {
- ++test_count_;
- result_.append(a);
- result_.append(*b);
- }
-
- int test_count() const { return test_count_; }
- const std::string& result() const { return result_; }
-
- private:
- friend class RefCounted<Foo>;
-
- ~Foo() = default;
-
- int test_count_;
- std::string result_;
-
- DISALLOW_COPY_AND_ASSIGN(Foo);
-};
-
-// This function runs slowly to simulate a large amount of work being done.
-static void SlowFunc(TimeDelta pause, int* quit_counter) {
- PlatformThread::Sleep(pause);
- if (--(*quit_counter) == 0)
- RunLoop::QuitCurrentWhenIdleDeprecated();
-}
-
-// This function records the time when Run was called in a Time object, which is
-// useful for building a variety of MessageLoop tests.
-static void RecordRunTimeFunc(TimeTicks* run_time, int* quit_counter) {
- *run_time = TimeTicks::Now();
-
- // Cause our Run function to take some time to execute. As a result we can
- // count on subsequent RecordRunTimeFunc()s running at a future time,
- // without worry about the resolution of our system clock being an issue.
- SlowFunc(TimeDelta::FromMilliseconds(10), quit_counter);
-}
-
-enum TaskType {
- MESSAGEBOX,
- ENDDIALOG,
- RECURSIVE,
- TIMEDMESSAGELOOP,
- QUITMESSAGELOOP,
- ORDERED,
- PUMPS,
- SLEEP,
- RUNS,
-};
-
-// Saves the order in which the tasks executed.
-struct TaskItem {
- TaskItem(TaskType t, int c, bool s) : type(t), cookie(c), start(s) {}
-
- TaskType type;
- int cookie;
- bool start;
-
- bool operator==(const TaskItem& other) const {
- return type == other.type && cookie == other.cookie && start == other.start;
- }
-};
-
-std::ostream& operator<<(std::ostream& os, TaskType type) {
- switch (type) {
- case MESSAGEBOX:
- os << "MESSAGEBOX";
- break;
- case ENDDIALOG:
- os << "ENDDIALOG";
- break;
- case RECURSIVE:
- os << "RECURSIVE";
- break;
- case TIMEDMESSAGELOOP:
- os << "TIMEDMESSAGELOOP";
- break;
- case QUITMESSAGELOOP:
- os << "QUITMESSAGELOOP";
- break;
- case ORDERED:
- os << "ORDERED";
- break;
- case PUMPS:
- os << "PUMPS";
- break;
- case SLEEP:
- os << "SLEEP";
- break;
- default:
- NOTREACHED();
- os << "Unknown TaskType";
- break;
- }
- return os;
-}
-
-std::ostream& operator<<(std::ostream& os, const TaskItem& item) {
- if (item.start)
- return os << item.type << " " << item.cookie << " starts";
- return os << item.type << " " << item.cookie << " ends";
-}
-
-class TaskList {
- public:
- void RecordStart(TaskType type, int cookie) {
- TaskItem item(type, cookie, true);
- DVLOG(1) << item;
- task_list_.push_back(item);
- }
-
- void RecordEnd(TaskType type, int cookie) {
- TaskItem item(type, cookie, false);
- DVLOG(1) << item;
- task_list_.push_back(item);
- }
-
- size_t Size() { return task_list_.size(); }
-
- TaskItem Get(int n) { return task_list_[n]; }
-
- private:
- std::vector<TaskItem> task_list_;
-};
-
-class DummyTaskObserver : public TaskObserver {
- public:
- explicit DummyTaskObserver(int num_tasks)
- : num_tasks_started_(0), num_tasks_processed_(0), num_tasks_(num_tasks) {}
-
- DummyTaskObserver(int num_tasks, int num_tasks_started)
- : num_tasks_started_(num_tasks_started),
- num_tasks_processed_(0),
- num_tasks_(num_tasks) {}
-
- ~DummyTaskObserver() override = default;
-
- void WillProcessTask(const PendingTask& pending_task,
- bool /* was_blocked_or_low_priority */) override {
- num_tasks_started_++;
- EXPECT_LE(num_tasks_started_, num_tasks_);
- EXPECT_EQ(num_tasks_started_, num_tasks_processed_ + 1);
- }
-
- void DidProcessTask(const PendingTask& pending_task) override {
- num_tasks_processed_++;
- EXPECT_LE(num_tasks_started_, num_tasks_);
- EXPECT_EQ(num_tasks_started_, num_tasks_processed_);
- }
-
- int num_tasks_started() const { return num_tasks_started_; }
- int num_tasks_processed() const { return num_tasks_processed_; }
-
- private:
- int num_tasks_started_;
- int num_tasks_processed_;
- const int num_tasks_;
-
- DISALLOW_COPY_AND_ASSIGN(DummyTaskObserver);
-};
-
-void RecursiveFunc(TaskList* order, int cookie, int depth, bool is_reentrant) {
- order->RecordStart(RECURSIVE, cookie);
- if (depth > 0) {
- if (is_reentrant)
- MessageLoopCurrent::Get()->SetNestableTasksAllowed(true);
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE,
- BindOnce(&RecursiveFunc, order, cookie, depth - 1, is_reentrant));
- }
- order->RecordEnd(RECURSIVE, cookie);
-}
-
-void QuitFunc(TaskList* order, int cookie) {
- order->RecordStart(QUITMESSAGELOOP, cookie);
- RunLoop::QuitCurrentWhenIdleDeprecated();
- order->RecordEnd(QUITMESSAGELOOP, cookie);
-}
-
-void PostNTasks(int posts_remaining) {
- if (posts_remaining > 1) {
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, BindOnce(&PostNTasks, posts_remaining - 1));
- }
-}
-
-class MessageLoopTest : public ::testing::Test {};
-
-#if defined(OS_WIN)
-
-void SubPumpFunc(OnceClosure on_done) {
- MessageLoopCurrent::ScopedNestableTaskAllower allow_nestable_tasks;
- MSG msg;
- while (::GetMessage(&msg, NULL, 0, 0)) {
- ::TranslateMessage(&msg);
- ::DispatchMessage(&msg);
- }
- std::move(on_done).Run();
-}
-
-const wchar_t kMessageBoxTitle[] = L"MessageLoop Unit Test";
-
-// MessageLoop implicitly start a "modal message loop". Modal dialog boxes,
-// common controls (like OpenFile) and StartDoc printing function can cause
-// implicit message loops.
-void MessageBoxFunc(TaskList* order, int cookie, bool is_reentrant) {
- order->RecordStart(MESSAGEBOX, cookie);
- if (is_reentrant)
- MessageLoopCurrent::Get()->SetNestableTasksAllowed(true);
- MessageBox(NULL, L"Please wait...", kMessageBoxTitle, MB_OK);
- order->RecordEnd(MESSAGEBOX, cookie);
-}
-
-// Will end the MessageBox.
-void EndDialogFunc(TaskList* order, int cookie) {
- order->RecordStart(ENDDIALOG, cookie);
- HWND window = GetActiveWindow();
- if (window != NULL) {
- EXPECT_NE(EndDialog(window, IDCONTINUE), 0);
- // Cheap way to signal that the window wasn't found if RunEnd() isn't
- // called.
- order->RecordEnd(ENDDIALOG, cookie);
- }
-}
-
-void RecursiveFuncWin(scoped_refptr<SingleThreadTaskRunner> task_runner,
- HANDLE event,
- bool expect_window,
- TaskList* order,
- bool is_reentrant) {
- task_runner->PostTask(FROM_HERE,
- BindOnce(&RecursiveFunc, order, 1, 2, is_reentrant));
- task_runner->PostTask(FROM_HERE,
- BindOnce(&MessageBoxFunc, order, 2, is_reentrant));
- task_runner->PostTask(FROM_HERE,
- BindOnce(&RecursiveFunc, order, 3, 2, is_reentrant));
- // The trick here is that for recursive task processing, this task will be
- // ran _inside_ the MessageBox message loop, dismissing the MessageBox
- // without a chance.
- // For non-recursive task processing, this will be executed _after_ the
- // MessageBox will have been dismissed by the code below, where
- // expect_window_ is true.
- task_runner->PostTask(FROM_HERE, BindOnce(&EndDialogFunc, order, 4));
- task_runner->PostTask(FROM_HERE, BindOnce(&QuitFunc, order, 5));
-
- // Enforce that every tasks are sent before starting to run the main thread
- // message loop.
- ASSERT_TRUE(SetEvent(event));
-
- // Poll for the MessageBox. Don't do this at home! At the speed we do it,
- // you will never realize one MessageBox was shown.
- for (; expect_window;) {
- HWND window = FindWindow(L"#32770", kMessageBoxTitle);
- if (window) {
- // Dismiss it.
- for (;;) {
- HWND button = FindWindowEx(window, NULL, L"Button", NULL);
- if (button != NULL) {
- EXPECT_EQ(0, SendMessage(button, WM_LBUTTONDOWN, 0, 0));
- EXPECT_EQ(0, SendMessage(button, WM_LBUTTONUP, 0, 0));
- break;
- }
- }
- break;
- }
- }
-}
-
-#endif // defined(OS_WIN)
-
-void PostNTasksThenQuit(int posts_remaining) {
- if (posts_remaining > 1) {
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, BindOnce(&PostNTasksThenQuit, posts_remaining - 1));
- } else {
- RunLoop::QuitCurrentWhenIdleDeprecated();
- }
-}
-
-#if defined(OS_WIN)
-
-class TestIOHandler : public MessagePumpForIO::IOHandler {
- public:
- TestIOHandler(const wchar_t* name, HANDLE signal, bool wait);
-
- void OnIOCompleted(MessagePumpForIO::IOContext* context,
- DWORD bytes_transfered,
- DWORD error) override;
-
- void Init();
- void WaitForIO();
- OVERLAPPED* context() { return &context_.overlapped; }
- DWORD size() { return sizeof(buffer_); }
-
- private:
- char buffer_[48];
- MessagePumpForIO::IOContext context_;
- HANDLE signal_;
- win::ScopedHandle file_;
- bool wait_;
-};
-
-TestIOHandler::TestIOHandler(const wchar_t* name, HANDLE signal, bool wait)
- : MessagePumpForIO::IOHandler(FROM_HERE), signal_(signal), wait_(wait) {
- memset(buffer_, 0, sizeof(buffer_));
-
- file_.Set(CreateFile(name, GENERIC_READ, 0, NULL, OPEN_EXISTING,
- FILE_FLAG_OVERLAPPED, NULL));
- EXPECT_TRUE(file_.IsValid());
-}
-
-void TestIOHandler::Init() {
- MessageLoopCurrentForIO::Get()->RegisterIOHandler(file_.Get(), this);
-
- DWORD read;
- EXPECT_FALSE(ReadFile(file_.Get(), buffer_, size(), &read, context()));
- EXPECT_EQ(static_cast<DWORD>(ERROR_IO_PENDING), GetLastError());
- if (wait_)
- WaitForIO();
-}
-
-void TestIOHandler::OnIOCompleted(MessagePumpForIO::IOContext* context,
- DWORD bytes_transfered,
- DWORD error) {
- ASSERT_TRUE(context == &context_);
- ASSERT_TRUE(SetEvent(signal_));
-}
-
-void TestIOHandler::WaitForIO() {
- EXPECT_TRUE(MessageLoopCurrentForIO::Get()->WaitForIOCompletion(300, this));
- EXPECT_TRUE(MessageLoopCurrentForIO::Get()->WaitForIOCompletion(400, this));
-}
-
-void RunTest_IOHandler() {
- win::ScopedHandle callback_called(CreateEvent(NULL, TRUE, FALSE, NULL));
- ASSERT_TRUE(callback_called.IsValid());
-
- const wchar_t* kPipeName = L"\\\\.\\pipe\\iohandler_pipe";
- win::ScopedHandle server(
- CreateNamedPipe(kPipeName, PIPE_ACCESS_OUTBOUND, 0, 1, 0, 0, 0, NULL));
- ASSERT_TRUE(server.IsValid());
-
- Thread thread("IOHandler test");
- Thread::Options options;
- options.message_pump_type = MessagePumpType::IO;
- ASSERT_TRUE(thread.StartWithOptions(options));
-
- TestIOHandler handler(kPipeName, callback_called.Get(), false);
- thread.task_runner()->PostTask(
- FROM_HERE, BindOnce(&TestIOHandler::Init, Unretained(&handler)));
- // Make sure the thread runs and sleeps for lack of work.
- PlatformThread::Sleep(TimeDelta::FromMilliseconds(100));
-
- const char buffer[] = "Hello there!";
- DWORD written;
- EXPECT_TRUE(WriteFile(server.Get(), buffer, sizeof(buffer), &written, NULL));
-
- DWORD result = WaitForSingleObject(callback_called.Get(), 1000);
- EXPECT_EQ(WAIT_OBJECT_0, result);
-
- thread.Stop();
-}
-
-void RunTest_WaitForIO() {
- win::ScopedHandle callback1_called(CreateEvent(NULL, TRUE, FALSE, NULL));
- win::ScopedHandle callback2_called(CreateEvent(NULL, TRUE, FALSE, NULL));
- ASSERT_TRUE(callback1_called.IsValid());
- ASSERT_TRUE(callback2_called.IsValid());
-
- const wchar_t* kPipeName1 = L"\\\\.\\pipe\\iohandler_pipe1";
- const wchar_t* kPipeName2 = L"\\\\.\\pipe\\iohandler_pipe2";
- win::ScopedHandle server1(
- CreateNamedPipe(kPipeName1, PIPE_ACCESS_OUTBOUND, 0, 1, 0, 0, 0, NULL));
- win::ScopedHandle server2(
- CreateNamedPipe(kPipeName2, PIPE_ACCESS_OUTBOUND, 0, 1, 0, 0, 0, NULL));
- ASSERT_TRUE(server1.IsValid());
- ASSERT_TRUE(server2.IsValid());
-
- Thread thread("IOHandler test");
- Thread::Options options;
- options.message_pump_type = MessagePumpType::IO;
- ASSERT_TRUE(thread.StartWithOptions(options));
-
- TestIOHandler handler1(kPipeName1, callback1_called.Get(), false);
- TestIOHandler handler2(kPipeName2, callback2_called.Get(), true);
- thread.task_runner()->PostTask(
- FROM_HERE, BindOnce(&TestIOHandler::Init, Unretained(&handler1)));
- // TODO(ajwong): Do we really need such long Sleeps in this function?
- // Make sure the thread runs and sleeps for lack of work.
- TimeDelta delay = TimeDelta::FromMilliseconds(100);
- PlatformThread::Sleep(delay);
- thread.task_runner()->PostTask(
- FROM_HERE, BindOnce(&TestIOHandler::Init, Unretained(&handler2)));
- PlatformThread::Sleep(delay);
-
- // At this time handler1 is waiting to be called, and the thread is waiting
- // on the Init method of handler2, filtering only handler2 callbacks.
-
- const char buffer[] = "Hello there!";
- DWORD written;
- EXPECT_TRUE(WriteFile(server1.Get(), buffer, sizeof(buffer), &written, NULL));
- PlatformThread::Sleep(2 * delay);
- EXPECT_EQ(static_cast<DWORD>(WAIT_TIMEOUT),
- WaitForSingleObject(callback1_called.Get(), 0))
- << "handler1 has not been called";
-
- EXPECT_TRUE(WriteFile(server2.Get(), buffer, sizeof(buffer), &written, NULL));
-
- HANDLE objects[2] = {callback1_called.Get(), callback2_called.Get()};
- DWORD result = WaitForMultipleObjects(2, objects, TRUE, 1000);
- EXPECT_EQ(WAIT_OBJECT_0, result);
-
- thread.Stop();
-}
-
-#endif // defined(OS_WIN)
-
-} // namespace
-
-//-----------------------------------------------------------------------------
-// Each test is run against each type of MessageLoop. That way we are sure
-// that message loops work properly in all configurations. Of course, in some
-// cases, a unit test may only be for a particular type of loop.
-
-class MessageLoopTypedTest : public ::testing::TestWithParam<MessagePumpType> {
- public:
- MessageLoopTypedTest() = default;
- ~MessageLoopTypedTest() = default;
-
- static std::string ParamInfoToString(
- ::testing::TestParamInfo<MessagePumpType> param_info) {
- switch (param_info.param) {
- case MessagePumpType::DEFAULT:
- return "default_pump";
- case MessagePumpType::IO:
- return "IO_pump";
- case MessagePumpType::UI:
- return "UI_pump";
- case MessagePumpType::CUSTOM:
- break;
-#if defined(OS_ANDROID)
- case MessagePumpType::JAVA:
- break;
-#endif // defined(OS_ANDROID)
-#if defined(OS_MACOSX)
- case MessagePumpType::NS_RUNLOOP:
- break;
-#endif // defined(OS_MACOSX)
-#if defined(OS_WIN)
- case MessagePumpType::UI_WITH_WM_QUIT_SUPPORT:
- break;
-#endif // defined(OS_WIN)
- }
- NOTREACHED();
- return "";
- }
-
- std::unique_ptr<MessageLoop> CreateMessageLoop() {
- auto message_loop = base::WrapUnique(new MessageLoop(GetParam(), nullptr));
- message_loop->BindToCurrentThread();
- return message_loop;
- }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MessageLoopTypedTest);
-};
-
-TEST_P(MessageLoopTypedTest, PostTask) {
- auto loop = CreateMessageLoop();
- // Add tests to message loop
- scoped_refptr<Foo> foo(new Foo());
- std::string a("a"), b("b"), c("c"), d("d");
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- BindOnce(&Foo::Test0, foo));
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, BindOnce(&Foo::Test1ConstRef, foo, a));
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- BindOnce(&Foo::Test1Ptr, foo, &b));
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- BindOnce(&Foo::Test1Int, foo, 100));
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, BindOnce(&Foo::Test2Ptr, foo, &a, &c));
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, BindOnce(&Foo::Test2Mixed, foo, a, &d));
- // After all tests, post a message that will shut down the message loop
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, BindOnce(&RunLoop::QuitCurrentWhenIdleDeprecated));
-
- // Now kick things off
- RunLoop().Run();
-
- EXPECT_EQ(foo->test_count(), 105);
- EXPECT_EQ(foo->result(), "abacad");
-}
-
-TEST_P(MessageLoopTypedTest, PostDelayedTask_Basic) {
- auto loop = CreateMessageLoop();
-
- // Test that PostDelayedTask results in a delayed task.
-
- const TimeDelta kDelay = TimeDelta::FromMilliseconds(100);
-
- int num_tasks = 1;
- TimeTicks run_time;
-
- TimeTicks time_before_run = TimeTicks::Now();
- loop->task_runner()->PostDelayedTask(
- FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time, &num_tasks), kDelay);
- RunLoop().Run();
- TimeTicks time_after_run = TimeTicks::Now();
-
- EXPECT_EQ(0, num_tasks);
- EXPECT_LT(kDelay, time_after_run - time_before_run);
-}
-
-TEST_P(MessageLoopTypedTest, PostDelayedTask_InDelayOrder) {
- auto loop = CreateMessageLoop();
-
- // Test that two tasks with different delays run in the right order.
- int num_tasks = 2;
- TimeTicks run_time1, run_time2;
-
- loop->task_runner()->PostDelayedTask(
- FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time1, &num_tasks),
- TimeDelta::FromMilliseconds(200));
- // If we get a large pause in execution (due to a context switch) here, this
- // test could fail.
- loop->task_runner()->PostDelayedTask(
- FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time2, &num_tasks),
- TimeDelta::FromMilliseconds(10));
-
- RunLoop().Run();
- EXPECT_EQ(0, num_tasks);
-
- EXPECT_TRUE(run_time2 < run_time1);
-}
-
-TEST_P(MessageLoopTypedTest, PostDelayedTask_InPostOrder) {
- auto loop = CreateMessageLoop();
-
- // Test that two tasks with the same delay run in the order in which they
- // were posted.
- //
- // NOTE: This is actually an approximate test since the API only takes a
- // "delay" parameter, so we are not exactly simulating two tasks that get
- // posted at the exact same time. It would be nice if the API allowed us to
- // specify the desired run time.
-
- const TimeDelta kDelay = TimeDelta::FromMilliseconds(100);
-
- int num_tasks = 2;
- TimeTicks run_time1, run_time2;
-
- loop->task_runner()->PostDelayedTask(
- FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time1, &num_tasks), kDelay);
- loop->task_runner()->PostDelayedTask(
- FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time2, &num_tasks), kDelay);
-
- RunLoop().Run();
- EXPECT_EQ(0, num_tasks);
-
- EXPECT_TRUE(run_time1 < run_time2);
-}
-
-TEST_P(MessageLoopTypedTest, PostDelayedTask_InPostOrder_2) {
- auto loop = CreateMessageLoop();
-
- // Test that a delayed task still runs after a normal tasks even if the
- // normal tasks take a long time to run.
-
- const TimeDelta kPause = TimeDelta::FromMilliseconds(50);
-
- int num_tasks = 2;
- TimeTicks run_time;
-
- loop->task_runner()->PostTask(FROM_HERE,
- BindOnce(&SlowFunc, kPause, &num_tasks));
- loop->task_runner()->PostDelayedTask(
- FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time, &num_tasks),
- TimeDelta::FromMilliseconds(10));
-
- TimeTicks time_before_run = TimeTicks::Now();
- RunLoop().Run();
- TimeTicks time_after_run = TimeTicks::Now();
-
- EXPECT_EQ(0, num_tasks);
-
- EXPECT_LT(kPause, time_after_run - time_before_run);
-}
-
-TEST_P(MessageLoopTypedTest, PostDelayedTask_InPostOrder_3) {
- auto loop = CreateMessageLoop();
-
- // Test that a delayed task still runs after a pile of normal tasks. The key
- // difference between this test and the previous one is that here we return
- // the MessageLoop a lot so we give the MessageLoop plenty of opportunities
- // to maybe run the delayed task. It should know not to do so until the
- // delayed task's delay has passed.
-
- int num_tasks = 11;
- TimeTicks run_time1, run_time2;
-
- // Clutter the ML with tasks.
- for (int i = 1; i < num_tasks; ++i)
- loop->task_runner()->PostTask(
- FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time1, &num_tasks));
-
- loop->task_runner()->PostDelayedTask(
- FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time2, &num_tasks),
- TimeDelta::FromMilliseconds(1));
-
- RunLoop().Run();
- EXPECT_EQ(0, num_tasks);
-
- EXPECT_TRUE(run_time2 > run_time1);
-}
-
-TEST_P(MessageLoopTypedTest, PostDelayedTask_SharedTimer) {
- auto loop = CreateMessageLoop();
-
- // Test that the interval of the timer, used to run the next delayed task, is
- // set to a value corresponding to when the next delayed task should run.
-
- // By setting num_tasks to 1, we ensure that the first task to run causes the
- // run loop to exit.
- int num_tasks = 1;
- TimeTicks run_time1, run_time2;
-
- loop->task_runner()->PostDelayedTask(
- FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time1, &num_tasks),
- TimeDelta::FromSeconds(1000));
- loop->task_runner()->PostDelayedTask(
- FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time2, &num_tasks),
- TimeDelta::FromMilliseconds(10));
-
- TimeTicks start_time = TimeTicks::Now();
-
- RunLoop().Run();
- EXPECT_EQ(0, num_tasks);
-
- // Ensure that we ran in far less time than the slower timer.
- TimeDelta total_time = TimeTicks::Now() - start_time;
- EXPECT_GT(5000, total_time.InMilliseconds());
-
- // In case both timers somehow run at nearly the same time, sleep a little
- // and then run all pending to force them both to have run. This is just
- // encouraging flakiness if there is any.
- PlatformThread::Sleep(TimeDelta::FromMilliseconds(100));
- RunLoop().RunUntilIdle();
-
- EXPECT_TRUE(run_time1.is_null());
- EXPECT_FALSE(run_time2.is_null());
-}
-
-namespace {
-
-// This is used to inject a test point for recording the destructor calls for
-// Closure objects send to MessageLoop::PostTask(). It is awkward usage since we
-// are trying to hook the actual destruction, which is not a common operation.
-class RecordDeletionProbe : public RefCounted<RecordDeletionProbe> {
- public:
- RecordDeletionProbe(RecordDeletionProbe* post_on_delete, bool* was_deleted)
- : post_on_delete_(post_on_delete), was_deleted_(was_deleted) {}
- void Run() {}
-
- private:
- friend class RefCounted<RecordDeletionProbe>;
-
- ~RecordDeletionProbe() {
- *was_deleted_ = true;
- if (post_on_delete_.get())
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, BindOnce(&RecordDeletionProbe::Run, post_on_delete_));
- }
-
- scoped_refptr<RecordDeletionProbe> post_on_delete_;
- bool* was_deleted_;
-};
-
-} // namespace
-
-/* TODO(darin): MessageLoop does not support deleting all tasks in the */
-/* destructor. */
-/* Fails, http://crbug.com/50272. */
-TEST_P(MessageLoopTypedTest, DISABLED_EnsureDeletion) {
- bool a_was_deleted = false;
- bool b_was_deleted = false;
- {
- auto loop = CreateMessageLoop();
- loop->task_runner()->PostTask(
- FROM_HERE, BindOnce(&RecordDeletionProbe::Run,
- new RecordDeletionProbe(nullptr, &a_was_deleted)));
- // TODO(ajwong): Do we really need 1000ms here?
- loop->task_runner()->PostDelayedTask(
- FROM_HERE,
- BindOnce(&RecordDeletionProbe::Run,
- new RecordDeletionProbe(nullptr, &b_was_deleted)),
- TimeDelta::FromMilliseconds(1000));
- }
- EXPECT_TRUE(a_was_deleted);
- EXPECT_TRUE(b_was_deleted);
-}
-
-/* TODO(darin): MessageLoop does not support deleting all tasks in the */
-/* destructor. */
-/* Fails, http://crbug.com/50272. */
-TEST_P(MessageLoopTypedTest, DISABLED_EnsureDeletion_Chain) {
- bool a_was_deleted = false;
- bool b_was_deleted = false;
- bool c_was_deleted = false;
- {
- auto loop = CreateMessageLoop();
- // The scoped_refptr for each of the below is held either by the chained
- // RecordDeletionProbe, or the bound RecordDeletionProbe::Run() callback.
- RecordDeletionProbe* a = new RecordDeletionProbe(nullptr, &a_was_deleted);
- RecordDeletionProbe* b = new RecordDeletionProbe(a, &b_was_deleted);
- RecordDeletionProbe* c = new RecordDeletionProbe(b, &c_was_deleted);
- loop->task_runner()->PostTask(FROM_HERE,
- BindOnce(&RecordDeletionProbe::Run, c));
- }
- EXPECT_TRUE(a_was_deleted);
- EXPECT_TRUE(b_was_deleted);
- EXPECT_TRUE(c_was_deleted);
-}
-
-namespace {
-
-void NestingFunc(int* depth) {
- if (*depth > 0) {
- *depth -= 1;
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- BindOnce(&NestingFunc, depth));
-
- MessageLoopCurrent::Get()->SetNestableTasksAllowed(true);
- RunLoop().Run();
- }
- base::RunLoop::QuitCurrentWhenIdleDeprecated();
-}
-
-} // namespace
-
-TEST_P(MessageLoopTypedTest, Nesting) {
- auto loop = CreateMessageLoop();
-
- int depth = 50;
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- BindOnce(&NestingFunc, &depth));
- RunLoop().Run();
- EXPECT_EQ(depth, 0);
-}
-
-TEST_P(MessageLoopTypedTest, RecursiveDenial1) {
- auto loop = CreateMessageLoop();
-
- EXPECT_TRUE(MessageLoopCurrent::Get()->NestableTasksAllowed());
- TaskList order;
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, BindOnce(&RecursiveFunc, &order, 1, 2, false));
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, BindOnce(&RecursiveFunc, &order, 2, 2, false));
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- BindOnce(&QuitFunc, &order, 3));
-
- RunLoop().Run();
-
- // FIFO order.
- ASSERT_EQ(14U, order.Size());
- EXPECT_EQ(order.Get(0), TaskItem(RECURSIVE, 1, true));
- EXPECT_EQ(order.Get(1), TaskItem(RECURSIVE, 1, false));
- EXPECT_EQ(order.Get(2), TaskItem(RECURSIVE, 2, true));
- EXPECT_EQ(order.Get(3), TaskItem(RECURSIVE, 2, false));
- EXPECT_EQ(order.Get(4), TaskItem(QUITMESSAGELOOP, 3, true));
- EXPECT_EQ(order.Get(5), TaskItem(QUITMESSAGELOOP, 3, false));
- EXPECT_EQ(order.Get(6), TaskItem(RECURSIVE, 1, true));
- EXPECT_EQ(order.Get(7), TaskItem(RECURSIVE, 1, false));
- EXPECT_EQ(order.Get(8), TaskItem(RECURSIVE, 2, true));
- EXPECT_EQ(order.Get(9), TaskItem(RECURSIVE, 2, false));
- EXPECT_EQ(order.Get(10), TaskItem(RECURSIVE, 1, true));
- EXPECT_EQ(order.Get(11), TaskItem(RECURSIVE, 1, false));
- EXPECT_EQ(order.Get(12), TaskItem(RECURSIVE, 2, true));
- EXPECT_EQ(order.Get(13), TaskItem(RECURSIVE, 2, false));
-}
-
-namespace {
-
-void OrderedFunc(TaskList* order, int cookie) {
- order->RecordStart(ORDERED, cookie);
- order->RecordEnd(ORDERED, cookie);
-}
-
-} // namespace
-
-TEST_P(MessageLoopTypedTest, RecursiveSupport1) {
- auto loop = CreateMessageLoop();
-
- TaskList order;
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, BindOnce(&RecursiveFunc, &order, 1, 2, true));
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, BindOnce(&RecursiveFunc, &order, 2, 2, true));
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- BindOnce(&QuitFunc, &order, 3));
-
- RunLoop().Run();
-
- // FIFO order.
- ASSERT_EQ(14U, order.Size());
- EXPECT_EQ(order.Get(0), TaskItem(RECURSIVE, 1, true));
- EXPECT_EQ(order.Get(1), TaskItem(RECURSIVE, 1, false));
- EXPECT_EQ(order.Get(2), TaskItem(RECURSIVE, 2, true));
- EXPECT_EQ(order.Get(3), TaskItem(RECURSIVE, 2, false));
- EXPECT_EQ(order.Get(4), TaskItem(QUITMESSAGELOOP, 3, true));
- EXPECT_EQ(order.Get(5), TaskItem(QUITMESSAGELOOP, 3, false));
- EXPECT_EQ(order.Get(6), TaskItem(RECURSIVE, 1, true));
- EXPECT_EQ(order.Get(7), TaskItem(RECURSIVE, 1, false));
- EXPECT_EQ(order.Get(8), TaskItem(RECURSIVE, 2, true));
- EXPECT_EQ(order.Get(9), TaskItem(RECURSIVE, 2, false));
- EXPECT_EQ(order.Get(10), TaskItem(RECURSIVE, 1, true));
- EXPECT_EQ(order.Get(11), TaskItem(RECURSIVE, 1, false));
- EXPECT_EQ(order.Get(12), TaskItem(RECURSIVE, 2, true));
- EXPECT_EQ(order.Get(13), TaskItem(RECURSIVE, 2, false));
-}
-
-// Tests that non nestable tasks run in FIFO if there are no nested loops.
-TEST_P(MessageLoopTypedTest, NonNestableWithNoNesting) {
- auto loop = CreateMessageLoop();
-
- TaskList order;
-
- ThreadTaskRunnerHandle::Get()->PostNonNestableTask(
- FROM_HERE, BindOnce(&OrderedFunc, &order, 1));
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- BindOnce(&OrderedFunc, &order, 2));
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- BindOnce(&QuitFunc, &order, 3));
- RunLoop().Run();
-
- // FIFO order.
- ASSERT_EQ(6U, order.Size());
- EXPECT_EQ(order.Get(0), TaskItem(ORDERED, 1, true));
- EXPECT_EQ(order.Get(1), TaskItem(ORDERED, 1, false));
- EXPECT_EQ(order.Get(2), TaskItem(ORDERED, 2, true));
- EXPECT_EQ(order.Get(3), TaskItem(ORDERED, 2, false));
- EXPECT_EQ(order.Get(4), TaskItem(QUITMESSAGELOOP, 3, true));
- EXPECT_EQ(order.Get(5), TaskItem(QUITMESSAGELOOP, 3, false));
-}
-
-namespace {
-
-void FuncThatPumps(TaskList* order, int cookie) {
- order->RecordStart(PUMPS, cookie);
- RunLoop(RunLoop::Type::kNestableTasksAllowed).RunUntilIdle();
- order->RecordEnd(PUMPS, cookie);
-}
-
-void SleepFunc(TaskList* order, int cookie, TimeDelta delay) {
- order->RecordStart(SLEEP, cookie);
- PlatformThread::Sleep(delay);
- order->RecordEnd(SLEEP, cookie);
-}
-
-} // namespace
-
-// Tests that non nestable tasks don't run when there's code in the call stack.
-TEST_P(MessageLoopTypedTest, NonNestableDelayedInNestedLoop) {
- auto loop = CreateMessageLoop();
-
- TaskList order;
-
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- BindOnce(&FuncThatPumps, &order, 1));
- ThreadTaskRunnerHandle::Get()->PostNonNestableTask(
- FROM_HERE, BindOnce(&OrderedFunc, &order, 2));
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- BindOnce(&OrderedFunc, &order, 3));
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE,
- BindOnce(&SleepFunc, &order, 4, TimeDelta::FromMilliseconds(50)));
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- BindOnce(&OrderedFunc, &order, 5));
- ThreadTaskRunnerHandle::Get()->PostNonNestableTask(
- FROM_HERE, BindOnce(&QuitFunc, &order, 6));
-
- RunLoop().Run();
-
- // FIFO order.
- ASSERT_EQ(12U, order.Size());
- EXPECT_EQ(order.Get(0), TaskItem(PUMPS, 1, true));
- EXPECT_EQ(order.Get(1), TaskItem(ORDERED, 3, true));
- EXPECT_EQ(order.Get(2), TaskItem(ORDERED, 3, false));
- EXPECT_EQ(order.Get(3), TaskItem(SLEEP, 4, true));
- EXPECT_EQ(order.Get(4), TaskItem(SLEEP, 4, false));
- EXPECT_EQ(order.Get(5), TaskItem(ORDERED, 5, true));
- EXPECT_EQ(order.Get(6), TaskItem(ORDERED, 5, false));
- EXPECT_EQ(order.Get(7), TaskItem(PUMPS, 1, false));
- EXPECT_EQ(order.Get(8), TaskItem(ORDERED, 2, true));
- EXPECT_EQ(order.Get(9), TaskItem(ORDERED, 2, false));
- EXPECT_EQ(order.Get(10), TaskItem(QUITMESSAGELOOP, 6, true));
- EXPECT_EQ(order.Get(11), TaskItem(QUITMESSAGELOOP, 6, false));
-}
-
-namespace {
-
-void FuncThatRuns(TaskList* order, int cookie, RunLoop* run_loop) {
- order->RecordStart(RUNS, cookie);
- {
- MessageLoopCurrent::ScopedNestableTaskAllower allow;
- run_loop->Run();
- }
- order->RecordEnd(RUNS, cookie);
-}
-
-void FuncThatQuitsNow() {
- base::RunLoop::QuitCurrentDeprecated();
-}
-
-} // namespace
-
-// Tests RunLoopQuit only quits the corresponding MessageLoop::Run.
-TEST_P(MessageLoopTypedTest, QuitNow) {
- auto loop = CreateMessageLoop();
-
- TaskList order;
-
- RunLoop run_loop;
-
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, BindOnce(&FuncThatRuns, &order, 1, Unretained(&run_loop)));
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- BindOnce(&OrderedFunc, &order, 2));
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- BindOnce(&FuncThatQuitsNow));
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- BindOnce(&OrderedFunc, &order, 3));
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- BindOnce(&FuncThatQuitsNow));
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, BindOnce(&OrderedFunc, &order, 4)); // never runs
-
- RunLoop().Run();
-
- ASSERT_EQ(6U, order.Size());
- int task_index = 0;
- EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, true));
- EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, true));
- EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, false));
- EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, false));
- EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 3, true));
- EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 3, false));
- EXPECT_EQ(static_cast<size_t>(task_index), order.Size());
-}
-
-// Tests RunLoopQuit only quits the corresponding MessageLoop::Run.
-TEST_P(MessageLoopTypedTest, RunLoopQuitTop) {
- auto loop = CreateMessageLoop();
-
- TaskList order;
-
- RunLoop outer_run_loop;
- RunLoop nested_run_loop;
-
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE,
- BindOnce(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- outer_run_loop.QuitClosure());
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- BindOnce(&OrderedFunc, &order, 2));
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- nested_run_loop.QuitClosure());
-
- outer_run_loop.Run();
-
- ASSERT_EQ(4U, order.Size());
- int task_index = 0;
- EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, true));
- EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, true));
- EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, false));
- EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, false));
- EXPECT_EQ(static_cast<size_t>(task_index), order.Size());
-}
-
-// Tests RunLoopQuit only quits the corresponding MessageLoop::Run.
-TEST_P(MessageLoopTypedTest, RunLoopQuitNested) {
- auto loop = CreateMessageLoop();
-
- TaskList order;
-
- RunLoop outer_run_loop;
- RunLoop nested_run_loop;
-
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE,
- BindOnce(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- nested_run_loop.QuitClosure());
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- BindOnce(&OrderedFunc, &order, 2));
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- outer_run_loop.QuitClosure());
-
- outer_run_loop.Run();
-
- ASSERT_EQ(4U, order.Size());
- int task_index = 0;
- EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, true));
- EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, false));
- EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, true));
- EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, false));
- EXPECT_EQ(static_cast<size_t>(task_index), order.Size());
-}
-
-// Quits current loop and immediately runs a nested loop.
-void QuitAndRunNestedLoop(TaskList* order,
- int cookie,
- RunLoop* outer_run_loop,
- RunLoop* nested_run_loop) {
- order->RecordStart(RUNS, cookie);
- outer_run_loop->Quit();
- nested_run_loop->Run();
- order->RecordEnd(RUNS, cookie);
-}
-
-// Test that we can run nested loop after quitting the current one.
-TEST_P(MessageLoopTypedTest, RunLoopNestedAfterQuit) {
- auto loop = CreateMessageLoop();
-
- TaskList order;
-
- RunLoop outer_run_loop;
- RunLoop nested_run_loop;
-
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- nested_run_loop.QuitClosure());
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, BindOnce(&QuitAndRunNestedLoop, &order, 1, &outer_run_loop,
- &nested_run_loop));
-
- outer_run_loop.Run();
-
- ASSERT_EQ(2U, order.Size());
- int task_index = 0;
- EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, true));
- EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, false));
- EXPECT_EQ(static_cast<size_t>(task_index), order.Size());
-}
-
-// Tests RunLoopQuit only quits the corresponding MessageLoop::Run.
-TEST_P(MessageLoopTypedTest, RunLoopQuitBogus) {
- auto loop = CreateMessageLoop();
-
- TaskList order;
-
- RunLoop outer_run_loop;
- RunLoop nested_run_loop;
- RunLoop bogus_run_loop;
-
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE,
- BindOnce(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- bogus_run_loop.QuitClosure());
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- BindOnce(&OrderedFunc, &order, 2));
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- outer_run_loop.QuitClosure());
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- nested_run_loop.QuitClosure());
-
- outer_run_loop.Run();
-
- ASSERT_EQ(4U, order.Size());
- int task_index = 0;
- EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, true));
- EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, true));
- EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, false));
- EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, false));
- EXPECT_EQ(static_cast<size_t>(task_index), order.Size());
-}
-
-// Tests RunLoopQuit only quits the corresponding MessageLoop::Run.
-TEST_P(MessageLoopTypedTest, RunLoopQuitDeep) {
- auto loop = CreateMessageLoop();
-
- TaskList order;
-
- RunLoop outer_run_loop;
- RunLoop nested_loop1;
- RunLoop nested_loop2;
- RunLoop nested_loop3;
- RunLoop nested_loop4;
-
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, BindOnce(&FuncThatRuns, &order, 1, Unretained(&nested_loop1)));
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, BindOnce(&FuncThatRuns, &order, 2, Unretained(&nested_loop2)));
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, BindOnce(&FuncThatRuns, &order, 3, Unretained(&nested_loop3)));
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, BindOnce(&FuncThatRuns, &order, 4, Unretained(&nested_loop4)));
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- BindOnce(&OrderedFunc, &order, 5));
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- outer_run_loop.QuitClosure());
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- BindOnce(&OrderedFunc, &order, 6));
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- nested_loop1.QuitClosure());
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- BindOnce(&OrderedFunc, &order, 7));
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- nested_loop2.QuitClosure());
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- BindOnce(&OrderedFunc, &order, 8));
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- nested_loop3.QuitClosure());
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- BindOnce(&OrderedFunc, &order, 9));
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- nested_loop4.QuitClosure());
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- BindOnce(&OrderedFunc, &order, 10));
-
- outer_run_loop.Run();
-
- ASSERT_EQ(18U, order.Size());
- int task_index = 0;
- EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, true));
- EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 2, true));
- EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 3, true));
- EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 4, true));
- EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 5, true));
- EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 5, false));
- EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 6, true));
- EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 6, false));
- EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 7, true));
- EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 7, false));
- EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 8, true));
- EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 8, false));
- EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 9, true));
- EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 9, false));
- EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 4, false));
- EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 3, false));
- EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 2, false));
- EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, false));
- EXPECT_EQ(static_cast<size_t>(task_index), order.Size());
-}
-
-// Tests RunLoopQuit works before RunWithID.
-TEST_P(MessageLoopTypedTest, RunLoopQuitOrderBefore) {
- auto loop = CreateMessageLoop();
-
- TaskList order;
-
- RunLoop run_loop;
-
- run_loop.Quit();
-
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, BindOnce(&OrderedFunc, &order, 1)); // never runs
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, BindOnce(&FuncThatQuitsNow)); // never runs
-
- run_loop.Run();
-
- ASSERT_EQ(0U, order.Size());
-}
-
-// Tests RunLoopQuit works during RunWithID.
-TEST_P(MessageLoopTypedTest, RunLoopQuitOrderDuring) {
- auto loop = CreateMessageLoop();
-
- TaskList order;
-
- RunLoop run_loop;
-
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- BindOnce(&OrderedFunc, &order, 1));
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, run_loop.QuitClosure());
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, BindOnce(&OrderedFunc, &order, 2)); // never runs
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, BindOnce(&FuncThatQuitsNow)); // never runs
-
- run_loop.Run();
-
- ASSERT_EQ(2U, order.Size());
- int task_index = 0;
- EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 1, true));
- EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 1, false));
- EXPECT_EQ(static_cast<size_t>(task_index), order.Size());
-}
-
-// Tests RunLoopQuit works after RunWithID.
-TEST_P(MessageLoopTypedTest, RunLoopQuitOrderAfter) {
- auto loop = CreateMessageLoop();
-
- TaskList order;
-
- RunLoop run_loop;
-
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, BindOnce(&FuncThatRuns, &order, 1, Unretained(&run_loop)));
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- BindOnce(&OrderedFunc, &order, 2));
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- BindOnce(&FuncThatQuitsNow));
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- BindOnce(&OrderedFunc, &order, 3));
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, run_loop.QuitClosure()); // has no affect
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- BindOnce(&OrderedFunc, &order, 4));
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- BindOnce(&FuncThatQuitsNow));
-
- run_loop.allow_quit_current_deprecated_ = true;
-
- RunLoop outer_run_loop;
- outer_run_loop.Run();
-
- ASSERT_EQ(8U, order.Size());
- int task_index = 0;
- EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, true));
- EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, true));
- EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, false));
- EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, false));
- EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 3, true));
- EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 3, false));
- EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 4, true));
- EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 4, false));
- EXPECT_EQ(static_cast<size_t>(task_index), order.Size());
-}
-
-// There was a bug in the MessagePumpGLib where posting tasks recursively
-// caused the message loop to hang, due to the buffer of the internal pipe
-// becoming full. Test all MessageLoop types to ensure this issue does not
-// exist in other MessagePumps.
-//
-// On Linux, the pipe buffer size is 64KiB by default. The bug caused one
-// byte accumulated in the pipe per two posts, so we should repeat 128K
-// times to reproduce the bug.
-#if defined(OS_FUCHSIA)
-// TODO(crbug.com/810077): This is flaky on Fuchsia.
-#define MAYBE_RecursivePosts DISABLED_RecursivePosts
-#else
-#define MAYBE_RecursivePosts RecursivePosts
-#endif
-TEST_P(MessageLoopTypedTest, MAYBE_RecursivePosts) {
- const int kNumTimes = 1 << 17;
- auto loop = CreateMessageLoop();
- loop->task_runner()->PostTask(FROM_HERE,
- BindOnce(&PostNTasksThenQuit, kNumTimes));
- RunLoop().Run();
-}
-
-TEST_P(MessageLoopTypedTest, NestableTasksAllowedAtTopLevel) {
- auto loop = CreateMessageLoop();
- EXPECT_TRUE(MessageLoopCurrent::Get()->NestableTasksAllowed());
-}
-
-// Nestable tasks shouldn't be allowed to run reentrantly by default (regression
-// test for https://crbug.com/754112).
-TEST_P(MessageLoopTypedTest, NestableTasksDisallowedByDefault) {
- auto loop = CreateMessageLoop();
- RunLoop run_loop;
- loop->task_runner()->PostTask(
- FROM_HERE,
- BindOnce(
- [](RunLoop* run_loop) {
- EXPECT_FALSE(MessageLoopCurrent::Get()->NestableTasksAllowed());
- run_loop->Quit();
- },
- Unretained(&run_loop)));
- run_loop.Run();
-}
-
-TEST_P(MessageLoopTypedTest, NestableTasksProcessedWhenRunLoopAllows) {
- auto loop = CreateMessageLoop();
- RunLoop run_loop;
- loop->task_runner()->PostTask(
- FROM_HERE,
- BindOnce(
- [](RunLoop* run_loop) {
- // This test would hang if this RunLoop wasn't of type
- // kNestableTasksAllowed (i.e. this is testing that this is
- // processed and doesn't hang).
- RunLoop nested_run_loop(RunLoop::Type::kNestableTasksAllowed);
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE,
- BindOnce(
- [](RunLoop* nested_run_loop) {
- // Each additional layer of application task nesting
- // requires its own allowance. The kNestableTasksAllowed
- // RunLoop allowed this task to be processed but further
- // nestable tasks are by default disallowed from this
- // layer.
- EXPECT_FALSE(
- MessageLoopCurrent::Get()->NestableTasksAllowed());
- nested_run_loop->Quit();
- },
- Unretained(&nested_run_loop)));
- nested_run_loop.Run();
-
- run_loop->Quit();
- },
- Unretained(&run_loop)));
- run_loop.Run();
-}
-
-TEST_P(MessageLoopTypedTest, NestableTasksAllowedExplicitlyInScope) {
- auto loop = CreateMessageLoop();
- RunLoop run_loop;
- loop->task_runner()->PostTask(
- FROM_HERE,
- BindOnce(
- [](RunLoop* run_loop) {
- {
- MessageLoopCurrent::ScopedNestableTaskAllower
- allow_nestable_tasks;
- EXPECT_TRUE(MessageLoopCurrent::Get()->NestableTasksAllowed());
- }
- EXPECT_FALSE(MessageLoopCurrent::Get()->NestableTasksAllowed());
- run_loop->Quit();
- },
- Unretained(&run_loop)));
- run_loop.Run();
-}
-
-TEST_P(MessageLoopTypedTest, NestableTasksAllowedManually) {
- auto loop = CreateMessageLoop();
- RunLoop run_loop;
- loop->task_runner()->PostTask(
- FROM_HERE,
- BindOnce(
- [](RunLoop* run_loop) {
- EXPECT_FALSE(MessageLoopCurrent::Get()->NestableTasksAllowed());
- MessageLoopCurrent::Get()->SetNestableTasksAllowed(true);
- EXPECT_TRUE(MessageLoopCurrent::Get()->NestableTasksAllowed());
- MessageLoopCurrent::Get()->SetNestableTasksAllowed(false);
- EXPECT_FALSE(MessageLoopCurrent::Get()->NestableTasksAllowed());
- run_loop->Quit();
- },
- Unretained(&run_loop)));
- run_loop.Run();
-}
-
-TEST_P(MessageLoopTypedTest, IsIdleForTesting) {
- auto loop = CreateMessageLoop();
- EXPECT_TRUE(loop->IsIdleForTesting());
- loop->task_runner()->PostTask(FROM_HERE, BindOnce([]() {}));
- loop->task_runner()->PostDelayedTask(FROM_HERE, BindOnce([]() {}),
- TimeDelta::FromMilliseconds(10));
- EXPECT_FALSE(loop->IsIdleForTesting());
- RunLoop().RunUntilIdle();
- EXPECT_TRUE(loop->IsIdleForTesting());
-
- PlatformThread::Sleep(TimeDelta::FromMilliseconds(20));
- EXPECT_TRUE(loop->IsIdleForTesting());
-}
-
-TEST_P(MessageLoopTypedTest, IsIdleForTestingNonNestableTask) {
- auto loop = CreateMessageLoop();
- RunLoop run_loop;
- EXPECT_TRUE(loop->IsIdleForTesting());
- bool nested_task_run = false;
- loop->task_runner()->PostTask(
- FROM_HERE, BindLambdaForTesting([&]() {
- RunLoop nested_run_loop(RunLoop::Type::kNestableTasksAllowed);
-
- loop->task_runner()->PostNonNestableTask(
- FROM_HERE, BindLambdaForTesting([&]() { nested_task_run = true; }));
-
- loop->task_runner()->PostTask(FROM_HERE, BindLambdaForTesting([&]() {
- EXPECT_FALSE(nested_task_run);
- EXPECT_TRUE(loop->IsIdleForTesting());
- }));
-
- nested_run_loop.RunUntilIdle();
- EXPECT_FALSE(nested_task_run);
- EXPECT_FALSE(loop->IsIdleForTesting());
- }));
-
- run_loop.RunUntilIdle();
-
- EXPECT_TRUE(nested_task_run);
- EXPECT_TRUE(loop->IsIdleForTesting());
-}
-
-INSTANTIATE_TEST_SUITE_P(All,
- MessageLoopTypedTest,
- ::testing::Values(MessagePumpType::DEFAULT,
- MessagePumpType::UI,
- MessagePumpType::IO),
- MessageLoopTypedTest::ParamInfoToString);
-
-#if defined(OS_WIN)
-
-// Verifies that the MessageLoop ignores WM_QUIT, rather than quitting.
-// Users of MessageLoop typically expect to control when their RunLoops stop
-// Run()ning explicitly, via QuitClosure() etc (see https://crbug.com/720078).
-TEST_F(MessageLoopTest, WmQuitIsIgnored) {
- MessageLoop loop(MessagePumpType::UI);
-
- // Post a WM_QUIT message to the current thread.
- ::PostQuitMessage(0);
-
- // Post a task to the current thread, with a small delay to make it less
- // likely that we process the posted task before looking for WM_* messages.
- bool task_was_run = false;
- RunLoop run_loop;
- loop.task_runner()->PostDelayedTask(
- FROM_HERE,
- BindOnce(
- [](bool* flag, OnceClosure closure) {
- *flag = true;
- std::move(closure).Run();
- },
- &task_was_run, run_loop.QuitClosure()),
- TestTimeouts::tiny_timeout());
-
- // Run the loop, and ensure that the posted task is processed before we quit.
- run_loop.Run();
- EXPECT_TRUE(task_was_run);
-}
-
-TEST_F(MessageLoopTest, PostDelayedTask_SharedTimer_SubPump) {
- MessageLoop message_loop(MessagePumpType::UI);
-
- // Test that the interval of the timer, used to run the next delayed task, is
- // set to a value corresponding to when the next delayed task should run.
-
- // By setting num_tasks to 1, we ensure that the first task to run causes the
- // run loop to exit.
- int num_tasks = 1;
- TimeTicks run_time;
-
- RunLoop run_loop;
-
- message_loop.task_runner()->PostTask(
- FROM_HERE, BindOnce(&SubPumpFunc, run_loop.QuitClosure()));
-
- // This very delayed task should never run.
- message_loop.task_runner()->PostDelayedTask(
- FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time, &num_tasks),
- TimeDelta::FromSeconds(1000));
-
- // This slightly delayed task should run from within SubPumpFunc.
- message_loop.task_runner()->PostDelayedTask(FROM_HERE,
- BindOnce(&::PostQuitMessage, 0),
- TimeDelta::FromMilliseconds(10));
-
- Time start_time = Time::Now();
-
- run_loop.Run();
- EXPECT_EQ(1, num_tasks);
-
- // Ensure that we ran in far less time than the slower timer.
- TimeDelta total_time = Time::Now() - start_time;
- EXPECT_GT(5000, total_time.InMilliseconds());
-
- // In case both timers somehow run at nearly the same time, sleep a little
- // and then run all pending to force them both to have run. This is just
- // encouraging flakiness if there is any.
- PlatformThread::Sleep(TimeDelta::FromMilliseconds(100));
- RunLoop().RunUntilIdle();
-
- EXPECT_TRUE(run_time.is_null());
-}
-
-namespace {
-
-// When this fires (per the associated WM_TIMER firing), it posts an
-// application task to quit the native loop.
-bool QuitOnSystemTimer(UINT message,
- WPARAM wparam,
- LPARAM lparam,
- LRESULT* result) {
- if (message == static_cast<UINT>(WM_TIMER)) {
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- BindOnce(&::PostQuitMessage, 0));
- }
- *result = 0;
- return true;
-}
-
-// When this fires (per the associated WM_TIMER firing), it posts a delayed
-// application task to quit the native loop.
-bool DelayedQuitOnSystemTimer(UINT message,
- WPARAM wparam,
- LPARAM lparam,
- LRESULT* result) {
- if (message == static_cast<UINT>(WM_TIMER)) {
- ThreadTaskRunnerHandle::Get()->PostDelayedTask(
- FROM_HERE, BindOnce(&::PostQuitMessage, 0),
- TimeDelta::FromMilliseconds(10));
- }
- *result = 0;
- return true;
-}
-
-} // namespace
-
-// This is a regression test for
-// https://crrev.com/c/1455266/9/base/message_loop/message_pump_win.cc#125
-// See below for the delayed task version.
-TEST_F(MessageLoopTest, PostImmediateTaskFromSystemPump) {
- MessageLoop message_loop(MessagePumpType::UI);
-
- RunLoop run_loop;
-
- // A native message window to generate a system message which invokes
- // QuitOnSystemTimer() when the native timer fires.
- win::MessageWindow local_message_window;
- local_message_window.Create(BindRepeating(&QuitOnSystemTimer));
- ASSERT_TRUE(::SetTimer(local_message_window.hwnd(), 0, 20, nullptr));
-
- // The first task will enter a native message loop. This test then verifies
- // that the pump is able to run an immediate application task after the native
- // pump went idle.
- message_loop.task_runner()->PostTask(
- FROM_HERE, BindOnce(&SubPumpFunc, run_loop.QuitClosure()));
-
- // Test success is determined by not hanging in this Run() call.
- run_loop.Run();
-}
-
-// This is a regression test for
-// https://crrev.com/c/1455266/9/base/message_loop/message_pump_win.cc#125 This
-// is the delayed task equivalent of the above PostImmediateTaskFromSystemPump
-// test.
-//
-// As a reminder of how this works, here's the sequence of events in this test:
-// 1) Test start:
-// work_deduplicator.cc(24): BindToCurrentThread
-// work_deduplicator.cc(34): OnWorkRequested
-// thread_controller_with_message_pump_impl.cc(237) : DoWork
-// work_deduplicator.cc(50): OnWorkStarted
-// 2) SubPumpFunc entered:
-// message_loop_unittest.cc(278): SubPumpFunc
-// 3) ScopedNestableTaskAllower triggers nested ScheduleWork:
-// work_deduplicator.cc(34): OnWorkRequested
-// 4) Nested system loop starts and pumps internal kMsgHaveWork:
-// message_loop_unittest.cc(282): SubPumpFunc : Got Message
-// message_pump_win.cc(302): HandleWorkMessage
-// thread_controller_with_message_pump_impl.cc(237) : DoWork
-// 5) Attempt to DoWork(), there's nothing to do, NextWorkInfo indicates delay.
-// work_deduplicator.cc(50): OnWorkStarted
-// work_deduplicator.cc(58): WillCheckForMoreWork
-// work_deduplicator.cc(67): DidCheckForMoreWork
-// 6) Return control to HandleWorkMessage() which schedules native timer
-// and goes to sleep (no kMsgHaveWork in native queue).
-// message_pump_win.cc(328): HandleWorkMessage ScheduleNativeTimer
-// 7) Native timer fires and posts the delayed application task:
-// message_loop_unittest.cc(282): SubPumpFunc : Got Message
-// message_loop_unittest.cc(1581): DelayedQuitOnSystemTimer
-// !! This is the critical step verified by this test. Since the
-// ThreadController is idle after (6), it won't be invoked again and thus
-// won't get a chance to return a NextWorkInfo that indicates the next
-// delay. A native timer is thus required to have SubPumpFunc handle it.
-// work_deduplicator.cc(42): OnDelayedWorkRequested
-// message_pump_win.cc(129): ScheduleDelayedWork
-// 9) The scheduled native timer fires and runs application task binding
-// ::PostQuitMessage :
-// message_loop_unittest.cc(282) SubPumpFunc : Got Message
-// work_deduplicator.cc(50): OnWorkStarted
-// thread_controller_with_message_pump_impl.cc(237) : DoWork
-// 10) SequenceManager updates delay to none and notifies
-// (TODO(scheduler-dev): Could remove this step but WorkDeduplicator knows
-// to ignore at least):
-// work_deduplicator.cc(42): OnDelayedWorkRequested
-// 11) Nested application task completes and SubPumpFunc unwinds:
-// work_deduplicator.cc(58): WillCheckForMoreWork
-// work_deduplicator.cc(67): DidCheckForMoreWork
-// 12) ~ScopedNestableTaskAllower() makes sure WorkDeduplicator knows we're
-// back in DoWork() (not relevant in this test but important overall).
-// work_deduplicator.cc(50): OnWorkStarted
-// 13) Application task which ran SubPumpFunc completes and test finishes.
-// work_deduplicator.cc(67): DidCheckForMoreWork
-TEST_F(MessageLoopTest, PostDelayedTaskFromSystemPump) {
- MessageLoop message_loop(MessagePumpType::UI);
-
- RunLoop run_loop;
-
- // A native message window to generate a system message which invokes
- // DelayedQuitOnSystemTimer() when the native timer fires.
- win::MessageWindow local_message_window;
- local_message_window.Create(BindRepeating(&DelayedQuitOnSystemTimer));
- ASSERT_TRUE(::SetTimer(local_message_window.hwnd(), 0, 20, nullptr));
-
- // The first task will enter a native message loop. This test then verifies
- // that the pump is able to run a delayed application task after the native
- // pump went idle.
- message_loop.task_runner()->PostTask(
- FROM_HERE, BindOnce(&SubPumpFunc, run_loop.QuitClosure()));
-
- // Test success is determined by not hanging in this Run() call.
- run_loop.Run();
-}
-
-TEST_F(MessageLoopTest, WmQuitIsVisibleToSubPump) {
- MessageLoop message_loop(MessagePumpType::UI);
-
- // Regression test for https://crbug.com/888559. When processing a
- // kMsgHaveWork we peek and remove the next message and dispatch that ourself,
- // to minimize impact of these messages on message-queue processing. If we
- // received kMsgHaveWork dispatched by a nested pump (e.g. ::GetMessage()
- // loop) then there is a risk that the next message is that loop's WM_QUIT
- // message, which must be processed directly by ::GetMessage() for the loop to
- // actually quit. This test verifies that WM_QUIT exits works as expected even
- // if it happens to immediately follow a kMsgHaveWork in the queue.
-
- RunLoop run_loop;
-
- // This application task will enter the subpump.
- message_loop.task_runner()->PostTask(
- FROM_HERE, BindOnce(&SubPumpFunc, run_loop.QuitClosure()));
-
- // This application task will post a native WM_QUIT.
- message_loop.task_runner()->PostTask(FROM_HERE,
- BindOnce(&::PostQuitMessage, 0));
-
- // The presence of this application task means that the pump will see a
- // non-empty queue after processing the previous application task (which
- // posted the WM_QUIT) and hence will repost a kMsgHaveWork message in the
- // native event queue. Without the fix to https://crbug.com/888559, this would
- // previously result in the subpump processing kMsgHaveWork and it stealing
- // the WM_QUIT message, leaving the test hung in the subpump.
- message_loop.task_runner()->PostTask(FROM_HERE, DoNothing());
-
- // Test success is determined by not hanging in this Run() call.
- run_loop.Run();
-}
-
-TEST_F(MessageLoopTest, RepostingWmQuitDoesntStarveUpcomingNativeLoop) {
- MessageLoop message_loop(MessagePumpType::UI);
-
- // This test ensures that application tasks are being processed by the native
- // subpump despite the kMsgHaveWork event having already been consumed by the
- // time the subpump is entered. This is subtly enforced by
- // MessageLoopCurrent::ScopedNestableTaskAllower which will ScheduleWork()
- // upon construction (and if it's absent, the MessageLoop shouldn't process
- // application tasks so kMsgHaveWork is irrelevant).
- // Note: This test also fails prior to the fix for https://crbug.com/888559
- // (in fact, the last two tasks are sufficient as a regression test), probably
- // because of a dangling kMsgHaveWork recreating the effect from
- // MessageLoopTest.NativeMsgProcessingDoesntStealWmQuit.
-
- RunLoop run_loop;
-
- // This application task will post a native WM_QUIT which will be ignored
- // by the main message pump.
- message_loop.task_runner()->PostTask(FROM_HERE,
- BindOnce(&::PostQuitMessage, 0));
-
- // Make sure the pump does a few extra cycles and processes (ignores) the
- // WM_QUIT.
- message_loop.task_runner()->PostTask(FROM_HERE, DoNothing());
- message_loop.task_runner()->PostTask(FROM_HERE, DoNothing());
-
- // This application task will enter the subpump.
- message_loop.task_runner()->PostTask(
- FROM_HERE, BindOnce(&SubPumpFunc, run_loop.QuitClosure()));
-
- // Post an application task that will post WM_QUIT to the nested loop. The
- // test will hang if the subpump doesn't process application tasks as it
- // should.
- message_loop.task_runner()->PostTask(FROM_HERE,
- BindOnce(&::PostQuitMessage, 0));
-
- // Test success is determined by not hanging in this Run() call.
- run_loop.Run();
-}
-
-// TODO(https://crbug.com/890016): Enable once multiple layers of nested loops
-// works.
-TEST_F(MessageLoopTest,
- DISABLED_UnwindingMultipleSubPumpsDoesntStarveApplicationTasks) {
- MessageLoop message_loop(MessagePumpType::UI);
-
- // Regression test for https://crbug.com/890016.
- // Tests that the subpump is still processing application tasks after
- // unwinding from nested subpumps (i.e. that they didn't consume the last
- // kMsgHaveWork).
-
- RunLoop run_loop;
-
- // Enter multiple levels of nested subpumps.
- message_loop.task_runner()->PostTask(
- FROM_HERE, BindOnce(&SubPumpFunc, run_loop.QuitClosure()));
- message_loop.task_runner()->PostTask(
- FROM_HERE, BindOnce(&SubPumpFunc, DoNothing::Once()));
- message_loop.task_runner()->PostTask(
- FROM_HERE, BindOnce(&SubPumpFunc, DoNothing::Once()));
-
- // Quit two layers (with tasks in between to allow each quit to be handled
- // before continuing -- ::PostQuitMessage() sets a bit, it's not a real queued
- // message :
- // https://blogs.msdn.microsoft.com/oldnewthing/20051104-33/?p=33453).
- message_loop.task_runner()->PostTask(FROM_HERE,
- BindOnce(&::PostQuitMessage, 0));
- message_loop.task_runner()->PostTask(FROM_HERE, DoNothing());
- message_loop.task_runner()->PostTask(FROM_HERE, DoNothing());
- message_loop.task_runner()->PostTask(FROM_HERE,
- BindOnce(&::PostQuitMessage, 0));
- message_loop.task_runner()->PostTask(FROM_HERE, DoNothing());
- message_loop.task_runner()->PostTask(FROM_HERE, DoNothing());
-
- bool last_task_ran = false;
- message_loop.task_runner()->PostTask(
- FROM_HERE, BindOnce([](bool* to_set) { *to_set = true; },
- Unretained(&last_task_ran)));
-
- message_loop.task_runner()->PostTask(FROM_HERE,
- BindOnce(&::PostQuitMessage, 0));
-
- run_loop.Run();
-
- EXPECT_TRUE(last_task_ran);
-}
-
-namespace {
-
-// A side effect of this test is the generation a beep. Sorry.
-void RunTest_RecursiveDenial2(MessagePumpType message_pump_type) {
- MessageLoop loop(message_pump_type);
-
- Thread worker("RecursiveDenial2_worker");
- Thread::Options options;
- options.message_pump_type = message_pump_type;
- ASSERT_EQ(true, worker.StartWithOptions(options));
- TaskList order;
- win::ScopedHandle event(CreateEvent(NULL, FALSE, FALSE, NULL));
- worker.task_runner()->PostTask(
- FROM_HERE, BindOnce(&RecursiveFuncWin, ThreadTaskRunnerHandle::Get(),
- event.Get(), true, &order, false));
- // Let the other thread execute.
- WaitForSingleObject(event.Get(), INFINITE);
- RunLoop().Run();
-
- ASSERT_EQ(17u, order.Size());
- EXPECT_EQ(order.Get(0), TaskItem(RECURSIVE, 1, true));
- EXPECT_EQ(order.Get(1), TaskItem(RECURSIVE, 1, false));
- EXPECT_EQ(order.Get(2), TaskItem(MESSAGEBOX, 2, true));
- EXPECT_EQ(order.Get(3), TaskItem(MESSAGEBOX, 2, false));
- EXPECT_EQ(order.Get(4), TaskItem(RECURSIVE, 3, true));
- EXPECT_EQ(order.Get(5), TaskItem(RECURSIVE, 3, false));
- // When EndDialogFunc is processed, the window is already dismissed, hence no
- // "end" entry.
- EXPECT_EQ(order.Get(6), TaskItem(ENDDIALOG, 4, true));
- EXPECT_EQ(order.Get(7), TaskItem(QUITMESSAGELOOP, 5, true));
- EXPECT_EQ(order.Get(8), TaskItem(QUITMESSAGELOOP, 5, false));
- EXPECT_EQ(order.Get(9), TaskItem(RECURSIVE, 1, true));
- EXPECT_EQ(order.Get(10), TaskItem(RECURSIVE, 1, false));
- EXPECT_EQ(order.Get(11), TaskItem(RECURSIVE, 3, true));
- EXPECT_EQ(order.Get(12), TaskItem(RECURSIVE, 3, false));
- EXPECT_EQ(order.Get(13), TaskItem(RECURSIVE, 1, true));
- EXPECT_EQ(order.Get(14), TaskItem(RECURSIVE, 1, false));
- EXPECT_EQ(order.Get(15), TaskItem(RECURSIVE, 3, true));
- EXPECT_EQ(order.Get(16), TaskItem(RECURSIVE, 3, false));
-}
-
-} // namespace
-
-// This test occasionally hangs. See http://crbug.com/44567.
-TEST_F(MessageLoopTest, DISABLED_RecursiveDenial2) {
- RunTest_RecursiveDenial2(MessagePumpType::DEFAULT);
- RunTest_RecursiveDenial2(MessagePumpType::UI);
- RunTest_RecursiveDenial2(MessagePumpType::IO);
-}
-
-// A side effect of this test is the generation a beep. Sorry. This test also
-// needs to process windows messages on the current thread.
-TEST_F(MessageLoopTest, RecursiveSupport2) {
- MessageLoop loop(MessagePumpType::UI);
-
- Thread worker("RecursiveSupport2_worker");
- Thread::Options options;
- options.message_pump_type = MessagePumpType::UI;
- ASSERT_EQ(true, worker.StartWithOptions(options));
- TaskList order;
- win::ScopedHandle event(CreateEvent(NULL, FALSE, FALSE, NULL));
- worker.task_runner()->PostTask(
- FROM_HERE, BindOnce(&RecursiveFuncWin, ThreadTaskRunnerHandle::Get(),
- event.Get(), false, &order, true));
- // Let the other thread execute.
- WaitForSingleObject(event.Get(), INFINITE);
- RunLoop().Run();
-
- ASSERT_EQ(18u, order.Size());
- EXPECT_EQ(order.Get(0), TaskItem(RECURSIVE, 1, true));
- EXPECT_EQ(order.Get(1), TaskItem(RECURSIVE, 1, false));
- EXPECT_EQ(order.Get(2), TaskItem(MESSAGEBOX, 2, true));
- // Note that this executes in the MessageBox modal loop.
- EXPECT_EQ(order.Get(3), TaskItem(RECURSIVE, 3, true));
- EXPECT_EQ(order.Get(4), TaskItem(RECURSIVE, 3, false));
- EXPECT_EQ(order.Get(5), TaskItem(ENDDIALOG, 4, true));
- EXPECT_EQ(order.Get(6), TaskItem(ENDDIALOG, 4, false));
- EXPECT_EQ(order.Get(7), TaskItem(MESSAGEBOX, 2, false));
- /* The order can subtly change here. The reason is that when RecursiveFunc(1)
- is called in the main thread, if it is faster than getting to the
- PostTask(FROM_HERE, BindOnce(&QuitFunc) execution, the order of task
- execution can change. We don't care anyway that the order isn't correct.
- EXPECT_EQ(order.Get(8), TaskItem(QUITMESSAGELOOP, 5, true));
- EXPECT_EQ(order.Get(9), TaskItem(QUITMESSAGELOOP, 5, false));
- EXPECT_EQ(order.Get(10), TaskItem(RECURSIVE, 1, true));
- EXPECT_EQ(order.Get(11), TaskItem(RECURSIVE, 1, false));
- */
- EXPECT_EQ(order.Get(12), TaskItem(RECURSIVE, 3, true));
- EXPECT_EQ(order.Get(13), TaskItem(RECURSIVE, 3, false));
- EXPECT_EQ(order.Get(14), TaskItem(RECURSIVE, 1, true));
- EXPECT_EQ(order.Get(15), TaskItem(RECURSIVE, 1, false));
- EXPECT_EQ(order.Get(16), TaskItem(RECURSIVE, 3, true));
- EXPECT_EQ(order.Get(17), TaskItem(RECURSIVE, 3, false));
-}
-
-#endif // defined(OS_WIN)
-
-TEST_F(MessageLoopTest, TaskObserver) {
- const int kNumPosts = 6;
- DummyTaskObserver observer(kNumPosts);
-
- MessageLoop loop;
- loop.AddTaskObserver(&observer);
- loop.task_runner()->PostTask(FROM_HERE,
- BindOnce(&PostNTasksThenQuit, kNumPosts));
- RunLoop().Run();
- loop.RemoveTaskObserver(&observer);
-
- EXPECT_EQ(kNumPosts, observer.num_tasks_started());
- EXPECT_EQ(kNumPosts, observer.num_tasks_processed());
-}
-
-#if defined(OS_WIN)
-TEST_F(MessageLoopTest, IOHandler) {
- RunTest_IOHandler();
-}
-
-TEST_F(MessageLoopTest, WaitForIO) {
- RunTest_WaitForIO();
-}
-
-TEST_F(MessageLoopTest, HighResolutionTimer) {
- MessageLoop message_loop;
- Time::EnableHighResolutionTimer(true);
-
- constexpr TimeDelta kFastTimer = TimeDelta::FromMilliseconds(5);
- constexpr TimeDelta kSlowTimer = TimeDelta::FromMilliseconds(100);
-
- {
- // Post a fast task to enable the high resolution timers.
- RunLoop run_loop;
- message_loop.task_runner()->PostDelayedTask(
- FROM_HERE,
- BindOnce(
- [](RunLoop* run_loop) {
- EXPECT_TRUE(Time::IsHighResolutionTimerInUse());
- run_loop->QuitWhenIdle();
- },
- &run_loop),
- kFastTimer);
- run_loop.Run();
- }
- EXPECT_FALSE(Time::IsHighResolutionTimerInUse());
- {
- // Check that a slow task does not trigger the high resolution logic.
- RunLoop run_loop;
- message_loop.task_runner()->PostDelayedTask(
- FROM_HERE,
- BindOnce(
- [](RunLoop* run_loop) {
- EXPECT_FALSE(Time::IsHighResolutionTimerInUse());
- run_loop->QuitWhenIdle();
- },
- &run_loop),
- kSlowTimer);
- run_loop.Run();
- }
- Time::EnableHighResolutionTimer(false);
- Time::ResetHighResolutionTimerUsage();
-}
-
-#endif // defined(OS_WIN)
-
-namespace {
-// Inject a test point for recording the destructor calls for Closure objects
-// send to MessageLoop::PostTask(). It is awkward usage since we are trying to
-// hook the actual destruction, which is not a common operation.
-class DestructionObserverProbe : public RefCounted<DestructionObserverProbe> {
- public:
- DestructionObserverProbe(bool* task_destroyed,
- bool* destruction_observer_called)
- : task_destroyed_(task_destroyed),
- destruction_observer_called_(destruction_observer_called) {}
- virtual void Run() {
- // This task should never run.
- ADD_FAILURE();
- }
-
- private:
- friend class RefCounted<DestructionObserverProbe>;
-
- virtual ~DestructionObserverProbe() {
- EXPECT_FALSE(*destruction_observer_called_);
- *task_destroyed_ = true;
- }
-
- bool* task_destroyed_;
- bool* destruction_observer_called_;
-};
-
-class MLDestructionObserver : public MessageLoopCurrent::DestructionObserver {
- public:
- MLDestructionObserver(bool* task_destroyed, bool* destruction_observer_called)
- : task_destroyed_(task_destroyed),
- destruction_observer_called_(destruction_observer_called),
- task_destroyed_before_message_loop_(false) {}
- void WillDestroyCurrentMessageLoop() override {
- task_destroyed_before_message_loop_ = *task_destroyed_;
- *destruction_observer_called_ = true;
- }
- bool task_destroyed_before_message_loop() const {
- return task_destroyed_before_message_loop_;
- }
-
- private:
- bool* task_destroyed_;
- bool* destruction_observer_called_;
- bool task_destroyed_before_message_loop_;
-};
-
-} // namespace
-
-TEST_F(MessageLoopTest, DestructionObserverTest) {
- // Verify that the destruction observer gets called at the very end (after
- // all the pending tasks have been destroyed).
- MessageLoop* loop = new MessageLoop;
- const TimeDelta kDelay = TimeDelta::FromMilliseconds(100);
-
- bool task_destroyed = false;
- bool destruction_observer_called = false;
-
- MLDestructionObserver observer(&task_destroyed, &destruction_observer_called);
- MessageLoopCurrent::Get()->AddDestructionObserver(&observer);
- loop->task_runner()->PostDelayedTask(
- FROM_HERE,
- BindOnce(&DestructionObserverProbe::Run,
- base::MakeRefCounted<DestructionObserverProbe>(
- &task_destroyed, &destruction_observer_called)),
- kDelay);
- delete loop;
- EXPECT_TRUE(observer.task_destroyed_before_message_loop());
- // The task should have been destroyed when we deleted the loop.
- EXPECT_TRUE(task_destroyed);
- EXPECT_TRUE(destruction_observer_called);
-}
-
-// Verify that MessageLoop sets ThreadMainTaskRunner::current() and it
-// posts tasks on that message loop.
-TEST_F(MessageLoopTest, ThreadMainTaskRunner) {
- MessageLoop loop;
-
- scoped_refptr<Foo> foo(new Foo());
- std::string a("a");
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, BindOnce(&Foo::Test1ConstRef, foo, a));
-
- // Post quit task;
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, BindOnce(&RunLoop::QuitCurrentWhenIdleDeprecated));
-
- // Now kick things off
- RunLoop().Run();
-
- EXPECT_EQ(foo->test_count(), 1);
- EXPECT_EQ(foo->result(), "a");
-}
-
-TEST_F(MessageLoopTest, IsType) {
- MessageLoop loop(MessagePumpType::UI);
- EXPECT_TRUE(loop.IsType(MessagePumpType::UI));
- EXPECT_FALSE(loop.IsType(MessagePumpType::IO));
- EXPECT_FALSE(loop.IsType(MessagePumpType::DEFAULT));
-}
-
-#if defined(OS_WIN)
-void EmptyFunction() {}
-
-void PostMultipleTasks() {
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- base::BindOnce(&EmptyFunction));
- ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
- base::BindOnce(&EmptyFunction));
-}
-
-static const int kSignalMsg = WM_USER + 2;
-
-void PostWindowsMessage(HWND message_hwnd) {
- PostMessage(message_hwnd, kSignalMsg, 0, 2);
-}
-
-void EndTest(bool* did_run, HWND hwnd) {
- *did_run = true;
- PostMessage(hwnd, WM_CLOSE, 0, 0);
-}
-
-int kMyMessageFilterCode = 0x5002;
-
-LRESULT CALLBACK TestWndProcThunk(HWND hwnd,
- UINT message,
- WPARAM wparam,
- LPARAM lparam) {
- if (message == WM_CLOSE)
- EXPECT_TRUE(DestroyWindow(hwnd));
- if (message != kSignalMsg)
- return DefWindowProc(hwnd, message, wparam, lparam);
-
- switch (lparam) {
- case 1:
- // First, we post a task that will post multiple no-op tasks to make sure
- // that the pump's incoming task queue does not become empty during the
- // test.
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, base::BindOnce(&PostMultipleTasks));
- // Next, we post a task that posts a windows message to trigger the second
- // stage of the test.
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, base::BindOnce(&PostWindowsMessage, hwnd));
- break;
- case 2:
- // Since we're about to enter a modal loop, tell the message loop that we
- // intend to nest tasks.
- MessageLoopCurrent::Get()->SetNestableTasksAllowed(true);
- bool did_run = false;
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, base::BindOnce(&EndTest, &did_run, hwnd));
- // Run a nested windows-style message loop and verify that our task runs.
- // If it doesn't, then we'll loop here until the test times out.
- MSG msg;
- while (GetMessage(&msg, 0, 0, 0)) {
- if (!CallMsgFilter(&msg, kMyMessageFilterCode))
- DispatchMessage(&msg);
- // If this message is a WM_CLOSE, explicitly exit the modal loop.
- // Posting a WM_QUIT should handle this, but unfortunately
- // MessagePumpWin eats WM_QUIT messages even when running inside a modal
- // loop.
- if (msg.message == WM_CLOSE)
- break;
- }
- EXPECT_TRUE(did_run);
- RunLoop::QuitCurrentWhenIdleDeprecated();
- break;
- }
- return 0;
-}
-
-TEST_F(MessageLoopTest, AlwaysHaveUserMessageWhenNesting) {
- MessageLoop loop(MessagePumpType::UI);
- HINSTANCE instance = CURRENT_MODULE();
- WNDCLASSEX wc = {0};
- wc.cbSize = sizeof(wc);
- wc.lpfnWndProc = TestWndProcThunk;
- wc.hInstance = instance;
- wc.lpszClassName = L"MessageLoopTest_HWND";
- ATOM atom = RegisterClassEx(&wc);
- ASSERT_TRUE(atom);
-
- HWND message_hwnd = CreateWindow(MAKEINTATOM(atom), 0, 0, 0, 0, 0, 0,
- HWND_MESSAGE, 0, instance, 0);
- ASSERT_TRUE(message_hwnd) << GetLastError();
-
- ASSERT_TRUE(PostMessage(message_hwnd, kSignalMsg, 0, 1));
-
- RunLoop().Run();
-
- ASSERT_TRUE(UnregisterClass(MAKEINTATOM(atom), instance));
-}
-#endif // defined(OS_WIN)
-
-TEST_F(MessageLoopTest, SetTaskRunner) {
- MessageLoop loop;
- scoped_refptr<SingleThreadTaskRunner> new_runner(new TestSimpleTaskRunner());
-
- loop.SetTaskRunner(new_runner);
- EXPECT_EQ(new_runner, loop.task_runner());
- EXPECT_EQ(new_runner, ThreadTaskRunnerHandle::Get());
-}
-
-TEST_F(MessageLoopTest, OriginalRunnerWorks) {
- MessageLoop loop;
- scoped_refptr<SingleThreadTaskRunner> new_runner(new TestSimpleTaskRunner());
- scoped_refptr<SingleThreadTaskRunner> original_runner(loop.task_runner());
- loop.SetTaskRunner(new_runner);
-
- scoped_refptr<Foo> foo(new Foo());
- original_runner->PostTask(FROM_HERE, BindOnce(&Foo::Test1ConstRef, foo, "a"));
- RunLoop().RunUntilIdle();
- EXPECT_EQ(1, foo->test_count());
-}
-
-TEST_F(MessageLoopTest, DeleteUnboundLoop) {
- // It should be possible to delete an unbound message loop on a thread which
- // already has another active loop. This happens when thread creation fails.
- MessageLoop loop;
- std::unique_ptr<MessageLoop> unbound_loop(
- MessageLoop::CreateUnbound(MessagePumpType::DEFAULT));
- unbound_loop.reset();
- EXPECT_TRUE(loop.task_runner()->RunsTasksInCurrentSequence());
- EXPECT_EQ(loop.task_runner(), ThreadTaskRunnerHandle::Get());
-}
-
-// Verify that tasks posted to and code running in the scope of the same
-// MessageLoop access the same SequenceLocalStorage values.
-TEST_F(MessageLoopTest, SequenceLocalStorageSetGet) {
- MessageLoop loop;
-
- SequenceLocalStorageSlot<int> slot;
-
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, BindLambdaForTesting([&]() { slot.emplace(11); }));
-
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, BindLambdaForTesting([&]() { EXPECT_EQ(*slot, 11); }));
-
- RunLoop().RunUntilIdle();
- EXPECT_EQ(*slot, 11);
-}
-
-// Verify that tasks posted to and code running in different MessageLoops access
-// different SequenceLocalStorage values.
-TEST_F(MessageLoopTest, SequenceLocalStorageDifferentMessageLoops) {
- SequenceLocalStorageSlot<int> slot;
-
- {
- MessageLoop loop;
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, BindLambdaForTesting([&]() { slot.emplace(11); }));
-
- RunLoop().RunUntilIdle();
- EXPECT_EQ(*slot, 11);
- }
-
- MessageLoop loop;
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, BindLambdaForTesting([&]() { EXPECT_FALSE(slot); }));
-
- RunLoop().RunUntilIdle();
- EXPECT_NE(slot.GetOrCreateValue(), 11);
-}
-
-namespace {
-
-class PostTaskOnDestroy {
- public:
- PostTaskOnDestroy(int times) : times_remaining_(times) {}
- ~PostTaskOnDestroy() { PostTaskWithPostingDestructor(times_remaining_); }
-
- // Post a task that will repost itself on destruction |times| times.
- static void PostTaskWithPostingDestructor(int times) {
- if (times > 0) {
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, BindOnce([](std::unique_ptr<PostTaskOnDestroy>) {},
- std::make_unique<PostTaskOnDestroy>(times - 1)));
- }
- }
-
- private:
- const int times_remaining_;
-
- DISALLOW_COPY_AND_ASSIGN(PostTaskOnDestroy);
-};
-
-} // namespace
-
-// Test that MessageLoop destruction handles a task's destructor posting another
-// task.
-TEST(MessageLoopDestructionTest, DestroysFineWithPostTaskOnDestroy) {
- std::unique_ptr<MessageLoop> loop = std::make_unique<MessageLoop>();
-
- PostTaskOnDestroy::PostTaskWithPostingDestructor(10);
- loop.reset();
-}
-
-} // namespace base
diff --git a/chromium/base/message_loop/message_pump.h b/chromium/base/message_loop/message_pump.h
index 2224f40b9a4..3151a5cbfa5 100644
--- a/chromium/base/message_loop/message_pump.h
+++ b/chromium/base/message_loop/message_pump.h
@@ -6,7 +6,7 @@
#define BASE_MESSAGE_LOOP_MESSAGE_PUMP_H_
#include "base/base_export.h"
-#include "base/logging.h"
+#include "base/check_op.h"
#include "base/message_loop/message_pump_type.h"
#include "base/message_loop/timer_slack.h"
#include "base/sequence_checker.h"
diff --git a/chromium/base/message_loop/message_pump_fuchsia.cc b/chromium/base/message_loop/message_pump_fuchsia.cc
index 1c575681064..d8bda871821 100644
--- a/chromium/base/message_loop/message_pump_fuchsia.cc
+++ b/chromium/base/message_loop/message_pump_fuchsia.cc
@@ -15,7 +15,7 @@
#include "base/auto_reset.h"
#include "base/fuchsia/fuchsia_logging.h"
#include "base/logging.h"
-#include "base/trace_event/trace_event.h"
+#include "base/trace_event/base_tracing.h"
namespace base {
diff --git a/chromium/base/message_loop/message_pump_glib_unittest.cc b/chromium/base/message_loop/message_pump_glib_unittest.cc
index 54fa300c290..c1da85c0cfe 100644
--- a/chromium/base/message_loop/message_pump_glib_unittest.cc
+++ b/chromium/base/message_loop/message_pump_glib_unittest.cc
@@ -14,6 +14,7 @@
#include "base/bind_helpers.h"
#include "base/callback.h"
#include "base/files/file_util.h"
+#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "base/memory/ref_counted.h"
@@ -447,8 +448,6 @@ class GLibLoopRunner : public RefCounted<GLibLoopRunner> {
};
void TestGLibLoopInternal(EventInjector* injector, OnceClosure done) {
- // Allow tasks to be processed from 'native' event loops.
- MessageLoopCurrent::Get()->SetNestableTasksAllowed(true);
scoped_refptr<GLibLoopRunner> runner = new GLibLoopRunner();
int task_count = 0;
@@ -472,7 +471,10 @@ void TestGLibLoopInternal(EventInjector* injector, OnceClosure done) {
TimeDelta::FromMilliseconds(40));
// Run a nested, straight GLib message loop.
- runner->RunGLib();
+ {
+ MessageLoopCurrent::ScopedNestableTaskAllower allow_nestable_tasks;
+ runner->RunGLib();
+ }
ASSERT_EQ(3, task_count);
EXPECT_EQ(4, injector->processed_events());
@@ -480,8 +482,6 @@ void TestGLibLoopInternal(EventInjector* injector, OnceClosure done) {
}
void TestGtkLoopInternal(EventInjector* injector, OnceClosure done) {
- // Allow tasks to be processed from 'native' event loops.
- MessageLoopCurrent::Get()->SetNestableTasksAllowed(true);
scoped_refptr<GLibLoopRunner> runner = new GLibLoopRunner();
int task_count = 0;
@@ -505,7 +505,10 @@ void TestGtkLoopInternal(EventInjector* injector, OnceClosure done) {
TimeDelta::FromMilliseconds(40));
// Run a nested, straight Gtk message loop.
- runner->RunLoop();
+ {
+ MessageLoopCurrent::ScopedNestableTaskAllower allow_nestable_tasks;
+ runner->RunLoop();
+ }
ASSERT_EQ(3, task_count);
EXPECT_EQ(4, injector->processed_events());
diff --git a/chromium/base/message_loop/message_pump_io_ios.cc b/chromium/base/message_loop/message_pump_io_ios.cc
index 9b43e8edb22..6dcc0d7c0bb 100644
--- a/chromium/base/message_loop/message_pump_io_ios.cc
+++ b/chromium/base/message_loop/message_pump_io_ios.cc
@@ -4,6 +4,8 @@
#include "base/message_loop/message_pump_io_ios.h"
+#include "base/notreached.h"
+
namespace base {
MessagePumpIOSForIO::FdWatchController::FdWatchController(
diff --git a/chromium/base/message_loop/message_pump_io_ios_unittest.cc b/chromium/base/message_loop/message_pump_io_ios_unittest.cc
index aec10012a7c..196c6d4aac8 100644
--- a/chromium/base/message_loop/message_pump_io_ios_unittest.cc
+++ b/chromium/base/message_loop/message_pump_io_ios_unittest.cc
@@ -6,6 +6,7 @@
#include <unistd.h>
+#include "base/logging.h"
#include "base/macros.h"
#include "base/message_loop/message_pump_for_io.h"
#include "base/posix/eintr_wrapper.h"
diff --git a/chromium/base/message_loop/message_pump_libevent.cc b/chromium/base/message_loop/message_pump_libevent.cc
index 5a8f5f5249e..17175f76d91 100644
--- a/chromium/base/message_loop/message_pump_libevent.cc
+++ b/chromium/base/message_loop/message_pump_libevent.cc
@@ -16,7 +16,7 @@
#include "base/posix/eintr_wrapper.h"
#include "base/third_party/libevent/event.h"
#include "base/time/time.h"
-#include "base/trace_event/trace_event.h"
+#include "base/trace_event/base_tracing.h"
#include "build/build_config.h"
#if defined(OS_MACOSX)
@@ -130,7 +130,7 @@ bool MessagePumpLibevent::WatchFileDescriptor(int fd,
// threadsafe, and your watcher may never be registered.
DCHECK(watch_file_descriptor_caller_checker_.CalledOnValidThread());
- TRACE_EVENT_WITH_FLOW1(TRACE_DISABLED_BY_DEFAULT("toplevel.flow"),
+ TRACE_EVENT_WITH_FLOW1("toplevel.flow",
"MessagePumpLibevent::WatchFileDescriptor",
reinterpret_cast<uintptr_t>(controller) ^ fd,
TRACE_EVENT_FLAG_FLOW_OUT, "fd", fd);
@@ -315,11 +315,10 @@ void MessagePumpLibevent::OnLibeventNotification(int fd,
FdWatchController* controller = static_cast<FdWatchController*>(context);
DCHECK(controller);
TRACE_EVENT0("toplevel", "OnLibevent");
- TRACE_EVENT_WITH_FLOW1(TRACE_DISABLED_BY_DEFAULT("toplevel.flow"),
- "MessagePumpLibevent::OnLibeventNotification",
- reinterpret_cast<uintptr_t>(controller) ^ fd,
- TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT,
- "fd", fd);
+ TRACE_EVENT_WITH_FLOW1(
+ "toplevel.flow", "MessagePumpLibevent::OnLibeventNotification",
+ reinterpret_cast<uintptr_t>(controller) ^ fd,
+ TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT, "fd", fd);
TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION heap_profiler_scope(
controller->created_from_location().file_name());
diff --git a/chromium/base/message_loop/message_pump_libevent_unittest.cc b/chromium/base/message_loop/message_pump_libevent_unittest.cc
index f5dbd4006fb..ae5e011f70c 100644
--- a/chromium/base/message_loop/message_pump_libevent_unittest.cc
+++ b/chromium/base/message_loop/message_pump_libevent_unittest.cc
@@ -12,6 +12,7 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/files/file_util.h"
+#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/message_loop/message_pump_type.h"
#include "base/posix/eintr_wrapper.h"
diff --git a/chromium/base/message_loop/message_pump_mac.h b/chromium/base/message_loop/message_pump_mac.h
index 909b946dc8e..f9c9db9696f 100644
--- a/chromium/base/message_loop/message_pump_mac.h
+++ b/chromium/base/message_loop/message_pump_mac.h
@@ -36,9 +36,7 @@
#include <CoreFoundation/CoreFoundation.h>
#include "base/macros.h"
-#include "base/memory/weak_ptr.h"
#include "base/message_loop/timer_slack.h"
-#include "base/optional.h"
#include "build/build_config.h"
#if defined(__OBJC__)
@@ -145,10 +143,6 @@ class BASE_EXPORT MessagePumpCFRunLoopBase : public MessagePump {
// Get the current mode mask from |enabled_modes_|.
int GetModeMask() const;
- // Controls whether the timer invalidation performance optimization is
- // allowed.
- void SetTimerInvalidationAllowed(bool allowed);
-
private:
class ScopedModeEnabler;
@@ -159,25 +153,6 @@ class BASE_EXPORT MessagePumpCFRunLoopBase : public MessagePump {
// avoids querying Now() for key callers.
void ScheduleDelayedWorkImpl(TimeDelta delta);
- // Marking timers as invalid at the right time helps significantly reduce
- // power use (see the comment in RunDelayedWorkTimer()), however there is no
- // public API for doing so. CFRuntime.h states that CFRuntimeBase, upon which
- // the above timer invalidation functions are based, can change from release
- // to release and should not be accessed directly (this struct last changed at
- // least in 2008 in CF-476).
- //
- // This function uses private API to modify a test timer's valid state and
- // uses public API to confirm that the private API changed the right bit.
- static bool CanInvalidateCFRunLoopTimers();
-
- // Sets a Core Foundation object's "invalid" bit to |valid|. Based on code
- // from CFRunLoop.c.
- static void ChromeCFRunLoopTimerSetValid(CFRunLoopTimerRef timer, bool valid);
-
- // Controls the validity of the delayed work timer. Does nothing if timer
- // invalidation is disallowed.
- void SetDelayedWorkTimerValid(bool valid);
-
// Timer callback scheduled by ScheduleDelayedWork. This does not do any
// work, but it signals |work_source_| so that delayed work can be performed
// within the appropriate priority constraints.
@@ -280,14 +255,6 @@ class BASE_EXPORT MessagePumpCFRunLoopBase : public MessagePump {
bool delegateless_work_;
bool delegateless_idle_work_;
- // Whether or not timer invalidation can be used in order to reduce the number
- // of reschedulings.
- bool allow_timer_invalidation_;
-
- // If changing timer validitity was attempted while it was disallowed, this
- // value tracks the desired state of the timer.
- Optional<bool> pending_timer_validity_;
-
DISALLOW_COPY_AND_ASSIGN(MessagePumpCFRunLoopBase);
};
diff --git a/chromium/base/message_loop/message_pump_mac.mm b/chromium/base/message_loop/message_pump_mac.mm
index cbda789a0d2..1d0ac66a917 100644
--- a/chromium/base/message_loop/message_pump_mac.mm
+++ b/chromium/base/message_loop/message_pump_mac.mm
@@ -53,48 +53,6 @@ bool g_not_using_cr_app = false;
// The MessagePump controlling [NSApp run].
MessagePumpNSApplication* g_app_pump;
-
-Feature kMessagePumpTimerInvalidation{"MessagePumpMacTimerInvalidation",
- FEATURE_ENABLED_BY_DEFAULT};
-
-// Various CoreFoundation definitions.
-typedef struct __CFRuntimeBase {
- uintptr_t _cfisa;
- uint8_t _cfinfo[4];
- uint32_t _rc;
-} CFRuntimeBase;
-
-#if defined(__BIG_ENDIAN__)
-#define __CF_BIG_ENDIAN__ 1
-#define __CF_LITTLE_ENDIAN__ 0
-#endif
-
-#if defined(__LITTLE_ENDIAN__)
-#define __CF_LITTLE_ENDIAN__ 1
-#define __CF_BIG_ENDIAN__ 0
-#endif
-
-#define CF_INFO_BITS (!!(__CF_BIG_ENDIAN__)*3)
-
-#define __CFBitfieldMask(N1, N2) \
- ((((UInt32)~0UL) << (31UL - (N1) + (N2))) >> (31UL - N1))
-#define __CFBitfieldSetValue(V, N1, N2, X) \
- ((V) = ((V) & ~__CFBitfieldMask(N1, N2)) | \
- (((X) << (N2)) & __CFBitfieldMask(N1, N2)))
-
-// Marking timers as invalid at the right time by flipping their valid bit helps
-// significantly reduce power use (see the explanation in
-// RunDelayedWorkTimer()), however there is no public API for doing so.
-// CFRuntime.h states that CFRuntimeBase can change from release to release
-// and should not be accessed directly. The last known change of this struct
-// occurred in 2008 in CF-476 / 10.5; unfortunately the source for 10.11 and
-// 10.12 is not available for inspection at this time.
-// CanInvalidateCFRunLoopTimers() will at least prevent us from invalidating
-// timers if this function starts flipping the wrong bit on a future OS release.
-void __ChromeCFRunLoopTimerSetValid(CFRunLoopTimerRef timer, bool valid) {
- __CFBitfieldSetValue(((CFRuntimeBase*)timer)->_cfinfo[CF_INFO_BITS], 3, 3,
- valid);
-}
#endif // !defined(OS_IOS)
} // namespace
@@ -217,16 +175,6 @@ void MessagePumpCFRunLoopBase::ScheduleDelayedWork(
}
void MessagePumpCFRunLoopBase::ScheduleDelayedWorkImpl(TimeDelta delta) {
- // Flip the timer's validation bit just before setting the new fire time. Do
- // this now because CFRunLoopTimerSetNextFireDate() likely checks the validity
- // of a timer before proceeding to set its fire date. Making the timer valid
- // now won't have any side effects (such as a premature firing of the timer)
- // because we're only flipping a bit.
- //
- // Please see the comment in RunDelayedWorkTimer() for more info on the whys
- // of invalidation.
- SetDelayedWorkTimerValid(true);
-
// The tolerance needs to be set before the fire date or it may be ignored.
if (timer_slack_ == TIMER_SLACK_MAXIMUM) {
CFRunLoopTimerSetTolerance(delayed_work_timer_, delta.InSecondsF() * 0.5);
@@ -256,8 +204,7 @@ MessagePumpCFRunLoopBase::MessagePumpCFRunLoopBase(int initial_mode_mask)
deepest_nesting_level_(0),
keep_running_(true),
delegateless_work_(false),
- delegateless_idle_work_(false),
- allow_timer_invalidation_(true) {
+ delegateless_idle_work_(false) {
run_loop_ = CFRunLoopGetCurrent();
CFRetain(run_loop_);
@@ -368,100 +315,11 @@ int MessagePumpCFRunLoopBase::GetModeMask() const {
return mask;
}
-#if !defined(OS_IOS)
-// This function uses private API to modify a test timer's valid state and
-// uses public API to confirm that the private API changed the correct bit.
-// static
-bool MessagePumpCFRunLoopBase::CanInvalidateCFRunLoopTimers() {
- if (!FeatureList::IsEnabled(kMessagePumpTimerInvalidation)) {
- return false;
- }
-
- CFRunLoopTimerContext timer_context = CFRunLoopTimerContext();
- timer_context.info = nullptr;
- ScopedCFTypeRef<CFRunLoopTimerRef> test_timer(
- CFRunLoopTimerCreate(NULL, // allocator
- kCFTimeIntervalMax, // fire time
- kCFTimeIntervalMax, // interval
- 0, // flags
- 0, // priority
- nullptr, &timer_context));
- // Should be valid from the start.
- if (!CFRunLoopTimerIsValid(test_timer)) {
- return false;
- }
- // Confirm that the private API can mark the timer invalid.
- __ChromeCFRunLoopTimerSetValid(test_timer, false);
- if (CFRunLoopTimerIsValid(test_timer)) {
- return false;
- }
- // Confirm that the private API can mark the timer valid.
- __ChromeCFRunLoopTimerSetValid(test_timer, true);
- return CFRunLoopTimerIsValid(test_timer);
-}
-#endif // !defined(OS_IOS)
-
-// static
-void MessagePumpCFRunLoopBase::ChromeCFRunLoopTimerSetValid(
- CFRunLoopTimerRef timer,
- bool valid) {
-#if !defined(OS_IOS)
- static bool can_invalidate_timers = CanInvalidateCFRunLoopTimers();
- if (can_invalidate_timers) {
- __ChromeCFRunLoopTimerSetValid(timer, valid);
- }
-#endif // !defined(OS_IOS)
-}
-
-void MessagePumpCFRunLoopBase::SetDelayedWorkTimerValid(bool valid) {
- if (allow_timer_invalidation_) {
- ChromeCFRunLoopTimerSetValid(delayed_work_timer_, valid);
- } else {
- pending_timer_validity_ = valid;
- }
-}
-
-void MessagePumpCFRunLoopBase::SetTimerInvalidationAllowed(bool allowed) {
- if (!allowed)
- ChromeCFRunLoopTimerSetValid(delayed_work_timer_, true);
- allow_timer_invalidation_ = allowed;
- if (allowed && pending_timer_validity_.has_value()) {
- SetDelayedWorkTimerValid(*pending_timer_validity_);
- pending_timer_validity_ = nullopt;
- }
-}
-
// Called from the run loop.
// static
void MessagePumpCFRunLoopBase::RunDelayedWorkTimer(CFRunLoopTimerRef timer,
void* info) {
MessagePumpCFRunLoopBase* self = static_cast<MessagePumpCFRunLoopBase*>(info);
-
- // The message pump's timer needs to fire at changing and unpredictable
- // intervals. Creating a new timer for each firing time is very expensive, so
- // the message pump instead uses a repeating timer with a very large repeat
- // rate. After each firing of the timer, the run loop sets the timer's next
- // firing time to the distant future, essentially pausing the timer until the
- // pump sets the next firing time. This is the solution recommended by Apple.
- //
- // It turns out, however, that scheduling timers is also quite expensive, and
- // that every one of the message pump's timer firings incurs two
- // reschedulings. The first rescheduling occurs in ScheduleDelayedWork(),
- // which sets the desired next firing time. The second comes after exiting
- // this method (the timer's callback method), when the run loop sets the
- // timer's next firing time to far in the future.
- //
- // The code in __CFRunLoopDoTimer() inside CFRunLoop.c calls the timer's
- // callback, confirms that the timer is valid, and then sets its future
- // firing time based on its repeat frequency. Flipping the valid bit here
- // causes the __CFRunLoopDoTimer() to skip setting the future firing time.
- // Note that there's public API to invalidate a timer but it goes beyond
- // flipping the valid bit, making the timer unusable in the future.
- //
- // ScheduleDelayedWork() flips the valid bit back just before setting the
- // timer's new firing time.
- self->SetDelayedWorkTimerValid(false);
-
// The timer fired, assume we have work and let RunWork() figure out what to
// do and what to schedule after.
base::mac::CallWithEHFrame(^{
@@ -793,14 +651,11 @@ ScopedPumpMessagesInPrivateModes::ScopedPumpMessagesInPrivateModes() {
if ([NSApp modalWindow])
return;
g_app_pump->SetModeMask(kAllModesMask);
- // Disable timer invalidation to avoid hangs. See crbug.com/912273.
- g_app_pump->SetTimerInvalidationAllowed(false);
}
ScopedPumpMessagesInPrivateModes::~ScopedPumpMessagesInPrivateModes() {
DCHECK(g_app_pump);
g_app_pump->SetModeMask(kNSApplicationModalSafeModeMask);
- g_app_pump->SetTimerInvalidationAllowed(true);
}
int ScopedPumpMessagesInPrivateModes::GetModeMaskForTest() {
diff --git a/chromium/base/message_loop/message_pump_mac_unittest.mm b/chromium/base/message_loop/message_pump_mac_unittest.mm
index 85f4779868a..840a3f5f8eb 100644
--- a/chromium/base/message_loop/message_pump_mac_unittest.mm
+++ b/chromium/base/message_loop/message_pump_mac_unittest.mm
@@ -29,102 +29,6 @@ constexpr int kNSApplicationModalSafeModeMask = 0x3;
namespace base {
-class TestMessagePumpCFRunLoopBase {
- public:
- bool TestCanInvalidateTimers() {
- return MessagePumpCFRunLoopBase::CanInvalidateCFRunLoopTimers();
- }
- static void SetTimerValid(CFRunLoopTimerRef timer, bool valid) {
- MessagePumpCFRunLoopBase::ChromeCFRunLoopTimerSetValid(timer, valid);
- }
-
- static void PerformTimerCallback(CFRunLoopTimerRef timer, void* info) {
- TestMessagePumpCFRunLoopBase* self =
- static_cast<TestMessagePumpCFRunLoopBase*>(info);
- self->timer_callback_called_ = true;
-
- if (self->invalidate_timer_in_callback_) {
- SetTimerValid(timer, false);
- }
- }
-
- bool invalidate_timer_in_callback_;
-
- bool timer_callback_called_;
-};
-
-TEST(MessagePumpMacTest, TestCanInvalidateTimers) {
- TestMessagePumpCFRunLoopBase message_pump_test;
-
- // Catch whether or not the use of private API ever starts failing.
- EXPECT_TRUE(message_pump_test.TestCanInvalidateTimers());
-}
-
-TEST(MessagePumpMacTest, TestInvalidatedTimerReuse) {
- TestMessagePumpCFRunLoopBase message_pump_test;
-
- CFRunLoopTimerContext timer_context = CFRunLoopTimerContext();
- timer_context.info = &message_pump_test;
- const CFTimeInterval kCFTimeIntervalMax =
- std::numeric_limits<CFTimeInterval>::max();
- ScopedCFTypeRef<CFRunLoopTimerRef> test_timer(CFRunLoopTimerCreate(
- NULL, // allocator
- kCFTimeIntervalMax, // fire time
- kCFTimeIntervalMax, // interval
- 0, // flags
- 0, // priority
- TestMessagePumpCFRunLoopBase::PerformTimerCallback, &timer_context));
- CFRunLoopAddTimer(CFRunLoopGetCurrent(), test_timer,
- kMessageLoopExclusiveRunLoopMode);
-
- // Sanity check.
- EXPECT_TRUE(CFRunLoopTimerIsValid(test_timer));
-
- // Confirm that the timer fires as expected, and that it's not a one-time-use
- // timer (those timers are invalidated after they fire).
- CFAbsoluteTime next_fire_time = CFAbsoluteTimeGetCurrent() + 0.01;
- CFRunLoopTimerSetNextFireDate(test_timer, next_fire_time);
- message_pump_test.timer_callback_called_ = false;
- message_pump_test.invalidate_timer_in_callback_ = false;
- CFRunLoopRunInMode(kMessageLoopExclusiveRunLoopMode, 0.02, true);
- EXPECT_TRUE(message_pump_test.timer_callback_called_);
- EXPECT_TRUE(CFRunLoopTimerIsValid(test_timer));
-
- // As a repeating timer, the timer should have a new fire date set in the
- // future.
- EXPECT_GT(CFRunLoopTimerGetNextFireDate(test_timer), next_fire_time);
-
- // Try firing the timer, and invalidating it within its callback.
- next_fire_time = CFAbsoluteTimeGetCurrent() + 0.01;
- CFRunLoopTimerSetNextFireDate(test_timer, next_fire_time);
- message_pump_test.timer_callback_called_ = false;
- message_pump_test.invalidate_timer_in_callback_ = true;
- CFRunLoopRunInMode(kMessageLoopExclusiveRunLoopMode, 0.02, true);
- EXPECT_TRUE(message_pump_test.timer_callback_called_);
- EXPECT_FALSE(CFRunLoopTimerIsValid(test_timer));
-
- // The CFRunLoop believes the timer is invalid, so it should not have a
- // fire date.
- EXPECT_EQ(0, CFRunLoopTimerGetNextFireDate(test_timer));
-
- // Now mark the timer as valid and confirm that it still fires correctly.
- TestMessagePumpCFRunLoopBase::SetTimerValid(test_timer, true);
- EXPECT_TRUE(CFRunLoopTimerIsValid(test_timer));
- next_fire_time = CFAbsoluteTimeGetCurrent() + 0.01;
- CFRunLoopTimerSetNextFireDate(test_timer, next_fire_time);
- message_pump_test.timer_callback_called_ = false;
- message_pump_test.invalidate_timer_in_callback_ = false;
- CFRunLoopRunInMode(kMessageLoopExclusiveRunLoopMode, 0.02, true);
- EXPECT_TRUE(message_pump_test.timer_callback_called_);
- EXPECT_TRUE(CFRunLoopTimerIsValid(test_timer));
-
- // Confirm that the run loop again gave it a new fire date in the future.
- EXPECT_GT(CFRunLoopTimerGetNextFireDate(test_timer), next_fire_time);
-
- CFRunLoopRemoveTimer(CFRunLoopGetCurrent(), test_timer,
- kMessageLoopExclusiveRunLoopMode);
-}
-
namespace {
// PostedTasks are only executed while the message pump has a delegate. That is,
@@ -215,84 +119,6 @@ TEST(MessagePumpMacTest, ScopedPumpMessagesAttemptWithModalDialog) {
EXPECT_EQ(NSAlertFirstButtonReturn, result);
}
-// This is a regression test for a scenario where the invalidation of the
-// delayed work timer (using non-public APIs) causes a nested native run loop to
-// hang. The exact root cause of the hang is unknown since it involves the
-// closed-source Core Foundation runtime, but the steps needed to trigger it
-// are:
-//
-// 1. Post a delayed task that will run some time after step #4.
-// 2. Allow Chrome tasks to run in nested run loops (with
-// ScopedNestableTaskAllower).
-// 3. Allow running Chrome tasks during private run loop modes (with
-// ScopedPumpMessagesInPrivateModes).
-// 4. Open a pop-up menu via [NSMenu popupContextMenu]. This will start a
-// private native run loop to process menu interaction.
-// 5. In a posted task, close the menu with [NSMenu cancelTracking].
-//
-// At this point the menu closes visually but the nested run loop (flakily)
-// hangs forever in a live-lock, i.e., Chrome tasks keep executing but the
-// NSMenu call in #4 never returns.
-//
-// The workaround is to avoid timer invalidation during nested native run loops.
-//
-// DANGER: As the pop-up menu captures keyboard input, the bug will make the
-// machine's keyboard inoperable during the live-lock. Use a TTY-based remote
-// terminal such as SSH (as opposed to Chromoting) to investigate the issue.
-//
-TEST(MessagePumpMacTest, DontInvalidateTimerInNativeRunLoop) {
- test::SingleThreadTaskEnvironment task_environment(
- test::SingleThreadTaskEnvironment::MainThreadType::UI);
- NSWindow* window =
- [[[NSWindow alloc] initWithContentRect:NSMakeRect(0, 0, 100, 100)
- styleMask:NSBorderlessWindowMask
- backing:NSBackingStoreBuffered
- defer:NO] autorelease];
- NSMenu* menu = [[NSMenu alloc] initWithTitle:@"Test menu"];
- [menu insertItemWithTitle:@"Dummy item"
- action:@selector(dummy)
- keyEquivalent:@"a"
- atIndex:0];
- NSEvent* event = [NSEvent otherEventWithType:NSApplicationDefined
- location:NSZeroPoint
- modifierFlags:0
- timestamp:0
- windowNumber:0
- context:nil
- subtype:0
- data1:0
- data2:0];
-
- // Post a task to open the menu. This needs to be a separate task so that
- // nested task execution can be allowed.
- ThreadTaskRunnerHandle::Get()->PostTask(
- FROM_HERE, base::BindOnce(
- [](NSWindow* window, NSMenu* menu, NSEvent* event) {
- MessageLoopCurrent::ScopedNestableTaskAllower allow;
- ScopedPumpMessagesInPrivateModes pump_private;
- // When the bug triggers, this call never returns.
- [NSMenu popUpContextMenu:menu
- withEvent:event
- forView:[window contentView]];
- },
- window, menu, event));
-
- // Post another task to close the menu. The 100ms delay was determined
- // experimentally on a 2013 Mac Pro.
- RunLoop run_loop;
- ThreadTaskRunnerHandle::Get()->PostDelayedTask(
- FROM_HERE,
- base::BindOnce(
- [](RunLoop* run_loop, NSMenu* menu) {
- [menu cancelTracking];
- run_loop->Quit();
- },
- &run_loop, menu),
- base::TimeDelta::FromMilliseconds(100));
-
- EXPECT_NO_FATAL_FAILURE(run_loop.Run());
-}
-
TEST(MessagePumpMacTest, QuitWithModalWindow) {
test::SingleThreadTaskEnvironment task_environment(
test::SingleThreadTaskEnvironment::MainThreadType::UI);
diff --git a/chromium/base/message_loop/message_pump_win.cc b/chromium/base/message_loop/message_pump_win.cc
index 83cbec19c29..4c500064bcd 100644
--- a/chromium/base/message_loop/message_pump_win.cc
+++ b/chromium/base/message_loop/message_pump_win.cc
@@ -14,7 +14,7 @@
#include "base/metrics/histogram_macros.h"
#include "base/numerics/ranges.h"
#include "base/numerics/safe_conversions.h"
-#include "base/trace_event/trace_event.h"
+#include "base/trace_event/base_tracing.h"
namespace base {
@@ -24,11 +24,12 @@ namespace {
// opportunity to yield to other threads according to some heuristics (e.g.
// presumably when there's no input but perhaps a single WM_USER message posted
// later than another thread was readied). MessagePumpForUI doesn't intend to
-// give this opportunity to the kernel when invoking ::PeekMessage however as it
-// runs most tasks out-of-band. Hence, PM_NOYIELD should be used to tell
-// ::PeekMessage it's not the only source of work for this thread.
-const Feature kNoYieldFromNativePeek{"NoYieldFromNativePeek",
- FEATURE_DISABLED_BY_DEFAULT};
+// give this opportunity to the kernel when invoking ::PeekMessage however. This
+// experiment attempts to regain control of the pump (behind an experiment
+// because of how fragile this code is -- experiments help external contributors
+// diagnose regressions, e.g. crbug.com/1078475).
+const Feature kPreventMessagePumpHangs{"PreventMessagePumpHangs",
+ FEATURE_DISABLED_BY_DEFAULT};
enum MessageLoopProblems {
MESSAGE_POST_ERROR,
@@ -114,7 +115,7 @@ void MessagePumpForUI::ScheduleWork() {
return; // Someone else continued the pumping.
// Make sure the MessagePump does some work for us.
- BOOL ret = PostMessage(message_window_.hwnd(), kMsgHaveWork, 0, 0);
+ const BOOL ret = ::PostMessage(message_window_.hwnd(), kMsgHaveWork, 0, 0);
if (ret)
return; // There was room in the Window Message queue.
@@ -131,6 +132,8 @@ void MessagePumpForUI::ScheduleWork() {
work_scheduled_ = false;
UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem", MESSAGE_POST_ERROR,
MESSAGE_LOOP_PROBLEM_MAX);
+ TRACE_EVENT_INSTANT0("base", "Chrome.MessageLoopProblem.MESSAGE_POST_ERROR",
+ TRACE_EVENT_SCOPE_THREAD);
}
void MessagePumpForUI::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
@@ -252,8 +255,6 @@ void MessagePumpForUI::DoRunLoop() {
if (more_work_is_plausible)
continue;
- // WaitForWork() does some work itself, so notify the delegate of it.
- state_->delegate->BeforeWait();
WaitForWork(next_work_info);
}
}
@@ -267,6 +268,8 @@ void MessagePumpForUI::WaitForWork(Delegate::NextWorkInfo next_work_info) {
for (DWORD delay = GetSleepTimeoutMs(next_work_info.delayed_run_time,
next_work_info.recent_now);
delay != 0; delay = GetSleepTimeoutMs(next_work_info.delayed_run_time)) {
+ state_->delegate->BeforeWait();
+
// Tell the optimizer to retain these values to simplify analyzing hangs.
base::debug::Alias(&delay);
base::debug::Alias(&wait_flags);
@@ -296,13 +299,15 @@ void MessagePumpForUI::WaitForWork(Delegate::NextWorkInfo next_work_info) {
}
{
- static const auto kAdditionalFlags =
- FeatureList::IsEnabled(kNoYieldFromNativePeek) ? PM_NOYIELD : 0x0;
+ // ::GetQueueStatus() above may racily miss a sent-message and
+ // ::PeekMessage() below may thus process one and/or internal events per
+ // its doc.
+ state_->delegate->BeforeDoInternalWork();
MSG msg;
// Trace as in ProcessNextWindowsMessage().
TRACE_EVENT0("base", "MessagePumpForUI::WaitForWork PeekMessage");
- if (::PeekMessage(&msg, nullptr, 0, 0, kAdditionalFlags | PM_NOREMOVE))
+ if (::PeekMessage(&msg, nullptr, 0, 0, PM_NOREMOVE))
return;
}
@@ -341,6 +346,7 @@ void MessagePumpForUI::HandleWorkMessage() {
if (next_work_info.is_immediate()) {
ScheduleWork();
} else {
+ state_->delegate->BeforeWait();
ScheduleNativeTimer(next_work_info);
}
}
@@ -372,6 +378,7 @@ void MessagePumpForUI::HandleTimerMessage() {
if (next_work_info.is_immediate()) {
ScheduleWork();
} else {
+ state_->delegate->BeforeWait();
ScheduleNativeTimer(next_work_info);
}
}
@@ -426,18 +433,22 @@ void MessagePumpForUI::ScheduleNativeTimer(
// Tell the optimizer to retain the delay to simplify analyzing hangs.
base::debug::Alias(&delay_msec);
- UINT_PTR ret =
+ const UINT_PTR ret =
::SetTimer(message_window_.hwnd(), reinterpret_cast<UINT_PTR>(this),
delay_msec, nullptr);
- installed_native_timer_ = next_work_info.delayed_run_time;
- if (ret)
+ if (ret) {
+ installed_native_timer_ = next_work_info.delayed_run_time;
return;
- // If we can't set timers, we are in big trouble... but cross our fingers
- // for now.
- // TODO(jar): If we don't see this error, use a CHECK() here instead.
+ }
+ // This error is likely similar to MESSAGE_POST_ERROR (i.e. native queue is
+ // full). Since we only use ScheduleNativeTimer() in native nested loops
+ // this likely means this pump will not be given a chance to run application
+ // tasks until the nested loop completes.
UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem", SET_TIMER_ERROR,
MESSAGE_LOOP_PROBLEM_MAX);
+ TRACE_EVENT_INSTANT0("base", "Chrome.MessageLoopProblem.SET_TIMER_ERROR",
+ TRACE_EVENT_SCOPE_THREAD);
}
}
@@ -480,17 +491,13 @@ bool MessagePumpForUI::ProcessNextWindowsMessage() {
// when ::PeekMessage turns out to be a no-op).
state_->delegate->BeforeDoInternalWork();
- static const auto kAdditionalFlags =
- FeatureList::IsEnabled(kNoYieldFromNativePeek) ? PM_NOYIELD : 0x0;
-
// PeekMessage can run a message if there are sent messages, trace that and
// emit the boolean param to see if it ever janks independently (ref.
// comment on GetQueueStatus).
TRACE_EVENT1("base",
"MessagePumpForUI::ProcessNextWindowsMessage PeekMessage",
"sent_messages_in_queue", more_work_is_plausible);
- has_msg = ::PeekMessage(&msg, nullptr, 0, 0,
- kAdditionalFlags | PM_REMOVE) != FALSE;
+ has_msg = ::PeekMessage(&msg, nullptr, 0, 0, PM_REMOVE) != FALSE;
}
if (has_msg)
more_work_is_plausible |= ProcessMessageHelper(msg);
@@ -545,13 +552,33 @@ bool MessagePumpForUI::ProcessPumpReplacementMessage() {
// that peeked replacement. Note that the re-post of kMsgHaveWork may be
// asynchronous to this thread!!
- // As in ProcessNextWindowsMessage() since ::PeekMessage() may process
- // sent-messages.
+ // Bump the work id since ::PeekMessage may process internal events.
state_->delegate->BeforeDoInternalWork();
+ // The system headers don't define this; it's equivalent to PM_QS_INPUT |
+ // PM_QS_PAINT | PM_QS_POSTMESSAGE. i.e., anything but QS_SENDMESSAGE. Since
+ // we're looking to replace our kMsgHaveWork posted message, we can ignore
+ // sent messages (which never compete with posted messages in the initial
+ // PeekMessage call).
+ constexpr auto PM_QS_ALLEVENTS = QS_ALLEVENTS << 16;
+ static_assert(
+ PM_QS_ALLEVENTS == (PM_QS_INPUT | PM_QS_PAINT | PM_QS_POSTMESSAGE), "");
+ static_assert((PM_QS_ALLEVENTS & PM_QS_SENDMESSAGE) == 0, "");
+
MSG msg;
- const bool have_message =
- ::PeekMessage(&msg, nullptr, 0, 0, PM_REMOVE) != FALSE;
+ bool have_message = false;
+ {
+ TRACE_EVENT0("base",
+ "MessagePumpForUI::ProcessPumpReplacementMessage PeekMessage");
+
+ static const auto peek_replacement_message_modifier =
+ base::FeatureList::IsEnabled(kPreventMessagePumpHangs) ? PM_QS_ALLEVENTS
+ : 0;
+
+ have_message =
+ ::PeekMessage(&msg, nullptr, 0, 0,
+ PM_REMOVE | peek_replacement_message_modifier) != FALSE;
+ }
// Expect no message or a message different than kMsgHaveWork.
DCHECK(!have_message || kMsgHaveWork != msg.message ||
@@ -623,9 +650,9 @@ void MessagePumpForIO::ScheduleWork() {
return; // Someone else continued the pumping.
// Make sure the MessagePump does some work for us.
- BOOL ret = ::PostQueuedCompletionStatus(port_.Get(), 0,
- reinterpret_cast<ULONG_PTR>(this),
- reinterpret_cast<OVERLAPPED*>(this));
+ const BOOL ret = ::PostQueuedCompletionStatus(
+ port_.Get(), 0, reinterpret_cast<ULONG_PTR>(this),
+ reinterpret_cast<OVERLAPPED*>(this));
if (ret)
return; // Post worked perfectly.
@@ -634,6 +661,9 @@ void MessagePumpForIO::ScheduleWork() {
work_scheduled_ = false; // Clarify that we didn't succeed.
UMA_HISTOGRAM_ENUMERATION("Chrome.MessageLoopProblem", COMPLETION_POST_ERROR,
MESSAGE_LOOP_PROBLEM_MAX);
+ TRACE_EVENT_INSTANT0("base",
+ "Chrome.MessageLoopProblem.COMPLETION_POST_ERROR",
+ TRACE_EVENT_SCOPE_THREAD);
}
void MessagePumpForIO::ScheduleDelayedWork(const TimeTicks& delayed_work_time) {
diff --git a/chromium/base/metrics/dummy_histogram.cc b/chromium/base/metrics/dummy_histogram.cc
index ca7c4d9d30d..45ae7f2fb11 100644
--- a/chromium/base/metrics/dummy_histogram.cc
+++ b/chromium/base/metrics/dummy_histogram.cc
@@ -101,10 +101,7 @@ std::unique_ptr<HistogramSamples> DummyHistogram::SnapshotFinalDelta() const {
}
base::DictionaryValue DummyHistogram::ToGraphDict() const {
- base::DictionaryValue dict;
- dict.SetString("header", "dummy");
- dict.SetString("body", "dummy");
- return dict;
+ return base::DictionaryValue();
}
} // namespace base
diff --git a/chromium/base/metrics/dummy_histogram.h b/chromium/base/metrics/dummy_histogram.h
index 6c17cc8924b..08fd36374a0 100644
--- a/chromium/base/metrics/dummy_histogram.h
+++ b/chromium/base/metrics/dummy_histogram.h
@@ -44,9 +44,6 @@ class BASE_EXPORT DummyHistogram : public HistogramBase {
// HistogramBase:
void SerializeInfoImpl(Pickle* pickle) const override {}
void GetParameters(DictionaryValue* params) const override {}
- void GetCountAndBucketData(Count* count,
- int64_t* sum,
- ListValue* buckets) const override {}
private:
friend class NoDestructor<DummyHistogram>;
diff --git a/chromium/base/metrics/field_trial.cc b/chromium/base/metrics/field_trial.cc
index 05470a4deec..9799bfc9713 100644
--- a/chromium/base/metrics/field_trial.cc
+++ b/chromium/base/metrics/field_trial.cc
@@ -719,9 +719,7 @@ void FieldTrialList::GetInitiallyActiveFieldTrials(
}
// static
-bool FieldTrialList::CreateTrialsFromString(
- const std::string& trials_string,
- const std::set<std::string>& ignored_trial_names) {
+bool FieldTrialList::CreateTrialsFromString(const std::string& trials_string) {
DCHECK(global_);
if (trials_string.empty() || !global_)
return true;
@@ -734,14 +732,6 @@ bool FieldTrialList::CreateTrialsFromString(
const std::string trial_name = entry.trial_name.as_string();
const std::string group_name = entry.group_name.as_string();
- if (Contains(ignored_trial_names, trial_name)) {
- // This is to warn that the field trial forced through command-line
- // input is unforcable.
- // Use --enable-logging or --enable-logging=stderr to see this warning.
- LOG(WARNING) << "Field trial: " << trial_name << " cannot be forced.";
- continue;
- }
-
FieldTrial* trial = CreateFieldTrial(trial_name, group_name);
if (!trial)
return false;
@@ -787,8 +777,7 @@ void FieldTrialList::CreateTrialsFromCommandLine(
if (cmd_line.HasSwitch(switches::kForceFieldTrials)) {
bool result = FieldTrialList::CreateTrialsFromString(
- cmd_line.GetSwitchValueASCII(switches::kForceFieldTrials),
- std::set<std::string>());
+ cmd_line.GetSwitchValueASCII(switches::kForceFieldTrials));
UMA_HISTOGRAM_BOOLEAN("ChildProcess.FieldTrials.CreateFromSwitchSuccess",
result);
DCHECK(result);
diff --git a/chromium/base/metrics/field_trial.h b/chromium/base/metrics/field_trial.h
index d0fd3c12d6a..00448389e0d 100644
--- a/chromium/base/metrics/field_trial.h
+++ b/chromium/base/metrics/field_trial.h
@@ -59,7 +59,6 @@
#include <map>
#include <memory>
-#include <set>
#include <string>
#include <vector>
@@ -543,11 +542,8 @@ class BASE_EXPORT FieldTrialList {
// browser process into this non-browser process, but could also be invoked
// through a command line argument to the browser process. Created field
// trials will be marked "used" for the purposes of active trial reporting
- // if they are prefixed with |kActivationMarker|. Trial names in
- // |ignored_trial_names| are ignored when parsing |trials_string|.
- static bool CreateTrialsFromString(
- const std::string& trials_string,
- const std::set<std::string>& ignored_trial_names);
+ // if they are prefixed with |kActivationMarker|.
+ static bool CreateTrialsFromString(const std::string& trials_string);
// Achieves the same thing as CreateTrialsFromString, except wraps the logic
// by taking in the trials from the command line, either via shared memory
diff --git a/chromium/base/metrics/field_trial_params.h b/chromium/base/metrics/field_trial_params.h
index 7b4bd5b37cc..056b50b230f 100644
--- a/chromium/base/metrics/field_trial_params.h
+++ b/chromium/base/metrics/field_trial_params.h
@@ -10,6 +10,7 @@
#include "base/base_export.h"
#include "base/logging.h"
+#include "base/notreached.h"
namespace base {
diff --git a/chromium/base/metrics/field_trial_unittest.cc b/chromium/base/metrics/field_trial_unittest.cc
index 18723053c00..52e590561c4 100644
--- a/chromium/base/metrics/field_trial_unittest.cc
+++ b/chromium/base/metrics/field_trial_unittest.cc
@@ -527,8 +527,7 @@ TEST_F(FieldTrialTest, Restore) {
ASSERT_FALSE(FieldTrialList::TrialExists("Some_name"));
ASSERT_FALSE(FieldTrialList::TrialExists("xxx"));
- FieldTrialList::CreateTrialsFromString("Some_name/Winner/xxx/yyyy/",
- std::set<std::string>());
+ FieldTrialList::CreateTrialsFromString("Some_name/Winner/xxx/yyyy/");
FieldTrial* trial = FieldTrialList::Find("Some_name");
ASSERT_NE(static_cast<FieldTrial*>(nullptr), trial);
@@ -542,8 +541,7 @@ TEST_F(FieldTrialTest, Restore) {
}
TEST_F(FieldTrialTest, RestoreNotEndingWithSlash) {
- EXPECT_TRUE(FieldTrialList::CreateTrialsFromString("tname/gname",
- std::set<std::string>()));
+ EXPECT_TRUE(FieldTrialList::CreateTrialsFromString("tname/gname"));
FieldTrial* trial = FieldTrialList::Find("tname");
ASSERT_NE(static_cast<FieldTrial*>(nullptr), trial);
@@ -552,16 +550,11 @@ TEST_F(FieldTrialTest, RestoreNotEndingWithSlash) {
}
TEST_F(FieldTrialTest, BogusRestore) {
- EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("MissingSlash",
- std::set<std::string>()));
- EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("MissingGroupName/",
- std::set<std::string>()));
- EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("noname, only group/",
- std::set<std::string>()));
- EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("/emptyname",
- std::set<std::string>()));
- EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("*/emptyname",
- std::set<std::string>()));
+ EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("MissingSlash"));
+ EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("MissingGroupName/"));
+ EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("noname, only group/"));
+ EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("/emptyname"));
+ EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("*/emptyname"));
}
TEST_F(FieldTrialTest, DuplicateRestore) {
@@ -575,19 +568,16 @@ TEST_F(FieldTrialTest, DuplicateRestore) {
EXPECT_EQ("Some name/Winner/", save_string);
// It is OK if we redundantly specify a winner.
- EXPECT_TRUE(FieldTrialList::CreateTrialsFromString(save_string,
- std::set<std::string>()));
+ EXPECT_TRUE(FieldTrialList::CreateTrialsFromString(save_string));
// But it is an error to try to change to a different winner.
- EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("Some name/Loser/",
- std::set<std::string>()));
+ EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("Some name/Loser/"));
}
TEST_F(FieldTrialTest, CreateTrialsFromStringNotActive) {
ASSERT_FALSE(FieldTrialList::TrialExists("Abc"));
ASSERT_FALSE(FieldTrialList::TrialExists("Xyz"));
- ASSERT_TRUE(FieldTrialList::CreateTrialsFromString("Abc/def/Xyz/zyx/",
- std::set<std::string>()));
+ ASSERT_TRUE(FieldTrialList::CreateTrialsFromString("Abc/def/Xyz/zyx/"));
FieldTrial::ActiveGroups active_groups;
FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
@@ -609,8 +599,8 @@ TEST_F(FieldTrialTest, CreateTrialsFromStringForceActivation) {
ASSERT_FALSE(FieldTrialList::TrialExists("Abc"));
ASSERT_FALSE(FieldTrialList::TrialExists("def"));
ASSERT_FALSE(FieldTrialList::TrialExists("Xyz"));
- ASSERT_TRUE(FieldTrialList::CreateTrialsFromString(
- "*Abc/cba/def/fed/*Xyz/zyx/", std::set<std::string>()));
+ ASSERT_TRUE(
+ FieldTrialList::CreateTrialsFromString("*Abc/cba/def/fed/*Xyz/zyx/"));
FieldTrial::ActiveGroups active_groups;
FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
@@ -625,8 +615,7 @@ TEST_F(FieldTrialTest, CreateTrialsFromStringNotActiveObserver) {
ASSERT_FALSE(FieldTrialList::TrialExists("Abc"));
TestFieldTrialObserver observer(TestFieldTrialObserver::ASYNCHRONOUS);
- ASSERT_TRUE(FieldTrialList::CreateTrialsFromString("Abc/def/",
- std::set<std::string>()));
+ ASSERT_TRUE(FieldTrialList::CreateTrialsFromString("Abc/def/"));
RunLoop().RunUntilIdle();
// Observer shouldn't be notified.
EXPECT_TRUE(observer.trial_name().empty());
@@ -639,47 +628,6 @@ TEST_F(FieldTrialTest, CreateTrialsFromStringNotActiveObserver) {
EXPECT_EQ("def", observer.group_name());
}
-TEST_F(FieldTrialTest, CreateTrialsFromStringWithIgnoredFieldTrials) {
- ASSERT_FALSE(FieldTrialList::TrialExists("Unaccepted1"));
- ASSERT_FALSE(FieldTrialList::TrialExists("Foo"));
- ASSERT_FALSE(FieldTrialList::TrialExists("Unaccepted2"));
- ASSERT_FALSE(FieldTrialList::TrialExists("Bar"));
- ASSERT_FALSE(FieldTrialList::TrialExists("Unaccepted3"));
-
- std::set<std::string> ignored_trial_names;
- ignored_trial_names.insert("Unaccepted1");
- ignored_trial_names.insert("Unaccepted2");
- ignored_trial_names.insert("Unaccepted3");
-
- FieldTrialList::CreateTrialsFromString(
- "Unaccepted1/Unaccepted1_name/"
- "Foo/Foo_name/"
- "Unaccepted2/Unaccepted2_name/"
- "Bar/Bar_name/"
- "Unaccepted3/Unaccepted3_name/",
- ignored_trial_names);
-
- EXPECT_FALSE(FieldTrialList::TrialExists("Unaccepted1"));
- EXPECT_TRUE(FieldTrialList::TrialExists("Foo"));
- EXPECT_FALSE(FieldTrialList::TrialExists("Unaccepted2"));
- EXPECT_TRUE(FieldTrialList::TrialExists("Bar"));
- EXPECT_FALSE(FieldTrialList::TrialExists("Unaccepted3"));
-
- FieldTrial::ActiveGroups active_groups;
- FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
- EXPECT_TRUE(active_groups.empty());
-
- FieldTrial* trial = FieldTrialList::Find("Foo");
- ASSERT_NE(static_cast<FieldTrial*>(nullptr), trial);
- EXPECT_EQ("Foo", trial->trial_name());
- EXPECT_EQ("Foo_name", trial->group_name());
-
- trial = FieldTrialList::Find("Bar");
- ASSERT_NE(static_cast<FieldTrial*>(nullptr), trial);
- EXPECT_EQ("Bar", trial->trial_name());
- EXPECT_EQ("Bar_name", trial->group_name());
-}
-
TEST_F(FieldTrialTest, CreateFieldTrial) {
ASSERT_FALSE(FieldTrialList::TrialExists("Some_name"));
@@ -1154,8 +1102,7 @@ TEST(FieldTrialTestWithoutList, StatesStringFormat) {
// Starting with a new blank FieldTrialList.
FieldTrialList field_trial_list(nullptr);
- ASSERT_TRUE(field_trial_list.CreateTrialsFromString(save_string,
- std::set<std::string>()));
+ ASSERT_TRUE(field_trial_list.CreateTrialsFromString(save_string));
FieldTrial::ActiveGroups active_groups;
field_trial_list.GetActiveFieldTrialGroups(&active_groups);
diff --git a/chromium/base/metrics/histogram.cc b/chromium/base/metrics/histogram.cc
index 12782db9a9d..7e9ea3c7d0d 100644
--- a/chromium/base/metrics/histogram.cc
+++ b/chromium/base/metrics/histogram.cc
@@ -821,27 +821,6 @@ void Histogram::GetParameters(DictionaryValue* params) const {
params->SetIntKey("bucket_count", static_cast<int>(bucket_count()));
}
-void Histogram::GetCountAndBucketData(Count* count,
- int64_t* sum,
- ListValue* buckets) const {
- std::unique_ptr<SampleVector> snapshot = SnapshotAllSamples();
- *count = snapshot->TotalCount();
- *sum = snapshot->sum();
- uint32_t index = 0;
- for (uint32_t i = 0; i < bucket_count(); ++i) {
- Sample count_at_index = snapshot->GetCountAtIndex(i);
- if (count_at_index > 0) {
- std::unique_ptr<DictionaryValue> bucket_value(new DictionaryValue());
- bucket_value->SetIntKey("low", ranges(i));
- if (i != bucket_count() - 1)
- bucket_value->SetIntKey("high", ranges(i + 1));
- bucket_value->SetIntKey("count", count_at_index);
- buckets->Set(index, std::move(bucket_value));
- ++index;
- }
- }
-}
-
//------------------------------------------------------------------------------
// LinearHistogram: This histogram uses a traditional set of evenly spaced
// buckets.
diff --git a/chromium/base/metrics/histogram.h b/chromium/base/metrics/histogram.h
index ba300c70917..3e1c51f46ee 100644
--- a/chromium/base/metrics/histogram.h
+++ b/chromium/base/metrics/histogram.h
@@ -73,10 +73,10 @@
#include <vector>
#include "base/base_export.h"
+#include "base/check_op.h"
#include "base/compiler_specific.h"
#include "base/containers/span.h"
#include "base/gtest_prod_util.h"
-#include "base/logging.h"
#include "base/macros.h"
#include "base/metrics/bucket_ranges.h"
#include "base/metrics/histogram_base.h"
@@ -314,13 +314,10 @@ class BASE_EXPORT Histogram : public HistogramBase {
const uint32_t i,
std::string* output) const;
- // WriteJSON calls these.
+ // Writes the type, min, max, and bucket count information of the histogram in
+ // |params|.
void GetParameters(DictionaryValue* params) const override;
- void GetCountAndBucketData(Count* count,
- int64_t* sum,
- ListValue* buckets) const override;
-
// Samples that have not yet been logged with SnapshotDelta().
std::unique_ptr<SampleVectorBase> unlogged_samples_;
diff --git a/chromium/base/metrics/histogram_base.cc b/chromium/base/metrics/histogram_base.cc
index 8d55e92e1dd..ce7fbf16e38 100644
--- a/chromium/base/metrics/histogram_base.cc
+++ b/chromium/base/metrics/histogram_base.cc
@@ -179,6 +179,30 @@ void HistogramBase::FindAndRunCallback(HistogramBase::Sample sample) const {
cb.Run(sample);
}
+void HistogramBase::GetCountAndBucketData(Count* count,
+ int64_t* sum,
+ ListValue* buckets) const {
+ std::unique_ptr<HistogramSamples> snapshot = SnapshotSamples();
+ *count = snapshot->TotalCount();
+ *sum = snapshot->sum();
+ std::unique_ptr<SampleCountIterator> it = snapshot->Iterator();
+ uint32_t index = 0;
+ while (!it->Done()) {
+ std::unique_ptr<DictionaryValue> bucket_value(new DictionaryValue());
+ Sample bucket_min;
+ int64_t bucket_max;
+ Count bucket_count;
+ it->Get(&bucket_min, &bucket_max, &bucket_count);
+
+ bucket_value->SetIntKey("low", bucket_min);
+ bucket_value->SetIntKey("high", bucket_max);
+ bucket_value->SetIntKey("count", bucket_count);
+ buckets->Set(index, std::move(bucket_value));
+ it->Next();
+ ++index;
+ }
+}
+
void HistogramBase::WriteAsciiBucketGraph(double current_size,
double max_size,
std::string* output) const {
diff --git a/chromium/base/metrics/histogram_base.h b/chromium/base/metrics/histogram_base.h
index de3a2cc6427..7a12fc1989e 100644
--- a/chromium/base/metrics/histogram_base.h
+++ b/chromium/base/metrics/histogram_base.h
@@ -236,7 +236,8 @@ class BASE_EXPORT HistogramBase {
// The following method provides graphical histogram displays.
virtual void WriteAscii(std::string* output) const = 0;
- // Returns histogram data as a Dict with the following format:
+ // Returns histograms data as a Dict (or an empty dict if not available),
+ // with the following format:
// {"header": "Name of the histogram with samples, mean, and/or flags",
// "body": "ASCII histogram representation"}
virtual base::DictionaryValue ToGraphDict() const = 0;
@@ -262,9 +263,9 @@ class BASE_EXPORT HistogramBase {
// Writes information about the current (non-empty) buckets and their sample
// counts to |buckets|, the total sample count to |count| and the total sum
// to |sum|.
- virtual void GetCountAndBucketData(Count* count,
- int64_t* sum,
- ListValue* buckets) const = 0;
+ void GetCountAndBucketData(Count* count,
+ int64_t* sum,
+ ListValue* buckets) const;
//// Produce actual graph (set of blank vs non blank char's) for a bucket.
void WriteAsciiBucketGraph(double current_size,
diff --git a/chromium/base/metrics/histogram_macros_internal.h b/chromium/base/metrics/histogram_macros_internal.h
index dcbff508e8e..9f68583d8af 100644
--- a/chromium/base/metrics/histogram_macros_internal.h
+++ b/chromium/base/metrics/histogram_macros_internal.h
@@ -12,7 +12,7 @@
#include <memory>
#include <type_traits>
-#include "base/logging.h"
+#include "base/check_op.h"
#include "base/metrics/histogram.h"
#include "base/metrics/sparse_histogram.h"
#include "base/time/time.h"
diff --git a/chromium/base/metrics/histogram_macros_local.h b/chromium/base/metrics/histogram_macros_local.h
index 38a2d785202..47e5ba42f49 100644
--- a/chromium/base/metrics/histogram_macros_local.h
+++ b/chromium/base/metrics/histogram_macros_local.h
@@ -5,7 +5,6 @@
#ifndef BASE_METRICS_HISTOGRAM_MACROS_LOCAL_H_
#define BASE_METRICS_HISTOGRAM_MACROS_LOCAL_H_
-#include "base/logging.h"
#include "base/metrics/histogram.h"
#include "base/metrics/histogram_macros_internal.h"
#include "base/time/time.h"
diff --git a/chromium/base/metrics/histogram_unittest.cc b/chromium/base/metrics/histogram_unittest.cc
index aef19340d4b..d7addaaa266 100644
--- a/chromium/base/metrics/histogram_unittest.cc
+++ b/chromium/base/metrics/histogram_unittest.cc
@@ -101,6 +101,15 @@ class HistogramTest : public testing::TestWithParam<bool> {
return h->SnapshotAllSamples();
}
+ void GetCountAndBucketData(Histogram* histogram,
+ base::Histogram::Count* count,
+ int64_t* sum,
+ base::ListValue* buckets) {
+ // A simple wrapper around |GetCountAndBucketData| to make it visible for
+ // testing.
+ histogram->GetCountAndBucketData(count, sum, buckets);
+ }
+
const bool use_persistent_histogram_allocator_;
std::unique_ptr<StatisticsRecorder> statistics_recorder_;
@@ -899,6 +908,46 @@ TEST_P(HistogramTest, ExpiredHistogramTest) {
EXPECT_EQ(2, samples->TotalCount());
}
+TEST_P(HistogramTest, CheckGetCountAndBucketData) {
+ const size_t kBucketCount = 50;
+ Histogram* histogram = static_cast<Histogram*>(Histogram::FactoryGet(
+ "AddCountHistogram", 10, 100, kBucketCount, HistogramBase::kNoFlags));
+ // Add samples in reverse order and make sure the output is in correct order.
+ histogram->AddCount(/*sample=*/30, /*value=*/14);
+ histogram->AddCount(/*sample=*/20, /*value=*/15);
+ histogram->AddCount(/*sample=*/20, /*value=*/15);
+ histogram->AddCount(/*sample=*/30, /*value=*/14);
+
+ base::Histogram::Count total_count;
+ int64_t sum;
+ base::ListValue buckets;
+ GetCountAndBucketData(histogram, &total_count, &sum, &buckets);
+ EXPECT_EQ(58, total_count);
+ EXPECT_EQ(1440, sum);
+ EXPECT_EQ(2u, buckets.GetSize());
+
+ int low, high, count;
+ // Check the first bucket.
+ base::DictionaryValue* bucket1;
+ EXPECT_TRUE(buckets.GetDictionary(0, &bucket1));
+ EXPECT_TRUE(bucket1->GetInteger("low", &low));
+ EXPECT_TRUE(bucket1->GetInteger("high", &high));
+ EXPECT_TRUE(bucket1->GetInteger("count", &count));
+ EXPECT_EQ(20, low);
+ EXPECT_EQ(21, high);
+ EXPECT_EQ(30, count);
+
+ // Check the second bucket.
+ base::DictionaryValue* bucket2;
+ EXPECT_TRUE(buckets.GetDictionary(1, &bucket2));
+ EXPECT_TRUE(bucket2->GetInteger("low", &low));
+ EXPECT_TRUE(bucket2->GetInteger("high", &high));
+ EXPECT_TRUE(bucket2->GetInteger("count", &count));
+ EXPECT_EQ(30, low);
+ EXPECT_EQ(31, high);
+ EXPECT_EQ(28, count);
+}
+
TEST_P(HistogramTest, WriteAscii) {
HistogramBase* histogram =
LinearHistogram::FactoryGet("AsciiOut", /*minimum=*/1, /*maximum=*/10,
diff --git a/chromium/base/metrics/sparse_histogram.cc b/chromium/base/metrics/sparse_histogram.cc
index 61635d0417f..6d2720c2369 100644
--- a/chromium/base/metrics/sparse_histogram.cc
+++ b/chromium/base/metrics/sparse_histogram.cc
@@ -238,13 +238,9 @@ HistogramBase* SparseHistogram::DeserializeInfoImpl(PickleIterator* iter) {
}
void SparseHistogram::GetParameters(DictionaryValue* params) const {
- // TODO(kaiwang): Implement. (See HistogramBase::WriteJSON.)
-}
-
-void SparseHistogram::GetCountAndBucketData(Count* count,
- int64_t* sum,
- ListValue* buckets) const {
- // TODO(kaiwang): Implement. (See HistogramBase::WriteJSON.)
+ // Unlike Histogram::GetParameters, only set the type here, and no other
+ // params. The other params do not make sense for sparse histograms.
+ params->SetString("type", HistogramTypeToString(GetHistogramType()));
}
void SparseHistogram::WriteAsciiBody(const HistogramSamples& snapshot,
diff --git a/chromium/base/metrics/sparse_histogram.h b/chromium/base/metrics/sparse_histogram.h
index ab1d9157063..ff1ce569b92 100644
--- a/chromium/base/metrics/sparse_histogram.h
+++ b/chromium/base/metrics/sparse_histogram.h
@@ -75,10 +75,8 @@ class BASE_EXPORT SparseHistogram : public HistogramBase {
base::PickleIterator* iter);
static HistogramBase* DeserializeInfoImpl(base::PickleIterator* iter);
+ // Writes the type of the sparse histogram in the |params|.
void GetParameters(DictionaryValue* params) const override;
- void GetCountAndBucketData(Count* count,
- int64_t* sum,
- ListValue* buckets) const override;
// Helpers for emitting Ascii graphic. Each method appends data to output.
void WriteAsciiBody(const HistogramSamples& snapshot,
@@ -90,7 +88,7 @@ class BASE_EXPORT SparseHistogram : public HistogramBase {
void WriteAsciiHeader(const HistogramSamples& snapshot,
std::string* output) const;
- // For constuctor calling.
+ // For constructor calling.
friend class SparseHistogramTest;
// Protects access to |samples_|.
diff --git a/chromium/base/metrics/sparse_histogram_unittest.cc b/chromium/base/metrics/sparse_histogram_unittest.cc
index a92b68b4a6d..47d8b818920 100644
--- a/chromium/base/metrics/sparse_histogram_unittest.cc
+++ b/chromium/base/metrics/sparse_histogram_unittest.cc
@@ -78,6 +78,15 @@ class SparseHistogramTest : public testing::TestWithParam<bool> {
return std::unique_ptr<SparseHistogram>(new SparseHistogram(name));
}
+ void GetCountAndBucketData(SparseHistogram* histogram,
+ base::Histogram::Count* count,
+ int64_t* sum,
+ base::ListValue* buckets) {
+ // A simple wrapper around |GetCountAndBucketData| to make it visible for
+ // testing.
+ histogram->GetCountAndBucketData(count, sum, buckets);
+ }
+
const bool use_persistent_histogram_allocator_;
std::unique_ptr<StatisticsRecorder> statistics_recorder_;
@@ -387,6 +396,44 @@ TEST_P(SparseHistogramTest, HistogramNameHash) {
EXPECT_EQ(histogram->name_hash(), HashMetricName(kName));
}
+TEST_P(SparseHistogramTest, CheckGetCountAndBucketData) {
+ std::unique_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
+ // Add samples in reverse order and make sure the output is in correct order.
+ histogram->AddCount(/*sample=*/200, /*value=*/15);
+ histogram->AddCount(/*sample=*/100, /*value=*/5);
+ // Add samples to the same bucket and make sure they'll be aggregated.
+ histogram->AddCount(/*sample=*/100, /*value=*/5);
+
+ base::Histogram::Count total_count;
+ int64_t sum;
+ base::ListValue buckets;
+ GetCountAndBucketData(histogram.get(), &total_count, &sum, &buckets);
+ EXPECT_EQ(25, total_count);
+ EXPECT_EQ(4000, sum);
+ EXPECT_EQ(2u, buckets.GetSize());
+
+ int low, high, count;
+ // Check the first bucket.
+ base::DictionaryValue* bucket1;
+ EXPECT_TRUE(buckets.GetDictionary(0, &bucket1));
+ EXPECT_TRUE(bucket1->GetInteger("low", &low));
+ EXPECT_TRUE(bucket1->GetInteger("high", &high));
+ EXPECT_TRUE(bucket1->GetInteger("count", &count));
+ EXPECT_EQ(100, low);
+ EXPECT_EQ(101, high);
+ EXPECT_EQ(10, count);
+
+ // Check the second bucket.
+ base::DictionaryValue* bucket2;
+ EXPECT_TRUE(buckets.GetDictionary(1, &bucket2));
+ EXPECT_TRUE(bucket2->GetInteger("low", &low));
+ EXPECT_TRUE(bucket2->GetInteger("high", &high));
+ EXPECT_TRUE(bucket2->GetInteger("count", &count));
+ EXPECT_EQ(200, low);
+ EXPECT_EQ(201, high);
+ EXPECT_EQ(15, count);
+}
+
TEST_P(SparseHistogramTest, WriteAscii) {
HistogramBase* histogram =
SparseHistogram::FactoryGet("AsciiOut", HistogramBase::kNoFlags);
diff --git a/chromium/base/metrics/ukm_source_id.cc b/chromium/base/metrics/ukm_source_id.cc
index 3513b954a36..0b2a8baf296 100644
--- a/chromium/base/metrics/ukm_source_id.cc
+++ b/chromium/base/metrics/ukm_source_id.cc
@@ -33,7 +33,8 @@ UkmSourceId UkmSourceId::New() {
static_cast<int64_t>(RandUint64()) & ~kLowBitsMask;
// Generate some bits which are unique within the process, using a counter.
static AtomicSequenceNumber seq;
- UkmSourceId local_id = FromOtherId(seq.GetNext() + 1, UkmSourceId::Type::UKM);
+ UkmSourceId local_id =
+ FromOtherId(seq.GetNext() + 1, UkmSourceId::Type::DEFAULT);
// Combine the local and process bits to generate a unique ID.
return UkmSourceId((local_id.value_ & kLowBitsMask) | process_id_bits);
}
diff --git a/chromium/base/metrics/ukm_source_id.h b/chromium/base/metrics/ukm_source_id.h
index 3ba01a0d64c..1b9bdccece7 100644
--- a/chromium/base/metrics/ukm_source_id.h
+++ b/chromium/base/metrics/ukm_source_id.h
@@ -13,7 +13,9 @@ namespace base {
// An ID used to identify a Source to UKM, for recording information about it.
// These objects are copyable, assignable, and occupy 64-bits per instance.
-// Prefer passing them by value.
+// Prefer passing them by value. When a new type is added, please also update
+// the enum type in third_party/metrics_proto/ukm/source.proto and the
+// converting function ToProtobufSourceType.
class BASE_EXPORT UkmSourceId {
public:
enum class Type : int64_t {
@@ -21,7 +23,7 @@ class BASE_EXPORT UkmSourceId {
// 'custom' source other than the types below. Source of this type has
// additional restrictions with logging, as determined by
// IsWhitelistedSourceId.
- UKM = 0,
+ DEFAULT = 0,
// Sources created by navigation. They will be kept in memory as long as
// the associated tab is still alive and the number of sources are within
// the max threshold.
diff --git a/chromium/base/metrics/user_metrics.cc b/chromium/base/metrics/user_metrics.cc
index b007d62af3d..24a44515e60 100644
--- a/chromium/base/metrics/user_metrics.cc
+++ b/chromium/base/metrics/user_metrics.cc
@@ -14,7 +14,7 @@
#include "base/macros.h"
#include "base/threading/thread_checker.h"
#include "base/time/time.h"
-#include "base/trace_event/trace_event.h"
+#include "base/trace_event/base_tracing.h"
namespace base {
namespace {
diff --git a/chromium/base/numerics/checked_math_impl.h b/chromium/base/numerics/checked_math_impl.h
index e083389ebf3..b6cc2966a6d 100644
--- a/chromium/base/numerics/checked_math_impl.h
+++ b/chromium/base/numerics/checked_math_impl.h
@@ -251,13 +251,23 @@ struct CheckedModOp<T,
using result_type = typename MaxExponentPromotion<T, U>::type;
template <typename V>
static constexpr bool Do(T x, U y, V* result) {
+ if (BASE_NUMERICS_UNLIKELY(!y))
+ return false;
+
using Promotion = typename BigEnoughPromotion<T, U>::type;
- if (BASE_NUMERICS_LIKELY(y)) {
- Promotion presult = static_cast<Promotion>(x) % static_cast<Promotion>(y);
- *result = static_cast<Promotion>(presult);
- return IsValueInRangeForNumericType<V>(presult);
+ if (BASE_NUMERICS_UNLIKELY(
+ (std::is_signed<T>::value && std::is_signed<U>::value &&
+ IsTypeInRangeForNumericType<T, Promotion>::value &&
+ static_cast<Promotion>(x) ==
+ std::numeric_limits<Promotion>::lowest() &&
+ y == static_cast<U>(-1)))) {
+ *result = 0;
+ return true;
}
- return false;
+
+ Promotion presult = static_cast<Promotion>(x) % static_cast<Promotion>(y);
+ *result = static_cast<Promotion>(presult);
+ return IsValueInRangeForNumericType<V>(presult);
}
};
diff --git a/chromium/base/numerics/safe_conversions.h b/chromium/base/numerics/safe_conversions.h
index b9636fec428..b9f81e85b85 100644
--- a/chromium/base/numerics/safe_conversions.h
+++ b/chromium/base/numerics/safe_conversions.h
@@ -170,7 +170,7 @@ struct SaturateFastOp<
std::is_integral<Dst>::value &&
SaturateFastAsmOp<Dst, Src>::is_supported>::type> {
static const bool is_supported = true;
- static Dst Do(Src value) { return SaturateFastAsmOp<Dst, Src>::Do(value); }
+ static constexpr Dst Do(Src value) { return SaturateFastAsmOp<Dst, Src>::Do(value); }
};
template <typename Dst, typename Src>
@@ -181,7 +181,7 @@ struct SaturateFastOp<
std::is_integral<Dst>::value &&
!SaturateFastAsmOp<Dst, Src>::is_supported>::type> {
static const bool is_supported = true;
- static Dst Do(Src value) {
+ static constexpr Dst Do(Src value) {
// The exact order of the following is structured to hit the correct
// optimization heuristics across compilers. Do not change without
// checking the emitted code.
diff --git a/chromium/base/observer_list.h b/chromium/base/observer_list.h
index 8f30c1158cf..52c8fad0d2a 100644
--- a/chromium/base/observer_list.h
+++ b/chromium/base/observer_list.h
@@ -13,9 +13,10 @@
#include <utility>
#include <vector>
+#include "base/check_op.h"
#include "base/gtest_prod_util.h"
-#include "base/logging.h"
#include "base/macros.h"
+#include "base/notreached.h"
#include "base/observer_list_internal.h"
#include "base/sequence_checker.h"
#include "base/stl_util.h"
diff --git a/chromium/base/observer_list_internal.h b/chromium/base/observer_list_internal.h
index 6d28d410472..8df908be00f 100644
--- a/chromium/base/observer_list_internal.h
+++ b/chromium/base/observer_list_internal.h
@@ -6,8 +6,8 @@
#define BASE_OBSERVER_LIST_INTERNAL_H_
#include "base/base_export.h"
+#include "base/check_op.h"
#include "base/containers/linked_list.h"
-#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
#include "base/observer_list_types.h"
diff --git a/chromium/base/observer_list_threadsafe.h b/chromium/base/observer_list_threadsafe.h
index 727ba0e1efb..00040ab9b14 100644
--- a/chromium/base/observer_list_threadsafe.h
+++ b/chromium/base/observer_list_threadsafe.h
@@ -10,9 +10,9 @@
#include "base/base_export.h"
#include "base/bind.h"
+#include "base/check_op.h"
#include "base/lazy_instance.h"
#include "base/location.h"
-#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/observer_list.h"
diff --git a/chromium/base/observer_list_threadsafe_unittest.cc b/chromium/base/observer_list_threadsafe_unittest.cc
index 57685b8ead2..6baad56ef5a 100644
--- a/chromium/base/observer_list_threadsafe_unittest.cc
+++ b/chromium/base/observer_list_threadsafe_unittest.cc
@@ -10,6 +10,7 @@
#include "base/bind.h"
#include "base/compiler_specific.h"
#include "base/location.h"
+#include "base/logging.h"
#include "base/memory/weak_ptr.h"
#include "base/run_loop.h"
#include "base/sequenced_task_runner.h"
diff --git a/chromium/base/one_shot_event.h b/chromium/base/one_shot_event.h
index 26f84aa8831..d83254f874f 100644
--- a/chromium/base/one_shot_event.h
+++ b/chromium/base/one_shot_event.h
@@ -8,7 +8,7 @@
#include <vector>
#include "base/callback_forward.h"
-#include "base/logging.h"
+#include "base/check.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "base/threading/thread_checker.h"
diff --git a/chromium/base/optional.h b/chromium/base/optional.h
index 1586efd99fa..3707c8bc2d6 100644
--- a/chromium/base/optional.h
+++ b/chromium/base/optional.h
@@ -9,7 +9,7 @@
#include <type_traits>
#include <utility>
-#include "base/logging.h"
+#include "base/check.h"
#include "base/template_util.h"
namespace base {
diff --git a/chromium/base/pickle.cc b/chromium/base/pickle.cc
index c8d784a3a82..7f812821966 100644
--- a/chromium/base/pickle.cc
+++ b/chromium/base/pickle.cc
@@ -199,6 +199,17 @@ bool PickleIterator::ReadData(const char** data, int* length) {
return ReadBytes(data, *length);
}
+bool PickleIterator::ReadData(base::span<const uint8_t>* data) {
+ const char* ptr;
+ int length;
+
+ if (!ReadData(&ptr, &length))
+ return false;
+
+ *data = base::as_bytes(base::make_span(ptr, length));
+ return true;
+}
+
bool PickleIterator::ReadBytes(const char** data, int length) {
const char* read_from = GetReadPointerAndAdvance(length);
if (!read_from)
diff --git a/chromium/base/pickle.h b/chromium/base/pickle.h
index 1000a562dc2..6e3a08239ad 100644
--- a/chromium/base/pickle.h
+++ b/chromium/base/pickle.h
@@ -11,9 +11,10 @@
#include <string>
#include "base/base_export.h"
+#include "base/check_op.h"
#include "base/compiler_specific.h"
+#include "base/containers/span.h"
#include "base/gtest_prod_util.h"
-#include "base/logging.h"
#include "base/memory/ref_counted.h"
#include "base/strings/string16.h"
#include "base/strings/string_piece.h"
@@ -56,6 +57,9 @@ class BASE_EXPORT PickleIterator {
// until the message data is mutated). Do not keep the pointer around!
bool ReadData(const char** data, int* length) WARN_UNUSED_RESULT;
+ // Similar, but using base::span for convenience.
+ bool ReadData(base::span<const uint8_t>* data) WARN_UNUSED_RESULT;
+
// A pointer to the data will be placed in |*data|. The caller specifies the
// number of bytes to read, and ReadBytes will validate this length. The
// pointer placed into |*data| points into the message's buffer so it will be
diff --git a/chromium/base/posix/file_descriptor_shuffle.cc b/chromium/base/posix/file_descriptor_shuffle.cc
index d2fd39a95aa..deaacf07d74 100644
--- a/chromium/base/posix/file_descriptor_shuffle.cc
+++ b/chromium/base/posix/file_descriptor_shuffle.cc
@@ -8,8 +8,9 @@
#include <stddef.h>
#include <ostream>
-#include "base/posix/eintr_wrapper.h"
+#include "base/check.h"
#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
namespace base {
diff --git a/chromium/base/posix/unix_domain_socket.cc b/chromium/base/posix/unix_domain_socket.cc
index 7c087a53b5c..f1b925e148f 100644
--- a/chromium/base/posix/unix_domain_socket.cc
+++ b/chromium/base/posix/unix_domain_socket.cc
@@ -15,6 +15,7 @@
#include "base/files/scoped_file.h"
#include "base/logging.h"
+#include "base/notreached.h"
#include "base/pickle.h"
#include "base/posix/eintr_wrapper.h"
#include "base/stl_util.h"
diff --git a/chromium/base/power_monitor/power_monitor.cc b/chromium/base/power_monitor/power_monitor.cc
index 0a48f30f521..18cfacf17fa 100644
--- a/chromium/base/power_monitor/power_monitor.cc
+++ b/chromium/base/power_monitor/power_monitor.cc
@@ -7,8 +7,9 @@
#include <atomic>
#include <utility>
+#include "base/logging.h"
#include "base/power_monitor/power_monitor_source.h"
-#include "base/trace_event/trace_event.h"
+#include "base/trace_event/base_tracing.h"
namespace base {
@@ -25,12 +26,8 @@ bool PowerMonitor::IsInitialized() {
return GetInstance()->source_.get() != nullptr;
}
-bool PowerMonitor::AddObserver(PowerObserver* obs) {
- PowerMonitor* power_monitor = GetInstance();
- if (!IsInitialized())
- return false;
- power_monitor->observers_->AddObserver(obs);
- return true;
+void PowerMonitor::AddObserver(PowerObserver* obs) {
+ GetInstance()->observers_->AddObserver(obs);
}
void PowerMonitor::RemoveObserver(PowerObserver* obs) {
@@ -47,15 +44,19 @@ bool PowerMonitor::IsOnBatteryPower() {
}
void PowerMonitor::ShutdownForTesting() {
- PowerMonitor::GetInstance()->observers_->AssertEmpty();
GetInstance()->source_ = nullptr;
- g_is_process_suspended.store(false);
+ g_is_process_suspended.store(false, std::memory_order_relaxed);
}
bool PowerMonitor::IsProcessSuspended() {
return g_is_process_suspended.load(std::memory_order_relaxed);
}
+PowerObserver::DeviceThermalState PowerMonitor::GetCurrentThermalState() {
+ DCHECK(IsInitialized());
+ return GetInstance()->source_->GetCurrentThermalState();
+}
+
void PowerMonitor::NotifyPowerStateChange(bool battery_in_use) {
DCHECK(IsInitialized());
DVLOG(1) << "PowerStateChange: " << (battery_in_use ? "On" : "Off")
@@ -82,6 +83,15 @@ void PowerMonitor::NotifyResume() {
GetInstance()->observers_->Notify(FROM_HERE, &PowerObserver::OnResume);
}
+void PowerMonitor::NotifyThermalStateChange(
+ PowerObserver::DeviceThermalState new_state) {
+ DCHECK(IsInitialized());
+ DVLOG(1) << "ThermalStateChange: "
+ << PowerMonitorSource::DeviceThermalStateToString(new_state);
+ GetInstance()->observers_->Notify(
+ FROM_HERE, &PowerObserver::OnThermalStateChange, new_state);
+}
+
PowerMonitor* PowerMonitor::GetInstance() {
static base::NoDestructor<PowerMonitor> power_monitor;
return power_monitor.get();
diff --git a/chromium/base/power_monitor/power_monitor.h b/chromium/base/power_monitor/power_monitor.h
index fcf5ee482a5..6b80f3d2dd4 100644
--- a/chromium/base/power_monitor/power_monitor.h
+++ b/chromium/base/power_monitor/power_monitor.h
@@ -40,12 +40,10 @@ class BASE_EXPORT PowerMonitor {
// from which it was registered.
// Must not be called from within a notification callback.
//
- // AddObserver() fails and returns false if PowerMonitor::Initialize() has not
- // been invoked. Failure should only happen in unit tests, where the
- // PowerMonitor is generally not initialized. It is safe to call
- // RemoveObserver with a PowerObserver that was not successfully added as an
+ // It is safe to add observers before the PowerMonitor is initialized. It is
+ // safe to call RemoveObserver with a PowerObserver that was not added as an
// observer.
- static bool AddObserver(PowerObserver* observer);
+ static void AddObserver(PowerObserver* observer);
static void RemoveObserver(PowerObserver* observer);
// Is the computer currently on battery power. May only be called if the
@@ -58,6 +56,10 @@ class BASE_EXPORT PowerMonitor {
// what is the real power state.
static bool IsProcessSuspended();
+ // Read the current DeviceThermalState if known. Can be called on any thread.
+ // May only be called if the PowerMonitor has been initialized.
+ static PowerObserver::DeviceThermalState GetCurrentThermalState();
+
// Uninitializes the PowerMonitor. Should be called at the end of any unit
// test that mocks out the PowerMonitor, to avoid affecting subsequent tests.
// There must be no live PowerObservers when invoked. Safe to call even if the
@@ -76,6 +78,8 @@ class BASE_EXPORT PowerMonitor {
static void NotifyPowerStateChange(bool battery_in_use);
static void NotifySuspend();
static void NotifyResume();
+ static void NotifyThermalStateChange(
+ PowerObserver::DeviceThermalState new_state);
static PowerMonitor* GetInstance();
diff --git a/chromium/base/power_monitor/power_monitor_device_source.h b/chromium/base/power_monitor/power_monitor_device_source.h
index 409cf0902f6..83b057f139a 100644
--- a/chromium/base/power_monitor/power_monitor_device_source.h
+++ b/chromium/base/power_monitor/power_monitor_device_source.h
@@ -20,6 +20,7 @@
#include "base/mac/scoped_cftyperef.h"
#include "base/mac/scoped_ionotificationportref.h"
+#include "base/power_monitor/thermal_state_observer_mac.h"
#endif
#if defined(OS_IOS)
@@ -46,6 +47,8 @@ class BASE_EXPORT PowerMonitorDeviceSource : public PowerMonitorSource {
#endif
private:
+ friend class PowerMonitorDeviceSourceTest;
+
#if defined(OS_WIN)
// Represents a message-only window for power message handling on Windows.
// Only allow PowerMonitor to create it.
@@ -85,6 +88,9 @@ class BASE_EXPORT PowerMonitorDeviceSource : public PowerMonitorSource {
bool IsOnBatteryPowerImpl() override;
#if defined(OS_MACOSX) && !defined(OS_IOS)
+ // PowerMonitorSource:
+ PowerObserver::DeviceThermalState GetCurrentThermalState() override;
+
// Reference to the system IOPMrootDomain port.
io_connect_t power_manager_port_ = IO_OBJECT_NULL;
@@ -96,6 +102,9 @@ class BASE_EXPORT PowerMonitorDeviceSource : public PowerMonitorSource {
// Run loop source to observe power-source-change events.
ScopedCFTypeRef<CFRunLoopSourceRef> power_source_run_loop_source_;
+
+ // Observer of thermal state events: critical temperature etc.
+ std::unique_ptr<ThermalStateObserverMac> thermal_state_observer_;
#endif
#if defined(OS_IOS)
diff --git a/chromium/base/power_monitor/power_monitor_device_source_mac.mm b/chromium/base/power_monitor/power_monitor_device_source_mac.mm
index 71c7403e00c..8618be1549c 100644
--- a/chromium/base/power_monitor/power_monitor_device_source_mac.mm
+++ b/chromium/base/power_monitor/power_monitor_device_source_mac.mm
@@ -51,6 +51,14 @@ bool PowerMonitorDeviceSource::IsOnBatteryPowerImpl() {
return true;
}
+PowerObserver::DeviceThermalState
+PowerMonitorDeviceSource::GetCurrentThermalState() {
+ if (@available(macOS 10.10.3, *)) {
+ return thermal_state_observer_->GetCurrentThermalState();
+ };
+ return PowerObserver::DeviceThermalState::kUnknown;
+}
+
namespace {
void BatteryEventCallback(void*) {
@@ -82,6 +90,11 @@ void PowerMonitorDeviceSource::PlatformInit() {
CFRunLoopAddSource(CFRunLoopGetCurrent(), power_source_run_loop_source_,
kCFRunLoopDefaultMode);
+
+ if (@available(macOS 10.10.3, *)) {
+ thermal_state_observer_ = std::make_unique<ThermalStateObserverMac>(
+ BindRepeating(&PowerMonitorSource::ProcessThermalEvent));
+ };
}
void PowerMonitorDeviceSource::PlatformDestroy() {
diff --git a/chromium/base/power_monitor/power_monitor_device_source_unittest.cc b/chromium/base/power_monitor/power_monitor_device_source_unittest.cc
new file mode 100644
index 00000000000..889f7577ade
--- /dev/null
+++ b/chromium/base/power_monitor/power_monitor_device_source_unittest.cc
@@ -0,0 +1,39 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/power_monitor/power_monitor_device_source.h"
+
+#include "base/logging.h"
+#include "base/power_monitor/power_monitor.h"
+#include "base/power_monitor/power_monitor_source.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using DeviceThermalState = base::PowerObserver::DeviceThermalState;
+
+namespace base {
+
+class PowerMonitorDeviceSourceTest : public testing::Test {
+ public:
+ PowerMonitorDeviceSourceTest() = default;
+ ~PowerMonitorDeviceSourceTest() override = default;
+
+ DeviceThermalState GetCurrentThermalState() {
+ return power_monitor_device_source_.GetCurrentThermalState();
+ }
+
+ PowerMonitorDeviceSource power_monitor_device_source_;
+};
+
+TEST_F(PowerMonitorDeviceSourceTest, GetCurrentThermalState) {
+ const DeviceThermalState current_state = GetCurrentThermalState();
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+ // We cannot make assumptions on |current_state|. Print it out to use the var.
+ DVLOG(1) << PowerMonitorSource::DeviceThermalStateToString(current_state);
+#else
+ EXPECT_EQ(current_state, DeviceThermalState::kUnknown);
+#endif
+}
+
+} // namespace base
diff --git a/chromium/base/power_monitor/power_monitor_device_source_win.cc b/chromium/base/power_monitor/power_monitor_device_source_win.cc
index f7351356307..ae5ded7ea06 100644
--- a/chromium/base/power_monitor/power_monitor_device_source_win.cc
+++ b/chromium/base/power_monitor/power_monitor_device_source_win.cc
@@ -4,6 +4,7 @@
#include "base/power_monitor/power_monitor_device_source.h"
+#include "base/logging.h"
#include "base/message_loop/message_loop_current.h"
#include "base/power_monitor/power_monitor.h"
#include "base/power_monitor/power_monitor_source.h"
diff --git a/chromium/base/power_monitor/power_monitor_source.cc b/chromium/base/power_monitor/power_monitor_source.cc
index 794eee7b764..dab12ffc2bd 100644
--- a/chromium/base/power_monitor/power_monitor_source.cc
+++ b/chromium/base/power_monitor/power_monitor_source.cc
@@ -17,6 +17,11 @@ bool PowerMonitorSource::IsOnBatteryPower() {
return on_battery_power_;
}
+PowerObserver::DeviceThermalState PowerMonitorSource::GetCurrentThermalState() {
+ return PowerObserver::DeviceThermalState::kUnknown;
+}
+
+// static
void PowerMonitorSource::ProcessPowerEvent(PowerEvent event_id) {
if (!PowerMonitor::IsInitialized())
return;
@@ -58,6 +63,14 @@ void PowerMonitorSource::ProcessPowerEvent(PowerEvent event_id) {
}
}
+// static
+void PowerMonitorSource::ProcessThermalEvent(
+ PowerObserver::DeviceThermalState new_thermal_state) {
+ if (!PowerMonitor::IsInitialized())
+ return;
+ PowerMonitor::NotifyThermalStateChange(new_thermal_state);
+}
+
void PowerMonitorSource::SetInitialOnBatteryPowerState(bool on_battery_power) {
// Must only be called before an initialized PowerMonitor exists, otherwise
// the caller should have just used a normal
@@ -66,4 +79,23 @@ void PowerMonitorSource::SetInitialOnBatteryPowerState(bool on_battery_power) {
on_battery_power_ = on_battery_power;
}
+// static
+const char* PowerMonitorSource::DeviceThermalStateToString(
+ PowerObserver::DeviceThermalState state) {
+ switch (state) {
+ case PowerObserver::DeviceThermalState::kUnknown:
+ return "Unknown";
+ case PowerObserver::DeviceThermalState::kNominal:
+ return "Nominal";
+ case PowerObserver::DeviceThermalState::kFair:
+ return "Fair";
+ case PowerObserver::DeviceThermalState::kSerious:
+ return "Serious";
+ case PowerObserver::DeviceThermalState::kCritical:
+ return "Critical";
+ }
+ NOTREACHED();
+ return "Unknown";
+}
+
} // namespace base
diff --git a/chromium/base/power_monitor/power_monitor_source.h b/chromium/base/power_monitor/power_monitor_source.h
index 7f59a644026..a80dc59c6de 100644
--- a/chromium/base/power_monitor/power_monitor_source.h
+++ b/chromium/base/power_monitor/power_monitor_source.h
@@ -8,6 +8,7 @@
#include "base/base_export.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
+#include "base/power_monitor/power_observer.h"
#include "base/synchronization/lock.h"
namespace base {
@@ -30,15 +31,24 @@ class BASE_EXPORT PowerMonitorSource {
// Is the computer currently on battery power. Can be called on any thread.
bool IsOnBatteryPower();
+ // Reads the current DeviceThermalState, if available on the platform.
+ // Otherwise, returns kUnknown.
+ virtual PowerObserver::DeviceThermalState GetCurrentThermalState();
+
+ static const char* DeviceThermalStateToString(
+ PowerObserver::DeviceThermalState state);
+
protected:
friend class PowerMonitorTest;
// Friend function that is allowed to access the protected ProcessPowerEvent.
friend void ProcessPowerEventHelper(PowerEvent);
- // ProcessPowerEvent should only be called from a single thread, most likely
+ // Process*Event should only be called from a single thread, most likely
// the UI thread or, in child processes, the IO thread.
static void ProcessPowerEvent(PowerEvent event_id);
+ static void ProcessThermalEvent(
+ PowerObserver::DeviceThermalState new_thermal_state);
// Platform-specific method to check whether the system is currently
// running on battery power. Returns true if running on batteries,
diff --git a/chromium/base/power_monitor/power_monitor_unittest.cc b/chromium/base/power_monitor/power_monitor_unittest.cc
index 2f7e04485d8..db18fbfa9c7 100644
--- a/chromium/base/power_monitor/power_monitor_unittest.cc
+++ b/chromium/base/power_monitor/power_monitor_unittest.cc
@@ -13,12 +13,15 @@ namespace base {
class PowerMonitorTest : public testing::Test {
protected:
- PowerMonitorTest() {
+ PowerMonitorTest() = default;
+
+ void TearDown() override { PowerMonitor::ShutdownForTesting(); }
+
+ void PowerMonitorInitialize() {
power_monitor_source_ = new PowerMonitorTestSource();
PowerMonitor::Initialize(
std::unique_ptr<PowerMonitorSource>(power_monitor_source_));
}
- ~PowerMonitorTest() override { PowerMonitor::ShutdownForTesting(); }
PowerMonitorTestSource* source() { return power_monitor_source_; }
@@ -34,9 +37,11 @@ class PowerMonitorTest : public testing::Test {
TEST_F(PowerMonitorTest, PowerNotifications) {
const int kObservers = 5;
+ PowerMonitorInitialize();
+
PowerMonitorTestObserver observers[kObservers];
for (auto& index : observers)
- EXPECT_TRUE(PowerMonitor::AddObserver(&index));
+ PowerMonitor::AddObserver(&index);
// Sending resume when not suspended should have no effect.
source()->GenerateResumeEvent();
@@ -82,4 +87,53 @@ TEST_F(PowerMonitorTest, PowerNotifications) {
PowerMonitor::RemoveObserver(&index);
}
+TEST_F(PowerMonitorTest, ThermalThrottling) {
+ PowerMonitorTestObserver observer;
+ PowerMonitor::AddObserver(&observer);
+
+ PowerMonitorInitialize();
+
+ constexpr PowerObserver::DeviceThermalState kThermalStates[] = {
+ PowerObserver::DeviceThermalState::kUnknown,
+ PowerObserver::DeviceThermalState::kNominal,
+ PowerObserver::DeviceThermalState::kFair,
+ PowerObserver::DeviceThermalState::kSerious,
+ PowerObserver::DeviceThermalState::kCritical};
+
+ for (const auto state : kThermalStates) {
+ source()->GenerateThermalThrottlingEvent(state);
+ EXPECT_EQ(state, source()->GetCurrentThermalState());
+ EXPECT_EQ(observer.last_thermal_state(), state);
+ }
+
+ PowerMonitor::RemoveObserver(&observer);
+}
+
+TEST_F(PowerMonitorTest, AddObserverBeforeAndAfterInitialization) {
+ PowerMonitorTestObserver observer1;
+ PowerMonitorTestObserver observer2;
+
+ // An observer is added before the PowerMonitor initialization.
+ PowerMonitor::AddObserver(&observer1);
+
+ PowerMonitorInitialize();
+
+ // An observer is added after the PowerMonitor initialization.
+ PowerMonitor::AddObserver(&observer2);
+
+ // Simulate suspend/resume notifications.
+ source()->GenerateSuspendEvent();
+ EXPECT_EQ(observer1.suspends(), 1);
+ EXPECT_EQ(observer2.suspends(), 1);
+ EXPECT_EQ(observer1.resumes(), 0);
+ EXPECT_EQ(observer2.resumes(), 0);
+
+ source()->GenerateResumeEvent();
+ EXPECT_EQ(observer1.resumes(), 1);
+ EXPECT_EQ(observer2.resumes(), 1);
+
+ PowerMonitor::RemoveObserver(&observer1);
+ PowerMonitor::RemoveObserver(&observer2);
+}
+
} // namespace base
diff --git a/chromium/base/power_monitor/power_observer.h b/chromium/base/power_monitor/power_observer.h
index 658172c0ead..e6e3d89fc6b 100644
--- a/chromium/base/power_monitor/power_observer.h
+++ b/chromium/base/power_monitor/power_observer.h
@@ -12,6 +12,21 @@ namespace base {
class BASE_EXPORT PowerObserver {
public:
+ // Values to indicate the system's thermal states: from kNominal onwards to
+ // kCritical they represent increasing SoC die temperatures, usually needing
+ // disruptive actions by the system like e.g. turning on the fans (on systems
+ // equipped with those) or reducing voltage and frequency (oftentimes
+ // degrading overall responsiveness). The taxonomy is derived from MacOS (see
+ // e.g. [1]) but applies to others e.g. Linux/ChromeOS.
+ // [1] https://developer.apple.com/library/archive/documentation/Performance/Conceptual/power_efficiency_guidelines_osx/RespondToThermalStateChanges.html
+ enum class DeviceThermalState {
+ kUnknown,
+ kNominal,
+ kFair,
+ kSerious,
+ kCritical,
+ };
+
// Notification of a change in power status of the computer, such
// as from switching between battery and A/C power.
virtual void OnPowerStateChange(bool on_battery_power) {}
@@ -22,6 +37,15 @@ class BASE_EXPORT PowerObserver {
// Notification that the system is resuming.
virtual void OnResume() {}
+ // Notification of a change in the thermal status of the system, such as
+ // entering a critical temperature range. Depending on the severity, the SoC
+ // or the OS might take steps to reduce said temperature e.g., throttling the
+ // CPU or switching on the fans if available. API clients may react to the new
+ // state by reducing expensive computing tasks (e.g. video encoding), or
+ // notifying the user. The same |new_state| might be received repeatedly.
+ // TODO(crbug.com/1071431): implemented on MacOS, extend to Linux/CrOs.
+ virtual void OnThermalStateChange(DeviceThermalState new_state) {}
+
protected:
virtual ~PowerObserver() = default;
};
diff --git a/chromium/base/power_monitor/thermal_state_observer_mac.h b/chromium/base/power_monitor/thermal_state_observer_mac.h
new file mode 100644
index 00000000000..70466a3d8ff
--- /dev/null
+++ b/chromium/base/power_monitor/thermal_state_observer_mac.h
@@ -0,0 +1,40 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_POWER_MONITOR_THERMAL_STATE_OBSERVER_MAC_H_
+#define BASE_POWER_MONITOR_THERMAL_STATE_OBSERVER_MAC_H_
+
+#include <objc/objc.h>
+#include <memory>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/power_monitor/power_observer.h"
+
+namespace base {
+
+// This class is used to listen for the thermal state change notification
+// NSProcessInfoThermalStateDidChangeNotification, routing it to
+// PowerMonitorSource.
+class BASE_EXPORT ThermalStateObserverMac {
+ public:
+ using StateUpdateCallback =
+ base::RepeatingCallback<void(PowerObserver::DeviceThermalState)>;
+
+ explicit ThermalStateObserverMac(StateUpdateCallback state_update_callback);
+ ~ThermalStateObserverMac();
+
+ PowerObserver::DeviceThermalState GetCurrentThermalState();
+
+ private:
+ FRIEND_TEST_ALL_PREFIXES(ThermalStateObserverMacTest, StateChange);
+ PowerObserver::DeviceThermalState state_for_testing_ =
+ PowerObserver::DeviceThermalState::kUnknown;
+
+ id thermal_state_update_observer_;
+};
+
+} // namespace base
+
+#endif // BASE_POWER_MONITOR_THERMAL_STATE_OBSERVER_MAC_H_
diff --git a/chromium/base/power_monitor/thermal_state_observer_mac.mm b/chromium/base/power_monitor/thermal_state_observer_mac.mm
new file mode 100644
index 00000000000..54b81aaafee
--- /dev/null
+++ b/chromium/base/power_monitor/thermal_state_observer_mac.mm
@@ -0,0 +1,75 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/power_monitor/thermal_state_observer_mac.h"
+
+#import <Foundation/Foundation.h>
+
+#include "base/logging.h"
+#include "base/power_monitor/power_monitor.h"
+#include "base/power_monitor/power_monitor_source.h"
+
+namespace {
+
+base::PowerObserver::DeviceThermalState
+NSProcessInfoThermalStateToDeviceThermalState(
+ NSProcessInfoThermalState nsinfo_state) NS_AVAILABLE_MAC(10_10_3) {
+ switch (nsinfo_state) {
+ case NSProcessInfoThermalStateNominal:
+ return base::PowerObserver::DeviceThermalState::kNominal;
+ case NSProcessInfoThermalStateFair:
+ return base::PowerObserver::DeviceThermalState::kFair;
+ case NSProcessInfoThermalStateSerious:
+ return base::PowerObserver::DeviceThermalState::kSerious;
+ case NSProcessInfoThermalStateCritical:
+ return base::PowerObserver::DeviceThermalState::kCritical;
+ }
+ NOTREACHED();
+ return base::PowerObserver::DeviceThermalState::kUnknown;
+}
+}
+
+namespace base {
+
+ThermalStateObserverMac::ThermalStateObserverMac(
+ StateUpdateCallback state_update_callback) NS_AVAILABLE_MAC(10_10_3) {
+ auto on_state_change_block = ^(NSNotification* notification) {
+ auto state = PowerObserver::DeviceThermalState::kUnknown;
+ // |thermalState| is basically a scale of power usage and its associated
+ // thermal dissipation increase, from Nominal upwards, see:
+ // https://developer.apple.com/library/archive/documentation/Performance/Conceptual/power_efficiency_guidelines_osx/RespondToThermalStateChanges.html
+ NSProcessInfoThermalState nsinfo_state =
+ [[NSProcessInfo processInfo] thermalState];
+ state = NSProcessInfoThermalStateToDeviceThermalState(nsinfo_state);
+ if (state_for_testing_ != PowerObserver::DeviceThermalState::kUnknown)
+ state = state_for_testing_;
+ DVLOG(1) << __func__ << ": "
+ << PowerMonitorSource::DeviceThermalStateToString(state);
+ state_update_callback.Run(state);
+ };
+
+ thermal_state_update_observer_ = [[NSNotificationCenter defaultCenter]
+ addObserverForName:NSProcessInfoThermalStateDidChangeNotification
+ object:nil
+ queue:nil
+ usingBlock:on_state_change_block];
+
+ // Force a first call to grab the current status.
+ on_state_change_block(nil);
+}
+
+ThermalStateObserverMac::~ThermalStateObserverMac() {
+ [[NSNotificationCenter defaultCenter]
+ removeObserver:thermal_state_update_observer_];
+}
+
+PowerObserver::DeviceThermalState
+ThermalStateObserverMac::GetCurrentThermalState() NS_AVAILABLE_MAC(10_10_3) {
+ if (state_for_testing_ != PowerObserver::DeviceThermalState::kUnknown)
+ return state_for_testing_;
+ NSProcessInfoThermalState nsinfo_state =
+ [[NSProcessInfo processInfo] thermalState];
+ return NSProcessInfoThermalStateToDeviceThermalState(nsinfo_state);
+}
+}
diff --git a/chromium/base/power_monitor/thermal_state_observer_mac_unittest.mm b/chromium/base/power_monitor/thermal_state_observer_mac_unittest.mm
new file mode 100644
index 00000000000..3ecaae78e69
--- /dev/null
+++ b/chromium/base/power_monitor/thermal_state_observer_mac_unittest.mm
@@ -0,0 +1,52 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/power_monitor/thermal_state_observer_mac.h"
+
+#include <memory>
+#include <queue>
+
+#import <Foundation/Foundation.h>
+
+#include "base/power_monitor/power_monitor.h"
+#include "base/power_monitor/power_monitor_source.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using DeviceThermalState = base::PowerObserver::DeviceThermalState;
+
+namespace base {
+
+class ThermalStateObserverMacTest : public testing::Test {
+ public:
+ ThermalStateObserverMacTest() = default;
+ ~ThermalStateObserverMacTest() override = default;
+
+ void OnStateChange(DeviceThermalState state) { state_history_.push(state); }
+
+ std::queue<DeviceThermalState> state_history_;
+ std::unique_ptr<ThermalStateObserverMac> thermal_state_observer_;
+};
+
+// Verifies that a NSProcessInfoThermalStateDidChangeNotification produces the
+// adequate OnStateChange() call.
+TEST_F(ThermalStateObserverMacTest, StateChange) NS_AVAILABLE_MAC(10_10_3) {
+ EXPECT_TRUE(state_history_.empty());
+
+ // ThermalStateObserverMac sends the current thermal state on construction.
+ thermal_state_observer_ =
+ std::make_unique<ThermalStateObserverMac>(BindRepeating(
+ &ThermalStateObserverMacTest::OnStateChange, Unretained(this)));
+ EXPECT_EQ(state_history_.size(), 1u);
+ state_history_.pop();
+
+ thermal_state_observer_->state_for_testing_ = DeviceThermalState::kCritical;
+ [NSNotificationCenter.defaultCenter
+ postNotificationName:NSProcessInfoThermalStateDidChangeNotification
+ object:nil
+ userInfo:nil];
+ EXPECT_EQ(state_history_.size(), 1u);
+ EXPECT_EQ(state_history_.front(), DeviceThermalState::kCritical);
+}
+
+} // namespace base
diff --git a/chromium/base/process/internal_linux.cc b/chromium/base/process/internal_linux.cc
index 35b491738af..41cae833a24 100644
--- a/chromium/base/process/internal_linux.cc
+++ b/chromium/base/process/internal_linux.cc
@@ -13,6 +13,7 @@
#include "base/files/file_util.h"
#include "base/logging.h"
+#include "base/notreached.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
@@ -56,6 +57,7 @@ pid_t ProcDirSlotToPid(const char* d_name) {
}
bool ReadProcFile(const FilePath& file, std::string* buffer) {
+ DCHECK(FilePath(kProcDir).IsParent(file));
buffer->clear();
// Synchronously reading files in /proc is safe.
ThreadRestrictions::ScopedAllowIO allow_io;
diff --git a/chromium/base/process/internal_linux.h b/chromium/base/process/internal_linux.h
index d8904fd1102..dd6657ac795 100644
--- a/chromium/base/process/internal_linux.h
+++ b/chromium/base/process/internal_linux.h
@@ -30,6 +30,11 @@ extern const char kStatFile[];
// Returns a FilePath to "/proc/pid".
base::FilePath GetProcPidDir(pid_t pid);
+// Reads a file from /proc into a string. This is allowed on any thread as
+// reading from /proc does not hit the disk. Returns true if the file can be
+// read and is non-empty.
+bool ReadProcFile(const FilePath& file, std::string* buffer);
+
// Take a /proc directory entry named |d_name|, and if it is the directory for
// a process, convert it to a pid_t.
// Returns 0 on failure.
diff --git a/chromium/base/process/kill_fuchsia.cc b/chromium/base/process/kill_fuchsia.cc
index 2684a14756d..1b7bbd6a283 100644
--- a/chromium/base/process/kill_fuchsia.cc
+++ b/chromium/base/process/kill_fuchsia.cc
@@ -6,6 +6,7 @@
#include <zircon/syscalls.h>
+#include "base/logging.h"
#include "base/process/process_iterator.h"
#include "base/task/post_task.h"
#include "base/threading/platform_thread.h"
diff --git a/chromium/base/process/kill_win.cc b/chromium/base/process/kill_win.cc
index 3b85dea1cd4..61e8ed5c86c 100644
--- a/chromium/base/process/kill_win.cc
+++ b/chromium/base/process/kill_win.cc
@@ -12,6 +12,7 @@
#include "base/logging.h"
#include "base/macros.h"
+#include "base/notreached.h"
#include "base/process/memory.h"
#include "base/process/process_iterator.h"
diff --git a/chromium/base/process/launch_mac.cc b/chromium/base/process/launch_mac.cc
index 6fc6ba115fb..9e523d39181 100644
--- a/chromium/base/process/launch_mac.cc
+++ b/chromium/base/process/launch_mac.cc
@@ -19,7 +19,7 @@
#include "base/process/environment_internal.h"
#include "base/threading/scoped_blocking_call.h"
#include "base/threading/thread_restrictions.h"
-#include "base/trace_event/trace_event.h"
+#include "base/trace_event/base_tracing.h"
extern "C" {
// Changes the current thread's directory to a path or directory file
diff --git a/chromium/base/process/launch_posix.cc b/chromium/base/process/launch_posix.cc
index 9b7573fdc65..8ad7aa154fa 100644
--- a/chromium/base/process/launch_posix.cc
+++ b/chromium/base/process/launch_posix.cc
@@ -45,7 +45,7 @@
#include "base/threading/scoped_blocking_call.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
-#include "base/trace_event/trace_event.h"
+#include "base/trace_event/base_tracing.h"
#include "build/build_config.h"
#if defined(OS_LINUX) || defined(OS_AIX)
diff --git a/chromium/base/process/memory_linux.cc b/chromium/base/process/memory_linux.cc
index 6a6091a6047..ac8ffe1c243 100644
--- a/chromium/base/process/memory_linux.cc
+++ b/chromium/base/process/memory_linux.cc
@@ -132,11 +132,11 @@ bool UncheckedMalloc(size_t size, void** result) {
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
*result = allocator::UncheckedAlloc(size);
#elif defined(MEMORY_TOOL_REPLACES_ALLOCATOR) || \
- (!defined(LIBC_GLIBC) && !defined(USE_TCMALLOC))
+ (!defined(LIBC_GLIBC) && !BUILDFLAG(USE_TCMALLOC))
*result = malloc(size);
-#elif defined(LIBC_GLIBC) && !defined(USE_TCMALLOC)
+#elif defined(LIBC_GLIBC) && !BUILDFLAG(USE_TCMALLOC)
*result = __libc_malloc(size);
-#elif defined(USE_TCMALLOC)
+#elif BUILDFLAG(USE_TCMALLOC)
*result = tc_malloc_skip_new_handler(size);
#endif
return *result != nullptr;
diff --git a/chromium/base/process/process_handle.cc b/chromium/base/process/process_handle.cc
index 7b130b40b14..fcaa129126c 100644
--- a/chromium/base/process/process_handle.cc
+++ b/chromium/base/process/process_handle.cc
@@ -6,6 +6,8 @@
#include <stdint.h>
+#include <ostream>
+
#include "base/check.h"
#include "build/build_config.h"
diff --git a/chromium/base/process/process_info_win.cc b/chromium/base/process/process_info_win.cc
index 05d0f21a930..594fd1d112d 100644
--- a/chromium/base/process/process_info_win.cc
+++ b/chromium/base/process/process_info_win.cc
@@ -9,6 +9,7 @@
#include "base/logging.h"
#include "base/memory/ptr_util.h"
+#include "base/notreached.h"
#include "base/time/time.h"
#include "base/win/scoped_handle.h"
diff --git a/chromium/base/process/process_iterator_linux.cc b/chromium/base/process/process_iterator_linux.cc
index 39313b2bb55..fd1821a2670 100644
--- a/chromium/base/process/process_iterator_linux.cc
+++ b/chromium/base/process/process_iterator_linux.cc
@@ -8,6 +8,7 @@
#include "base/files/file_util.h"
#include "base/logging.h"
+#include "base/notreached.h"
#include "base/process/internal_linux.h"
#include "base/strings/string_split.h"
#include "base/strings/string_util.h"
diff --git a/chromium/base/process/process_metrics.h b/chromium/base/process/process_metrics.h
index 2b7cfe14f73..be23db0f928 100644
--- a/chromium/base/process/process_metrics.h
+++ b/chromium/base/process/process_metrics.h
@@ -13,11 +13,14 @@
#include <memory>
#include <string>
+#include <utility>
+#include <vector>
#include "base/base_export.h"
#include "base/gtest_prod_util.h"
#include "base/macros.h"
#include "base/process/process_handle.h"
+#include "base/threading/platform_thread.h"
#include "base/time/time.h"
#include "base/values.h"
#include "build/build_config.h"
@@ -118,6 +121,17 @@ class BASE_EXPORT ProcessMetrics {
// will result in a time delta of 2 seconds/per 1 wall-clock second.
TimeDelta GetCumulativeCPUUsage();
+ // Emits the cumulative CPU usage for all currently active threads since they
+ // were started into the output parameter (replacing its current contents).
+ // Threads that have already terminated will not be reported. Thus, the sum of
+ // these times may not equal the value returned by GetCumulativeCPUUsage().
+ // Returns false on failure. We return the usage via an output parameter to
+ // allow reuse of CPUUsagePerThread's std::vector by the caller, e.g. to avoid
+ // allocations between repeated calls to method.
+ // NOTE: Currently only supported on Linux/Android.
+ using CPUUsagePerThread = std::vector<std::pair<PlatformThreadId, TimeDelta>>;
+ bool GetCumulativeCPUUsagePerThread(CPUUsagePerThread&);
+
// Returns the number of average idle cpu wakeups per second since the last
// call.
int GetIdleWakeupsPerSecond();
diff --git a/chromium/base/process/process_metrics_freebsd.cc b/chromium/base/process/process_metrics_freebsd.cc
index a552c033dd8..3fc3160cc1e 100644
--- a/chromium/base/process/process_metrics_freebsd.cc
+++ b/chromium/base/process/process_metrics_freebsd.cc
@@ -42,6 +42,11 @@ TimeDelta ProcessMetrics::GetCumulativeCPUUsage() {
return TimeDelta();
}
+bool ProcessMetrics::GetCumulativeCPUUsagePerThread(CPUUsagePerThread&) {
+ NOTREACHED();
+ return false;
+}
+
bool ProcessMetrics::GetIOCounters(IoCounters* io_counters) const {
return false;
}
diff --git a/chromium/base/process/process_metrics_fuchsia.cc b/chromium/base/process/process_metrics_fuchsia.cc
index 3c7d14cd84e..fc9b0c46eee 100644
--- a/chromium/base/process/process_metrics_fuchsia.cc
+++ b/chromium/base/process/process_metrics_fuchsia.cc
@@ -6,6 +6,8 @@
#include <lib/fdio/limits.h>
+#include "base/notreached.h"
+
namespace base {
size_t GetMaxFds() {
@@ -35,6 +37,12 @@ TimeDelta ProcessMetrics::GetCumulativeCPUUsage() {
return TimeDelta();
}
+bool ProcessMetrics::GetCumulativeCPUUsagePerThread(CPUUsagePerThread&) {
+ // TODO(https://crbug.com/926581).
+ NOTIMPLEMENTED();
+ return false;
+}
+
bool GetSystemMemoryInfo(SystemMemoryInfoKB* meminfo) {
// TODO(https://crbug.com/926581).
return false;
diff --git a/chromium/base/process/process_metrics_ios.cc b/chromium/base/process/process_metrics_ios.cc
index ae36b7d4fc6..6e466801f3b 100644
--- a/chromium/base/process/process_metrics_ios.cc
+++ b/chromium/base/process/process_metrics_ios.cc
@@ -32,6 +32,11 @@ TimeDelta ProcessMetrics::GetCumulativeCPUUsage() {
return TimeDelta();
}
+bool ProcessMetrics::GetCumulativeCPUUsagePerThread(CPUUsagePerThread&) {
+ NOTIMPLEMENTED();
+ return false;
+}
+
size_t GetMaxFds() {
static const rlim_t kSystemDefaultMaxFds = 256;
rlim_t max_fds;
diff --git a/chromium/base/process/process_metrics_linux.cc b/chromium/base/process/process_metrics_linux.cc
index 7ffccec2b3e..99bc54bc32b 100644
--- a/chromium/base/process/process_metrics_linux.cc
+++ b/chromium/base/process/process_metrics_linux.cc
@@ -18,6 +18,7 @@
#include "base/files/file_util.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
+#include "base/notreached.h"
#include "base/optional.h"
#include "base/process/internal_linux.h"
#include "base/process/process_metrics_iocounters.h"
@@ -60,13 +61,9 @@ bool ReadProcFileToTrimmedStringPairs(pid_t pid,
StringPiece filename,
StringPairs* key_value_pairs) {
std::string status_data;
- {
- // Synchronously reading files in /proc does not hit the disk.
- ThreadRestrictions::ScopedAllowIO allow_io;
- FilePath status_file = internal::GetProcPidDir(pid).Append(filename);
- if (!ReadFileToString(status_file, &status_data))
- return false;
- }
+ FilePath status_file = internal::GetProcPidDir(pid).Append(filename);
+ if (!internal::ReadProcFile(status_file, &status_data))
+ return false;
SplitStringIntoKeyValuePairs(status_data, ':', '\n', key_value_pairs);
TrimKeyValuePairs(key_value_pairs);
return true;
@@ -130,6 +127,13 @@ bool ReadProcStatusAndGetFieldAsUint64(pid_t pid,
}
#endif // defined(OS_LINUX) || defined(OS_AIX)
+// Get the total CPU from a proc stat buffer. Return value is number of jiffies
+// on success or 0 if parsing failed.
+int64_t ParseTotalCPUTimeFromStats(const std::vector<std::string>& proc_stats) {
+ return internal::GetProcStatsFieldAsInt64(proc_stats, internal::VM_UTIME) +
+ internal::GetProcStatsFieldAsInt64(proc_stats, internal::VM_STIME);
+}
+
// Get the total CPU of a single process. Return value is number of jiffies
// on success or -1 on error.
int64_t GetProcessCPU(pid_t pid) {
@@ -140,11 +144,7 @@ int64_t GetProcessCPU(pid_t pid) {
return -1;
}
- int64_t total_cpu =
- internal::GetProcStatsFieldAsInt64(proc_stats, internal::VM_UTIME) +
- internal::GetProcStatsFieldAsInt64(proc_stats, internal::VM_STIME);
-
- return total_cpu;
+ return ParseTotalCPUTimeFromStats(proc_stats);
}
#if defined(OS_CHROMEOS)
@@ -175,7 +175,7 @@ void ReadChromeOSGraphicsMemory(SystemMemoryInfoKB* meminfo) {
// Incorporate Mali graphics memory if present.
FilePath mali_memory_file("/sys/class/misc/mali0/device/memory");
std::string mali_memory_data;
- if (ReadFileToString(mali_memory_file, &mali_memory_data)) {
+ if (ReadFileToStringNonBlocking(mali_memory_file, &mali_memory_data)) {
long long mali_size = -1;
int num_res = sscanf(mali_memory_data.c_str(), "%lld bytes", &mali_size);
if (num_res == 1)
@@ -202,6 +202,43 @@ TimeDelta ProcessMetrics::GetCumulativeCPUUsage() {
return internal::ClockTicksToTimeDelta(GetProcessCPU(process_));
}
+bool ProcessMetrics::GetCumulativeCPUUsagePerThread(
+ CPUUsagePerThread& cpu_per_thread) {
+ cpu_per_thread.clear();
+
+ // Iterate through the different threads tracked in /proc/<pid>/task.
+ FilePath fd_path = internal::GetProcPidDir(process_).Append("task");
+
+ DirReaderPosix dir_reader(fd_path.value().c_str());
+ if (!dir_reader.IsValid())
+ return false;
+
+ for (; dir_reader.Next();) {
+ const char* tid_str = dir_reader.name();
+ if (strcmp(tid_str, ".") == 0 || strcmp(tid_str, "..") == 0)
+ continue;
+
+ PlatformThreadId tid;
+ if (!StringToInt(tid_str, &tid))
+ continue;
+
+ FilePath thread_stat_path = fd_path.Append(tid_str).Append("stat");
+
+ std::string buffer;
+ std::vector<std::string> proc_stats;
+ if (!internal::ReadProcFile(thread_stat_path, &buffer) ||
+ !internal::ParseProcStats(buffer, &proc_stats)) {
+ continue;
+ }
+
+ TimeDelta thread_time =
+ internal::ClockTicksToTimeDelta(ParseTotalCPUTimeFromStats(proc_stats));
+ cpu_per_thread.emplace_back(tid, thread_time);
+ }
+
+ return !cpu_per_thread.empty();
+}
+
// For the /proc/self/io file to exist, the Linux kernel must have
// CONFIG_TASK_IO_ACCOUNTING enabled.
bool ProcessMetrics::GetIOCounters(IoCounters* io_counters) const {
@@ -280,7 +317,7 @@ int ProcessMetrics::GetOpenFdSoftLimit() const {
FilePath fd_path = internal::GetProcPidDir(process_).Append("limits");
std::string limits_contents;
- if (!ReadFileToString(fd_path, &limits_contents))
+ if (!ReadFileToStringNonBlocking(fd_path, &limits_contents))
return -1;
for (const auto& line : SplitStringPiece(
@@ -556,7 +593,7 @@ bool GetSystemMemoryInfo(SystemMemoryInfoKB* meminfo) {
// Used memory is: total - free - buffers - caches
FilePath meminfo_file("/proc/meminfo");
std::string meminfo_data;
- if (!ReadFileToString(meminfo_file, &meminfo_data)) {
+ if (!ReadFileToStringNonBlocking(meminfo_file, &meminfo_data)) {
DLOG(WARNING) << "Failed to open " << meminfo_file.value();
return false;
}
@@ -587,7 +624,7 @@ bool GetVmStatInfo(VmStatInfo* vmstat) {
FilePath vmstat_file("/proc/vmstat");
std::string vmstat_data;
- if (!ReadFileToString(vmstat_file, &vmstat_data)) {
+ if (!ReadFileToStringNonBlocking(vmstat_file, &vmstat_data)) {
DLOG(WARNING) << "Failed to open " << vmstat_file.value();
return false;
}
@@ -666,7 +703,7 @@ bool GetSystemDiskInfo(SystemDiskInfo* diskinfo) {
FilePath diskinfo_file("/proc/diskstats");
std::string diskinfo_data;
- if (!ReadFileToString(diskinfo_file, &diskinfo_data)) {
+ if (!ReadFileToStringNonBlocking(diskinfo_file, &diskinfo_data)) {
DLOG(WARNING) << "Failed to open " << diskinfo_file.value();
return false;
}
@@ -866,7 +903,7 @@ bool GetSwapInfoImpl(SwapInfo* swap_info) {
}
std::string mm_stat_data;
- if (!ReadFileToString(zram_mm_stat_file, &mm_stat_data)) {
+ if (!ReadFileToStringNonBlocking(zram_mm_stat_file, &mm_stat_data)) {
DLOG(WARNING) << "Failed to open " << zram_mm_stat_file.value();
return false;
}
@@ -879,7 +916,7 @@ bool GetSwapInfoImpl(SwapInfo* swap_info) {
FilePath zram_stat_file("/sys/block/zram0/stat");
std::string stat_data;
- if (!ReadFileToString(zram_stat_file, &stat_data)) {
+ if (!ReadFileToStringNonBlocking(zram_stat_file, &stat_data)) {
DLOG(WARNING) << "Failed to open " << zram_stat_file.value();
return false;
}
diff --git a/chromium/base/process/process_metrics_mac.cc b/chromium/base/process/process_metrics_mac.cc
index d0f35471cb7..33b10d8563f 100644
--- a/chromium/base/process/process_metrics_mac.cc
+++ b/chromium/base/process/process_metrics_mac.cc
@@ -154,6 +154,11 @@ TimeDelta ProcessMetrics::GetCumulativeCPUUsage() {
return TimeDelta::FromMicroseconds(TimeValToMicroseconds(task_timeval));
}
+bool ProcessMetrics::GetCumulativeCPUUsagePerThread(CPUUsagePerThread&) {
+ NOTIMPLEMENTED();
+ return false;
+}
+
int ProcessMetrics::GetPackageIdleWakeupsPerSecond() {
mach_port_t task = TaskForPid(process_);
task_power_info power_info_data;
diff --git a/chromium/base/process/process_metrics_openbsd.cc b/chromium/base/process/process_metrics_openbsd.cc
index 0fb41b6ff3a..0399f8d4441 100644
--- a/chromium/base/process/process_metrics_openbsd.cc
+++ b/chromium/base/process/process_metrics_openbsd.cc
@@ -64,6 +64,11 @@ TimeDelta ProcessMetrics::GetCumulativeCPUUsage() {
return TimeDelta();
}
+bool ProcessMetrics::GetCumulativeCPUUsagePerThread(CPUUsagePerThread&) {
+ NOTREACHED();
+ return false;
+}
+
ProcessMetrics::ProcessMetrics(ProcessHandle process)
: process_(process),
last_cpu_(0) {}
diff --git a/chromium/base/process/process_metrics_unittest.cc b/chromium/base/process/process_metrics_unittest.cc
index 808a7be09d0..c269a4713f7 100644
--- a/chromium/base/process/process_metrics_unittest.cc
+++ b/chromium/base/process/process_metrics_unittest.cc
@@ -35,7 +35,8 @@
namespace base {
namespace debug {
-#if defined(OS_LINUX) || defined(OS_CHROMEOS) || defined(OS_WIN)
+#if defined(OS_LINUX) || defined(OS_CHROMEOS) || defined(OS_WIN) || \
+ defined(OS_ANDROID)
namespace {
void BusyWork(std::vector<std::string>* vec) {
@@ -47,7 +48,8 @@ void BusyWork(std::vector<std::string>* vec) {
}
} // namespace
-#endif // defined(OS_LINUX) || defined(OS_CHROMEOS)
+#endif // defined(OS_LINUX) || defined(OS_CHROMEOS) || defined(OS_WIN) ||
+ // defined(OS_ANDROID)
// Tests for SystemMetrics.
// Exists as a class so it can be a friend of SystemMetrics.
@@ -376,7 +378,7 @@ TEST_F(SystemMetricsTest, TestNoNegativeCpuUsage) {
EXPECT_GE(metrics->GetPlatformIndependentCPUUsage(), 0.0);
}
-#endif // defined(OS_LINUX) || defined(OS_CHROMEOS)
+#endif // defined(OS_LINUX) || defined(OS_CHROMEOS) || defined(OS_WIN)
#if defined(OS_CHROMEOS)
TEST_F(SystemMetricsTest, ParseZramMmStat) {
@@ -646,6 +648,7 @@ TEST(ProcessMetricsTest, GetOpenFdCount) {
#endif // defined(OS_LINUX) || (defined(OS_MACOSX) && !defined(OS_IOS))
#if defined(OS_ANDROID) || defined(OS_LINUX)
+
TEST(ProcessMetricsTestLinux, GetPageFaultCounts) {
std::unique_ptr<base::ProcessMetrics> process_metrics(
base::ProcessMetrics::CreateProcessMetrics(
@@ -676,6 +679,66 @@ TEST(ProcessMetricsTestLinux, GetPageFaultCounts) {
ASSERT_GT(counts_after.minor, counts.minor);
ASSERT_GE(counts_after.major, counts.major);
}
+
+TEST(ProcessMetricsTestLinux, GetCumulativeCPUUsagePerThread) {
+ ProcessHandle handle = GetCurrentProcessHandle();
+ std::unique_ptr<ProcessMetrics> metrics(
+ ProcessMetrics::CreateProcessMetrics(handle));
+
+ Thread thread1("thread1");
+ thread1.StartAndWaitForTesting();
+ ASSERT_TRUE(thread1.IsRunning());
+
+ std::vector<std::string> vec1;
+ thread1.task_runner()->PostTask(FROM_HERE, BindOnce(&BusyWork, &vec1));
+
+ ProcessMetrics::CPUUsagePerThread prev_thread_times;
+ EXPECT_TRUE(metrics->GetCumulativeCPUUsagePerThread(prev_thread_times));
+
+ // Should have at least the test runner thread and the thread spawned above.
+ EXPECT_GE(prev_thread_times.size(), 2u);
+ EXPECT_TRUE(std::any_of(
+ prev_thread_times.begin(), prev_thread_times.end(),
+ [&thread1](const std::pair<PlatformThreadId, base::TimeDelta>& entry) {
+ return entry.first == thread1.GetThreadId();
+ }));
+ EXPECT_TRUE(std::any_of(
+ prev_thread_times.begin(), prev_thread_times.end(),
+ [](const std::pair<PlatformThreadId, base::TimeDelta>& entry) {
+ return entry.first == base::PlatformThread::CurrentId();
+ }));
+
+ for (const auto& entry : prev_thread_times) {
+ EXPECT_GE(entry.second, base::TimeDelta());
+ }
+
+ thread1.Stop();
+
+ ProcessMetrics::CPUUsagePerThread current_thread_times;
+ EXPECT_TRUE(metrics->GetCumulativeCPUUsagePerThread(current_thread_times));
+
+ // The stopped thread may still be reported until the kernel cleans it up.
+ EXPECT_GE(prev_thread_times.size(), 1u);
+ EXPECT_TRUE(std::any_of(
+ current_thread_times.begin(), current_thread_times.end(),
+ [](const std::pair<PlatformThreadId, base::TimeDelta>& entry) {
+ return entry.first == base::PlatformThread::CurrentId();
+ }));
+
+ // Reported times should not decrease.
+ for (const auto& entry : current_thread_times) {
+ auto prev_it = std::find_if(
+ prev_thread_times.begin(), prev_thread_times.end(),
+ [&entry](
+ const std::pair<PlatformThreadId, base::TimeDelta>& prev_entry) {
+ return entry.first == prev_entry.first;
+ });
+
+ if (prev_it != prev_thread_times.end())
+ EXPECT_GE(entry.second, prev_it->second);
+ }
+}
+
#endif // defined(OS_ANDROID) || defined(OS_LINUX)
#if defined(OS_WIN)
diff --git a/chromium/base/process/process_metrics_win.cc b/chromium/base/process/process_metrics_win.cc
index 5cf0943c654..3cd51852a33 100644
--- a/chromium/base/process/process_metrics_win.cc
+++ b/chromium/base/process/process_metrics_win.cc
@@ -162,6 +162,11 @@ TimeDelta ProcessMetrics::GetCumulativeCPUUsage() {
TimeDelta::FromFileTime(user_time);
}
+bool ProcessMetrics::GetCumulativeCPUUsagePerThread(CPUUsagePerThread&) {
+ NOTIMPLEMENTED();
+ return false;
+}
+
bool ProcessMetrics::GetIOCounters(IoCounters* io_counters) const {
if (!process_.IsValid())
return false;
diff --git a/chromium/base/profiler/chrome_unwinder_android.cc b/chromium/base/profiler/chrome_unwinder_android.cc
index 8eea2a8b5c8..4e19111fa46 100644
--- a/chromium/base/profiler/chrome_unwinder_android.cc
+++ b/chromium/base/profiler/chrome_unwinder_android.cc
@@ -13,16 +13,17 @@ namespace base {
ChromeUnwinderAndroid::ChromeUnwinderAndroid(
const ArmCFITable* cfi_table,
- const ModuleCache::Module* chrome_module)
- : cfi_table_(cfi_table), chrome_module_(chrome_module) {
+ uintptr_t chrome_module_base_address)
+ : cfi_table_(cfi_table),
+ chrome_module_base_address_(chrome_module_base_address) {
DCHECK(cfi_table_);
- DCHECK(chrome_module_);
}
ChromeUnwinderAndroid::~ChromeUnwinderAndroid() = default;
bool ChromeUnwinderAndroid::CanUnwindFrom(const Frame& current_frame) const {
- return current_frame.module == chrome_module_;
+ return current_frame.module &&
+ current_frame.module->GetBaseAddress() == chrome_module_base_address_;
}
UnwindResult ChromeUnwinderAndroid::TryUnwind(RegisterContext* thread_context,
diff --git a/chromium/base/profiler/chrome_unwinder_android.h b/chromium/base/profiler/chrome_unwinder_android.h
index b8f67894a5e..6894380c022 100644
--- a/chromium/base/profiler/chrome_unwinder_android.h
+++ b/chromium/base/profiler/chrome_unwinder_android.h
@@ -19,7 +19,7 @@ namespace base {
class BASE_EXPORT ChromeUnwinderAndroid : public Unwinder {
public:
ChromeUnwinderAndroid(const ArmCFITable* cfi_table,
- const ModuleCache::Module* chrome_module);
+ uintptr_t chrome_module_base_address);
~ChromeUnwinderAndroid() override;
ChromeUnwinderAndroid(const ChromeUnwinderAndroid&) = delete;
ChromeUnwinderAndroid& operator=(const ChromeUnwinderAndroid&) = delete;
@@ -43,7 +43,7 @@ class BASE_EXPORT ChromeUnwinderAndroid : public Unwinder {
const ArmCFITable::FrameEntry& entry);
const ArmCFITable* cfi_table_;
- const ModuleCache::Module* const chrome_module_;
+ const uintptr_t chrome_module_base_address_;
};
} // namespace base
diff --git a/chromium/base/profiler/chrome_unwinder_android_unittest.cc b/chromium/base/profiler/chrome_unwinder_android_unittest.cc
index e74a710497e..9e8b1bb4c9f 100644
--- a/chromium/base/profiler/chrome_unwinder_android_unittest.cc
+++ b/chromium/base/profiler/chrome_unwinder_android_unittest.cc
@@ -213,7 +213,8 @@ TEST(ChromeUnwinderAndroidTest, CanUnwindFrom) {
auto non_chrome_module =
std::make_unique<TestModule>(0x2000, 0x500, "OtherModule");
- ChromeUnwinderAndroid unwinder(cfi_table.get(), chrome_module.get());
+ ChromeUnwinderAndroid unwinder(cfi_table.get(),
+ chrome_module->GetBaseAddress());
EXPECT_TRUE(unwinder.CanUnwindFrom({0x1100, chrome_module.get()}));
EXPECT_FALSE(unwinder.CanUnwindFrom({0x2100, non_chrome_module.get()}));
@@ -227,7 +228,8 @@ TEST(ChromeUnwinderAndroidTest, TryUnwind) {
const ModuleCache::Module* chrome_module = AddNativeModule(
&module_cache, std::make_unique<TestModule>(0x1000, 0x500));
- ChromeUnwinderAndroid unwinder(cfi_table.get(), chrome_module);
+ ChromeUnwinderAndroid unwinder(cfi_table.get(),
+ chrome_module->GetBaseAddress());
std::vector<uintptr_t> stack_buffer = {
0xFFFF,
@@ -264,7 +266,8 @@ TEST(ChromeUnwinderAndroidTest, TryUnwindAbort) {
const ModuleCache::Module* chrome_module = AddNativeModule(
&module_cache, std::make_unique<TestModule>(0x1000, 0x500));
- ChromeUnwinderAndroid unwinder(cfi_table.get(), chrome_module);
+ ChromeUnwinderAndroid unwinder(cfi_table.get(),
+ chrome_module->GetBaseAddress());
std::vector<uintptr_t> stack_buffer = {
0xFFFF,
@@ -295,7 +298,8 @@ TEST(ChromeUnwinderAndroidTest, TryUnwindNoData) {
const ModuleCache::Module* chrome_module = AddNativeModule(
&module_cache, std::make_unique<TestModule>(0x1000, 0x500));
- ChromeUnwinderAndroid unwinder(cfi_table.get(), chrome_module);
+ ChromeUnwinderAndroid unwinder(cfi_table.get(),
+ chrome_module->GetBaseAddress());
std::vector<uintptr_t> stack_buffer = {0xFFFF};
diff --git a/chromium/base/profiler/metadata_recorder.cc b/chromium/base/profiler/metadata_recorder.cc
index a6e3cc22dde..f140c2547f6 100644
--- a/chromium/base/profiler/metadata_recorder.cc
+++ b/chromium/base/profiler/metadata_recorder.cc
@@ -113,15 +113,10 @@ MetadataRecorder::MetadataProvider::~MetadataProvider() = default;
size_t MetadataRecorder::MetadataProvider::GetItems(
ItemArray* const items) const {
- // Assertion is only necessary so that thread annotations recognize that
- // |read_lock_| is acquired.
- metadata_recorder_->read_lock_.AssertAcquired();
return metadata_recorder_->GetItems(items);
}
size_t MetadataRecorder::GetItems(ItemArray* const items) const {
- read_lock_.AssertAcquired();
-
// If a writer adds a new item after this load, it will be ignored. We do
// this instead of calling item_slots_used_.load() explicitly in the for loop
// bounds checking, which would be expensive.
diff --git a/chromium/base/profiler/metadata_recorder.h b/chromium/base/profiler/metadata_recorder.h
index 6e120dd40b7..86296348463 100644
--- a/chromium/base/profiler/metadata_recorder.h
+++ b/chromium/base/profiler/metadata_recorder.h
@@ -187,8 +187,10 @@ class BASE_EXPORT MetadataRecorder {
// Retrieves the first |available_slots| items in the metadata recorder and
// copies them into |items|, returning the number of metadata items that
// were copied. To ensure that all items can be copied, |available slots|
- // should be greater than or equal to |MAX_METADATA_COUNT|.
- size_t GetItems(ItemArray* const items) const;
+ // should be greater than or equal to |MAX_METADATA_COUNT|. Requires
+ // NO_THREAD_SAFETY_ANALYSIS because clang's analyzer doesn't understand the
+ // cross-class locking used in this class' implementation.
+ size_t GetItems(ItemArray* const items) const NO_THREAD_SAFETY_ANALYSIS;
private:
const MetadataRecorder* const metadata_recorder_;
diff --git a/chromium/base/profiler/native_unwinder_android.cc b/chromium/base/profiler/native_unwinder_android.cc
index fa06494aee2..c76a81959d4 100644
--- a/chromium/base/profiler/native_unwinder_android.cc
+++ b/chromium/base/profiler/native_unwinder_android.cc
@@ -4,21 +4,22 @@
#include "base/profiler/native_unwinder_android.h"
+#include <sys/mman.h>
+
#include <string>
#include <vector>
-#include <sys/mman.h>
-
#include "third_party/libunwindstack/src/libunwindstack/include/unwindstack/Elf.h"
-#include "third_party/libunwindstack/src/libunwindstack/include/unwindstack/Maps.h"
#include "third_party/libunwindstack/src/libunwindstack/include/unwindstack/Memory.h"
#include "third_party/libunwindstack/src/libunwindstack/include/unwindstack/Regs.h"
#include "base/memory/ptr_util.h"
+#include "base/notreached.h"
#include "base/profiler/module_cache.h"
#include "base/profiler/native_unwinder.h"
#include "base/profiler/profile_builder.h"
-#include "base/profiler/unwindstack_internal_android.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/strings/string_piece.h"
#include "build/build_config.h"
#if defined(ARCH_CPU_ARM_FAMILY) && defined(ARCH_CPU_32_BITS)
@@ -32,20 +33,46 @@
namespace base {
namespace {
+// Returns the hex string build id given the binary build id from MapInfo.
+// Returns the empty string if no build id is present.
+//
+// Build IDs follow a cross-platform format consisting of two fields
+// concatenated together:
+// - the module's unique id encoded as a hex string, and
+// - the age suffix for incremental builds.
+//
+// On POSIX, the unique id comes from the ELF binary's .note.gnu.build-id
+// section. The age field is always 0.
+std::string EncodeBuildID(StringPiece map_info_build_id) {
+ if (map_info_build_id.empty())
+ return std::string();
+
+ return HexEncode(map_info_build_id.data(), map_info_build_id.size()) + "0";
+}
+
+// We assume this is a file-backed region if the name looks like an absolute
+// path, and return the basename. Otherwise we assume the region is not
+// associated with a file and return the full name string.
+FilePath CreateDebugBasename(StringPiece map_info_name) {
+ const FilePath path = FilePath(map_info_name);
+ return !map_info_name.empty() && map_info_name[0] == '/' ? path.BaseName()
+ : path;
+}
+
class AndroidModule : public ModuleCache::Module {
public:
AndroidModule(unwindstack::MapInfo* map_info)
: start_(map_info->start),
size_(map_info->end - map_info->start),
- build_id_(map_info->GetBuildID()),
- name_(map_info->name) {}
+ build_id_(EncodeBuildID(map_info->GetBuildID())),
+ debug_basename_(CreateDebugBasename(map_info->name)) {}
~AndroidModule() override = default;
uintptr_t GetBaseAddress() const override { return start_; }
std::string GetId() const override { return build_id_; }
- FilePath GetDebugBasename() const override { return FilePath(name_); }
+ FilePath GetDebugBasename() const override { return debug_basename_; }
// Gets the size of the module.
size_t GetSize() const override { return size_; }
@@ -56,7 +83,7 @@ class AndroidModule : public ModuleCache::Module {
const uintptr_t start_;
const size_t size_;
const std::string build_id_;
- const std::string name_;
+ const FilePath debug_basename_;
};
std::unique_ptr<unwindstack::Regs> CreateFromRegisterContext(
@@ -88,6 +115,23 @@ void CopyToRegisterContext(unwindstack::Regs* regs,
} // namespace
+UnwindStackMemoryAndroid::UnwindStackMemoryAndroid(uintptr_t stack_ptr,
+ uintptr_t stack_top)
+ : stack_ptr_(stack_ptr), stack_top_(stack_top) {
+ DCHECK_LE(stack_ptr_, stack_top_);
+}
+
+UnwindStackMemoryAndroid::~UnwindStackMemoryAndroid() = default;
+
+size_t UnwindStackMemoryAndroid::Read(uint64_t addr, void* dst, size_t size) {
+ if (addr < stack_ptr_)
+ return 0;
+ if (size >= stack_top_ || addr > stack_top_ - size)
+ return 0;
+ memcpy(dst, reinterpret_cast<void*>(addr), size);
+ return size;
+}
+
// static
std::unique_ptr<unwindstack::Maps> NativeUnwinderAndroid::CreateMaps() {
auto maps = std::make_unique<unwindstack::LocalMaps>();
@@ -99,19 +143,7 @@ std::unique_ptr<unwindstack::Maps> NativeUnwinderAndroid::CreateMaps() {
// static
std::unique_ptr<unwindstack::Memory>
NativeUnwinderAndroid::CreateProcessMemory() {
- return std::make_unique<unwindstack::MemoryLocal>();
-}
-
-void NativeUnwinderAndroid::AddInitialModulesFromMaps(
- const unwindstack::Maps& memory_regions_map,
- ModuleCache* module_cache) {
- for (const auto& region : memory_regions_map) {
- // Only add executable regions.
- if (!(region->flags & PROT_EXEC))
- continue;
- module_cache->AddCustomNativeModule(
- std::make_unique<AndroidModule>(region.get()));
- }
+ return unwindstack::Memory::CreateLocalProcessMemory();
}
NativeUnwinderAndroid::NativeUnwinderAndroid(
@@ -160,7 +192,8 @@ UnwindResult NativeUnwinderAndroid::TryUnwind(RegisterContext* thread_context,
uintptr_t rel_pc = elf->GetRelPc(cur_pc, map_info);
bool finished = false;
bool stepped =
- elf->Step(rel_pc, rel_pc, regs.get(), &stack_memory, &finished);
+ elf->StepIfSignalHandler(rel_pc, regs.get(), &stack_memory) ||
+ elf->Step(rel_pc, regs.get(), &stack_memory, &finished);
if (stepped && finished)
return UnwindResult::COMPLETED;
@@ -207,6 +240,19 @@ UnwindResult NativeUnwinderAndroid::TryUnwind(RegisterContext* thread_context,
return UnwindResult::UNRECOGNIZED_FRAME;
}
+// static
+void NativeUnwinderAndroid::AddInitialModulesFromMaps(
+ const unwindstack::Maps& memory_regions_map,
+ ModuleCache* module_cache) {
+ for (const auto& region : memory_regions_map) {
+ // Only add executable regions.
+ if (!(region->flags & PROT_EXEC))
+ continue;
+ module_cache->AddCustomNativeModule(
+ std::make_unique<AndroidModule>(region.get()));
+ }
+}
+
void NativeUnwinderAndroid::EmitDexFrame(uintptr_t dex_pc,
ModuleCache* module_cache,
std::vector<Frame>* stack) const {
diff --git a/chromium/base/profiler/native_unwinder_android.h b/chromium/base/profiler/native_unwinder_android.h
index 926a581b32a..62774e6adf5 100644
--- a/chromium/base/profiler/native_unwinder_android.h
+++ b/chromium/base/profiler/native_unwinder_android.h
@@ -6,14 +6,26 @@
#define BASE_PROFILER_NATIVE_UNWINDER_ANDROID_H_
#include "base/profiler/unwinder.h"
-
-namespace unwindstack {
-class Maps;
-class Memory;
-} // namespace unwindstack
+#include "third_party/libunwindstack/src/libunwindstack/include/unwindstack/Maps.h"
+#include "third_party/libunwindstack/src/libunwindstack/include/unwindstack/Memory.h"
namespace base {
+// Implementation of unwindstack::Memory that restricts memory access to a stack
+// buffer, used by NativeUnwinderAndroid. While unwinding, only memory accesses
+// within the stack should be performed to restore registers.
+class UnwindStackMemoryAndroid : public unwindstack::Memory {
+ public:
+ UnwindStackMemoryAndroid(uintptr_t stack_ptr, uintptr_t stack_top);
+ ~UnwindStackMemoryAndroid() override;
+
+ size_t Read(uint64_t addr, void* dst, size_t size) override;
+
+ private:
+ const uintptr_t stack_ptr_;
+ const uintptr_t stack_top_;
+};
+
// Native unwinder implementation for Android, using libunwindstack.
class NativeUnwinderAndroid : public Unwinder {
public:
@@ -22,10 +34,6 @@ class NativeUnwinderAndroid : public Unwinder {
// all profiles in a process.
static std::unique_ptr<unwindstack::Maps> CreateMaps();
static std::unique_ptr<unwindstack::Memory> CreateProcessMemory();
- // Adds modules found from executable loaded memory regions to |module_cache|.
- static void AddInitialModulesFromMaps(
- const unwindstack::Maps& memory_regions_map,
- ModuleCache* module_cache);
// |exclude_module_with_base_address| is used to exclude a specific module
// and let another unwinder take control. TryUnwind() will exit with
@@ -33,7 +41,7 @@ class NativeUnwinderAndroid : public Unwinder {
// encountered in that module.
NativeUnwinderAndroid(unwindstack::Maps* memory_regions_map,
unwindstack::Memory* process_memory,
- uintptr_t exclude_module_with_base_address = 0);
+ uintptr_t exclude_module_with_base_address);
~NativeUnwinderAndroid() override;
NativeUnwinderAndroid(const NativeUnwinderAndroid&) = delete;
@@ -47,6 +55,12 @@ class NativeUnwinderAndroid : public Unwinder {
ModuleCache* module_cache,
std::vector<Frame>* stack) const override;
+ // Adds modules found from executable loaded memory regions to |module_cache|.
+ // Public for test access.
+ static void AddInitialModulesFromMaps(
+ const unwindstack::Maps& memory_regions_map,
+ ModuleCache* module_cache);
+
private:
void EmitDexFrame(uintptr_t dex_pc,
ModuleCache* module_cache,
diff --git a/chromium/base/profiler/native_unwinder_android_unittest.cc b/chromium/base/profiler/native_unwinder_android_unittest.cc
index 236ab3ae6fc..c82a6e48f4c 100644
--- a/chromium/base/profiler/native_unwinder_android_unittest.cc
+++ b/chromium/base/profiler/native_unwinder_android_unittest.cc
@@ -4,7 +4,11 @@
#include "base/profiler/native_unwinder_android.h"
+#include <sys/mman.h>
+
+#include <stdio.h> // For printf address.
#include <string.h>
+#include <iterator>
#include "base/android/build_info.h"
#include "base/android/jni_android.h"
@@ -13,9 +17,9 @@
#include "base/profiler/register_context.h"
#include "base/profiler/stack_buffer.h"
#include "base/profiler/stack_copier_signal.h"
+#include "base/profiler/stack_sampler.h"
#include "base/profiler/stack_sampling_profiler_test_util.h"
#include "base/profiler/thread_delegate_posix.h"
-#include "base/profiler/unwindstack_internal_android.h"
#include "base/test/bind_test_util.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -24,6 +28,27 @@ extern char __executable_start;
namespace base {
+namespace {
+
+// Add a MapInfo with the provided values to |maps|.
+void AddMapInfo(uint64_t start,
+ uint64_t end,
+ uint64_t offset,
+ uint64_t flags,
+ const std::string& name,
+ const std::string& binary_build_id,
+ unwindstack::Maps& maps) {
+ maps.Add(start, end, offset, flags, name, /* load_bias = */ 0u);
+ unwindstack::MapInfo& map_info = **std::prev(maps.end());
+ // Yes, this *is* how MapInfo wants this field set. The string is deleted in
+ // its destructor.
+ map_info.build_id =
+ reinterpret_cast<uintptr_t>(new std::string(binary_build_id));
+ map_info.elf_offset = map_info.offset;
+}
+
+} // namespace
+
class TestStackCopierDelegate : public StackCopier::Delegate {
public:
void OnStackCopy() override {}
@@ -362,4 +387,153 @@ TEST(NativeUnwinderAndroidTest, UnwindStackMemoryTest) {
check_read_succeeds(end - 1, 1);
}
+// Checks the debug basename for a module with a path name.
+TEST(NativeUnwinderAndroidTest, ModuleDebugBasenameForPath) {
+ unwindstack::Maps maps;
+
+ AddMapInfo(0x1000u, 0x2000u, 0u, PROT_READ | PROT_EXEC, "/usr/lib/foo.so",
+ {0xAA}, maps);
+
+ ModuleCache module_cache;
+ NativeUnwinderAndroid::AddInitialModulesFromMaps(maps, &module_cache);
+
+ std::vector<const ModuleCache::Module*> modules = module_cache.GetModules();
+
+ ASSERT_EQ(1u, modules.size());
+ EXPECT_EQ("foo.so", modules[0]->GetDebugBasename().value());
+}
+
+// Checks the debug basename is the whole name for a module with a non-path
+// name.
+TEST(NativeUnwinderAndroidTest, ModuleDebugBasenameForNonPath) {
+ unwindstack::Maps maps;
+
+ AddMapInfo(0x1000u, 0x2000u, 0u, PROT_READ | PROT_EXEC, "[foo / bar]", {0xAA},
+ maps);
+
+ ModuleCache module_cache;
+ NativeUnwinderAndroid::AddInitialModulesFromMaps(maps, &module_cache);
+
+ std::vector<const ModuleCache::Module*> modules = module_cache.GetModules();
+
+ ASSERT_EQ(1u, modules.size());
+ EXPECT_EQ("[foo / bar]", modules[0]->GetDebugBasename().value());
+}
+
+// Checks that the specified build id is returned.
+TEST(NativeUnwinderAndroidTest, ModuleId) {
+ unwindstack::Maps maps;
+
+ AddMapInfo(0x1000u, 0x2000u, 0u, PROT_READ | PROT_EXEC, "/lib/foo.so",
+ {0x12, 0x34, 0x56, 0x78, 0x90, 0xAB, 0xCD, 0xEF}, maps);
+
+ ModuleCache module_cache;
+ NativeUnwinderAndroid::AddInitialModulesFromMaps(maps, &module_cache);
+
+ std::vector<const ModuleCache::Module*> modules = module_cache.GetModules();
+
+ ASSERT_EQ(1u, modules.size());
+ // The id should have a '0' age field appended.
+ EXPECT_EQ("1234567890ABCDEF0", modules[0]->GetId());
+}
+
+// Checks that an empty module id has no age field appended.
+TEST(NativeUnwinderAndroidTest, EmptyModuleId) {
+ unwindstack::Maps maps;
+
+ AddMapInfo(0x1000u, 0x2000u, 0u, PROT_READ | PROT_EXEC, "/lib/foo.so",
+ std::string(), maps);
+
+ ModuleCache module_cache;
+ NativeUnwinderAndroid::AddInitialModulesFromMaps(maps, &module_cache);
+
+ std::vector<const ModuleCache::Module*> modules = module_cache.GetModules();
+
+ ASSERT_EQ(1u, modules.size());
+ EXPECT_EQ(std::string(), modules[0]->GetId());
+}
+
+// ModuleCache::GetModuleForAddress() is not implemented for 64-bit arm.
+#if defined(ARCH_CPU_ARM64)
+#define MAYBE_ModuleState_SystemLibrary DISABLED_ModuleState_SystemLibrary
+#else
+#define MAYBE_ModuleState_SystemLibrary ModuleState_SystemLibrary
+#endif
+// Checks that the module state created by the unwinder is consistent with the
+// state created by the ModuleCache. Checks the module for a system library.
+TEST(NativeUnwinderAndroidTest, MAYBE_ModuleState_SystemLibrary) {
+ ModuleCache unwinder_module_cache;
+ NativeUnwinderAndroid::AddInitialModulesFromMaps(
+ *NativeUnwinderAndroid::CreateMaps(), &unwinder_module_cache);
+
+ const uintptr_t c_library_function_address =
+ reinterpret_cast<uintptr_t>(&printf);
+
+ std::vector<const ModuleCache::Module*> unwinder_modules =
+ unwinder_module_cache.GetModules();
+ const auto unwinder_module_loc = std::find_if(
+ unwinder_modules.begin(), unwinder_modules.end(),
+ [c_library_function_address](const ModuleCache::Module* module) {
+ return c_library_function_address >= module->GetBaseAddress() &&
+ c_library_function_address <
+ module->GetBaseAddress() + module->GetSize();
+ });
+ ASSERT_NE(unwinder_modules.end(), unwinder_module_loc);
+ const ModuleCache::Module* unwinder_module = *unwinder_module_loc;
+
+ ModuleCache reference_module_cache;
+ const ModuleCache::Module* reference_module =
+ reference_module_cache.GetModuleForAddress(c_library_function_address);
+ ASSERT_NE(nullptr, reference_module);
+
+ // TODO(https://crbug.com/1004855): Fix base address and size discrepancies
+ // and add checks.
+ EXPECT_EQ(reference_module->GetId(), unwinder_module->GetId());
+ EXPECT_EQ(reference_module->GetDebugBasename(),
+ unwinder_module->GetDebugBasename());
+}
+
+// ModuleCache::GetModuleForAddress() is not implemented for 64-bit arm.
+#if defined(ARCH_CPU_ARM64)
+#define MAYBE_ModuleState_ChromeLibrary DISABLED_ModuleState_ChromeLibrary
+#else
+#define MAYBE_ModuleState_ChromeLibrary ModuleState_ChromeLibrary
+#endif
+// Checks that the module state created by the unwinder is consistent with the
+// state created by the ModuleCache. Checks the module for a Chrome-compiled
+// library.
+TEST(NativeUnwinderAndroidTest, MAYBE_ModuleState_ChromeLibrary) {
+ ModuleCache unwinder_module_cache;
+ NativeUnwinderAndroid::AddInitialModulesFromMaps(
+ *NativeUnwinderAndroid::CreateMaps(), &unwinder_module_cache);
+
+ const uintptr_t chrome_function_address =
+ reinterpret_cast<uintptr_t>(&CaptureScenario);
+
+ std::vector<const ModuleCache::Module*> unwinder_modules =
+ unwinder_module_cache.GetModules();
+ const auto unwinder_module_loc = std::find_if(
+ unwinder_modules.begin(), unwinder_modules.end(),
+ [chrome_function_address](const ModuleCache::Module* module) {
+ return chrome_function_address >= module->GetBaseAddress() &&
+ chrome_function_address <
+ module->GetBaseAddress() + module->GetSize();
+ });
+ ASSERT_NE(unwinder_modules.end(), unwinder_module_loc);
+ const ModuleCache::Module* unwinder_module = *unwinder_module_loc;
+
+ ModuleCache reference_module_cache;
+ const ModuleCache::Module* reference_module =
+ reference_module_cache.GetModuleForAddress(chrome_function_address);
+ ASSERT_NE(nullptr, reference_module);
+
+ EXPECT_EQ(reference_module->GetBaseAddress(),
+ unwinder_module->GetBaseAddress());
+ EXPECT_NE("", unwinder_module->GetId());
+ EXPECT_EQ(reference_module->GetId(), unwinder_module->GetId());
+ EXPECT_EQ(reference_module->GetDebugBasename(),
+ unwinder_module->GetDebugBasename());
+ // TODO(https://crbug.com/1004855): Fix size discrepancy and add check.
+}
+
} // namespace base
diff --git a/chromium/base/profiler/stack_copier_signal.cc b/chromium/base/profiler/stack_copier_signal.cc
index bbef65bf520..5a7d8b97806 100644
--- a/chromium/base/profiler/stack_copier_signal.cc
+++ b/chromium/base/profiler/stack_copier_signal.cc
@@ -11,10 +11,11 @@
#include <atomic>
+#include "base/notreached.h"
#include "base/profiler/register_context.h"
#include "base/profiler/stack_buffer.h"
#include "base/profiler/suspendable_thread_delegate.h"
-#include "base/trace_event/trace_event.h"
+#include "base/trace_event/base_tracing.h"
#include "build/build_config.h"
namespace base {
diff --git a/chromium/base/profiler/stack_copier_unittest.cc b/chromium/base/profiler/stack_copier_unittest.cc
index 7119af446db..4eb6e7d8486 100644
--- a/chromium/base/profiler/stack_copier_unittest.cc
+++ b/chromium/base/profiler/stack_copier_unittest.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <cstring>
#include <memory>
#include <numeric>
diff --git a/chromium/base/profiler/stack_sampler.h b/chromium/base/profiler/stack_sampler.h
index 9fc91051e85..9b4dfd971db 100644
--- a/chromium/base/profiler/stack_sampler.h
+++ b/chromium/base/profiler/stack_sampler.h
@@ -6,6 +6,7 @@
#define BASE_PROFILER_STACK_SAMPLER_H_
#include <memory>
+#include <vector>
#include "base/base_export.h"
#include "base/macros.h"
@@ -28,12 +29,14 @@ class BASE_EXPORT StackSampler {
virtual ~StackSampler();
// Creates a stack sampler that records samples for thread with
- // |thread_token|. Returns null if this platform does not support stack
- // sampling.
+ // |thread_token|. Unwinders in |unwinders| must be stored in increasing
+ // priority to guide unwind attempts. Only the unwinder with the lowest
+ // priority is allowed to return with UnwindResult::COMPLETED. Returns null if
+ // this platform does not support stack sampling.
static std::unique_ptr<StackSampler> Create(
SamplingProfilerThreadToken thread_token,
ModuleCache* module_cache,
- std::unique_ptr<Unwinder> native_unwinder,
+ std::vector<std::unique_ptr<Unwinder>> core_unwinders,
StackSamplerTestDelegate* test_delegate);
// Gets the required size of the stack buffer.
@@ -47,9 +50,8 @@ class BASE_EXPORT StackSampler {
// thread being sampled).
// Adds an auxiliary unwinder to handle additional, non-native-code unwind
- // scenarios. When attempting to unwind, the relative priority of auxiliary
- // unwinders is the inverse of the order of insertion, and the native
- // unwinder is given the lowest priority
+ // scenarios. Unwinders must be inserted in increasing priority, following
+ // |unwinders| provided in Create(), to guide unwind attempts.
virtual void AddAuxUnwinder(std::unique_ptr<Unwinder> unwinder) = 0;
// Records a set of frames and returns them.
diff --git a/chromium/base/profiler/stack_sampler_android.cc b/chromium/base/profiler/stack_sampler_android.cc
index 8414de9e3f4..44df18b9a31 100644
--- a/chromium/base/profiler/stack_sampler_android.cc
+++ b/chromium/base/profiler/stack_sampler_android.cc
@@ -18,13 +18,15 @@ namespace base {
std::unique_ptr<StackSampler> StackSampler::Create(
SamplingProfilerThreadToken thread_token,
ModuleCache* module_cache,
- std::unique_ptr<Unwinder> native_unwinder,
+ std::vector<std::unique_ptr<Unwinder>> core_unwinders,
StackSamplerTestDelegate* test_delegate) {
- DCHECK(native_unwinder);
+ // |core_unwinders| must contain NativeUnwinderAndroid and
+ // ChromeUnwinderAndroid, respectively.
+ DCHECK_EQ(2U, core_unwinders.size());
return std::make_unique<StackSamplerImpl>(
std::make_unique<StackCopierSignal>(
std::make_unique<ThreadDelegatePosix>(thread_token)),
- std::move(native_unwinder), module_cache, test_delegate);
+ std::move(core_unwinders), module_cache, test_delegate);
}
size_t StackSampler::GetStackBufferSize() {
diff --git a/chromium/base/profiler/stack_sampler_impl.cc b/chromium/base/profiler/stack_sampler_impl.cc
index 9903b33feea..7b28a6036e0 100644
--- a/chromium/base/profiler/stack_sampler_impl.cc
+++ b/chromium/base/profiler/stack_sampler_impl.cc
@@ -4,11 +4,11 @@
#include "base/profiler/stack_sampler_impl.h"
+#include <iterator>
#include <utility>
#include "base/check.h"
#include "base/compiler_specific.h"
-#include "base/logging.h"
#include "base/profiler/metadata_recorder.h"
#include "base/profiler/profile_builder.h"
#include "base/profiler/sample_metadata.h"
@@ -64,15 +64,22 @@ class StackCopierDelegate : public StackCopier::Delegate {
} // namespace
-StackSamplerImpl::StackSamplerImpl(std::unique_ptr<StackCopier> stack_copier,
- std::unique_ptr<Unwinder> native_unwinder,
- ModuleCache* module_cache,
- StackSamplerTestDelegate* test_delegate)
+// |core_unwinders| is iterated backward since |core_unwinders| is passed in
+// increasing priority order while |unwinders_| is stored in decreasing priority
+// order.
+StackSamplerImpl::StackSamplerImpl(
+ std::unique_ptr<StackCopier> stack_copier,
+ std::vector<std::unique_ptr<Unwinder>> core_unwinders,
+ ModuleCache* module_cache,
+ StackSamplerTestDelegate* test_delegate)
: stack_copier_(std::move(stack_copier)),
+ unwinders_(std::make_move_iterator(core_unwinders.rbegin()),
+ std::make_move_iterator(core_unwinders.rend())),
module_cache_(module_cache),
test_delegate_(test_delegate) {
- DCHECK(native_unwinder);
- unwinders_.push_front(std::move(native_unwinder));
+ DCHECK(!unwinders_.empty());
+ for (const auto& unwinder : unwinders_)
+ unwinder->AddInitialModules(module_cache_);
}
StackSamplerImpl::~StackSamplerImpl() = default;
@@ -89,6 +96,8 @@ void StackSamplerImpl::RecordStackFrames(StackBuffer* stack_buffer,
RegisterContext thread_context;
uintptr_t stack_top;
TimeTicks timestamp;
+
+ bool copy_stack_succeeded;
{
// Make this scope as small as possible because |metadata_provider| is
// holding a lock.
@@ -96,11 +105,15 @@ void StackSamplerImpl::RecordStackFrames(StackBuffer* stack_buffer,
GetSampleMetadataRecorder());
StackCopierDelegate delegate(&unwinders_, profile_builder,
&metadata_provider);
- bool success = stack_copier_->CopyStack(
+ copy_stack_succeeded = stack_copier_->CopyStack(
stack_buffer, &stack_top, &timestamp, &thread_context, &delegate);
- if (!success)
- return;
}
+ if (!copy_stack_succeeded) {
+ profile_builder->OnSampleCompleted(
+ {}, timestamp.is_null() ? TimeTicks::Now() : timestamp);
+ return;
+ }
+
for (const auto& unwinder : unwinders_)
unwinder->UpdateModules(module_cache_);
@@ -155,8 +168,8 @@ std::vector<Frame> StackSamplerImpl::WalkStack(
result = unwinder->get()->TryUnwind(thread_context, stack_top, module_cache,
&stack);
- // The native unwinder should be the only one that returns COMPLETED
- // since the stack starts in native code.
+ // The unwinder with the lowest priority should be the only one that returns
+ // COMPLETED since the stack starts in native code.
DCHECK(result != UnwindResult::COMPLETED ||
unwinder->get() == unwinders.back().get());
} while (result != UnwindResult::ABORTED &&
diff --git a/chromium/base/profiler/stack_sampler_impl.h b/chromium/base/profiler/stack_sampler_impl.h
index cceee652f12..c7944303254 100644
--- a/chromium/base/profiler/stack_sampler_impl.h
+++ b/chromium/base/profiler/stack_sampler_impl.h
@@ -23,7 +23,7 @@ class Unwinder;
class BASE_EXPORT StackSamplerImpl : public StackSampler {
public:
StackSamplerImpl(std::unique_ptr<StackCopier> stack_copier,
- std::unique_ptr<Unwinder> native_unwinder,
+ std::vector<std::unique_ptr<Unwinder>> core_unwinders,
ModuleCache* module_cache,
StackSamplerTestDelegate* test_delegate = nullptr);
~StackSamplerImpl() override;
diff --git a/chromium/base/profiler/stack_sampler_impl_unittest.cc b/chromium/base/profiler/stack_sampler_impl_unittest.cc
index e3cd67ce59b..48b12c299ec 100644
--- a/chromium/base/profiler/stack_sampler_impl_unittest.cc
+++ b/chromium/base/profiler/stack_sampler_impl_unittest.cc
@@ -268,14 +268,21 @@ class FakeTestUnwinder : public Unwinder {
std::vector<Result> results_;
};
-base::circular_deque<std::unique_ptr<Unwinder>> MakeUnwinderList(
+std::vector<std::unique_ptr<Unwinder>> MakeUnwinderVector(
+ std::unique_ptr<Unwinder> unwinder) {
+ std::vector<std::unique_ptr<Unwinder>> unwinders;
+ unwinders.push_back(std::move(unwinder));
+ return unwinders;
+}
+
+base::circular_deque<std::unique_ptr<Unwinder>> MakeUnwinderCircularDeque(
std::unique_ptr<Unwinder> native_unwinder,
std::unique_ptr<Unwinder> aux_unwinder) {
base::circular_deque<std::unique_ptr<Unwinder>> unwinders;
- if (aux_unwinder)
- unwinders.push_back(std::move(aux_unwinder));
if (native_unwinder)
- unwinders.push_back(std::move(native_unwinder));
+ unwinders.push_front(std::move(native_unwinder));
+ if (aux_unwinder)
+ unwinders.push_front(std::move(aux_unwinder));
return unwinders;
}
@@ -294,7 +301,9 @@ TEST(StackSamplerImplTest, MAYBE_CopyStack) {
std::vector<uintptr_t> stack_copy;
StackSamplerImpl stack_sampler_impl(
std::make_unique<TestStackCopier>(stack),
- std::make_unique<TestUnwinder>(stack.size(), &stack_copy), &module_cache);
+ MakeUnwinderVector(
+ std::make_unique<TestUnwinder>(stack.size(), &stack_copy)),
+ &module_cache);
std::unique_ptr<StackBuffer> stack_buffer =
std::make_unique<StackBuffer>(stack.size() * sizeof(uintptr_t));
@@ -312,7 +321,9 @@ TEST(StackSamplerImplTest, CopyStackTimestamp) {
TimeTicks timestamp = TimeTicks::UnixEpoch();
StackSamplerImpl stack_sampler_impl(
std::make_unique<TestStackCopier>(stack, timestamp),
- std::make_unique<TestUnwinder>(stack.size(), &stack_copy), &module_cache);
+ MakeUnwinderVector(
+ std::make_unique<TestUnwinder>(stack.size(), &stack_copy)),
+ &module_cache);
std::unique_ptr<StackBuffer> stack_buffer =
std::make_unique<StackBuffer>(stack.size() * sizeof(uintptr_t));
@@ -330,7 +341,7 @@ TEST(StackSamplerImplTest, UnwinderInvokedWhileRecordingStackFrames) {
TestProfileBuilder profile_builder(&module_cache);
StackSamplerImpl stack_sampler_impl(
std::make_unique<DelegateInvokingStackCopier>(),
- std::move(owned_unwinder), &module_cache);
+ MakeUnwinderVector(std::move(owned_unwinder)), &module_cache);
stack_sampler_impl.RecordStackFrames(stack_buffer.get(), &profile_builder);
@@ -344,7 +355,8 @@ TEST(StackSamplerImplTest, AuxUnwinderInvokedWhileRecordingStackFrames) {
TestProfileBuilder profile_builder(&module_cache);
StackSamplerImpl stack_sampler_impl(
std::make_unique<DelegateInvokingStackCopier>(),
- std::make_unique<CallRecordingUnwinder>(), &module_cache);
+ MakeUnwinderVector(std::make_unique<CallRecordingUnwinder>()),
+ &module_cache);
auto owned_aux_unwinder = std::make_unique<CallRecordingUnwinder>();
CallRecordingUnwinder* aux_unwinder = owned_aux_unwinder.get();
@@ -367,7 +379,7 @@ TEST(StackSamplerImplTest, WalkStack_Completed) {
std::vector<Frame> stack = StackSamplerImpl::WalkStackForTesting(
&module_cache, &thread_context, 0u,
- MakeUnwinderList(std::move(native_unwinder), nullptr));
+ MakeUnwinderCircularDeque(std::move(native_unwinder), nullptr));
ASSERT_EQ(2u, stack.size());
EXPECT_EQ(1u, stack[1].instruction_pointer);
@@ -384,7 +396,7 @@ TEST(StackSamplerImplTest, WalkStack_Aborted) {
std::vector<Frame> stack = StackSamplerImpl::WalkStackForTesting(
&module_cache, &thread_context, 0u,
- MakeUnwinderList(std::move(native_unwinder), nullptr));
+ MakeUnwinderCircularDeque(std::move(native_unwinder), nullptr));
ASSERT_EQ(2u, stack.size());
EXPECT_EQ(1u, stack[1].instruction_pointer);
@@ -400,7 +412,7 @@ TEST(StackSamplerImplTest, WalkStack_NotUnwound) {
std::vector<Frame> stack = StackSamplerImpl::WalkStackForTesting(
&module_cache, &thread_context, 0u,
- MakeUnwinderList(std::move(native_unwinder), nullptr));
+ MakeUnwinderCircularDeque(std::move(native_unwinder), nullptr));
ASSERT_EQ(1u, stack.size());
}
@@ -421,7 +433,7 @@ TEST(StackSamplerImplTest, WalkStack_AuxUnwind) {
WrapUnique(new FakeTestUnwinder({{UnwindResult::ABORTED, {1u}}}));
std::vector<Frame> stack = StackSamplerImpl::WalkStackForTesting(
&module_cache, &thread_context, 0u,
- MakeUnwinderList(nullptr, std::move(aux_unwinder)));
+ MakeUnwinderCircularDeque(nullptr, std::move(aux_unwinder)));
ASSERT_EQ(2u, stack.size());
EXPECT_EQ(GetTestInstructionPointer(), stack[0].instruction_pointer);
@@ -447,7 +459,8 @@ TEST(StackSamplerImplTest, WalkStack_AuxThenNative) {
std::vector<Frame> stack = StackSamplerImpl::WalkStackForTesting(
&module_cache, &thread_context, 0u,
- MakeUnwinderList(std::move(native_unwinder), std::move(aux_unwinder)));
+ MakeUnwinderCircularDeque(std::move(native_unwinder),
+ std::move(aux_unwinder)));
ASSERT_EQ(3u, stack.size());
EXPECT_EQ(0u, stack[0].instruction_pointer);
@@ -477,7 +490,8 @@ TEST(StackSamplerImplTest, WalkStack_NativeThenAux) {
std::vector<Frame> stack = StackSamplerImpl::WalkStackForTesting(
&module_cache, &thread_context, 0u,
- MakeUnwinderList(std::move(native_unwinder), std::move(aux_unwinder)));
+ MakeUnwinderCircularDeque(std::move(native_unwinder),
+ std::move(aux_unwinder)));
ASSERT_EQ(4u, stack.size());
EXPECT_EQ(0u, stack[0].instruction_pointer);
diff --git a/chromium/base/profiler/stack_sampler_ios.cc b/chromium/base/profiler/stack_sampler_ios.cc
index 82ad01f3028..99e68cecc2d 100644
--- a/chromium/base/profiler/stack_sampler_ios.cc
+++ b/chromium/base/profiler/stack_sampler_ios.cc
@@ -13,7 +13,7 @@ namespace base {
std::unique_ptr<StackSampler> StackSampler::Create(
SamplingProfilerThreadToken thread_token,
ModuleCache* module_cache,
- std::unique_ptr<Unwinder> native_unwinder,
+ std::vector<std::unique_ptr<Unwinder>> core_unwinders,
StackSamplerTestDelegate* test_delegate) {
return nullptr;
}
diff --git a/chromium/base/profiler/stack_sampler_mac.cc b/chromium/base/profiler/stack_sampler_mac.cc
index 109f6425835..e88dfc57904 100644
--- a/chromium/base/profiler/stack_sampler_mac.cc
+++ b/chromium/base/profiler/stack_sampler_mac.cc
@@ -16,14 +16,14 @@ namespace base {
std::unique_ptr<StackSampler> StackSampler::Create(
SamplingProfilerThreadToken thread_token,
ModuleCache* module_cache,
- std::unique_ptr<Unwinder> native_unwinder,
+ std::vector<std::unique_ptr<Unwinder>> core_unwinders,
StackSamplerTestDelegate* test_delegate) {
- DCHECK(!native_unwinder);
+ DCHECK(core_unwinders.empty());
+ core_unwinders.push_back(std::make_unique<NativeUnwinderMac>(module_cache));
return std::make_unique<StackSamplerImpl>(
std::make_unique<StackCopierSuspend>(
std::make_unique<SuspendableThreadDelegateMac>(thread_token)),
- std::make_unique<NativeUnwinderMac>(module_cache), module_cache,
- test_delegate);
+ std::move(core_unwinders), module_cache, test_delegate);
}
// static
diff --git a/chromium/base/profiler/stack_sampler_posix.cc b/chromium/base/profiler/stack_sampler_posix.cc
index 44215298c63..ae67f25cc4b 100644
--- a/chromium/base/profiler/stack_sampler_posix.cc
+++ b/chromium/base/profiler/stack_sampler_posix.cc
@@ -14,7 +14,7 @@ namespace base {
std::unique_ptr<StackSampler> StackSampler::Create(
SamplingProfilerThreadToken thread_token,
ModuleCache* module_cache,
- std::unique_ptr<Unwinder> native_unwinder,
+ std::vector<std::unique_ptr<Unwinder>> core_unwinders,
StackSamplerTestDelegate* test_delegate) {
return nullptr;
}
diff --git a/chromium/base/profiler/stack_sampler_win.cc b/chromium/base/profiler/stack_sampler_win.cc
index c19009b77e4..2a29d4c0ee8 100644
--- a/chromium/base/profiler/stack_sampler_win.cc
+++ b/chromium/base/profiler/stack_sampler_win.cc
@@ -17,14 +17,15 @@ namespace base {
std::unique_ptr<StackSampler> StackSampler::Create(
SamplingProfilerThreadToken thread_token,
ModuleCache* module_cache,
- std::unique_ptr<Unwinder> native_unwinder,
+ std::vector<std::unique_ptr<Unwinder>> core_unwinders,
StackSamplerTestDelegate* test_delegate) {
- DCHECK(!native_unwinder);
+ DCHECK(core_unwinders.empty());
#if defined(ARCH_CPU_X86_64) || defined(ARCH_CPU_ARM64)
+ core_unwinders.push_back(std::make_unique<NativeUnwinderWin>());
return std::make_unique<StackSamplerImpl>(
std::make_unique<StackCopierSuspend>(
std::make_unique<SuspendableThreadDelegateWin>(thread_token)),
- std::make_unique<NativeUnwinderWin>(), module_cache, test_delegate);
+ std::move(core_unwinders), module_cache, test_delegate);
#else
return nullptr;
#endif
diff --git a/chromium/base/profiler/stack_sampling_profiler.cc b/chromium/base/profiler/stack_sampling_profiler.cc
index 46634d18aea..f0ad057d521 100644
--- a/chromium/base/profiler/stack_sampling_profiler.cc
+++ b/chromium/base/profiler/stack_sampling_profiler.cc
@@ -5,6 +5,7 @@
#include "base/profiler/stack_sampling_profiler.h"
#include <algorithm>
+#include <cmath>
#include <map>
#include <utility>
@@ -26,7 +27,7 @@
#include "base/threading/thread_restrictions.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
-#include "base/trace_event/trace_event.h"
+#include "base/trace_event/base_tracing.h"
namespace base {
@@ -47,6 +48,36 @@ constexpr WaitableEvent::ResetPolicy kResetPolicy =
// for referencing the active collection to the SamplingThread.
const int kNullProfilerId = -1;
+TimeTicks GetNextSampleTimeImpl(TimeTicks scheduled_current_sample_time,
+ TimeDelta sampling_interval,
+ TimeTicks now) {
+ // Schedule the next sample at the next sampling_interval-aligned time in
+ // the future that's sufficiently far enough from the current sample. In the
+ // general case this will be one sampling_interval from the current
+ // sample. In cases where sample tasks were unable to be executed, such as
+ // during system suspend or bad system-wide jank, we may have missed some
+ // samples. The right thing to do for those cases is to skip the missed
+ // samples since the rest of the systems also wasn't executing.
+
+ // Ensure that the next sample time is at least half a sampling interval
+ // away. This causes the second sample after resume to be taken between 0.5
+ // and 1.5 samples after the first, or 1 sample interval on average. The delay
+ // also serves to provide a grace period in the normal sampling case where the
+ // current sample may be taken slightly later than its scheduled time.
+ const TimeTicks earliest_next_sample_time = now + sampling_interval / 2;
+
+ const TimeDelta minimum_time_delta_to_next_sample =
+ earliest_next_sample_time - scheduled_current_sample_time;
+
+ // The minimum number of sampling intervals required to get from the scheduled
+ // current sample time to the earliest next sample time.
+ const int64_t required_sampling_intervals = static_cast<int64_t>(
+ std::ceil(minimum_time_delta_to_next_sample.InMicrosecondsF() /
+ sampling_interval.InMicroseconds()));
+ return scheduled_current_sample_time +
+ required_sampling_intervals * sampling_interval;
+}
+
} // namespace
// StackSamplingProfiler::SamplingThread --------------------------------------
@@ -597,9 +628,9 @@ void StackSamplingProfiler::SamplingThread::RecordSampleTask(
// Schedule the next sample recording if there is one.
if (++collection->sample_count < collection->params.samples_per_profile) {
- if (!collection->params.keep_consistent_sampling_interval)
- collection->next_sample_time = TimeTicks::Now();
- collection->next_sample_time += collection->params.sampling_interval;
+ collection->next_sample_time = GetNextSampleTimeImpl(
+ collection->next_sample_time, collection->params.sampling_interval,
+ TimeTicks::Now());
bool success = GetTaskRunnerOnSamplingThread()->PostDelayedTask(
FROM_HERE,
BindOnce(&SamplingThread::RecordSampleTask, Unretained(this),
@@ -689,16 +720,25 @@ void StackSamplingProfiler::TestPeer::PerformSamplingThreadIdleShutdown(
SamplingThread::TestPeer::ShutdownAssumingIdle(simulate_intervening_start);
}
+// static
+TimeTicks StackSamplingProfiler::TestPeer::GetNextSampleTime(
+ TimeTicks scheduled_current_sample_time,
+ TimeDelta sampling_interval,
+ TimeTicks now) {
+ return GetNextSampleTimeImpl(scheduled_current_sample_time, sampling_interval,
+ now);
+}
+
StackSamplingProfiler::StackSamplingProfiler(
SamplingProfilerThreadToken thread_token,
const SamplingParams& params,
std::unique_ptr<ProfileBuilder> profile_builder,
- std::unique_ptr<Unwinder> native_unwinder,
+ std::vector<std::unique_ptr<Unwinder>> unwinders,
StackSamplerTestDelegate* test_delegate)
: StackSamplingProfiler(params, std::move(profile_builder), nullptr) {
sampler_ =
StackSampler::Create(thread_token, profile_builder_->GetModuleCache(),
- std::move(native_unwinder), test_delegate);
+ std::move(unwinders), test_delegate);
}
StackSamplingProfiler::StackSamplingProfiler(
@@ -753,9 +793,6 @@ void StackSamplingProfiler::Start() {
if (!sampler_)
return;
- if (pending_aux_unwinder_)
- sampler_->AddAuxUnwinder(std::move(pending_aux_unwinder_));
-
// The IsSignaled() check below requires that the WaitableEvent be manually
// reset, to avoid signaling the event in IsSignaled() itself.
static_assert(kResetPolicy == WaitableEvent::ResetPolicy::MANUAL,
@@ -789,9 +826,10 @@ void StackSamplingProfiler::Stop() {
void StackSamplingProfiler::AddAuxUnwinder(std::unique_ptr<Unwinder> unwinder) {
if (profiler_id_ == kNullProfilerId) {
- // We haven't started sampling, and so don't have a sampler to which we can
- // pass the unwinder yet. Save it on the instance until we do.
- pending_aux_unwinder_ = std::move(unwinder);
+ // We haven't started sampling, and so we can add |unwinder| to the sampler
+ // directly
+ if (sampler_)
+ sampler_->AddAuxUnwinder(std::move(unwinder));
return;
}
diff --git a/chromium/base/profiler/stack_sampling_profiler.h b/chromium/base/profiler/stack_sampling_profiler.h
index c6784b8a8f4..1171cda73fc 100644
--- a/chromium/base/profiler/stack_sampling_profiler.h
+++ b/chromium/base/profiler/stack_sampling_profiler.h
@@ -72,30 +72,22 @@ class BASE_EXPORT StackSamplingProfiler {
// Interval between samples during a sampling profile. This is the desired
// duration from the start of one sample to the start of the next sample.
TimeDelta sampling_interval = TimeDelta::FromMilliseconds(100);
-
- // When true, keeps the average sampling interval = |sampling_interval|,
- // irrespective of how long each sample takes. If a sample takes too long,
- // keeping the interval constant will lock out the sampled thread. When
- // false, sample is created with an interval of |sampling_interval|,
- // excluding the time taken by a sample. The metrics collected will not be
- // accurate, since sampling could take arbitrary amount of time, but makes
- // sure that the sampled thread gets at least the interval amount of time to
- // run between samples.
- bool keep_consistent_sampling_interval = true;
};
- // Creates a profiler for the specified thread. |native_unwinder| is required
- // on Android since the unwinder is provided outside StackSamplingProfiler,
- // but must be null on other platforms. An optional |test_delegate| can be
- // supplied by tests.
+ // Creates a profiler for the specified thread. |unwinders| is required on
+ // Android since the unwinder is provided outside StackSamplingProfiler, but
+ // must be empty on other platforms. When attempting to unwind, the relative
+ // priority of unwinders is the inverse of the order in |unwinders|. An
+ // optional |test_delegate| can be supplied by tests.
//
// The caller must ensure that this object gets destroyed before the thread
// exits.
- StackSamplingProfiler(SamplingProfilerThreadToken thread_token,
- const SamplingParams& params,
- std::unique_ptr<ProfileBuilder> profile_builder,
- std::unique_ptr<Unwinder> native_unwinder = nullptr,
- StackSamplerTestDelegate* test_delegate = nullptr);
+ StackSamplingProfiler(
+ SamplingProfilerThreadToken thread_token,
+ const SamplingParams& params,
+ std::unique_ptr<ProfileBuilder> profile_builder,
+ std::vector<std::unique_ptr<Unwinder>> core_unwinders = {},
+ StackSamplerTestDelegate* test_delegate = nullptr);
// Same as above function, with custom |sampler| implementation. The sampler
// on Android is not implemented in base.
@@ -153,6 +145,11 @@ class BASE_EXPORT StackSamplingProfiler {
// runs.
static void PerformSamplingThreadIdleShutdown(
bool simulate_intervening_start);
+
+ // Provides access to the method computing the next sample time.
+ static TimeTicks GetNextSampleTime(TimeTicks scheduled_current_sample_time,
+ TimeDelta sampling_interval,
+ TimeTicks now);
};
private:
@@ -191,10 +188,6 @@ class BASE_EXPORT StackSamplingProfiler {
// thread sampling starts.
std::unique_ptr<StackSampler> sampler_;
- // If an AuxUnwinder is added before Start() it will be saved here until it
- // can be passed to the sampling thread when thread sampling starts.
- std::unique_ptr<Unwinder> pending_aux_unwinder_;
-
// This starts "signaled", is reset when sampling begins, and is signaled
// when that sampling is complete and the profile_builder_'s
// OnProfileCompleted function has executed.
diff --git a/chromium/base/profiler/stack_sampling_profiler_test_util.cc b/chromium/base/profiler/stack_sampling_profiler_test_util.cc
index 9043d0627be..3ea67f52672 100644
--- a/chromium/base/profiler/stack_sampling_profiler_test_util.cc
+++ b/chromium/base/profiler/stack_sampling_profiler_test_util.cc
@@ -18,6 +18,13 @@
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
+#if defined(OS_ANDROID) && BUILDFLAG(ENABLE_ARM_CFI_TABLE)
+#include "base/android/apk_assets.h"
+#include "base/files/memory_mapped_file.h"
+#include "base/profiler/chrome_unwinder_android.h"
+#include "base/profiler/native_unwinder_android.h"
+#endif
+
#if defined(OS_WIN)
// Windows doesn't provide an alloca function like Linux does.
// Fortunately, it provides _alloca, which functions identically.
@@ -27,6 +34,13 @@
#include <alloca.h>
#endif
+extern "C" {
+// The address of |__executable_start| gives the start address of the
+// executable or shared library. This value is used to find the offset address
+// of the instruction in binary from PC.
+extern char __executable_start;
+}
+
namespace base {
namespace {
@@ -79,6 +93,67 @@ void OtherLibraryCallback(void* arg) {
ALLOW_UNUSED_LOCAL(i);
}
+#if defined(OS_ANDROID) && BUILDFLAG(ENABLE_ARM_CFI_TABLE)
+std::unique_ptr<NativeUnwinderAndroid> CreateNativeUnwinderAndroidForTesting(
+ uintptr_t exclude_module_with_base_address) {
+ class NativeUnwinderAndroidForTesting : public NativeUnwinderAndroid {
+ public:
+ explicit NativeUnwinderAndroidForTesting(
+ std::unique_ptr<unwindstack::Maps> memory_regions_map,
+ std::unique_ptr<unwindstack::Memory> process_memory,
+ uintptr_t exclude_module_with_base_address)
+ : NativeUnwinderAndroid(memory_regions_map.get(),
+ process_memory.get(),
+ exclude_module_with_base_address),
+ memory_regions_map_(std::move(memory_regions_map)),
+ process_memory_(std::move(process_memory)) {}
+ ~NativeUnwinderAndroidForTesting() override = default;
+
+ private:
+ std::unique_ptr<unwindstack::Maps> memory_regions_map_;
+ std::unique_ptr<unwindstack::Memory> process_memory_;
+ };
+ auto maps = NativeUnwinderAndroid::CreateMaps();
+ auto memory = NativeUnwinderAndroid::CreateProcessMemory();
+ return std::make_unique<NativeUnwinderAndroidForTesting>(
+ std::move(maps), std::move(memory), exclude_module_with_base_address);
+}
+
+std::unique_ptr<Unwinder> CreateChromeUnwinderAndroidForTesting(
+ uintptr_t chrome_module_base_address) {
+ static constexpr char kCfiFileName[] = "assets/unwind_cfi_32";
+ class ChromeUnwinderAndroidForTesting : public ChromeUnwinderAndroid {
+ public:
+ ChromeUnwinderAndroidForTesting(std::unique_ptr<MemoryMappedFile> cfi_file,
+ std::unique_ptr<ArmCFITable> cfi_table,
+ uintptr_t chrome_module_base_address)
+ : ChromeUnwinderAndroid(cfi_table.get(), chrome_module_base_address),
+ cfi_file_(std::move(cfi_file)),
+ cfi_table_(std::move(cfi_table)) {}
+ ~ChromeUnwinderAndroidForTesting() override = default;
+
+ private:
+ std::unique_ptr<MemoryMappedFile> cfi_file_;
+ std::unique_ptr<ArmCFITable> cfi_table_;
+ };
+
+ MemoryMappedFile::Region cfi_region;
+ int fd = base::android::OpenApkAsset(kCfiFileName, &cfi_region);
+ if (fd < 0)
+ return nullptr;
+ auto cfi_file = std::make_unique<MemoryMappedFile>();
+ if (!cfi_file->Initialize(base::File(fd), cfi_region))
+ return nullptr;
+ std::unique_ptr<ArmCFITable> cfi_table =
+ ArmCFITable::Parse({cfi_file->data(), cfi_file->length()});
+ if (!cfi_table)
+ return nullptr;
+
+ return std::make_unique<ChromeUnwinderAndroidForTesting>(
+ std::move(cfi_file), std::move(cfi_table), chrome_module_base_address);
+}
+#endif // #if defined(OS_ANDROID) && BUILDFLAG(ENABLE_ARM_CFI_TABLE)
+
} // namespace
TargetThread::TargetThread(OnceClosure to_run) : to_run_(std::move(to_run)) {}
@@ -240,7 +315,8 @@ std::vector<Frame> SampleScenario(UnwindScenario* scenario,
std::vector<Frame> result_sample) {
sample = std::move(result_sample);
sampling_thread_completed.Signal();
- })));
+ })),
+ CreateCoreUnwindersForTesting(module_cache));
if (aux_unwinder_factory)
profiler.AddAuxUnwinder(std::move(aux_unwinder_factory).Run());
profiler.Start();
@@ -338,4 +414,18 @@ uintptr_t GetAddressInOtherLibrary(NativeLibrary library) {
return address;
}
+std::vector<std::unique_ptr<Unwinder>> CreateCoreUnwindersForTesting(
+ ModuleCache* module_cache) {
+#if defined(OS_ANDROID) && BUILDFLAG(ENABLE_ARM_CFI_TABLE)
+ std::vector<std::unique_ptr<Unwinder>> unwinders;
+ unwinders.push_back(CreateNativeUnwinderAndroidForTesting(
+ reinterpret_cast<uintptr_t>(&__executable_start)));
+ unwinders.push_back(CreateChromeUnwinderAndroidForTesting(
+ reinterpret_cast<uintptr_t>(&__executable_start)));
+ return unwinders;
+#else
+ return {};
+#endif
+}
+
} // namespace base
diff --git a/chromium/base/profiler/stack_sampling_profiler_test_util.h b/chromium/base/profiler/stack_sampling_profiler_test_util.h
index ff2daebfe14..4337393ff73 100644
--- a/chromium/base/profiler/stack_sampling_profiler_test_util.h
+++ b/chromium/base/profiler/stack_sampling_profiler_test_util.h
@@ -12,13 +12,13 @@
#include "base/native_library.h"
#include "base/profiler/frame.h"
#include "base/profiler/sampling_profiler_thread_token.h"
-#include "base/profiler/stack_sampler.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/platform_thread.h"
namespace base {
class Unwinder;
+class ModuleCache;
// A thread to target for profiling that will run the supplied closure.
class TargetThread : public PlatformThread::Delegate {
@@ -140,6 +140,12 @@ NativeLibrary LoadOtherLibrary();
uintptr_t GetAddressInOtherLibrary(NativeLibrary library);
+// Creates a list of core unwinders required for StackSamplingProfilerTest.
+// This is useful notably on Android, which requires ChromeUnwinderAndroid in
+// addition to the native one.
+std::vector<std::unique_ptr<Unwinder>> CreateCoreUnwindersForTesting(
+ ModuleCache* module_cache);
+
} // namespace base
#endif // BASE_PROFILER_STACK_SAMPLING_PROFILER_TEST_UTIL_H_
diff --git a/chromium/base/profiler/stack_sampling_profiler_unittest.cc b/chromium/base/profiler/stack_sampling_profiler_unittest.cc
index b9fc305dd91..dc011172aea 100644
--- a/chromium/base/profiler/stack_sampling_profiler_unittest.cc
+++ b/chromium/base/profiler/stack_sampling_profiler_unittest.cc
@@ -47,7 +47,8 @@
// STACK_SAMPLING_PROFILER_SUPPORTED is used to conditionally enable the tests
// below for supported platforms (currently Win x64 and Mac x64).
-#if defined(_WIN64) || (defined(OS_MACOSX) && !defined(OS_IOS))
+#if defined(_WIN64) || (defined(OS_MACOSX) && !defined(OS_IOS)) || \
+ (defined(OS_ANDROID) && BUILDFLAG(ENABLE_ARM_CFI_TABLE))
#define STACK_SAMPLING_PROFILER_SUPPORTED 1
#endif
@@ -186,8 +187,8 @@ void SynchronousUnloadNativeLibrary(NativeLibrary library) {
::GetLastError() != ERROR_MOD_NOT_FOUND) {
PlatformThread::Sleep(TimeDelta::FromMilliseconds(1));
}
-#elif defined(OS_MACOSX)
-// Unloading a library on the Mac is synchronous.
+#elif defined(OS_MACOSX) || defined(OS_ANDROID)
+// Unloading a library on Mac and Android is synchronous.
#else
NOTIMPLEMENTED();
#endif
@@ -213,7 +214,7 @@ struct TestProfilerInfo {
profile = std::move(result_profile);
completed.Signal();
})),
- nullptr,
+ CreateCoreUnwindersForTesting(module_cache),
delegate) {}
// The order here is important to ensure objects being referenced don't get
@@ -347,7 +348,7 @@ void TestLibraryUnload(bool wait_until_unloaded, ModuleCache* module_cache) {
profile = std::move(result_profile);
sampling_thread_completed.Signal();
})),
- nullptr, &test_delegate);
+ CreateCoreUnwindersForTesting(module_cache), &test_delegate);
profiler.Start();
@@ -488,10 +489,12 @@ class TestAuxUnwinder : public Unwinder {
// Checks that the profiler handles stacks containing dynamically-allocated
// stack memory.
// macOS ASAN is not yet supported - crbug.com/718628.
-#if !(defined(ADDRESS_SANITIZER) && defined(OS_MACOSX))
-#define MAYBE_Alloca Alloca
-#else
+// Android is not supported since Chrome unwind tables don't support dynamic
+// frames.
+#if (defined(ADDRESS_SANITIZER) && defined(OS_MACOSX)) || defined(OS_ANDROID)
#define MAYBE_Alloca DISABLED_Alloca
+#else
+#define MAYBE_Alloca Alloca
#endif
PROFILER_TEST_F(StackSamplingProfilerTest, MAYBE_Alloca) {
UnwindScenario scenario(BindRepeating(&CallWithAlloca));
@@ -506,10 +509,13 @@ PROFILER_TEST_F(StackSamplingProfilerTest, MAYBE_Alloca) {
// Checks that a stack that runs through another library produces a stack with
// the expected functions.
// macOS ASAN is not yet supported - crbug.com/718628.
-#if !(defined(ADDRESS_SANITIZER) && defined(OS_MACOSX))
-#define MAYBE_OtherLibrary OtherLibrary
-#else
+// Android is not supported when EXCLUDE_UNWIND_TABLES |other_library| doesn't
+// have unwind tables.
+#if (defined(ADDRESS_SANITIZER) && defined(OS_MACOSX)) || \
+ (defined(OS_ANDROID) && BUILDFLAG(EXCLUDE_UNWIND_TABLES))
#define MAYBE_OtherLibrary DISABLED_OtherLibrary
+#else
+#define MAYBE_OtherLibrary OtherLibrary
#endif
PROFILER_TEST_F(StackSamplingProfilerTest, MAYBE_OtherLibrary) {
ScopedNativeLibrary other_library(LoadOtherLibrary());
@@ -526,10 +532,13 @@ PROFILER_TEST_F(StackSamplingProfilerTest, MAYBE_OtherLibrary) {
// Checks that a stack that runs through a library that is unloading produces a
// stack, and doesn't crash.
// Unloading is synchronous on the Mac, so this test is inapplicable.
-#if !defined(OS_MACOSX)
-#define MAYBE_UnloadingLibrary UnloadingLibrary
-#else
+// Android is not supported when EXCLUDE_UNWIND_TABLES |other_library| doesn't
+// have unwind tables.
+#if defined(OS_MACOSX) || \
+ (defined(OS_ANDROID) && BUILDFLAG(EXCLUDE_UNWIND_TABLES))
#define MAYBE_UnloadingLibrary DISABLED_UnloadingLibrary
+#else
+#define MAYBE_UnloadingLibrary UnloadingLibrary
#endif
PROFILER_TEST_F(StackSamplingProfilerTest, MAYBE_UnloadingLibrary) {
TestLibraryUnload(false, module_cache());
@@ -538,10 +547,11 @@ PROFILER_TEST_F(StackSamplingProfilerTest, MAYBE_UnloadingLibrary) {
// Checks that a stack that runs through a library that has been unloaded
// produces a stack, and doesn't crash.
// macOS ASAN is not yet supported - crbug.com/718628.
-#if !(defined(ADDRESS_SANITIZER) && defined(OS_MACOSX))
-#define MAYBE_UnloadedLibrary UnloadedLibrary
-#else
+// Android is not supported since modules are found before unwinding.
+#if (defined(ADDRESS_SANITIZER) && defined(OS_MACOSX)) || defined(OS_ANDROID)
#define MAYBE_UnloadedLibrary DISABLED_UnloadedLibrary
+#else
+#define MAYBE_UnloadedLibrary UnloadedLibrary
#endif
PROFILER_TEST_F(StackSamplingProfilerTest, MAYBE_UnloadedLibrary) {
TestLibraryUnload(true, module_cache());
@@ -568,7 +578,8 @@ PROFILER_TEST_F(StackSamplingProfilerTest, StopWithoutStarting) {
[&profile, &sampling_completed](Profile result_profile) {
profile = std::move(result_profile);
sampling_completed.Signal();
- })));
+ })),
+ CreateCoreUnwindersForTesting(module_cache()));
profiler.Stop(); // Constructed but never started.
EXPECT_FALSE(sampling_completed.IsSignaled());
@@ -706,6 +717,73 @@ PROFILER_TEST_F(StackSamplingProfilerTest, StopDuringInterSampleInterval) {
}));
}
+PROFILER_TEST_F(StackSamplingProfilerTest, GetNextSampleTime_NormalExecution) {
+ const auto& GetNextSampleTime =
+ StackSamplingProfiler::TestPeer::GetNextSampleTime;
+
+ const TimeTicks scheduled_current_sample_time = TimeTicks::UnixEpoch();
+ const TimeDelta sampling_interval = TimeDelta::FromMilliseconds(10);
+
+ // When executing the sample at exactly the scheduled time the next sample
+ // should be one interval later.
+ EXPECT_EQ(scheduled_current_sample_time + sampling_interval,
+ GetNextSampleTime(scheduled_current_sample_time, sampling_interval,
+ scheduled_current_sample_time));
+
+ // When executing the sample less than half an interval after the scheduled
+ // time the next sample also should be one interval later.
+ EXPECT_EQ(scheduled_current_sample_time + sampling_interval,
+ GetNextSampleTime(
+ scheduled_current_sample_time, sampling_interval,
+ scheduled_current_sample_time + 0.4 * sampling_interval));
+
+ // When executing the sample less than half an interval before the scheduled
+ // time the next sample also should be one interval later. This is not
+ // expected to occur in practice since delayed tasks never run early.
+ EXPECT_EQ(scheduled_current_sample_time + sampling_interval,
+ GetNextSampleTime(
+ scheduled_current_sample_time, sampling_interval,
+ scheduled_current_sample_time - 0.4 * sampling_interval));
+}
+
+PROFILER_TEST_F(StackSamplingProfilerTest, GetNextSampleTime_DelayedExecution) {
+ const auto& GetNextSampleTime =
+ StackSamplingProfiler::TestPeer::GetNextSampleTime;
+
+ const TimeTicks scheduled_current_sample_time = TimeTicks::UnixEpoch();
+ const TimeDelta sampling_interval = TimeDelta::FromMilliseconds(10);
+
+ // When executing the sample between 0.5 and 1.5 intervals after the scheduled
+ // time the next sample should be two intervals later.
+ EXPECT_EQ(scheduled_current_sample_time + 2 * sampling_interval,
+ GetNextSampleTime(
+ scheduled_current_sample_time, sampling_interval,
+ scheduled_current_sample_time + 0.6 * sampling_interval));
+ EXPECT_EQ(scheduled_current_sample_time + 2 * sampling_interval,
+ GetNextSampleTime(
+ scheduled_current_sample_time, sampling_interval,
+ scheduled_current_sample_time + 1.0 * sampling_interval));
+ EXPECT_EQ(scheduled_current_sample_time + 2 * sampling_interval,
+ GetNextSampleTime(
+ scheduled_current_sample_time, sampling_interval,
+ scheduled_current_sample_time + 1.4 * sampling_interval));
+
+ // Similarly when executing the sample between 9.5 and 10.5 intervals after
+ // the scheduled time the next sample should be 11 intervals later.
+ EXPECT_EQ(scheduled_current_sample_time + 11 * sampling_interval,
+ GetNextSampleTime(
+ scheduled_current_sample_time, sampling_interval,
+ scheduled_current_sample_time + 9.6 * sampling_interval));
+ EXPECT_EQ(scheduled_current_sample_time + 11 * sampling_interval,
+ GetNextSampleTime(
+ scheduled_current_sample_time, sampling_interval,
+ scheduled_current_sample_time + 10.0 * sampling_interval));
+ EXPECT_EQ(scheduled_current_sample_time + 11 * sampling_interval,
+ GetNextSampleTime(
+ scheduled_current_sample_time, sampling_interval,
+ scheduled_current_sample_time + 10.4 * sampling_interval));
+}
+
// Checks that we can destroy the profiler while profiling.
PROFILER_TEST_F(StackSamplingProfilerTest, DestroyProfilerWhileProfiling) {
SamplingParams params;
@@ -720,8 +798,9 @@ PROFILER_TEST_F(StackSamplingProfilerTest, DestroyProfilerWhileProfiling) {
BindLambdaForTesting([&profile](Profile result_profile) {
profile = std::move(result_profile);
}));
- profiler.reset(new StackSamplingProfiler(target_thread_token, params,
- std::move(profile_builder)));
+ profiler.reset(new StackSamplingProfiler(
+ target_thread_token, params, std::move(profile_builder),
+ CreateCoreUnwindersForTesting(module_cache())));
profiler->Start();
profiler.reset();
@@ -1072,7 +1151,8 @@ PROFILER_TEST_F(StackSamplingProfilerTest, MultipleSampledThreads) {
[&profile1, &sampling_thread_completed1](Profile result_profile) {
profile1 = std::move(result_profile);
sampling_thread_completed1.Signal();
- })));
+ })),
+ CreateCoreUnwindersForTesting(module_cache()));
WaitableEvent sampling_thread_completed2(
WaitableEvent::ResetPolicy::MANUAL,
@@ -1085,7 +1165,8 @@ PROFILER_TEST_F(StackSamplingProfilerTest, MultipleSampledThreads) {
[&profile2, &sampling_thread_completed2](Profile result_profile) {
profile2 = std::move(result_profile);
sampling_thread_completed2.Signal();
- })));
+ })),
+ CreateCoreUnwindersForTesting(module_cache()));
// Finally the real work.
profiler1.Start();
@@ -1120,8 +1201,8 @@ class ProfilerThread : public SimpleThread {
BindLambdaForTesting([this](Profile result_profile) {
profile_ = std::move(result_profile);
completed_.Signal();
- }))) {}
-
+ })),
+ CreateCoreUnwindersForTesting(module_cache)) {}
void Run() override {
run_.Wait();
profiler_.Start();
@@ -1206,7 +1287,8 @@ PROFILER_TEST_F(StackSamplingProfilerTest, AddAuxUnwinder_BeforeStart) {
Profile result_profile) {
profile = std::move(result_profile);
sampling_thread_completed.Signal();
- })));
+ })),
+ CreateCoreUnwindersForTesting(module_cache()));
profiler.AddAuxUnwinder(
std::make_unique<TestAuxUnwinder>(Frame(23, nullptr)));
profiler.Start();
@@ -1246,7 +1328,8 @@ PROFILER_TEST_F(StackSamplingProfilerTest, AddAuxUnwinder_AfterStart) {
Profile result_profile) {
profile = std::move(result_profile);
sampling_thread_completed.Signal();
- })));
+ })),
+ CreateCoreUnwindersForTesting(module_cache()));
profiler.Start();
profiler.AddAuxUnwinder(
std::make_unique<TestAuxUnwinder>(Frame(23, nullptr)));
@@ -1286,7 +1369,8 @@ PROFILER_TEST_F(StackSamplingProfilerTest, AddAuxUnwinder_AfterStop) {
Profile result_profile) {
profile = std::move(result_profile);
sampling_thread_completed.Signal();
- })));
+ })),
+ CreateCoreUnwindersForTesting(module_cache()));
profiler.Start();
profiler.Stop();
profiler.AddAuxUnwinder(
@@ -1360,7 +1444,8 @@ PROFILER_TEST_F(StackSamplingProfilerTest,
BindLambdaForTesting([&profile](Profile result_profile) {
profile = std::move(result_profile);
})),
- nullptr, &post_sample_invoker);
+ CreateCoreUnwindersForTesting(module_cache()),
+ &post_sample_invoker);
profiler.Start();
// Wait for 5 samples to be collected.
for (int i = 0; i < 5; ++i)
diff --git a/chromium/base/profiler/unwindstack_internal_android.cc b/chromium/base/profiler/unwindstack_internal_android.cc
deleted file mode 100644
index 92328590aa4..00000000000
--- a/chromium/base/profiler/unwindstack_internal_android.cc
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2019 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/profiler/unwindstack_internal_android.h"
-
-#include <string.h>
-
-#include "base/logging.h"
-
-namespace base {
-
-UnwindStackMemoryAndroid::UnwindStackMemoryAndroid(uintptr_t stack_ptr,
- uintptr_t stack_top)
- : stack_ptr_(stack_ptr), stack_top_(stack_top) {
- DCHECK_LE(stack_ptr_, stack_top_);
-}
-
-UnwindStackMemoryAndroid::~UnwindStackMemoryAndroid() = default;
-
-size_t UnwindStackMemoryAndroid::Read(uint64_t addr, void* dst, size_t size) {
- if (addr < stack_ptr_)
- return 0;
- if (size >= stack_top_ || addr > stack_top_ - size)
- return 0;
- memcpy(dst, reinterpret_cast<void*>(addr), size);
- return size;
-}
-
-} // namespace base \ No newline at end of file
diff --git a/chromium/base/profiler/unwindstack_internal_android.h b/chromium/base/profiler/unwindstack_internal_android.h
deleted file mode 100644
index 75058613fc7..00000000000
--- a/chromium/base/profiler/unwindstack_internal_android.h
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2020 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_PROFILER_UNWINDSTACK_INTERNAL_ANDROID_H_
-#define BASE_PROFILER_UNWINDSTACK_INTERNAL_ANDROID_H_
-
-#include "third_party/libunwindstack/src/libunwindstack/include/unwindstack/Maps.h"
-#include "third_party/libunwindstack/src/libunwindstack/include/unwindstack/Memory.h"
-
-// Avoid including this file directly in a header as it leaks headers from
-// libunwindstack. In particular, it's not to be included directly or
-// transitively from native_unwinder_android.h
-
-namespace base {
-
-// Implementation of unwindstack::Memory that restricts memory access to a stack
-// buffer, used by NativeUnwinderAndroid. While unwinding, only memory accesses
-// within the stack should be performed to restore registers.
-class UnwindStackMemoryAndroid : public unwindstack::Memory {
- public:
- UnwindStackMemoryAndroid(uintptr_t stack_ptr, uintptr_t stack_top);
- ~UnwindStackMemoryAndroid() override;
-
- size_t Read(uint64_t addr, void* dst, size_t size) override;
-
- private:
- const uintptr_t stack_ptr_;
- const uintptr_t stack_top_;
-};
-
-} // namespace base
-
-#endif // BASE_PROFILER_UNWINDSTACK_INTERNAL_ANDROID_H_ \ No newline at end of file
diff --git a/chromium/base/profiler/win32_stack_frame_unwinder.cc b/chromium/base/profiler/win32_stack_frame_unwinder.cc
index 700e747957c..553c1e3d55d 100644
--- a/chromium/base/profiler/win32_stack_frame_unwinder.cc
+++ b/chromium/base/profiler/win32_stack_frame_unwinder.cc
@@ -9,6 +9,7 @@
#include <utility>
#include "base/macros.h"
+#include "base/notreached.h"
#include "build/build_config.h"
namespace base {
diff --git a/chromium/base/run_loop.cc b/chromium/base/run_loop.cc
index 234e3fdc47b..af1ca56f5b4 100644
--- a/chromium/base/run_loop.cc
+++ b/chromium/base/run_loop.cc
@@ -136,7 +136,7 @@ void RunLoop::RunUntilIdle() {
void RunLoop::Quit() {
// Thread-safe.
- // This can only be hit if run_loop->Quit() is called directly (QuitClosure()
+ // This can only be hit if RunLoop::Quit() is called directly (QuitClosure()
// proxies through ProxyToTaskRunner() as it can only deref its WeakPtr on
// |origin_task_runner_|).
if (!origin_task_runner_->RunsTasksInCurrentSequence()) {
@@ -155,7 +155,7 @@ void RunLoop::Quit() {
void RunLoop::QuitWhenIdle() {
// Thread-safe.
- // This can only be hit if run_loop->QuitWhenIdle() is called directly
+ // This can only be hit if RunLoop::QuitWhenIdle() is called directly
// (QuitWhenIdleClosure() proxies through ProxyToTaskRunner() as it can only
// deref its WeakPtr on |origin_task_runner_|).
if (!origin_task_runner_->RunsTasksInCurrentSequence()) {
@@ -168,9 +168,9 @@ void RunLoop::QuitWhenIdle() {
}
RepeatingClosure RunLoop::QuitClosure() {
- // Obtaining the QuitClosure() is not thread-safe; either post the
- // QuitClosure() from the run thread or invoke Quit() directly (which is
- // thread-safe).
+ // Obtaining the QuitClosure() is not thread-safe; either obtain the
+ // QuitClosure() from the owning thread before Run() or invoke Quit() directly
+ // (which is thread-safe).
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
allow_quit_current_deprecated_ = false;
@@ -180,9 +180,9 @@ RepeatingClosure RunLoop::QuitClosure() {
}
RepeatingClosure RunLoop::QuitWhenIdleClosure() {
- // Obtaining the QuitWhenIdleClosure() is not thread-safe; either post the
- // QuitWhenIdleClosure() from the run thread or invoke QuitWhenIdle() directly
- // (which is thread-safe).
+ // Obtaining the QuitWhenIdleClosure() is not thread-safe; either obtain the
+ // QuitWhenIdleClosure() from the owning thread before Run() or invoke
+ // QuitWhenIdle() directly (which is thread-safe).
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
allow_quit_current_deprecated_ = false;
diff --git a/chromium/base/run_loop.h b/chromium/base/run_loop.h
index b8ee4375c08..c2c39e3f3aa 100644
--- a/chromium/base/run_loop.h
+++ b/chromium/base/run_loop.h
@@ -44,7 +44,7 @@ class SingleThreadTaskRunner;
// member and static methods unless explicitly indicated otherwise (e.g.
// IsRunning/IsNestedOnCurrentThread()). RunLoop::Run can only be called once
// per RunLoop lifetime. Create a RunLoop on the stack and call Run/Quit to run
-// a nested RunLoop but please do not use nested loops in production code!
+// a nested RunLoop but please avoid nested loops in production code!
class BASE_EXPORT RunLoop {
public:
// The type of RunLoop: a kDefault RunLoop at the top-level (non-nested) will
@@ -77,9 +77,8 @@ class BASE_EXPORT RunLoop {
RunLoop(Type type = Type::kDefault);
~RunLoop();
- // Run the current RunLoop::Delegate. This blocks until Quit is called. Before
- // calling Run, be sure to grab the QuitClosure in order to stop the
- // RunLoop::Delegate asynchronously.
+ // Run the current RunLoop::Delegate. This blocks until Quit is called
+ // (directly or by running the RunLoop::QuitClosure).
void Run();
// Run the current RunLoop::Delegate until it doesn't find any tasks or
@@ -99,31 +98,30 @@ class BASE_EXPORT RunLoop {
return running_;
}
- // Quit() quits an earlier call to Run() immediately. QuitWhenIdle() quits an
- // earlier call to Run() when there aren't any tasks or messages in the queue.
+ // Quit() transitions this RunLoop to a state where no more tasks will be
+ // allowed to run at the run-loop-level of this RunLoop. If invoked from the
+ // owning thread, the effect is immediate; otherwise it is thread-safe but
+ // asynchronous. When the transition takes effect, the underlying message loop
+ // quits this run-loop-level if it is topmost (otherwise the desire to quit
+ // this level is saved until run-levels nested above it are quit).
//
- // These methods are thread-safe but note that Quit() is asynchronous when
- // called from another thread (will quit soon but tasks that were already
- // queued on this RunLoop will get to run first).
+ // QuitWhenIdle() results in this RunLoop returning true from
+ // ShouldQuitWhenIdle() at this run-level (the delegate decides when "idle" is
+ // reached). This is also thread-safe.
//
- // There can be other nested RunLoops servicing the same task queue. Quitting
- // one RunLoop has no bearing on the others. Quit() and QuitWhenIdle() can be
- // called before, during or after Run(). If called before Run(), Run() will
- // return immediately when called. Calling Quit() or QuitWhenIdle() after the
- // RunLoop has already finished running has no effect.
- //
- // WARNING: You must NEVER assume that a call to Quit() or QuitWhenIdle() will
- // terminate the targeted message loop. If a nested RunLoop continues
- // running, the target may NEVER terminate. It is very easy to livelock (run
- // forever) in such a case.
+ // There can be other nested RunLoops servicing the same task queue. As
+ // mentioned above, quitting one RunLoop has no bearing on the others. Hence,
+ // you may never assume that a call to Quit() will terminate the underlying
+ // message loop. If a nested RunLoop continues running, the target may NEVER
+ // terminate.
void Quit();
void QuitWhenIdle();
// Returns a RepeatingClosure that safely calls Quit() or QuitWhenIdle() (has
// no effect if the RunLoop instance is gone).
//
- // These methods must be called from the thread on which the RunLoop was
- // created.
+ // The closures must be obtained from the thread owning the RunLoop but may
+ // then be invoked from any thread.
//
// Returned closures may be safely:
// * Passed to other threads.
@@ -284,7 +282,8 @@ class BASE_EXPORT RunLoop {
};
private:
- FRIEND_TEST_ALL_PREFIXES(MessageLoopTypedTest, RunLoopQuitOrderAfter);
+ FRIEND_TEST_ALL_PREFIXES(SingleThreadTaskExecutorTypedTest,
+ RunLoopQuitOrderAfter);
#if defined(OS_ANDROID)
// Android doesn't support the blocking RunLoop::Run, so it calls
diff --git a/chromium/base/safe_numerics_unittest.cc b/chromium/base/safe_numerics_unittest.cc
index 44675cf72cc..aafcb58c9fd 100644
--- a/chromium/base/safe_numerics_unittest.cc
+++ b/chromium/base/safe_numerics_unittest.cc
@@ -298,12 +298,26 @@ static void TestSpecializedArithmetic(
TEST_EXPECTED_VALUE(1, ClampedNumeric<Dst>(-1).UnsignedAbs());
// Modulus is legal only for integers.
- TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>() % 1);
+ TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(0) % 2);
+ TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(0) % 1);
+ TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(0) % -1);
+ TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(0) % -2);
+ TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) % 2);
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) % 1);
+ TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) % -1);
+ TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(1) % -2);
TEST_EXPECTED_VALUE(-1, CheckedNumeric<Dst>(-1) % 2);
+ TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(-1) % 1);
+ TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(-1) % -1);
TEST_EXPECTED_VALUE(-1, CheckedNumeric<Dst>(-1) % -2);
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(DstLimits::lowest()) % 2);
+ TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(DstLimits::lowest()) % 1);
+ TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(DstLimits::lowest()) % -1);
+ TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(DstLimits::lowest()) % -2);
TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(DstLimits::max()) % 2);
+ TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(DstLimits::max()) % 1);
+ TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(DstLimits::max()) % -1);
+ TEST_EXPECTED_VALUE(1, CheckedNumeric<Dst>(DstLimits::max()) % -2);
// Test all the different modulus combinations.
TEST_EXPECTED_VALUE(0, CheckedNumeric<Dst>(1) % CheckedNumeric<Dst>(1));
TEST_EXPECTED_VALUE(0, 1 % CheckedNumeric<Dst>(1));
@@ -334,12 +348,26 @@ static void TestSpecializedArithmetic(
TEST_EXPECTED_FAILURE(CheckedNumeric<Dst>(1) >> negative_one);
// Modulus is legal only for integers.
- TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>() % 1);
+ TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(0) % 2);
+ TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(0) % 1);
+ TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(0) % -1);
+ TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(0) % -2);
+ TEST_EXPECTED_VALUE(1, ClampedNumeric<Dst>(1) % 2);
TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(1) % 1);
+ TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(1) % -1);
+ TEST_EXPECTED_VALUE(1, ClampedNumeric<Dst>(1) % -2);
TEST_EXPECTED_VALUE(-1, ClampedNumeric<Dst>(-1) % 2);
+ TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(-1) % 1);
+ TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(-1) % -1);
TEST_EXPECTED_VALUE(-1, ClampedNumeric<Dst>(-1) % -2);
TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(DstLimits::lowest()) % 2);
+ TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(DstLimits::lowest()) % 1);
+ TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(DstLimits::lowest()) % -1);
+ TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(DstLimits::lowest()) % -2);
TEST_EXPECTED_VALUE(1, ClampedNumeric<Dst>(DstLimits::max()) % 2);
+ TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(DstLimits::max()) % 1);
+ TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(DstLimits::max()) % -1);
+ TEST_EXPECTED_VALUE(1, ClampedNumeric<Dst>(DstLimits::max()) % -2);
// Test all the different modulus combinations.
TEST_EXPECTED_VALUE(0, ClampedNumeric<Dst>(1) % ClampedNumeric<Dst>(1));
TEST_EXPECTED_VALUE(0, 1 % ClampedNumeric<Dst>(1));
diff --git a/chromium/base/sampling_heap_profiler/lock_free_address_hash_set.h b/chromium/base/sampling_heap_profiler/lock_free_address_hash_set.h
index 1007859d7e2..3e045ceaeb4 100644
--- a/chromium/base/sampling_heap_profiler/lock_free_address_hash_set.h
+++ b/chromium/base/sampling_heap_profiler/lock_free_address_hash_set.h
@@ -9,8 +9,8 @@
#include <cstdint>
#include <vector>
+#include "base/check_op.h"
#include "base/compiler_specific.h"
-#include "base/logging.h"
namespace base {
diff --git a/chromium/base/scoped_clear_last_error_unittest.cc b/chromium/base/scoped_clear_last_error_unittest.cc
index e7bae2b60a0..60c92152c3b 100644
--- a/chromium/base/scoped_clear_last_error_unittest.cc
+++ b/chromium/base/scoped_clear_last_error_unittest.cc
@@ -4,6 +4,7 @@
#include "base/scoped_clear_last_error.h"
+#include "base/logging.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/chromium/base/scoped_generic.h b/chromium/base/scoped_generic.h
index a2050218ee6..0328306044b 100644
--- a/chromium/base/scoped_generic.h
+++ b/chromium/base/scoped_generic.h
@@ -10,8 +10,8 @@
#include <algorithm>
+#include "base/check.h"
#include "base/compiler_specific.h"
-#include "base/logging.h"
#include "base/macros.h"
namespace base {
diff --git a/chromium/base/scoped_observer.h b/chromium/base/scoped_observer.h
index e6b15be24f7..5bf76341a5e 100644
--- a/chromium/base/scoped_observer.h
+++ b/chromium/base/scoped_observer.h
@@ -10,7 +10,7 @@
#include <algorithm>
#include <vector>
-#include "base/logging.h"
+#include "base/check.h"
#include "base/macros.h"
#include "base/stl_util.h"
diff --git a/chromium/base/sequence_checker.h b/chromium/base/sequence_checker.h
index 9b910eb72ae..d7e9afab2fe 100644
--- a/chromium/base/sequence_checker.h
+++ b/chromium/base/sequence_checker.h
@@ -5,8 +5,8 @@
#ifndef BASE_SEQUENCE_CHECKER_H_
#define BASE_SEQUENCE_CHECKER_H_
+#include "base/check.h"
#include "base/compiler_specific.h"
-#include "base/logging.h"
#include "base/sequence_checker_impl.h"
#include "base/strings/string_piece.h"
#include "build/build_config.h"
@@ -77,13 +77,9 @@
scoped_validate_sequence_checker_)(name, ##__VA_ARGS__)
#define DETACH_FROM_SEQUENCE(name) (name).DetachFromSequence()
#else // DCHECK_IS_ON()
-#if __OBJC__ && defined(OS_IOS) && !HAS_FEATURE(objc_cxx_static_assert)
-// TODO(thakis): Remove this branch once Xcode's clang has clang r356148.
-#define SEQUENCE_CHECKER(name)
-#else
+// A no-op expansion that can be followed by a semicolon at class level.
#define SEQUENCE_CHECKER(name) static_assert(true, "")
-#endif
-#define DCHECK_CALLED_ON_VALID_SEQUENCE(name, ...) EAT_STREAM_PARAMETERS
+#define DCHECK_CALLED_ON_VALID_SEQUENCE(name, ...) EAT_CHECK_STREAM_PARAMS()
#define DETACH_FROM_SEQUENCE(name)
#endif // DCHECK_IS_ON()
diff --git a/chromium/base/stl_util.h b/chromium/base/stl_util.h
index 8269b0b38ec..d8db9b141b8 100644
--- a/chromium/base/stl_util.h
+++ b/chromium/base/stl_util.h
@@ -23,7 +23,7 @@
#include <utility>
#include <vector>
-#include "base/logging.h"
+#include "base/check.h"
#include "base/optional.h"
#include "base/template_util.h"
@@ -561,14 +561,6 @@ size_t EraseIf(std::vector<T, Allocator>& container, Predicate pred) {
return removed;
}
-template <class T, class Allocator, class Value>
-size_t Erase(std::forward_list<T, Allocator>& container, const Value& value) {
- // Unlike std::forward_list::remove, this function template accepts
- // heterogeneous types and does not force a conversion to the container's
- // value type before invoking the == operator.
- return EraseIf(container, [&](const T& cur) { return cur == value; });
-}
-
template <class T, class Allocator, class Predicate>
size_t EraseIf(std::forward_list<T, Allocator>& container, Predicate pred) {
// Note: std::forward_list does not have a size() API, thus we need to use the
@@ -579,14 +571,6 @@ size_t EraseIf(std::forward_list<T, Allocator>& container, Predicate pred) {
return old_size - std::distance(container.begin(), container.end());
}
-template <class T, class Allocator, class Value>
-size_t Erase(std::list<T, Allocator>& container, const Value& value) {
- // Unlike std::list::remove, this function template accepts heterogeneous
- // types and does not force a conversion to the container's value type before
- // invoking the == operator.
- return EraseIf(container, [&](const T& cur) { return cur == value; });
-}
-
template <class T, class Allocator, class Predicate>
size_t EraseIf(std::list<T, Allocator>& container, Predicate pred) {
size_t old_size = container.size();
@@ -661,6 +645,22 @@ size_t EraseIf(
return internal::IterateAndEraseIf(container, pred);
}
+template <class T, class Allocator, class Value>
+size_t Erase(std::forward_list<T, Allocator>& container, const Value& value) {
+ // Unlike std::forward_list::remove, this function template accepts
+ // heterogeneous types and does not force a conversion to the container's
+ // value type before invoking the == operator.
+ return EraseIf(container, [&](const T& cur) { return cur == value; });
+}
+
+template <class T, class Allocator, class Value>
+size_t Erase(std::list<T, Allocator>& container, const Value& value) {
+ // Unlike std::list::remove, this function template accepts heterogeneous
+ // types and does not force a conversion to the container's value type before
+ // invoking the == operator.
+ return EraseIf(container, [&](const T& cur) { return cur == value; });
+}
+
// A helper class to be used as the predicate with |EraseIf| to implement
// in-place set intersection. Helps implement the algorithm of going through
// each container an element at a time, erasing elements from the first
diff --git a/chromium/base/strings/no_trigraphs_unittest.cc b/chromium/base/strings/no_trigraphs_unittest.cc
new file mode 100644
index 00000000000..736679b9055
--- /dev/null
+++ b/chromium/base/strings/no_trigraphs_unittest.cc
@@ -0,0 +1,10 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/strcat.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+TEST(NoTrigraphs, Basic) {
+ EXPECT_EQ("??=", base::StrCat({"?", "?", "="}));
+}
diff --git a/chromium/base/strings/strcat.cc b/chromium/base/strings/strcat.cc
index 35231ef691d..d94c2ea9148 100644
--- a/chromium/base/strings/strcat.cc
+++ b/chromium/base/strings/strcat.cc
@@ -4,82 +4,42 @@
#include "base/strings/strcat.h"
-namespace base {
-
-namespace {
-
-// Reserves an additional amount of capacity in the given string, growing by at
-// least 2x if necessary. Used by StrAppendT().
-//
-// The "at least 2x" growing rule duplicates the exponential growth of
-// std::string. The problem is that most implementations of reserve() will grow
-// exactly to the requested amount instead of exponentially growing like would
-// happen when appending normally. If we didn't do this, an append after the
-// call to StrAppend() would definitely cause a reallocation, and loops with
-// StrAppend() calls would have O(n^2) complexity to execute. Instead, we want
-// StrAppend() to have the same semantics as std::string::append().
-template <typename String>
-void ReserveAdditionalIfNeeded(String* str,
- typename String::size_type additional) {
- const size_t required = str->size() + additional;
- // Check whether we need to reserve additional capacity at all.
- if (required <= str->capacity())
- return;
-
- str->reserve(std::max(required, str->capacity() * 2));
-}
-
-template <typename DestString, typename InputString>
-void StrAppendT(DestString* dest, span<const InputString> pieces) {
- size_t additional_size = 0;
- for (const auto& cur : pieces)
- additional_size += cur.size();
- ReserveAdditionalIfNeeded(dest, additional_size);
+#include <string>
- for (const auto& cur : pieces)
- dest->append(cur.data(), cur.size());
-}
+#include "base/strings/strcat_internal.h"
-} // namespace
+namespace base {
std::string StrCat(span<const StringPiece> pieces) {
- std::string result;
- StrAppendT(&result, pieces);
- return result;
+ return internal::StrCatT(pieces);
}
string16 StrCat(span<const StringPiece16> pieces) {
- string16 result;
- StrAppendT(&result, pieces);
- return result;
+ return internal::StrCatT(pieces);
}
std::string StrCat(span<const std::string> pieces) {
- std::string result;
- StrAppendT(&result, pieces);
- return result;
+ return internal::StrCatT(pieces);
}
string16 StrCat(span<const string16> pieces) {
- string16 result;
- StrAppendT(&result, pieces);
- return result;
+ return internal::StrCatT(pieces);
}
void StrAppend(std::string* dest, span<const StringPiece> pieces) {
- StrAppendT(dest, pieces);
+ internal::StrAppendT(dest, pieces);
}
void StrAppend(string16* dest, span<const StringPiece16> pieces) {
- StrAppendT(dest, pieces);
+ internal::StrAppendT(dest, pieces);
}
void StrAppend(std::string* dest, span<const std::string> pieces) {
- StrAppendT(dest, pieces);
+ internal::StrAppendT(dest, pieces);
}
void StrAppend(string16* dest, span<const string16> pieces) {
- StrAppendT(dest, pieces);
+ internal::StrAppendT(dest, pieces);
}
} // namespace base
diff --git a/chromium/base/strings/strcat.h b/chromium/base/strings/strcat.h
index b7c76215ab2..5d9c38a075c 100644
--- a/chromium/base/strings/strcat.h
+++ b/chromium/base/strings/strcat.h
@@ -69,10 +69,11 @@ BASE_EXPORT string16 StrCat(span<const string16> pieces) WARN_UNUSED_RESULT;
// Initializer list forwards to the array version.
inline std::string StrCat(std::initializer_list<StringPiece> pieces) {
- return StrCat(make_span(pieces.begin(), pieces.size()));
+ return StrCat(make_span(pieces));
}
+
inline string16 StrCat(std::initializer_list<StringPiece16> pieces) {
- return StrCat(make_span(pieces.begin(), pieces.size()));
+ return StrCat(make_span(pieces));
}
// StrAppend -------------------------------------------------------------------
@@ -91,13 +92,18 @@ BASE_EXPORT void StrAppend(string16* dest, span<const string16> pieces);
// Initializer list forwards to the array version.
inline void StrAppend(std::string* dest,
std::initializer_list<StringPiece> pieces) {
- return StrAppend(dest, make_span(pieces.begin(), pieces.size()));
+ StrAppend(dest, make_span(pieces));
}
+
inline void StrAppend(string16* dest,
std::initializer_list<StringPiece16> pieces) {
- return StrAppend(dest, make_span(pieces.begin(), pieces.size()));
+ StrAppend(dest, make_span(pieces));
}
} // namespace base
+#if defined(OS_WIN)
+#include "base/strings/strcat_win.h"
+#endif
+
#endif // BASE_STRINGS_STRCAT_H_
diff --git a/chromium/base/strings/strcat_internal.h b/chromium/base/strings/strcat_internal.h
new file mode 100644
index 00000000000..f5e52f08667
--- /dev/null
+++ b/chromium/base/strings/strcat_internal.h
@@ -0,0 +1,60 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_STRCAT_INTERNAL_H_
+#define BASE_STRINGS_STRCAT_INTERNAL_H_
+
+#include <string>
+
+#include "base/containers/span.h"
+
+namespace base {
+
+namespace internal {
+
+// Reserves an additional amount of capacity in the given string, growing by at
+// least 2x if necessary. Used by StrAppendT().
+//
+// The "at least 2x" growing rule duplicates the exponential growth of
+// std::string. The problem is that most implementations of reserve() will grow
+// exactly to the requested amount instead of exponentially growing like would
+// happen when appending normally. If we didn't do this, an append after the
+// call to StrAppend() would definitely cause a reallocation, and loops with
+// StrAppend() calls would have O(n^2) complexity to execute. Instead, we want
+// StrAppend() to have the same semantics as std::string::append().
+template <typename String>
+void ReserveAdditionalIfNeeded(String* str,
+ typename String::size_type additional) {
+ const size_t required = str->size() + additional;
+ // Check whether we need to reserve additional capacity at all.
+ if (required <= str->capacity())
+ return;
+
+ str->reserve(std::max(required, str->capacity() * 2));
+}
+
+template <typename DestString, typename InputString>
+void StrAppendT(DestString* dest, span<const InputString> pieces) {
+ size_t additional_size = 0;
+ for (const auto& cur : pieces)
+ additional_size += cur.size();
+ ReserveAdditionalIfNeeded(dest, additional_size);
+
+ for (const auto& cur : pieces)
+ dest->append(cur.data(), cur.size());
+}
+
+template <typename StringT>
+auto StrCatT(span<const StringT> pieces) {
+ std::basic_string<typename StringT::value_type, typename StringT::traits_type>
+ result;
+ StrAppendT(&result, pieces);
+ return result;
+}
+
+} // namespace internal
+
+} // namespace base
+
+#endif // BASE_STRINGS_STRCAT_INTERNAL_H_
diff --git a/chromium/base/strings/strcat_win.cc b/chromium/base/strings/strcat_win.cc
new file mode 100644
index 00000000000..ad2f2e16ab1
--- /dev/null
+++ b/chromium/base/strings/strcat_win.cc
@@ -0,0 +1,35 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/strcat_win.h"
+
+#include <string>
+
+#include "base/containers/span.h"
+#include "base/strings/strcat_internal.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+#if defined(BASE_STRING16_IS_STD_U16STRING)
+
+std::wstring StrCat(span<const WStringPiece> pieces) {
+ return internal::StrCatT(pieces);
+}
+
+std::wstring StrCat(span<const std::wstring> pieces) {
+ return internal::StrCatT(pieces);
+}
+
+void StrAppend(std::wstring* dest, span<const WStringPiece> pieces) {
+ internal::StrAppendT(dest, pieces);
+}
+
+void StrAppend(std::wstring* dest, span<const std::wstring> pieces) {
+ internal::StrAppendT(dest, pieces);
+}
+
+#endif
+
+} // namespace base
diff --git a/chromium/base/strings/strcat_win.h b/chromium/base/strings/strcat_win.h
new file mode 100644
index 00000000000..4b8f0290e51
--- /dev/null
+++ b/chromium/base/strings/strcat_win.h
@@ -0,0 +1,45 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_STRCAT_WIN_H_
+#define BASE_STRINGS_STRCAT_WIN_H_
+
+#include <initializer_list>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/containers/span.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+// The following section contains overloads of the cross-platform APIs for
+// std::wstring and base::WStringPiece. These are only enabled if std::wstring
+// and base::string16 are distinct types, as otherwise this would result in an
+// ODR violation.
+// TODO(crbug.com/911896): Remove those guards once base::string16 is
+// std::u16string.
+#if defined(BASE_STRING16_IS_STD_U16STRING)
+BASE_EXPORT void StrAppend(std::wstring* dest, span<const WStringPiece> pieces);
+BASE_EXPORT void StrAppend(std::wstring* dest, span<const std::wstring> pieces);
+
+inline void StrAppend(std::wstring* dest,
+ std::initializer_list<WStringPiece> pieces) {
+ StrAppend(dest, make_span(pieces));
+}
+
+BASE_EXPORT std::wstring StrCat(span<const WStringPiece> pieces)
+ WARN_UNUSED_RESULT;
+BASE_EXPORT std::wstring StrCat(span<const std::wstring> pieces)
+ WARN_UNUSED_RESULT;
+
+inline std::wstring StrCat(std::initializer_list<WStringPiece> pieces) {
+ return StrCat(make_span(pieces));
+}
+#endif // defined(BASE_STRING16_IS_STD_U16STRING)
+
+} // namespace base
+
+#endif // BASE_STRINGS_STRCAT_WIN_H_
diff --git a/chromium/base/strings/string_number_conversions.cc b/chromium/base/strings/string_number_conversions.cc
index bd0a7e435f6..e2ef8acf66f 100644
--- a/chromium/base/strings/string_number_conversions.cc
+++ b/chromium/base/strings/string_number_conversions.cc
@@ -4,458 +4,120 @@
#include "base/strings/string_number_conversions.h"
-#include <ctype.h>
-#include <errno.h>
-#include <stdlib.h>
-#include <wctype.h>
+#include <iterator>
+#include <string>
-#include <limits>
-#include <type_traits>
-
-#include "base/check_op.h"
-#include "base/no_destructor.h"
-#include "base/numerics/safe_math.h"
-#include "base/strings/string_util.h"
-#include "base/strings/utf_string_conversions.h"
-#include "base/third_party/double_conversion/double-conversion/double-conversion.h"
+#include "base/containers/span.h"
+#include "base/logging.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_number_conversions_internal.h"
+#include "base/strings/string_piece.h"
namespace base {
-namespace {
-
-template <typename STR, typename INT>
-struct IntToStringT {
- static STR IntToString(INT value) {
- // log10(2) ~= 0.3 bytes needed per bit or per byte log10(2**8) ~= 2.4.
- // So round up to allocate 3 output characters per byte, plus 1 for '-'.
- const size_t kOutputBufSize =
- 3 * sizeof(INT) + std::numeric_limits<INT>::is_signed;
-
- // Create the string in a temporary buffer, write it back to front, and
- // then return the substr of what we ended up using.
- using CHR = typename STR::value_type;
- CHR outbuf[kOutputBufSize];
-
- // The ValueOrDie call below can never fail, because UnsignedAbs is valid
- // for all valid inputs.
- typename std::make_unsigned<INT>::type res =
- CheckedNumeric<INT>(value).UnsignedAbs().ValueOrDie();
-
- CHR* end = outbuf + kOutputBufSize;
- CHR* i = end;
- do {
- --i;
- DCHECK(i != outbuf);
- *i = static_cast<CHR>((res % 10) + '0');
- res /= 10;
- } while (res != 0);
- if (IsValueNegative(value)) {
- --i;
- DCHECK(i != outbuf);
- *i = static_cast<CHR>('-');
- }
- return STR(i, end);
- }
-};
-
-// Utility to convert a character to a digit in a given base
-template<typename CHAR, int BASE, bool BASE_LTE_10> class BaseCharToDigit {
-};
-
-// Faster specialization for bases <= 10
-template<typename CHAR, int BASE> class BaseCharToDigit<CHAR, BASE, true> {
- public:
- static bool Convert(CHAR c, uint8_t* digit) {
- if (c >= '0' && c < '0' + BASE) {
- *digit = static_cast<uint8_t>(c - '0');
- return true;
- }
- return false;
- }
-};
-
-// Specialization for bases where 10 < base <= 36
-template<typename CHAR, int BASE> class BaseCharToDigit<CHAR, BASE, false> {
- public:
- static bool Convert(CHAR c, uint8_t* digit) {
- if (c >= '0' && c <= '9') {
- *digit = c - '0';
- } else if (c >= 'a' && c < 'a' + BASE - 10) {
- *digit = c - 'a' + 10;
- } else if (c >= 'A' && c < 'A' + BASE - 10) {
- *digit = c - 'A' + 10;
- } else {
- return false;
- }
- return true;
- }
-};
-
-template <int BASE, typename CHAR>
-bool CharToDigit(CHAR c, uint8_t* digit) {
- return BaseCharToDigit<CHAR, BASE, BASE <= 10>::Convert(c, digit);
-}
-
-// There is an IsUnicodeWhitespace for wchars defined in string_util.h, but it
-// is locale independent, whereas the functions we are replacing were
-// locale-dependent. TBD what is desired, but for the moment let's not
-// introduce a change in behaviour.
-template<typename CHAR> class WhitespaceHelper {
-};
-
-template<> class WhitespaceHelper<char> {
- public:
- static bool Invoke(char c) {
- return 0 != isspace(static_cast<unsigned char>(c));
- }
-};
-
-template<> class WhitespaceHelper<char16> {
- public:
- static bool Invoke(char16 c) {
- return 0 != iswspace(c);
- }
-};
-
-template<typename CHAR> bool LocalIsWhitespace(CHAR c) {
- return WhitespaceHelper<CHAR>::Invoke(c);
-}
-
-// IteratorRangeToNumberTraits should provide:
-// - a typedef for iterator_type, the iterator type used as input.
-// - a typedef for value_type, the target numeric type.
-// - static functions min, max (returning the minimum and maximum permitted
-// values)
-// - constant kBase, the base in which to interpret the input
-template<typename IteratorRangeToNumberTraits>
-class IteratorRangeToNumber {
- public:
- typedef IteratorRangeToNumberTraits traits;
- typedef typename traits::iterator_type const_iterator;
- typedef typename traits::value_type value_type;
-
- // Generalized iterator-range-to-number conversion.
- //
- static bool Invoke(const_iterator begin,
- const_iterator end,
- value_type* output) {
- bool valid = true;
-
- while (begin != end && LocalIsWhitespace(*begin)) {
- valid = false;
- ++begin;
- }
-
- if (begin != end && *begin == '-') {
- if (!std::numeric_limits<value_type>::is_signed) {
- *output = 0;
- valid = false;
- } else if (!Negative::Invoke(begin + 1, end, output)) {
- valid = false;
- }
- } else {
- if (begin != end && *begin == '+') {
- ++begin;
- }
- if (!Positive::Invoke(begin, end, output)) {
- valid = false;
- }
- }
-
- return valid;
- }
-
- private:
- // Sign provides:
- // - a static function, CheckBounds, that determines whether the next digit
- // causes an overflow/underflow
- // - a static function, Increment, that appends the next digit appropriately
- // according to the sign of the number being parsed.
- template<typename Sign>
- class Base {
- public:
- static bool Invoke(const_iterator begin, const_iterator end,
- typename traits::value_type* output) {
- *output = 0;
-
- if (begin == end) {
- return false;
- }
-
- // Note: no performance difference was found when using template
- // specialization to remove this check in bases other than 16
- if (traits::kBase == 16 && end - begin > 2 && *begin == '0' &&
- (*(begin + 1) == 'x' || *(begin + 1) == 'X')) {
- begin += 2;
- }
-
- for (const_iterator current = begin; current != end; ++current) {
- uint8_t new_digit = 0;
-
- if (!CharToDigit<traits::kBase>(*current, &new_digit)) {
- return false;
- }
-
- if (current != begin) {
- if (!Sign::CheckBounds(output, new_digit)) {
- return false;
- }
- *output *= traits::kBase;
- }
-
- Sign::Increment(new_digit, output);
- }
- return true;
- }
- };
-
- class Positive : public Base<Positive> {
- public:
- static bool CheckBounds(value_type* output, uint8_t new_digit) {
- if (*output > static_cast<value_type>(traits::max() / traits::kBase) ||
- (*output == static_cast<value_type>(traits::max() / traits::kBase) &&
- new_digit > traits::max() % traits::kBase)) {
- *output = traits::max();
- return false;
- }
- return true;
- }
- static void Increment(uint8_t increment, value_type* output) {
- *output += increment;
- }
- };
-
- class Negative : public Base<Negative> {
- public:
- static bool CheckBounds(value_type* output, uint8_t new_digit) {
- if (*output < traits::min() / traits::kBase ||
- (*output == traits::min() / traits::kBase &&
- new_digit > 0 - traits::min() % traits::kBase)) {
- *output = traits::min();
- return false;
- }
- return true;
- }
- static void Increment(uint8_t increment, value_type* output) {
- *output -= increment;
- }
- };
-};
-
-template<typename ITERATOR, typename VALUE, int BASE>
-class BaseIteratorRangeToNumberTraits {
- public:
- typedef ITERATOR iterator_type;
- typedef VALUE value_type;
- static value_type min() {
- return std::numeric_limits<value_type>::min();
- }
- static value_type max() {
- return std::numeric_limits<value_type>::max();
- }
- static const int kBase = BASE;
-};
-
-template<typename ITERATOR>
-class BaseHexIteratorRangeToIntTraits
- : public BaseIteratorRangeToNumberTraits<ITERATOR, int, 16> {
-};
-
-template <typename ITERATOR>
-class BaseHexIteratorRangeToUIntTraits
- : public BaseIteratorRangeToNumberTraits<ITERATOR, uint32_t, 16> {};
-
-template <typename ITERATOR>
-class BaseHexIteratorRangeToInt64Traits
- : public BaseIteratorRangeToNumberTraits<ITERATOR, int64_t, 16> {};
-
-template <typename ITERATOR>
-class BaseHexIteratorRangeToUInt64Traits
- : public BaseIteratorRangeToNumberTraits<ITERATOR, uint64_t, 16> {};
-
-typedef BaseHexIteratorRangeToIntTraits<StringPiece::const_iterator>
- HexIteratorRangeToIntTraits;
-
-typedef BaseHexIteratorRangeToUIntTraits<StringPiece::const_iterator>
- HexIteratorRangeToUIntTraits;
-
-typedef BaseHexIteratorRangeToInt64Traits<StringPiece::const_iterator>
- HexIteratorRangeToInt64Traits;
-
-typedef BaseHexIteratorRangeToUInt64Traits<StringPiece::const_iterator>
- HexIteratorRangeToUInt64Traits;
-
-template <typename VALUE, int BASE>
-class StringPieceToNumberTraits
- : public BaseIteratorRangeToNumberTraits<StringPiece::const_iterator,
- VALUE,
- BASE> {
-};
-
-template <typename VALUE>
-bool StringToIntImpl(StringPiece input, VALUE* output) {
- return IteratorRangeToNumber<StringPieceToNumberTraits<VALUE, 10> >::Invoke(
- input.begin(), input.end(), output);
-}
-
-template <typename VALUE, int BASE>
-class StringPiece16ToNumberTraits
- : public BaseIteratorRangeToNumberTraits<StringPiece16::const_iterator,
- VALUE,
- BASE> {
-};
-
-template <typename VALUE>
-bool String16ToIntImpl(StringPiece16 input, VALUE* output) {
- return IteratorRangeToNumber<StringPiece16ToNumberTraits<VALUE, 10> >::Invoke(
- input.begin(), input.end(), output);
-}
-
-} // namespace
-
std::string NumberToString(int value) {
- return IntToStringT<std::string, int>::IntToString(value);
+ return internal::IntToStringT<std::string>(value);
}
string16 NumberToString16(int value) {
- return IntToStringT<string16, int>::IntToString(value);
+ return internal::IntToStringT<string16>(value);
}
std::string NumberToString(unsigned value) {
- return IntToStringT<std::string, unsigned>::IntToString(value);
+ return internal::IntToStringT<std::string>(value);
}
string16 NumberToString16(unsigned value) {
- return IntToStringT<string16, unsigned>::IntToString(value);
+ return internal::IntToStringT<string16>(value);
}
std::string NumberToString(long value) {
- return IntToStringT<std::string, long>::IntToString(value);
+ return internal::IntToStringT<std::string>(value);
}
string16 NumberToString16(long value) {
- return IntToStringT<string16, long>::IntToString(value);
+ return internal::IntToStringT<string16>(value);
}
std::string NumberToString(unsigned long value) {
- return IntToStringT<std::string, unsigned long>::IntToString(value);
+ return internal::IntToStringT<std::string>(value);
}
string16 NumberToString16(unsigned long value) {
- return IntToStringT<string16, unsigned long>::IntToString(value);
+ return internal::IntToStringT<string16>(value);
}
std::string NumberToString(long long value) {
- return IntToStringT<std::string, long long>::IntToString(value);
+ return internal::IntToStringT<std::string>(value);
}
string16 NumberToString16(long long value) {
- return IntToStringT<string16, long long>::IntToString(value);
+ return internal::IntToStringT<string16>(value);
}
std::string NumberToString(unsigned long long value) {
- return IntToStringT<std::string, unsigned long long>::IntToString(value);
+ return internal::IntToStringT<std::string>(value);
}
string16 NumberToString16(unsigned long long value) {
- return IntToStringT<string16, unsigned long long>::IntToString(value);
-}
-
-static const double_conversion::DoubleToStringConverter*
-GetDoubleToStringConverter() {
- static NoDestructor<double_conversion::DoubleToStringConverter> converter(
- double_conversion::DoubleToStringConverter::EMIT_POSITIVE_EXPONENT_SIGN,
- nullptr, nullptr, 'e', -6, 12, 0, 0);
- return converter.get();
+ return internal::IntToStringT<string16>(value);
}
std::string NumberToString(double value) {
- char buffer[32];
- double_conversion::StringBuilder builder(buffer, sizeof(buffer));
- GetDoubleToStringConverter()->ToShortest(value, &builder);
- return std::string(buffer, builder.position());
+ return internal::DoubleToStringT<std::string>(value);
}
-base::string16 NumberToString16(double value) {
- char buffer[32];
- double_conversion::StringBuilder builder(buffer, sizeof(buffer));
- GetDoubleToStringConverter()->ToShortest(value, &builder);
-
- // The number will be ASCII. This creates the string using the "input
- // iterator" variant which promotes from 8-bit to 16-bit via "=".
- return base::string16(&buffer[0], &buffer[builder.position()]);
+string16 NumberToString16(double value) {
+ return internal::DoubleToStringT<string16>(value);
}
bool StringToInt(StringPiece input, int* output) {
- return StringToIntImpl(input, output);
+ return internal::StringToIntImpl(input, *output);
}
bool StringToInt(StringPiece16 input, int* output) {
- return String16ToIntImpl(input, output);
+ return internal::StringToIntImpl(input, *output);
}
bool StringToUint(StringPiece input, unsigned* output) {
- return StringToIntImpl(input, output);
+ return internal::StringToIntImpl(input, *output);
}
bool StringToUint(StringPiece16 input, unsigned* output) {
- return String16ToIntImpl(input, output);
+ return internal::StringToIntImpl(input, *output);
}
bool StringToInt64(StringPiece input, int64_t* output) {
- return StringToIntImpl(input, output);
+ return internal::StringToIntImpl(input, *output);
}
bool StringToInt64(StringPiece16 input, int64_t* output) {
- return String16ToIntImpl(input, output);
+ return internal::StringToIntImpl(input, *output);
}
bool StringToUint64(StringPiece input, uint64_t* output) {
- return StringToIntImpl(input, output);
+ return internal::StringToIntImpl(input, *output);
}
bool StringToUint64(StringPiece16 input, uint64_t* output) {
- return String16ToIntImpl(input, output);
+ return internal::StringToIntImpl(input, *output);
}
bool StringToSizeT(StringPiece input, size_t* output) {
- return StringToIntImpl(input, output);
+ return internal::StringToIntImpl(input, *output);
}
bool StringToSizeT(StringPiece16 input, size_t* output) {
- return String16ToIntImpl(input, output);
-}
-
-template <typename STRING, typename CHAR>
-bool StringToDoubleImpl(STRING input, const CHAR* data, double* output) {
- static NoDestructor<double_conversion::StringToDoubleConverter> converter(
- double_conversion::StringToDoubleConverter::ALLOW_LEADING_SPACES |
- double_conversion::StringToDoubleConverter::ALLOW_TRAILING_JUNK,
- 0.0, 0, nullptr, nullptr);
-
- int processed_characters_count;
- *output = converter->StringToDouble(data, input.size(),
- &processed_characters_count);
-
- // Cases to return false:
- // - If the input string is empty, there was nothing to parse.
- // - If the value saturated to HUGE_VAL.
- // - If the entire string was not processed, there are either characters
- // remaining in the string after a parsed number, or the string does not
- // begin with a parseable number.
- // - If the first character is a space, there was leading whitespace
- return !input.empty() && *output != HUGE_VAL && *output != -HUGE_VAL &&
- static_cast<size_t>(processed_characters_count) == input.size() &&
- !IsUnicodeWhitespace(input[0]);
+ return internal::StringToIntImpl(input, *output);
}
bool StringToDouble(StringPiece input, double* output) {
- return StringToDoubleImpl(input, input.data(), output);
+ return internal::StringToDoubleImpl(input, input.data(), *output);
}
bool StringToDouble(StringPiece16 input, double* output) {
- return StringToDoubleImpl(
- input, reinterpret_cast<const uint16_t*>(input.data()), output);
+ return internal::StringToDoubleImpl(
+ input, reinterpret_cast<const uint16_t*>(input.data()), *output);
}
std::string HexEncode(const void* bytes, size_t size) {
@@ -477,69 +139,36 @@ std::string HexEncode(base::span<const uint8_t> bytes) {
}
bool HexStringToInt(StringPiece input, int* output) {
- return IteratorRangeToNumber<HexIteratorRangeToIntTraits>::Invoke(
- input.begin(), input.end(), output);
+ return internal::HexStringToIntImpl(input, *output);
}
bool HexStringToUInt(StringPiece input, uint32_t* output) {
- return IteratorRangeToNumber<HexIteratorRangeToUIntTraits>::Invoke(
- input.begin(), input.end(), output);
+ return internal::HexStringToIntImpl(input, *output);
}
bool HexStringToInt64(StringPiece input, int64_t* output) {
- return IteratorRangeToNumber<HexIteratorRangeToInt64Traits>::Invoke(
- input.begin(), input.end(), output);
+ return internal::HexStringToIntImpl(input, *output);
}
bool HexStringToUInt64(StringPiece input, uint64_t* output) {
- return IteratorRangeToNumber<HexIteratorRangeToUInt64Traits>::Invoke(
- input.begin(), input.end(), output);
-}
-
-template <typename Container>
-static bool HexStringToByteContainer(StringPiece input, Container* output) {
- DCHECK_EQ(output->size(), 0u);
- size_t count = input.size();
- if (count == 0 || (count % 2) != 0)
- return false;
- for (uintptr_t i = 0; i < count / 2; ++i) {
- uint8_t msb = 0; // most significant 4 bits
- uint8_t lsb = 0; // least significant 4 bits
- if (!CharToDigit<16>(input[i * 2], &msb) ||
- !CharToDigit<16>(input[i * 2 + 1], &lsb)) {
- return false;
- }
- output->push_back((msb << 4) | lsb);
- }
- return true;
+ return internal::HexStringToIntImpl(input, *output);
}
bool HexStringToBytes(StringPiece input, std::vector<uint8_t>* output) {
- return HexStringToByteContainer(input, output);
+ DCHECK(output->empty());
+ return internal::HexStringToByteContainer(input, std::back_inserter(*output));
}
bool HexStringToString(StringPiece input, std::string* output) {
- return HexStringToByteContainer(input, output);
+ DCHECK(output->empty());
+ return internal::HexStringToByteContainer(input, std::back_inserter(*output));
}
bool HexStringToSpan(StringPiece input, base::span<uint8_t> output) {
- size_t count = input.size();
- if (count == 0 || (count % 2) != 0)
+ if (input.size() / 2 != output.size())
return false;
- if (count / 2 != output.size())
- return false;
-
- for (uintptr_t i = 0; i < count / 2; ++i) {
- uint8_t msb = 0; // most significant 4 bits
- uint8_t lsb = 0; // least significant 4 bits
- if (!CharToDigit<16>(input[i * 2], &msb) ||
- !CharToDigit<16>(input[i * 2 + 1], &lsb)) {
- return false;
- }
- output[i] = (msb << 4) | lsb;
- }
- return true;
+ return internal::HexStringToByteContainer(input, output.begin());
}
} // namespace base
diff --git a/chromium/base/strings/string_number_conversions.h b/chromium/base/strings/string_number_conversions.h
index 87df24e21c9..f001641d186 100644
--- a/chromium/base/strings/string_number_conversions.h
+++ b/chromium/base/strings/string_number_conversions.h
@@ -20,10 +20,6 @@
// ----------------------------------------------------------------------------
// IMPORTANT MESSAGE FROM YOUR SPONSOR
//
-// This file contains no "wstring" variants. New code should use string16. If
-// you need to make old code work, use the UTF8 version and convert. Please do
-// not add wstring variants.
-//
// Please do not add "convenience" functions for converting strings to integers
// that return the value and ignore success/failure. That encourages people to
// write code that doesn't properly handle the error conditions.
@@ -154,4 +150,8 @@ BASE_EXPORT bool HexStringToSpan(StringPiece input, base::span<uint8_t> output);
} // namespace base
+#if defined(OS_WIN)
+#include "base/strings/string_number_conversions_win.h"
+#endif
+
#endif // BASE_STRINGS_STRING_NUMBER_CONVERSIONS_H_
diff --git a/chromium/base/strings/string_number_conversions_internal.h b/chromium/base/strings/string_number_conversions_internal.h
new file mode 100644
index 00000000000..1aa0c74c0b5
--- /dev/null
+++ b/chromium/base/strings/string_number_conversions_internal.h
@@ -0,0 +1,303 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_STRING_NUMBER_CONVERSIONS_INTERNAL_H_
+#define BASE_STRINGS_STRING_NUMBER_CONVERSIONS_INTERNAL_H_
+
+#include <ctype.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <wctype.h>
+
+#include <limits>
+
+#include "base/check_op.h"
+#include "base/logging.h"
+#include "base/no_destructor.h"
+#include "base/numerics/safe_math.h"
+#include "base/strings/string_util.h"
+#include "base/third_party/double_conversion/double-conversion/double-conversion.h"
+
+namespace base {
+
+namespace internal {
+
+template <typename STR, typename INT>
+static STR IntToStringT(INT value) {
+ // log10(2) ~= 0.3 bytes needed per bit or per byte log10(2**8) ~= 2.4.
+ // So round up to allocate 3 output characters per byte, plus 1 for '-'.
+ const size_t kOutputBufSize =
+ 3 * sizeof(INT) + std::numeric_limits<INT>::is_signed;
+
+ // Create the string in a temporary buffer, write it back to front, and
+ // then return the substr of what we ended up using.
+ using CHR = typename STR::value_type;
+ CHR outbuf[kOutputBufSize];
+
+ // The ValueOrDie call below can never fail, because UnsignedAbs is valid
+ // for all valid inputs.
+ std::make_unsigned_t<INT> res =
+ CheckedNumeric<INT>(value).UnsignedAbs().ValueOrDie();
+
+ CHR* end = outbuf + kOutputBufSize;
+ CHR* i = end;
+ do {
+ --i;
+ DCHECK(i != outbuf);
+ *i = static_cast<CHR>((res % 10) + '0');
+ res /= 10;
+ } while (res != 0);
+ if (IsValueNegative(value)) {
+ --i;
+ DCHECK(i != outbuf);
+ *i = static_cast<CHR>('-');
+ }
+ return STR(i, end);
+}
+
+// Utility to convert a character to a digit in a given base
+template <int BASE, typename CHAR>
+Optional<uint8_t> CharToDigit(CHAR c) {
+ static_assert(1 <= BASE && BASE <= 36, "BASE needs to be in [1, 36]");
+ if (c >= '0' && c < '0' + std::min(BASE, 10))
+ return c - '0';
+
+ if (c >= 'a' && c < 'a' + BASE - 10)
+ return c - 'a' + 10;
+
+ if (c >= 'A' && c < 'A' + BASE - 10)
+ return c - 'A' + 10;
+
+ return base::nullopt;
+}
+
+// There is an IsUnicodeWhitespace for wchars defined in string_util.h, but it
+// is locale independent, whereas the functions we are replacing were
+// locale-dependent. TBD what is desired, but for the moment let's not
+// introduce a change in behaviour.
+template <typename CHAR>
+class WhitespaceHelper {};
+
+template <>
+class WhitespaceHelper<char> {
+ public:
+ static bool Invoke(char c) {
+ return 0 != isspace(static_cast<unsigned char>(c));
+ }
+};
+
+template <>
+class WhitespaceHelper<char16> {
+ public:
+ static bool Invoke(char16 c) { return 0 != iswspace(c); }
+};
+
+template <typename CHAR>
+bool LocalIsWhitespace(CHAR c) {
+ return WhitespaceHelper<CHAR>::Invoke(c);
+}
+
+template <typename Number, int kBase>
+class StringToNumberParser {
+ public:
+ struct Result {
+ Number value = 0;
+ bool valid = false;
+ };
+
+ static constexpr Number kMin = std::numeric_limits<Number>::min();
+ static constexpr Number kMax = std::numeric_limits<Number>::max();
+
+ // Sign provides:
+ // - a static function, CheckBounds, that determines whether the next digit
+ // causes an overflow/underflow
+ // - a static function, Increment, that appends the next digit appropriately
+ // according to the sign of the number being parsed.
+ template <typename Sign>
+ class Base {
+ public:
+ template <typename Iter>
+ static Result Invoke(Iter begin, Iter end) {
+ Number value = 0;
+
+ if (begin == end) {
+ return {value, false};
+ }
+
+ // Note: no performance difference was found when using template
+ // specialization to remove this check in bases other than 16
+ if (kBase == 16 && end - begin > 2 && *begin == '0' &&
+ (*(begin + 1) == 'x' || *(begin + 1) == 'X')) {
+ begin += 2;
+ }
+
+ for (Iter current = begin; current != end; ++current) {
+ Optional<uint8_t> new_digit = CharToDigit<kBase>(*current);
+
+ if (!new_digit) {
+ return {value, false};
+ }
+
+ if (current != begin) {
+ Result result = Sign::CheckBounds(value, *new_digit);
+ if (!result.valid)
+ return result;
+
+ value *= kBase;
+ }
+
+ value = Sign::Increment(value, *new_digit);
+ }
+ return {value, true};
+ }
+ };
+
+ class Positive : public Base<Positive> {
+ public:
+ static Result CheckBounds(Number value, uint8_t new_digit) {
+ if (value > static_cast<Number>(kMax / kBase) ||
+ (value == static_cast<Number>(kMax / kBase) &&
+ new_digit > kMax % kBase)) {
+ return {kMax, false};
+ }
+ return {value, true};
+ }
+ static Number Increment(Number lhs, uint8_t rhs) { return lhs + rhs; }
+ };
+
+ class Negative : public Base<Negative> {
+ public:
+ static Result CheckBounds(Number value, uint8_t new_digit) {
+ if (value < kMin / kBase ||
+ (value == kMin / kBase && new_digit > 0 - kMin % kBase)) {
+ return {kMin, false};
+ }
+ return {value, true};
+ }
+ static Number Increment(Number lhs, uint8_t rhs) { return lhs - rhs; }
+ };
+};
+
+template <typename Number, int kBase, typename Str>
+auto StringToNumber(BasicStringPiece<Str> input) {
+ using Parser = StringToNumberParser<Number, kBase>;
+ using Result = typename Parser::Result;
+
+ bool has_leading_whitespace = false;
+ auto begin = input.begin();
+ auto end = input.end();
+
+ while (begin != end && LocalIsWhitespace(*begin)) {
+ has_leading_whitespace = true;
+ ++begin;
+ }
+
+ if (begin != end && *begin == '-') {
+ if (!std::numeric_limits<Number>::is_signed) {
+ return Result{0, false};
+ }
+
+ Result result = Parser::Negative::Invoke(begin + 1, end);
+ result.valid &= !has_leading_whitespace;
+ return result;
+ }
+
+ if (begin != end && *begin == '+') {
+ ++begin;
+ }
+
+ Result result = Parser::Positive::Invoke(begin, end);
+ result.valid &= !has_leading_whitespace;
+ return result;
+}
+
+template <typename STR, typename VALUE>
+bool StringToIntImpl(BasicStringPiece<STR> input, VALUE& output) {
+ auto result = StringToNumber<VALUE, 10>(input);
+ output = result.value;
+ return result.valid;
+}
+
+template <typename STR, typename VALUE>
+bool HexStringToIntImpl(BasicStringPiece<STR> input, VALUE& output) {
+ auto result = StringToNumber<VALUE, 16>(input);
+ output = result.value;
+ return result.valid;
+}
+
+static const double_conversion::DoubleToStringConverter*
+GetDoubleToStringConverter() {
+ static NoDestructor<double_conversion::DoubleToStringConverter> converter(
+ double_conversion::DoubleToStringConverter::EMIT_POSITIVE_EXPONENT_SIGN,
+ nullptr, nullptr, 'e', -6, 12, 0, 0);
+ return converter.get();
+}
+
+// Converts a given (data, size) pair to a desired string type. For
+// performance reasons, this dispatches to a different constructor if the
+// passed-in data matches the string's value_type.
+template <typename StringT>
+StringT ToString(const typename StringT::value_type* data, size_t size) {
+ return StringT(data, size);
+}
+
+template <typename StringT, typename CharT>
+StringT ToString(const CharT* data, size_t size) {
+ return StringT(data, data + size);
+}
+
+template <typename StringT>
+StringT DoubleToStringT(double value) {
+ char buffer[32];
+ double_conversion::StringBuilder builder(buffer, sizeof(buffer));
+ GetDoubleToStringConverter()->ToShortest(value, &builder);
+ return ToString<StringT>(buffer, builder.position());
+}
+
+template <typename STRING, typename CHAR>
+bool StringToDoubleImpl(STRING input, const CHAR* data, double& output) {
+ static NoDestructor<double_conversion::StringToDoubleConverter> converter(
+ double_conversion::StringToDoubleConverter::ALLOW_LEADING_SPACES |
+ double_conversion::StringToDoubleConverter::ALLOW_TRAILING_JUNK,
+ 0.0, 0, nullptr, nullptr);
+
+ int processed_characters_count;
+ output = converter->StringToDouble(data, input.size(),
+ &processed_characters_count);
+
+ // Cases to return false:
+ // - If the input string is empty, there was nothing to parse.
+ // - If the value saturated to HUGE_VAL.
+ // - If the entire string was not processed, there are either characters
+ // remaining in the string after a parsed number, or the string does not
+ // begin with a parseable number.
+ // - If the first character is a space, there was leading whitespace
+ return !input.empty() && output != HUGE_VAL && output != -HUGE_VAL &&
+ static_cast<size_t>(processed_characters_count) == input.size() &&
+ !IsUnicodeWhitespace(input[0]);
+}
+
+template <typename OutIter>
+static bool HexStringToByteContainer(StringPiece input, OutIter output) {
+ size_t count = input.size();
+ if (count == 0 || (count % 2) != 0)
+ return false;
+ for (uintptr_t i = 0; i < count / 2; ++i) {
+ // most significant 4 bits
+ Optional<uint8_t> msb = CharToDigit<16>(input[i * 2]);
+ // least significant 4 bits
+ Optional<uint8_t> lsb = CharToDigit<16>(input[i * 2 + 1]);
+ if (!msb || !lsb) {
+ return false;
+ }
+ *(output++) = (*msb << 4) | *lsb;
+ }
+ return true;
+}
+
+} // namespace internal
+
+} // namespace base
+
+#endif // BASE_STRINGS_STRING_NUMBER_CONVERSIONS_INTERNAL_H_
diff --git a/chromium/base/strings/string_number_conversions_win.cc b/chromium/base/strings/string_number_conversions_win.cc
new file mode 100644
index 00000000000..8a1a3bea917
--- /dev/null
+++ b/chromium/base/strings/string_number_conversions_win.cc
@@ -0,0 +1,79 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/string_number_conversions_win.h"
+
+#include <string>
+
+#include "base/strings/string_number_conversions_internal.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+std::wstring NumberToWString(int value) {
+ return internal::IntToStringT<std::wstring>(value);
+}
+
+std::wstring NumberToWString(unsigned value) {
+ return internal::IntToStringT<std::wstring>(value);
+}
+
+std::wstring NumberToWString(long value) {
+ return internal::IntToStringT<std::wstring>(value);
+}
+
+std::wstring NumberToWString(unsigned long value) {
+ return internal::IntToStringT<std::wstring>(value);
+}
+
+std::wstring NumberToWString(long long value) {
+ return internal::IntToStringT<std::wstring>(value);
+}
+
+std::wstring NumberToWString(unsigned long long value) {
+ return internal::IntToStringT<std::wstring>(value);
+}
+
+std::wstring NumberToWString(double value) {
+ return internal::DoubleToStringT<std::wstring>(value);
+}
+
+#if defined(BASE_STRING16_IS_STD_U16STRING)
+namespace internal {
+
+template <>
+class WhitespaceHelper<wchar_t> {
+ public:
+ static bool Invoke(wchar_t c) { return 0 != iswspace(c); }
+};
+
+} // namespace internal
+
+bool StringToInt(WStringPiece input, int* output) {
+ return internal::StringToIntImpl(input, *output);
+}
+
+bool StringToUint(WStringPiece input, unsigned* output) {
+ return internal::StringToIntImpl(input, *output);
+}
+
+bool StringToInt64(WStringPiece input, int64_t* output) {
+ return internal::StringToIntImpl(input, *output);
+}
+
+bool StringToUint64(WStringPiece input, uint64_t* output) {
+ return internal::StringToIntImpl(input, *output);
+}
+
+bool StringToSizeT(WStringPiece input, size_t* output) {
+ return internal::StringToIntImpl(input, *output);
+}
+
+bool StringToDouble(WStringPiece input, double* output) {
+ return internal::StringToDoubleImpl(
+ input, reinterpret_cast<const uint16_t*>(input.data()), *output);
+}
+#endif // defined(BASE_STRING16_IS_STD_U16STRING)
+
+} // namespace base
diff --git a/chromium/base/strings/string_number_conversions_win.h b/chromium/base/strings/string_number_conversions_win.h
new file mode 100644
index 00000000000..5abcc291130
--- /dev/null
+++ b/chromium/base/strings/string_number_conversions_win.h
@@ -0,0 +1,40 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_STRING_NUMBER_CONVERSIONS_WIN_H_
+#define BASE_STRINGS_STRING_NUMBER_CONVERSIONS_WIN_H_
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+BASE_EXPORT std::wstring NumberToWString(int value);
+BASE_EXPORT std::wstring NumberToWString(unsigned int value);
+BASE_EXPORT std::wstring NumberToWString(long value);
+BASE_EXPORT std::wstring NumberToWString(unsigned long value);
+BASE_EXPORT std::wstring NumberToWString(long long value);
+BASE_EXPORT std::wstring NumberToWString(unsigned long long value);
+BASE_EXPORT std::wstring NumberToWString(double value);
+
+// The following section contains overloads of the cross-platform APIs for
+// std::wstring and base::WStringPiece. These are only enabled if std::wstring
+// and base::string16 are distinct types, as otherwise this would result in an
+// ODR violation.
+// TODO(crbug.com/911896): Remove those guards once base::string16 is
+// std::u16string.
+#if defined(BASE_STRING16_IS_STD_U16STRING)
+BASE_EXPORT bool StringToInt(WStringPiece input, int* output);
+BASE_EXPORT bool StringToUint(WStringPiece input, unsigned* output);
+BASE_EXPORT bool StringToInt64(WStringPiece input, int64_t* output);
+BASE_EXPORT bool StringToUint64(WStringPiece input, uint64_t* output);
+BASE_EXPORT bool StringToSizeT(WStringPiece input, size_t* output);
+BASE_EXPORT bool StringToDouble(WStringPiece input, double* output);
+#endif // defined(BASE_STRING16_IS_STD_U16STRING)
+
+} // namespace base
+
+#endif // BASE_STRINGS_STRING_NUMBER_CONVERSIONS_WIN_H_
diff --git a/chromium/base/strings/string_piece.h b/chromium/base/strings/string_piece.h
index bc24b4d1e90..f60af47177b 100644
--- a/chromium/base/strings/string_piece.h
+++ b/chromium/base/strings/string_piece.h
@@ -25,11 +25,12 @@
#include <stddef.h>
#include <iosfwd>
+#include <ostream>
#include <string>
#include <type_traits>
#include "base/base_export.h"
-#include "base/logging.h"
+#include "base/check_op.h"
#include "base/strings/char_traits.h"
#include "base/strings/string16.h"
#include "base/strings/string_piece_forward.h"
@@ -148,6 +149,7 @@ template <typename STRING_TYPE> class BasicStringPiece {
public:
// Standard STL container boilerplate.
typedef size_t size_type;
+ typedef typename STRING_TYPE::traits_type traits_type;
typedef typename STRING_TYPE::value_type value_type;
typedef const value_type* pointer;
typedef const value_type& reference;
@@ -162,7 +164,7 @@ template <typename STRING_TYPE> class BasicStringPiece {
// We provide non-explicit singleton constructors so users can pass
// in a "const char*" or a "string" wherever a "StringPiece" is
// expected (likewise for char16, string16, StringPiece16).
- constexpr BasicStringPiece() : ptr_(NULL), length_(0) {}
+ constexpr BasicStringPiece() : ptr_(nullptr), length_(0) {}
// TODO(crbug.com/1049498): Construction from nullptr is not allowed for
// std::basic_string_view, so remove the special handling for it.
// Note: This doesn't just use STRING_TYPE::traits_type::length(), since that
diff --git a/chromium/base/strings/string_split.cc b/chromium/base/strings/string_split.cc
index a968e802e8c..4ba0412cc2c 100644
--- a/chromium/base/strings/string_split.cc
+++ b/chromium/base/strings/string_split.cc
@@ -7,6 +7,7 @@
#include <stddef.h>
#include "base/logging.h"
+#include "base/strings/string_split_internal.h"
#include "base/strings/string_util.h"
#include "base/third_party/icu/icu_utf.h"
@@ -14,56 +15,6 @@ namespace base {
namespace {
-// Returns either the ASCII or UTF-16 whitespace.
-template<typename Str> BasicStringPiece<Str> WhitespaceForType();
-#if defined(OS_WIN) && defined(BASE_STRING16_IS_STD_U16STRING)
-template <>
-WStringPiece WhitespaceForType<std::wstring>() {
- return kWhitespaceWide;
-}
-#endif
-
-template<> StringPiece16 WhitespaceForType<string16>() {
- return kWhitespaceUTF16;
-}
-template<> StringPiece WhitespaceForType<std::string>() {
- return kWhitespaceASCII;
-}
-
-// General string splitter template. Can take 8- or 16-bit input, can produce
-// the corresponding string or StringPiece output.
-template <typename OutputStringType, typename Str>
-static std::vector<OutputStringType> SplitStringT(
- BasicStringPiece<Str> str,
- BasicStringPiece<Str> delimiter,
- WhitespaceHandling whitespace,
- SplitResult result_type) {
- std::vector<OutputStringType> result;
- if (str.empty())
- return result;
-
- size_t start = 0;
- while (start != Str::npos) {
- size_t end = str.find_first_of(delimiter, start);
-
- BasicStringPiece<Str> piece;
- if (end == Str::npos) {
- piece = str.substr(start);
- start = Str::npos;
- } else {
- piece = str.substr(start, end - start);
- start = end + 1;
- }
-
- if (whitespace == TRIM_WHITESPACE)
- piece = TrimString(piece, WhitespaceForType<Str>(), TRIM_ALL);
-
- if (result_type == SPLIT_WANT_ALL || !piece.empty())
- result.emplace_back(piece);
- }
- return result;
-}
-
bool AppendStringKeyValue(StringPiece input,
char delimiter,
StringPairs* result) {
@@ -94,67 +45,38 @@ bool AppendStringKeyValue(StringPiece input,
return true;
}
-template <typename OutputStringType, typename Str>
-std::vector<OutputStringType> SplitStringUsingSubstrT(
- BasicStringPiece<Str> input,
- BasicStringPiece<Str> delimiter,
- WhitespaceHandling whitespace,
- SplitResult result_type) {
- using Piece = BasicStringPiece<Str>;
- using size_type = typename Piece::size_type;
-
- std::vector<OutputStringType> result;
- if (delimiter.size() == 0) {
- result.emplace_back(input);
- return result;
- }
-
- for (size_type begin_index = 0, end_index = 0; end_index != Piece::npos;
- begin_index = end_index + delimiter.size()) {
- end_index = input.find(delimiter, begin_index);
- Piece term = end_index == Piece::npos
- ? input.substr(begin_index)
- : input.substr(begin_index, end_index - begin_index);
-
- if (whitespace == TRIM_WHITESPACE)
- term = TrimString(term, WhitespaceForType<Str>(), TRIM_ALL);
-
- if (result_type == SPLIT_WANT_ALL || !term.empty())
- result.emplace_back(term);
- }
-
- return result;
-}
-
} // namespace
std::vector<std::string> SplitString(StringPiece input,
StringPiece separators,
WhitespaceHandling whitespace,
SplitResult result_type) {
- return SplitStringT<std::string>(input, separators, whitespace, result_type);
+ return internal::SplitStringT<std::string>(input, separators, whitespace,
+ result_type);
}
std::vector<string16> SplitString(StringPiece16 input,
StringPiece16 separators,
WhitespaceHandling whitespace,
SplitResult result_type) {
- return SplitStringT<string16>(input, separators, whitespace, result_type);
+ return internal::SplitStringT<string16>(input, separators, whitespace,
+ result_type);
}
std::vector<StringPiece> SplitStringPiece(StringPiece input,
StringPiece separators,
WhitespaceHandling whitespace,
SplitResult result_type) {
- return SplitStringT<StringPiece>(input, separators, whitespace, result_type);
+ return internal::SplitStringT<StringPiece>(input, separators, whitespace,
+ result_type);
}
std::vector<StringPiece16> SplitStringPiece(StringPiece16 input,
StringPiece16 separators,
WhitespaceHandling whitespace,
SplitResult result_type) {
- return SplitStringT<StringPiece16>(input, separators, whitespace,
- result_type);
+ return internal::SplitStringT<StringPiece16>(input, separators, whitespace,
+ result_type);
}
bool SplitStringIntoKeyValuePairs(StringPiece input,
@@ -192,16 +114,16 @@ std::vector<string16> SplitStringUsingSubstr(StringPiece16 input,
StringPiece16 delimiter,
WhitespaceHandling whitespace,
SplitResult result_type) {
- return SplitStringUsingSubstrT<string16>(input, delimiter, whitespace,
- result_type);
+ return internal::SplitStringUsingSubstrT<string16>(input, delimiter,
+ whitespace, result_type);
}
std::vector<std::string> SplitStringUsingSubstr(StringPiece input,
StringPiece delimiter,
WhitespaceHandling whitespace,
SplitResult result_type) {
- return SplitStringUsingSubstrT<std::string>(input, delimiter, whitespace,
- result_type);
+ return internal::SplitStringUsingSubstrT<std::string>(
+ input, delimiter, whitespace, result_type);
}
std::vector<StringPiece16> SplitStringPieceUsingSubstr(
@@ -210,8 +132,8 @@ std::vector<StringPiece16> SplitStringPieceUsingSubstr(
WhitespaceHandling whitespace,
SplitResult result_type) {
std::vector<StringPiece16> result;
- return SplitStringUsingSubstrT<StringPiece16>(input, delimiter, whitespace,
- result_type);
+ return internal::SplitStringUsingSubstrT<StringPiece16>(
+ input, delimiter, whitespace, result_type);
}
std::vector<StringPiece> SplitStringPieceUsingSubstr(
@@ -219,41 +141,8 @@ std::vector<StringPiece> SplitStringPieceUsingSubstr(
StringPiece delimiter,
WhitespaceHandling whitespace,
SplitResult result_type) {
- return SplitStringUsingSubstrT<StringPiece>(input, delimiter, whitespace,
- result_type);
-}
-
-#if defined(OS_WIN) && defined(BASE_STRING16_IS_STD_U16STRING)
-std::vector<std::wstring> SplitString(WStringPiece input,
- WStringPiece separators,
- WhitespaceHandling whitespace,
- SplitResult result_type) {
- return SplitStringT<std::wstring>(input, separators, whitespace, result_type);
-}
-
-std::vector<WStringPiece> SplitStringPiece(WStringPiece input,
- WStringPiece separators,
- WhitespaceHandling whitespace,
- SplitResult result_type) {
- return SplitStringT<WStringPiece>(input, separators, whitespace, result_type);
-}
-
-std::vector<std::wstring> SplitStringUsingSubstr(WStringPiece input,
- WStringPiece delimiter,
- WhitespaceHandling whitespace,
- SplitResult result_type) {
- return SplitStringUsingSubstrT<std::wstring>(input, delimiter, whitespace,
- result_type);
-}
-
-std::vector<WStringPiece> SplitStringPieceUsingSubstr(
- WStringPiece input,
- WStringPiece delimiter,
- WhitespaceHandling whitespace,
- SplitResult result_type) {
- return SplitStringUsingSubstrT<WStringPiece>(input, delimiter, whitespace,
- result_type);
+ return internal::SplitStringUsingSubstrT<StringPiece>(
+ input, delimiter, whitespace, result_type);
}
-#endif
} // namespace base
diff --git a/chromium/base/strings/string_split.h b/chromium/base/strings/string_split.h
index efa8b199fe0..73c15d79f1b 100644
--- a/chromium/base/strings/string_split.h
+++ b/chromium/base/strings/string_split.h
@@ -138,32 +138,10 @@ BASE_EXPORT std::vector<StringPiece> SplitStringPieceUsingSubstr(
WhitespaceHandling whitespace,
SplitResult result_type) WARN_UNUSED_RESULT;
-#if defined(OS_WIN) && defined(BASE_STRING16_IS_STD_U16STRING)
-BASE_EXPORT std::vector<std::wstring> SplitString(WStringPiece input,
- WStringPiece separators,
- WhitespaceHandling whitespace,
- SplitResult result_type)
- WARN_UNUSED_RESULT;
-
-BASE_EXPORT std::vector<WStringPiece> SplitStringPiece(
- WStringPiece input,
- WStringPiece separators,
- WhitespaceHandling whitespace,
- SplitResult result_type) WARN_UNUSED_RESULT;
-
-BASE_EXPORT std::vector<std::wstring> SplitStringUsingSubstr(
- WStringPiece input,
- WStringPiece delimiter,
- WhitespaceHandling whitespace,
- SplitResult result_type) WARN_UNUSED_RESULT;
+} // namespace base
-BASE_EXPORT std::vector<WStringPiece> SplitStringPieceUsingSubstr(
- WStringPiece input,
- WStringPiece delimiter,
- WhitespaceHandling whitespace,
- SplitResult result_type) WARN_UNUSED_RESULT;
+#if defined(OS_WIN)
+#include "base/strings/string_split_win.h"
#endif
-} // namespace base
-
#endif // BASE_STRINGS_STRING_SPLIT_H_
diff --git a/chromium/base/strings/string_split_internal.h b/chromium/base/strings/string_split_internal.h
new file mode 100644
index 00000000000..71d8030b3d2
--- /dev/null
+++ b/chromium/base/strings/string_split_internal.h
@@ -0,0 +1,100 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_STRING_SPLIT_INTERNAL_H_
+#define BASE_STRINGS_STRING_SPLIT_INTERNAL_H_
+
+#include <vector>
+
+#include "base/strings/string_piece.h"
+#include "base/strings/string_util.h"
+
+namespace base {
+
+namespace internal {
+
+// Returns either the ASCII or UTF-16 whitespace.
+template <typename Str>
+BasicStringPiece<Str> WhitespaceForType();
+
+template <>
+inline StringPiece16 WhitespaceForType<string16>() {
+ return kWhitespaceUTF16;
+}
+template <>
+inline StringPiece WhitespaceForType<std::string>() {
+ return kWhitespaceASCII;
+}
+
+// General string splitter template. Can take 8- or 16-bit input, can produce
+// the corresponding string or StringPiece output.
+template <typename OutputStringType, typename Str>
+static std::vector<OutputStringType> SplitStringT(
+ BasicStringPiece<Str> str,
+ BasicStringPiece<Str> delimiter,
+ WhitespaceHandling whitespace,
+ SplitResult result_type) {
+ std::vector<OutputStringType> result;
+ if (str.empty())
+ return result;
+
+ size_t start = 0;
+ while (start != Str::npos) {
+ size_t end = str.find_first_of(delimiter, start);
+
+ BasicStringPiece<Str> piece;
+ if (end == Str::npos) {
+ piece = str.substr(start);
+ start = Str::npos;
+ } else {
+ piece = str.substr(start, end - start);
+ start = end + 1;
+ }
+
+ if (whitespace == TRIM_WHITESPACE)
+ piece = TrimString(piece, WhitespaceForType<Str>(), TRIM_ALL);
+
+ if (result_type == SPLIT_WANT_ALL || !piece.empty())
+ result.emplace_back(piece);
+ }
+ return result;
+}
+
+template <typename OutputStringType, typename Str>
+std::vector<OutputStringType> SplitStringUsingSubstrT(
+ BasicStringPiece<Str> input,
+ BasicStringPiece<Str> delimiter,
+ WhitespaceHandling whitespace,
+ SplitResult result_type) {
+ using Piece = BasicStringPiece<Str>;
+ using size_type = typename Piece::size_type;
+
+ std::vector<OutputStringType> result;
+ if (delimiter.size() == 0) {
+ result.emplace_back(input);
+ return result;
+ }
+
+ for (size_type begin_index = 0, end_index = 0; end_index != Piece::npos;
+ begin_index = end_index + delimiter.size()) {
+ end_index = input.find(delimiter, begin_index);
+ Piece term = end_index == Piece::npos
+ ? input.substr(begin_index)
+ : input.substr(begin_index, end_index - begin_index);
+
+ if (whitespace == TRIM_WHITESPACE)
+ term = TrimString(term, WhitespaceForType<Str>(), TRIM_ALL);
+
+ if (result_type == SPLIT_WANT_ALL || !term.empty())
+ result.emplace_back(term);
+ }
+
+ return result;
+}
+
+} // namespace internal
+
+} // namespace base
+
+#endif // BASE_STRINGS_STRING_SPLIT_INTERNAL_H_
diff --git a/chromium/base/strings/string_split_win.cc b/chromium/base/strings/string_split_win.cc
new file mode 100644
index 00000000000..91184bd058e
--- /dev/null
+++ b/chromium/base/strings/string_split_win.cc
@@ -0,0 +1,59 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/string_split_win.h"
+
+#include <string>
+#include <vector>
+
+#include "base/strings/string_piece.h"
+#include "base/strings/string_split_internal.h"
+
+namespace base {
+
+#if defined(BASE_STRING16_IS_STD_U16STRING)
+namespace internal {
+
+template <>
+inline WStringPiece WhitespaceForType<std::wstring>() {
+ return kWhitespaceWide;
+}
+
+} // namespace internal
+
+std::vector<std::wstring> SplitString(WStringPiece input,
+ WStringPiece separators,
+ WhitespaceHandling whitespace,
+ SplitResult result_type) {
+ return internal::SplitStringT<std::wstring>(input, separators, whitespace,
+ result_type);
+}
+
+std::vector<WStringPiece> SplitStringPiece(WStringPiece input,
+ WStringPiece separators,
+ WhitespaceHandling whitespace,
+ SplitResult result_type) {
+ return internal::SplitStringT<WStringPiece>(input, separators, whitespace,
+ result_type);
+}
+
+std::vector<std::wstring> SplitStringUsingSubstr(WStringPiece input,
+ WStringPiece delimiter,
+ WhitespaceHandling whitespace,
+ SplitResult result_type) {
+ return internal::SplitStringUsingSubstrT<std::wstring>(
+ input, delimiter, whitespace, result_type);
+}
+
+std::vector<WStringPiece> SplitStringPieceUsingSubstr(
+ WStringPiece input,
+ WStringPiece delimiter,
+ WhitespaceHandling whitespace,
+ SplitResult result_type) {
+ return internal::SplitStringUsingSubstrT<WStringPiece>(
+ input, delimiter, whitespace, result_type);
+}
+#endif
+
+} // namespace base
diff --git a/chromium/base/strings/string_split_win.h b/chromium/base/strings/string_split_win.h
new file mode 100644
index 00000000000..51627d9eeb8
--- /dev/null
+++ b/chromium/base/strings/string_split_win.h
@@ -0,0 +1,53 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_STRING_SPLIT_WIN_H_
+#define BASE_STRINGS_STRING_SPLIT_WIN_H_
+
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/compiler_specific.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/string_split.h"
+
+namespace base {
+
+// The following section contains overloads of the cross-platform APIs for
+// std::wstring and base::WStringPiece. These are only enabled if std::wstring
+// and base::string16 are distinct types, as otherwise this would result in an
+// ODR violation.
+// TODO(crbug.com/911896): Remove those guards once base::string16 is
+// std::u16string.
+#if defined(BASE_STRING16_IS_STD_U16STRING)
+BASE_EXPORT std::vector<std::wstring> SplitString(WStringPiece input,
+ WStringPiece separators,
+ WhitespaceHandling whitespace,
+ SplitResult result_type)
+ WARN_UNUSED_RESULT;
+
+BASE_EXPORT std::vector<WStringPiece> SplitStringPiece(
+ WStringPiece input,
+ WStringPiece separators,
+ WhitespaceHandling whitespace,
+ SplitResult result_type) WARN_UNUSED_RESULT;
+
+BASE_EXPORT std::vector<std::wstring> SplitStringUsingSubstr(
+ WStringPiece input,
+ WStringPiece delimiter,
+ WhitespaceHandling whitespace,
+ SplitResult result_type) WARN_UNUSED_RESULT;
+
+BASE_EXPORT std::vector<WStringPiece> SplitStringPieceUsingSubstr(
+ WStringPiece input,
+ WStringPiece delimiter,
+ WhitespaceHandling whitespace,
+ SplitResult result_type) WARN_UNUSED_RESULT;
+#endif
+
+} // namespace base
+
+#endif // BASE_STRINGS_STRING_SPLIT_WIN_H_
diff --git a/chromium/base/strings/string_util.cc b/chromium/base/strings/string_util.cc
index 924455491a6..a883c97eca4 100644
--- a/chromium/base/strings/string_util.cc
+++ b/chromium/base/strings/string_util.cc
@@ -18,11 +18,13 @@
#include <algorithm>
#include <limits>
+#include <type_traits>
#include <vector>
-#include "base/logging.h"
+#include "base/check_op.h"
#include "base/no_destructor.h"
#include "base/stl_util.h"
+#include "base/strings/string_util_internal.h"
#include "base/strings/utf_string_conversion_utils.h"
#include "base/strings/utf_string_conversions.h"
#include "base/third_party/icu/icu_utf.h"
@@ -30,60 +32,6 @@
namespace base {
-namespace {
-
-// Used by ReplaceStringPlaceholders to track the position in the string of
-// replaced parameters.
-struct ReplacementOffset {
- ReplacementOffset(uintptr_t parameter, size_t offset)
- : parameter(parameter),
- offset(offset) {}
-
- // Index of the parameter.
- uintptr_t parameter;
-
- // Starting position in the string.
- size_t offset;
-};
-
-static bool CompareParameter(const ReplacementOffset& elem1,
- const ReplacementOffset& elem2) {
- return elem1.parameter < elem2.parameter;
-}
-
-// Assuming that a pointer is the size of a "machine word", then
-// uintptr_t is an integer type that is also a machine word.
-using MachineWord = uintptr_t;
-
-inline bool IsMachineWordAligned(const void* pointer) {
- return !(reinterpret_cast<MachineWord>(pointer) & (sizeof(MachineWord) - 1));
-}
-
-template <typename CharacterType>
-struct NonASCIIMask;
-template <>
-struct NonASCIIMask<char> {
- static constexpr MachineWord value() {
- return static_cast<MachineWord>(0x8080808080808080ULL);
- }
-};
-template <>
-struct NonASCIIMask<char16> {
- static constexpr MachineWord value() {
- return static_cast<MachineWord>(0xFF80FF80FF80FF80ULL);
- }
-};
-#if defined(WCHAR_T_IS_UTF32)
-template <>
-struct NonASCIIMask<wchar_t> {
- static constexpr MachineWord value() {
- return static_cast<MachineWord>(0xFFFFFF80FFFFFF80ULL);
- }
-};
-#endif // WCHAR_T_IS_UTF32
-
-} // namespace
-
bool IsWprintfFormatPortable(const wchar_t* format) {
for (const wchar_t* position = format; *position != '\0'; ++position) {
if (*position == '%') {
@@ -119,89 +67,38 @@ bool IsWprintfFormatPortable(const wchar_t* format) {
return true;
}
-namespace {
-
-template<typename StringType>
-StringType ToLowerASCIIImpl(BasicStringPiece<StringType> str) {
- StringType ret;
- ret.reserve(str.size());
- for (size_t i = 0; i < str.size(); i++)
- ret.push_back(ToLowerASCII(str[i]));
- return ret;
-}
-
-template<typename StringType>
-StringType ToUpperASCIIImpl(BasicStringPiece<StringType> str) {
- StringType ret;
- ret.reserve(str.size());
- for (size_t i = 0; i < str.size(); i++)
- ret.push_back(ToUpperASCII(str[i]));
- return ret;
-}
-
-} // namespace
-
std::string ToLowerASCII(StringPiece str) {
- return ToLowerASCIIImpl<std::string>(str);
+ return internal::ToLowerASCIIImpl(str);
}
string16 ToLowerASCII(StringPiece16 str) {
- return ToLowerASCIIImpl<string16>(str);
+ return internal::ToLowerASCIIImpl(str);
}
std::string ToUpperASCII(StringPiece str) {
- return ToUpperASCIIImpl<std::string>(str);
+ return internal::ToUpperASCIIImpl(str);
}
string16 ToUpperASCII(StringPiece16 str) {
- return ToUpperASCIIImpl<string16>(str);
-}
-
-template<class StringType>
-int CompareCaseInsensitiveASCIIT(BasicStringPiece<StringType> a,
- BasicStringPiece<StringType> b) {
- // Find the first characters that aren't equal and compare them. If the end
- // of one of the strings is found before a nonequal character, the lengths
- // of the strings are compared.
- size_t i = 0;
- while (i < a.length() && i < b.length()) {
- typename StringType::value_type lower_a = ToLowerASCII(a[i]);
- typename StringType::value_type lower_b = ToLowerASCII(b[i]);
- if (lower_a < lower_b)
- return -1;
- if (lower_a > lower_b)
- return 1;
- i++;
- }
-
- // End of one string hit before finding a different character. Expect the
- // common case to be "strings equal" at this point so check that first.
- if (a.length() == b.length())
- return 0;
-
- if (a.length() < b.length())
- return -1;
- return 1;
+ return internal::ToUpperASCIIImpl(str);
}
int CompareCaseInsensitiveASCII(StringPiece a, StringPiece b) {
- return CompareCaseInsensitiveASCIIT<std::string>(a, b);
+ return internal::CompareCaseInsensitiveASCIIT(a, b);
}
int CompareCaseInsensitiveASCII(StringPiece16 a, StringPiece16 b) {
- return CompareCaseInsensitiveASCIIT<string16>(a, b);
+ return internal::CompareCaseInsensitiveASCIIT(a, b);
}
bool EqualsCaseInsensitiveASCII(StringPiece a, StringPiece b) {
- if (a.length() != b.length())
- return false;
- return CompareCaseInsensitiveASCIIT<std::string>(a, b) == 0;
+ return a.size() == b.size() &&
+ internal::CompareCaseInsensitiveASCIIT(a, b) == 0;
}
bool EqualsCaseInsensitiveASCII(StringPiece16 a, StringPiece16 b) {
- if (a.length() != b.length())
- return false;
- return CompareCaseInsensitiveASCIIT<string16>(a, b) == 0;
+ return a.size() == b.size() &&
+ internal::CompareCaseInsensitiveASCIIT(a, b) == 0;
}
const std::string& EmptyString() {
@@ -214,107 +111,56 @@ const string16& EmptyString16() {
return *s16;
}
-template <class StringType>
-bool ReplaceCharsT(const StringType& input,
- BasicStringPiece<StringType> find_any_of_these,
- BasicStringPiece<StringType> replace_with,
- StringType* output);
-
-bool ReplaceChars(const string16& input,
+bool ReplaceChars(StringPiece16 input,
StringPiece16 replace_chars,
StringPiece16 replace_with,
string16* output) {
- return ReplaceCharsT(input, replace_chars, replace_with, output);
+ return internal::ReplaceCharsT(input, replace_chars, replace_with, output);
}
-bool ReplaceChars(const std::string& input,
+bool ReplaceChars(StringPiece input,
StringPiece replace_chars,
StringPiece replace_with,
std::string* output) {
- return ReplaceCharsT(input, replace_chars, replace_with, output);
+ return internal::ReplaceCharsT(input, replace_chars, replace_with, output);
}
-bool RemoveChars(const string16& input,
+bool RemoveChars(StringPiece16 input,
StringPiece16 remove_chars,
string16* output) {
- return ReplaceCharsT(input, remove_chars, StringPiece16(), output);
+ return internal::ReplaceCharsT(input, remove_chars, StringPiece16(), output);
}
-bool RemoveChars(const std::string& input,
+bool RemoveChars(StringPiece input,
StringPiece remove_chars,
std::string* output) {
- return ReplaceCharsT(input, remove_chars, StringPiece(), output);
-}
-
-template <typename Str>
-TrimPositions TrimStringT(BasicStringPiece<Str> input,
- BasicStringPiece<Str> trim_chars,
- TrimPositions positions,
- Str* output) {
- // Find the edges of leading/trailing whitespace as desired. Need to use
- // a StringPiece version of input to be able to call find* on it with the
- // StringPiece version of trim_chars (normally the trim_chars will be a
- // constant so avoid making a copy).
- const size_t last_char = input.length() - 1;
- const size_t first_good_char =
- (positions & TRIM_LEADING) ? input.find_first_not_of(trim_chars) : 0;
- const size_t last_good_char = (positions & TRIM_TRAILING)
- ? input.find_last_not_of(trim_chars)
- : last_char;
-
- // When the string was all trimmed, report that we stripped off characters
- // from whichever position the caller was interested in. For empty input, we
- // stripped no characters, but we still need to clear |output|.
- if (input.empty() || first_good_char == Str::npos ||
- last_good_char == Str::npos) {
- bool input_was_empty = input.empty(); // in case output == &input
- output->clear();
- return input_was_empty ? TRIM_NONE : positions;
- }
-
- // Trim.
- output->assign(input.data() + first_good_char,
- last_good_char - first_good_char + 1);
-
- // Return where we trimmed from.
- return static_cast<TrimPositions>(
- (first_good_char == 0 ? TRIM_NONE : TRIM_LEADING) |
- (last_good_char == last_char ? TRIM_NONE : TRIM_TRAILING));
+ return internal::ReplaceCharsT(input, remove_chars, StringPiece(), output);
}
bool TrimString(StringPiece16 input,
StringPiece16 trim_chars,
string16* output) {
- return TrimStringT(input, trim_chars, TRIM_ALL, output) != TRIM_NONE;
+ return internal::TrimStringT(input, trim_chars, TRIM_ALL, output) !=
+ TRIM_NONE;
}
bool TrimString(StringPiece input,
StringPiece trim_chars,
std::string* output) {
- return TrimStringT(input, trim_chars, TRIM_ALL, output) != TRIM_NONE;
-}
-
-template<typename Str>
-BasicStringPiece<Str> TrimStringPieceT(BasicStringPiece<Str> input,
- BasicStringPiece<Str> trim_chars,
- TrimPositions positions) {
- size_t begin = (positions & TRIM_LEADING) ?
- input.find_first_not_of(trim_chars) : 0;
- size_t end = (positions & TRIM_TRAILING) ?
- input.find_last_not_of(trim_chars) + 1 : input.size();
- return input.substr(begin, end - begin);
+ return internal::TrimStringT(input, trim_chars, TRIM_ALL, output) !=
+ TRIM_NONE;
}
StringPiece16 TrimString(StringPiece16 input,
StringPiece16 trim_chars,
TrimPositions positions) {
- return TrimStringPieceT(input, trim_chars, positions);
+ return internal::TrimStringPieceT(input, trim_chars, positions);
}
StringPiece TrimString(StringPiece input,
StringPiece trim_chars,
TrimPositions positions) {
- return TrimStringPieceT(input, trim_chars, positions);
+ return internal::TrimStringPieceT(input, trim_chars, positions);
}
void TruncateUTF8ToByteSize(const std::string& input,
@@ -357,74 +203,36 @@ void TruncateUTF8ToByteSize(const std::string& input,
TrimPositions TrimWhitespace(StringPiece16 input,
TrimPositions positions,
string16* output) {
- return TrimStringT(input, StringPiece16(kWhitespaceUTF16), positions, output);
+ return internal::TrimStringT(input, StringPiece16(kWhitespaceUTF16),
+ positions, output);
}
StringPiece16 TrimWhitespace(StringPiece16 input,
TrimPositions positions) {
- return TrimStringPieceT(input, StringPiece16(kWhitespaceUTF16), positions);
+ return internal::TrimStringPieceT(input, StringPiece16(kWhitespaceUTF16),
+ positions);
}
TrimPositions TrimWhitespaceASCII(StringPiece input,
TrimPositions positions,
std::string* output) {
- return TrimStringT(input, StringPiece(kWhitespaceASCII), positions, output);
+ return internal::TrimStringT(input, StringPiece(kWhitespaceASCII), positions,
+ output);
}
StringPiece TrimWhitespaceASCII(StringPiece input, TrimPositions positions) {
- return TrimStringPieceT(input, StringPiece(kWhitespaceASCII), positions);
+ return internal::TrimStringPieceT(input, StringPiece(kWhitespaceASCII),
+ positions);
}
-template<typename STR>
-STR CollapseWhitespaceT(const STR& text,
- bool trim_sequences_with_line_breaks) {
- STR result;
- result.resize(text.size());
-
- // Set flags to pretend we're already in a trimmed whitespace sequence, so we
- // will trim any leading whitespace.
- bool in_whitespace = true;
- bool already_trimmed = true;
-
- int chars_written = 0;
- for (typename STR::const_iterator i(text.begin()); i != text.end(); ++i) {
- if (IsUnicodeWhitespace(*i)) {
- if (!in_whitespace) {
- // Reduce all whitespace sequences to a single space.
- in_whitespace = true;
- result[chars_written++] = L' ';
- }
- if (trim_sequences_with_line_breaks && !already_trimmed &&
- ((*i == '\n') || (*i == '\r'))) {
- // Whitespace sequences containing CR or LF are eliminated entirely.
- already_trimmed = true;
- --chars_written;
- }
- } else {
- // Non-whitespace chracters are copied straight across.
- in_whitespace = false;
- already_trimmed = false;
- result[chars_written++] = *i;
- }
- }
-
- if (in_whitespace && !already_trimmed) {
- // Any trailing whitespace is eliminated.
- --chars_written;
- }
-
- result.resize(chars_written);
- return result;
-}
-
-string16 CollapseWhitespace(const string16& text,
+string16 CollapseWhitespace(StringPiece16 text,
bool trim_sequences_with_line_breaks) {
- return CollapseWhitespaceT(text, trim_sequences_with_line_breaks);
+ return internal::CollapseWhitespaceT(text, trim_sequences_with_line_breaks);
}
-std::string CollapseWhitespaceASCII(const std::string& text,
+std::string CollapseWhitespaceASCII(StringPiece text,
bool trim_sequences_with_line_breaks) {
- return CollapseWhitespaceT(text, trim_sequences_with_line_breaks);
+ return internal::CollapseWhitespaceT(text, trim_sequences_with_line_breaks);
}
bool ContainsOnlyChars(StringPiece input, StringPiece characters) {
@@ -435,198 +243,63 @@ bool ContainsOnlyChars(StringPiece16 input, StringPiece16 characters) {
return input.find_first_not_of(characters) == StringPiece16::npos;
}
-template <class Char>
-inline bool DoIsStringASCII(const Char* characters, size_t length) {
- if (!length)
- return true;
- constexpr MachineWord non_ascii_bit_mask = NonASCIIMask<Char>::value();
- MachineWord all_char_bits = 0;
- const Char* end = characters + length;
-
- // Prologue: align the input.
- while (!IsMachineWordAligned(characters) && characters < end)
- all_char_bits |= *characters++;
- if (all_char_bits & non_ascii_bit_mask)
- return false;
-
- // Compare the values of CPU word size.
- constexpr size_t chars_per_word = sizeof(MachineWord) / sizeof(Char);
- constexpr int batch_count = 16;
- while (characters <= end - batch_count * chars_per_word) {
- all_char_bits = 0;
- for (int i = 0; i < batch_count; ++i) {
- all_char_bits |= *(reinterpret_cast<const MachineWord*>(characters));
- characters += chars_per_word;
- }
- if (all_char_bits & non_ascii_bit_mask)
- return false;
- }
-
- // Process the remaining words.
- all_char_bits = 0;
- while (characters <= end - chars_per_word) {
- all_char_bits |= *(reinterpret_cast<const MachineWord*>(characters));
- characters += chars_per_word;
- }
-
- // Process the remaining bytes.
- while (characters < end)
- all_char_bits |= *characters++;
-
- return !(all_char_bits & non_ascii_bit_mask);
-}
bool IsStringASCII(StringPiece str) {
- return DoIsStringASCII(str.data(), str.length());
+ return internal::DoIsStringASCII(str.data(), str.length());
}
bool IsStringASCII(StringPiece16 str) {
- return DoIsStringASCII(str.data(), str.length());
+ return internal::DoIsStringASCII(str.data(), str.length());
}
#if defined(WCHAR_T_IS_UTF32)
bool IsStringASCII(WStringPiece str) {
- return DoIsStringASCII(str.data(), str.length());
+ return internal::DoIsStringASCII(str.data(), str.length());
}
#endif
-template <bool (*Validator)(uint32_t)>
-inline static bool DoIsStringUTF8(StringPiece str) {
- const char* src = str.data();
- int32_t src_len = static_cast<int32_t>(str.length());
- int32_t char_index = 0;
-
- while (char_index < src_len) {
- int32_t code_point;
- CBU8_NEXT(src, char_index, src_len, code_point);
- if (!Validator(code_point))
- return false;
- }
- return true;
-}
-
bool IsStringUTF8(StringPiece str) {
- return DoIsStringUTF8<IsValidCharacter>(str);
+ return internal::DoIsStringUTF8<IsValidCharacter>(str);
}
bool IsStringUTF8AllowingNoncharacters(StringPiece str) {
- return DoIsStringUTF8<IsValidCodepoint>(str);
-}
-
-// Implementation note: Normally this function will be called with a hardcoded
-// constant for the lowercase_ascii parameter. Constructing a StringPiece from
-// a C constant requires running strlen, so the result will be two passes
-// through the buffers, one to file the length of lowercase_ascii, and one to
-// compare each letter.
-//
-// This function could have taken a const char* to avoid this and only do one
-// pass through the string. But the strlen is faster than the case-insensitive
-// compares and lets us early-exit in the case that the strings are different
-// lengths (will often be the case for non-matches). So whether one approach or
-// the other will be faster depends on the case.
-//
-// The hardcoded strings are typically very short so it doesn't matter, and the
-// string piece gives additional flexibility for the caller (doesn't have to be
-// null terminated) so we choose the StringPiece route.
-template<typename Str>
-static inline bool DoLowerCaseEqualsASCII(BasicStringPiece<Str> str,
- StringPiece lowercase_ascii) {
- if (str.size() != lowercase_ascii.size())
- return false;
- for (size_t i = 0; i < str.size(); i++) {
- if (ToLowerASCII(str[i]) != lowercase_ascii[i])
- return false;
- }
- return true;
+ return internal::DoIsStringUTF8<IsValidCodepoint>(str);
}
bool LowerCaseEqualsASCII(StringPiece str, StringPiece lowercase_ascii) {
- return DoLowerCaseEqualsASCII<std::string>(str, lowercase_ascii);
+ return internal::DoLowerCaseEqualsASCII(str, lowercase_ascii);
}
bool LowerCaseEqualsASCII(StringPiece16 str, StringPiece lowercase_ascii) {
- return DoLowerCaseEqualsASCII<string16>(str, lowercase_ascii);
+ return internal::DoLowerCaseEqualsASCII(str, lowercase_ascii);
}
bool EqualsASCII(StringPiece16 str, StringPiece ascii) {
- if (str.length() != ascii.length())
- return false;
- return std::equal(ascii.begin(), ascii.end(), str.begin());
-}
-
-template<typename Str>
-bool StartsWithT(BasicStringPiece<Str> str,
- BasicStringPiece<Str> search_for,
- CompareCase case_sensitivity) {
- if (search_for.size() > str.size())
- return false;
-
- BasicStringPiece<Str> source = str.substr(0, search_for.size());
-
- switch (case_sensitivity) {
- case CompareCase::SENSITIVE:
- return source == search_for;
-
- case CompareCase::INSENSITIVE_ASCII:
- return std::equal(
- search_for.begin(), search_for.end(),
- source.begin(),
- CaseInsensitiveCompareASCII<typename Str::value_type>());
-
- default:
- NOTREACHED();
- return false;
- }
+ return std::equal(ascii.begin(), ascii.end(), str.begin(), str.end());
}
bool StartsWith(StringPiece str,
StringPiece search_for,
CompareCase case_sensitivity) {
- return StartsWithT<std::string>(str, search_for, case_sensitivity);
+ return internal::StartsWithT(str, search_for, case_sensitivity);
}
bool StartsWith(StringPiece16 str,
StringPiece16 search_for,
CompareCase case_sensitivity) {
- return StartsWithT<string16>(str, search_for, case_sensitivity);
-}
-
-template <typename Str>
-bool EndsWithT(BasicStringPiece<Str> str,
- BasicStringPiece<Str> search_for,
- CompareCase case_sensitivity) {
- if (search_for.size() > str.size())
- return false;
-
- BasicStringPiece<Str> source = str.substr(str.size() - search_for.size(),
- search_for.size());
-
- switch (case_sensitivity) {
- case CompareCase::SENSITIVE:
- return source == search_for;
-
- case CompareCase::INSENSITIVE_ASCII:
- return std::equal(
- source.begin(), source.end(),
- search_for.begin(),
- CaseInsensitiveCompareASCII<typename Str::value_type>());
-
- default:
- NOTREACHED();
- return false;
- }
+ return internal::StartsWithT(str, search_for, case_sensitivity);
}
bool EndsWith(StringPiece str,
StringPiece search_for,
CompareCase case_sensitivity) {
- return EndsWithT<std::string>(str, search_for, case_sensitivity);
+ return internal::EndsWithT(str, search_for, case_sensitivity);
}
bool EndsWith(StringPiece16 str,
StringPiece16 search_for,
CompareCase case_sensitivity) {
- return EndsWithT<string16>(str, search_for, case_sensitivity);
+ return internal::EndsWithT(str, search_for, case_sensitivity);
}
char HexDigitToInt(wchar_t c) {
@@ -680,384 +353,93 @@ string16 FormatBytesUnlocalized(int64_t bytes) {
return ASCIIToUTF16(buf);
}
-// A Matcher for DoReplaceMatchesAfterOffset() that matches substrings.
-template <class StringType>
-struct SubstringMatcher {
- BasicStringPiece<StringType> find_this;
-
- size_t Find(const StringType& input, size_t pos) {
- return input.find(find_this.data(), pos, find_this.length());
- }
- size_t MatchSize() { return find_this.length(); }
-};
-
-// A Matcher for DoReplaceMatchesAfterOffset() that matches single characters.
-template <class StringType>
-struct CharacterMatcher {
- BasicStringPiece<StringType> find_any_of_these;
-
- size_t Find(const StringType& input, size_t pos) {
- return input.find_first_of(find_any_of_these.data(), pos,
- find_any_of_these.length());
- }
- constexpr size_t MatchSize() { return 1; }
-};
-
-enum class ReplaceType { REPLACE_ALL, REPLACE_FIRST };
-
-// Runs in O(n) time in the length of |str|, and transforms the string without
-// reallocating when possible. Returns |true| if any matches were found.
-//
-// This is parameterized on a |Matcher| traits type, so that it can be the
-// implementation for both ReplaceChars() and ReplaceSubstringsAfterOffset().
-template <class StringType, class Matcher>
-bool DoReplaceMatchesAfterOffset(StringType* str,
- size_t initial_offset,
- Matcher matcher,
- BasicStringPiece<StringType> replace_with,
- ReplaceType replace_type) {
- using CharTraits = typename StringType::traits_type;
-
- const size_t find_length = matcher.MatchSize();
- if (!find_length)
- return false;
-
- // If the find string doesn't appear, there's nothing to do.
- size_t first_match = matcher.Find(*str, initial_offset);
- if (first_match == StringType::npos)
- return false;
-
- // If we're only replacing one instance, there's no need to do anything
- // complicated.
- const size_t replace_length = replace_with.length();
- if (replace_type == ReplaceType::REPLACE_FIRST) {
- str->replace(first_match, find_length, replace_with.data(), replace_length);
- return true;
- }
-
- // If the find and replace strings are the same length, we can simply use
- // replace() on each instance, and finish the entire operation in O(n) time.
- if (find_length == replace_length) {
- auto* buffer = &((*str)[0]);
- for (size_t offset = first_match; offset != StringType::npos;
- offset = matcher.Find(*str, offset + replace_length)) {
- CharTraits::copy(buffer + offset, replace_with.data(), replace_length);
- }
- return true;
- }
-
- // Since the find and replace strings aren't the same length, a loop like the
- // one above would be O(n^2) in the worst case, as replace() will shift the
- // entire remaining string each time. We need to be more clever to keep things
- // O(n).
- //
- // When the string is being shortened, it's possible to just shift the matches
- // down in one pass while finding, and truncate the length at the end of the
- // search.
- //
- // If the string is being lengthened, more work is required. The strategy used
- // here is to make two find() passes through the string. The first pass counts
- // the number of matches to determine the new size. The second pass will
- // either construct the new string into a new buffer (if the existing buffer
- // lacked capacity), or else -- if there is room -- create a region of scratch
- // space after |first_match| by shifting the tail of the string to a higher
- // index, and doing in-place moves from the tail to lower indices thereafter.
- size_t str_length = str->length();
- size_t expansion = 0;
- if (replace_length > find_length) {
- // This operation lengthens the string; determine the new length by counting
- // matches.
- const size_t expansion_per_match = (replace_length - find_length);
- size_t num_matches = 0;
- for (size_t match = first_match; match != StringType::npos;
- match = matcher.Find(*str, match + find_length)) {
- expansion += expansion_per_match;
- ++num_matches;
- }
- const size_t final_length = str_length + expansion;
-
- if (str->capacity() < final_length) {
- // If we'd have to allocate a new buffer to grow the string, build the
- // result directly into the new allocation via append().
- StringType src(str->get_allocator());
- str->swap(src);
- str->reserve(final_length);
-
- size_t pos = 0;
- for (size_t match = first_match;; match = matcher.Find(src, pos)) {
- str->append(src, pos, match - pos);
- str->append(replace_with.data(), replace_length);
- pos = match + find_length;
-
- // A mid-loop test/break enables skipping the final Find() call; the
- // number of matches is known, so don't search past the last one.
- if (!--num_matches)
- break;
- }
-
- // Handle substring after the final match.
- str->append(src, pos, str_length - pos);
- return true;
- }
-
- // Prepare for the copy/move loop below -- expand the string to its final
- // size by shifting the data after the first match to the end of the resized
- // string.
- size_t shift_src = first_match + find_length;
- size_t shift_dst = shift_src + expansion;
-
- // Big |expansion| factors (relative to |str_length|) require padding up to
- // |shift_dst|.
- if (shift_dst > str_length)
- str->resize(shift_dst);
-
- str->replace(shift_dst, str_length - shift_src, *str, shift_src,
- str_length - shift_src);
- str_length = final_length;
- }
-
- // We can alternate replacement and move operations. This won't overwrite the
- // unsearched region of the string so long as |write_offset| <= |read_offset|;
- // that condition is always satisfied because:
- //
- // (a) If the string is being shortened, |expansion| is zero and
- // |write_offset| grows slower than |read_offset|.
- //
- // (b) If the string is being lengthened, |write_offset| grows faster than
- // |read_offset|, but |expansion| is big enough so that |write_offset|
- // will only catch up to |read_offset| at the point of the last match.
- auto* buffer = &((*str)[0]);
- size_t write_offset = first_match;
- size_t read_offset = first_match + expansion;
- do {
- if (replace_length) {
- CharTraits::copy(buffer + write_offset, replace_with.data(),
- replace_length);
- write_offset += replace_length;
- }
- read_offset += find_length;
-
- // min() clamps StringType::npos (the largest unsigned value) to str_length.
- size_t match = std::min(matcher.Find(*str, read_offset), str_length);
-
- size_t length = match - read_offset;
- if (length) {
- CharTraits::move(buffer + write_offset, buffer + read_offset, length);
- write_offset += length;
- read_offset += length;
- }
- } while (read_offset < str_length);
-
- // If we're shortening the string, truncate it now.
- str->resize(write_offset);
- return true;
-}
-
-template <class StringType>
-bool ReplaceCharsT(const StringType& input,
- BasicStringPiece<StringType> find_any_of_these,
- BasicStringPiece<StringType> replace_with,
- StringType* output) {
- // Commonly, this is called with output and input being the same string; in
- // that case, this assignment is inexpensive.
- *output = input;
-
- return DoReplaceMatchesAfterOffset(
- output, 0, CharacterMatcher<StringType>{find_any_of_these}, replace_with,
- ReplaceType::REPLACE_ALL);
-}
-
void ReplaceFirstSubstringAfterOffset(string16* str,
size_t start_offset,
StringPiece16 find_this,
StringPiece16 replace_with) {
- DoReplaceMatchesAfterOffset(str, start_offset,
- SubstringMatcher<string16>{find_this},
- replace_with, ReplaceType::REPLACE_FIRST);
+ internal::DoReplaceMatchesAfterOffset(
+ str, start_offset, internal::SubstringMatcher<string16>{find_this},
+ replace_with, internal::ReplaceType::REPLACE_FIRST);
}
void ReplaceFirstSubstringAfterOffset(std::string* str,
size_t start_offset,
StringPiece find_this,
StringPiece replace_with) {
- DoReplaceMatchesAfterOffset(str, start_offset,
- SubstringMatcher<std::string>{find_this},
- replace_with, ReplaceType::REPLACE_FIRST);
+ internal::DoReplaceMatchesAfterOffset(
+ str, start_offset, internal::SubstringMatcher<std::string>{find_this},
+ replace_with, internal::ReplaceType::REPLACE_FIRST);
}
void ReplaceSubstringsAfterOffset(string16* str,
size_t start_offset,
StringPiece16 find_this,
StringPiece16 replace_with) {
- DoReplaceMatchesAfterOffset(str, start_offset,
- SubstringMatcher<string16>{find_this},
- replace_with, ReplaceType::REPLACE_ALL);
+ internal::DoReplaceMatchesAfterOffset(
+ str, start_offset, internal::SubstringMatcher<string16>{find_this},
+ replace_with, internal::ReplaceType::REPLACE_ALL);
}
void ReplaceSubstringsAfterOffset(std::string* str,
size_t start_offset,
StringPiece find_this,
StringPiece replace_with) {
- DoReplaceMatchesAfterOffset(str, start_offset,
- SubstringMatcher<std::string>{find_this},
- replace_with, ReplaceType::REPLACE_ALL);
-}
-
-template <class string_type>
-inline typename string_type::value_type* WriteIntoT(string_type* str,
- size_t length_with_null) {
- DCHECK_GE(length_with_null, 1u);
- str->reserve(length_with_null);
- str->resize(length_with_null - 1);
- return &((*str)[0]);
+ internal::DoReplaceMatchesAfterOffset(
+ str, start_offset, internal::SubstringMatcher<std::string>{find_this},
+ replace_with, internal::ReplaceType::REPLACE_ALL);
}
char* WriteInto(std::string* str, size_t length_with_null) {
- return WriteIntoT(str, length_with_null);
+ return internal::WriteIntoT(str, length_with_null);
}
char16* WriteInto(string16* str, size_t length_with_null) {
- return WriteIntoT(str, length_with_null);
+ return internal::WriteIntoT(str, length_with_null);
}
-// Generic version for all JoinString overloads. |list_type| must be a sequence
-// (std::vector or std::initializer_list) of strings/StringPieces (std::string,
-// string16, StringPiece or StringPiece16). |string_type| is either std::string
-// or string16.
-template <typename list_type, typename string_type>
-static string_type JoinStringT(const list_type& parts,
- BasicStringPiece<string_type> sep) {
- if (base::empty(parts))
- return string_type();
-
- // Pre-allocate the eventual size of the string. Start with the size of all of
- // the separators (note that this *assumes* parts.size() > 0).
- size_t total_size = (parts.size() - 1) * sep.size();
- for (const auto& part : parts)
- total_size += part.size();
- string_type result;
- result.reserve(total_size);
-
- auto iter = parts.begin();
- DCHECK(iter != parts.end());
- result.append(iter->data(), iter->size());
- ++iter;
-
- for (; iter != parts.end(); ++iter) {
- result.append(sep.data(), sep.size());
- result.append(iter->data(), iter->size());
- }
-
- // Sanity-check that we pre-allocated correctly.
- DCHECK_EQ(total_size, result.size());
-
- return result;
+std::string JoinString(span<const std::string> parts, StringPiece separator) {
+ return internal::JoinStringT(parts, separator);
}
-std::string JoinString(const std::vector<std::string>& parts,
- StringPiece separator) {
- return JoinStringT(parts, separator);
+string16 JoinString(span<const string16> parts, StringPiece16 separator) {
+ return internal::JoinStringT(parts, separator);
}
-string16 JoinString(const std::vector<string16>& parts,
- StringPiece16 separator) {
- return JoinStringT(parts, separator);
-}
-
-std::string JoinString(const std::vector<StringPiece>& parts,
- StringPiece separator) {
- return JoinStringT(parts, separator);
+std::string JoinString(span<const StringPiece> parts, StringPiece separator) {
+ return internal::JoinStringT(parts, separator);
}
-string16 JoinString(const std::vector<StringPiece16>& parts,
- StringPiece16 separator) {
- return JoinStringT(parts, separator);
+string16 JoinString(span<const StringPiece16> parts, StringPiece16 separator) {
+ return internal::JoinStringT(parts, separator);
}
std::string JoinString(std::initializer_list<StringPiece> parts,
StringPiece separator) {
- return JoinStringT(parts, separator);
+ return internal::JoinStringT(parts, separator);
}
string16 JoinString(std::initializer_list<StringPiece16> parts,
StringPiece16 separator) {
- return JoinStringT(parts, separator);
+ return internal::JoinStringT(parts, separator);
}
-template<class FormatStringType, class OutStringType>
-OutStringType DoReplaceStringPlaceholders(
- const FormatStringType& format_string,
- const std::vector<OutStringType>& subst,
- std::vector<size_t>* offsets) {
- size_t substitutions = subst.size();
- DCHECK_LT(substitutions, 10U);
-
- size_t sub_length = 0;
- for (const auto& cur : subst)
- sub_length += cur.length();
-
- OutStringType formatted;
- formatted.reserve(format_string.length() + sub_length);
-
- std::vector<ReplacementOffset> r_offsets;
- for (auto i = format_string.begin(); i != format_string.end(); ++i) {
- if ('$' == *i) {
- if (i + 1 != format_string.end()) {
- ++i;
- if ('$' == *i) {
- while (i != format_string.end() && '$' == *i) {
- formatted.push_back('$');
- ++i;
- }
- --i;
- } else {
- if (*i < '1' || *i > '9') {
- DLOG(ERROR) << "Invalid placeholder: $" << *i;
- continue;
- }
- uintptr_t index = *i - '1';
- if (offsets) {
- ReplacementOffset r_offset(index,
- static_cast<int>(formatted.size()));
- r_offsets.insert(
- std::upper_bound(r_offsets.begin(), r_offsets.end(), r_offset,
- &CompareParameter),
- r_offset);
- }
- if (index < substitutions)
- formatted.append(subst.at(index));
- }
- }
- } else {
- formatted.push_back(*i);
- }
- }
- if (offsets) {
- for (const auto& cur : r_offsets)
- offsets->push_back(cur.offset);
- }
- return formatted;
-}
-
-string16 ReplaceStringPlaceholders(const string16& format_string,
+string16 ReplaceStringPlaceholders(StringPiece16 format_string,
const std::vector<string16>& subst,
std::vector<size_t>* offsets) {
- return DoReplaceStringPlaceholders(format_string, subst, offsets);
+ return internal::DoReplaceStringPlaceholders(format_string, subst, offsets);
}
std::string ReplaceStringPlaceholders(StringPiece format_string,
const std::vector<std::string>& subst,
std::vector<size_t>* offsets) {
- return DoReplaceStringPlaceholders(format_string, subst, offsets);
+ return internal::DoReplaceStringPlaceholders(format_string, subst, offsets);
}
string16 ReplaceStringPlaceholders(const string16& format_string,
const string16& a,
size_t* offset) {
std::vector<size_t> offsets;
- std::vector<string16> subst;
- subst.push_back(a);
- string16 result = ReplaceStringPlaceholders(format_string, subst, &offsets);
+ string16 result = ReplaceStringPlaceholders(format_string, {a}, &offsets);
DCHECK_EQ(1U, offsets.size());
if (offset)
@@ -1065,65 +447,11 @@ string16 ReplaceStringPlaceholders(const string16& format_string,
return result;
}
-#if defined(OS_WIN) && defined(BASE_STRING16_IS_STD_U16STRING)
-
-TrimPositions TrimWhitespace(WStringPiece input,
- TrimPositions positions,
- std::wstring* output) {
- return TrimStringT(input, WStringPiece(kWhitespaceWide), positions, output);
-}
-
-WStringPiece TrimWhitespace(WStringPiece input, TrimPositions positions) {
- return TrimStringPieceT(input, WStringPiece(kWhitespaceWide), positions);
-}
-
-bool TrimString(WStringPiece input,
- WStringPiece trim_chars,
- std::wstring* output) {
- return TrimStringT(input, trim_chars, TRIM_ALL, output) != TRIM_NONE;
-}
-
-WStringPiece TrimString(WStringPiece input,
- WStringPiece trim_chars,
- TrimPositions positions) {
- return TrimStringPieceT(input, trim_chars, positions);
-}
-
-wchar_t* WriteInto(std::wstring* str, size_t length_with_null) {
- return WriteIntoT(str, length_with_null);
-}
-
-#endif
-
-// The following code is compatible with the OpenBSD lcpy interface. See:
-// http://www.gratisoft.us/todd/papers/strlcpy.html
-// ftp://ftp.openbsd.org/pub/OpenBSD/src/lib/libc/string/{wcs,str}lcpy.c
-
-namespace {
-
-template <typename CHAR>
-size_t lcpyT(CHAR* dst, const CHAR* src, size_t dst_size) {
- for (size_t i = 0; i < dst_size; ++i) {
- if ((dst[i] = src[i]) == 0) // We hit and copied the terminating NULL.
- return i;
- }
-
- // We were left off at dst_size. We over copied 1 byte. Null terminate.
- if (dst_size != 0)
- dst[dst_size - 1] = 0;
-
- // Count the rest of the |src|, and return it's length in characters.
- while (src[dst_size]) ++dst_size;
- return dst_size;
-}
-
-} // namespace
-
size_t strlcpy(char* dst, const char* src, size_t dst_size) {
- return lcpyT<char>(dst, src, dst_size);
+ return internal::lcpyT(dst, src, dst_size);
}
size_t wcslcpy(wchar_t* dst, const wchar_t* src, size_t dst_size) {
- return lcpyT<wchar_t>(dst, src, dst_size);
+ return internal::lcpyT(dst, src, dst_size);
}
} // namespace base
diff --git a/chromium/base/strings/string_util.h b/chromium/base/strings/string_util.h
index f9f5e10ade9..e5a0487cff1 100644
--- a/chromium/base/strings/string_util.h
+++ b/chromium/base/strings/string_util.h
@@ -14,10 +14,12 @@
#include <initializer_list>
#include <string>
+#include <type_traits>
#include <vector>
#include "base/base_export.h"
#include "base/compiler_specific.h"
+#include "base/containers/span.h"
#include "base/stl_util.h"
#include "base/strings/string16.h"
#include "base/strings/string_piece.h" // For implicit conversions.
@@ -84,19 +86,17 @@ BASE_EXPORT bool IsWprintfFormatPortable(const wchar_t* format);
// ASCII-specific tolower. The standard library's tolower is locale sensitive,
// so we don't want to use it here.
-inline char ToLowerASCII(char c) {
- return (c >= 'A' && c <= 'Z') ? (c + ('a' - 'A')) : c;
-}
-inline char16 ToLowerASCII(char16 c) {
+template <typename CharT,
+ typename = std::enable_if_t<std::is_integral<CharT>::value>>
+CharT ToLowerASCII(CharT c) {
return (c >= 'A' && c <= 'Z') ? (c + ('a' - 'A')) : c;
}
// ASCII-specific toupper. The standard library's toupper is locale sensitive,
// so we don't want to use it here.
-inline char ToUpperASCII(char c) {
- return (c >= 'a' && c <= 'z') ? (c + ('A' - 'a')) : c;
-}
-inline char16 ToUpperASCII(char16 c) {
+template <typename CharT,
+ typename = std::enable_if_t<std::is_integral<CharT>::value>>
+CharT ToUpperASCII(CharT c) {
return (c >= 'a' && c <= 'z') ? (c + ('A' - 'a')) : c;
}
@@ -170,10 +170,10 @@ BASE_EXPORT extern const char kUtf8ByteOrderMark[];
// Removes characters in |remove_chars| from anywhere in |input|. Returns true
// if any characters were removed. |remove_chars| must be null-terminated.
// NOTE: Safe to use the same variable for both |input| and |output|.
-BASE_EXPORT bool RemoveChars(const string16& input,
+BASE_EXPORT bool RemoveChars(StringPiece16 input,
StringPiece16 remove_chars,
string16* output);
-BASE_EXPORT bool RemoveChars(const std::string& input,
+BASE_EXPORT bool RemoveChars(StringPiece input,
StringPiece remove_chars,
std::string* output);
@@ -182,11 +182,11 @@ BASE_EXPORT bool RemoveChars(const std::string& input,
// the |replace_with| string. Returns true if any characters were replaced.
// |replace_chars| must be null-terminated.
// NOTE: Safe to use the same variable for both |input| and |output|.
-BASE_EXPORT bool ReplaceChars(const string16& input,
+BASE_EXPORT bool ReplaceChars(StringPiece16 input,
StringPiece16 replace_chars,
StringPiece16 replace_with,
string16* output);
-BASE_EXPORT bool ReplaceChars(const std::string& input,
+BASE_EXPORT bool ReplaceChars(StringPiece input,
StringPiece replace_chars,
StringPiece replace_with,
std::string* output);
@@ -226,69 +226,6 @@ BASE_EXPORT void TruncateUTF8ToByteSize(const std::string& input,
const size_t byte_size,
std::string* output);
-#if defined(WCHAR_T_IS_UTF16)
-// Utility functions to access the underlying string buffer as a wide char
-// pointer.
-//
-// Note: These functions violate strict aliasing when char16 and wchar_t are
-// unrelated types. We thus pass -fno-strict-aliasing to the compiler on
-// non-Windows platforms [1], and rely on it being off in Clang's CL mode [2].
-//
-// [1] https://crrev.com/b9a0976622/build/config/compiler/BUILD.gn#244
-// [2]
-// https://github.com/llvm/llvm-project/blob/1e28a66/clang/lib/Driver/ToolChains/Clang.cpp#L3949
-inline wchar_t* as_writable_wcstr(char16* str) {
- return reinterpret_cast<wchar_t*>(str);
-}
-
-inline wchar_t* as_writable_wcstr(string16& str) {
- return reinterpret_cast<wchar_t*>(data(str));
-}
-
-inline const wchar_t* as_wcstr(const char16* str) {
- return reinterpret_cast<const wchar_t*>(str);
-}
-
-inline const wchar_t* as_wcstr(StringPiece16 str) {
- return reinterpret_cast<const wchar_t*>(str.data());
-}
-
-// Utility functions to access the underlying string buffer as a char16 pointer.
-inline char16* as_writable_u16cstr(wchar_t* str) {
- return reinterpret_cast<char16*>(str);
-}
-
-inline char16* as_writable_u16cstr(std::wstring& str) {
- return reinterpret_cast<char16*>(data(str));
-}
-
-inline const char16* as_u16cstr(const wchar_t* str) {
- return reinterpret_cast<const char16*>(str);
-}
-
-inline const char16* as_u16cstr(WStringPiece str) {
- return reinterpret_cast<const char16*>(str.data());
-}
-
-// Utility functions to convert between base::WStringPiece and
-// base::StringPiece16.
-inline WStringPiece AsWStringPiece(StringPiece16 str) {
- return WStringPiece(as_wcstr(str.data()), str.size());
-}
-
-inline StringPiece16 AsStringPiece16(WStringPiece str) {
- return StringPiece16(as_u16cstr(str.data()), str.size());
-}
-
-inline std::wstring AsWString(StringPiece16 str) {
- return std::wstring(as_wcstr(str.data()), str.size());
-}
-
-inline string16 AsString16(WStringPiece str) {
- return string16(as_u16cstr(str.data()), str.size());
-}
-#endif // defined(WCHAR_T_IS_UTF16)
-
// Trims any whitespace from either end of the input string.
//
// The StringPiece versions return a substring referencing the input buffer.
@@ -315,11 +252,10 @@ BASE_EXPORT StringPiece TrimWhitespaceASCII(StringPiece input,
// (2) If |trim_sequences_with_line_breaks| is true, any other whitespace
// sequences containing a CR or LF are trimmed.
// (3) All other whitespace sequences are converted to single spaces.
-BASE_EXPORT string16 CollapseWhitespace(
- const string16& text,
- bool trim_sequences_with_line_breaks);
+BASE_EXPORT string16 CollapseWhitespace(StringPiece16 text,
+ bool trim_sequences_with_line_breaks);
BASE_EXPORT std::string CollapseWhitespaceASCII(
- const std::string& text,
+ StringPiece text,
bool trim_sequences_with_line_breaks);
// Returns true if |input| is empty or contains only characters found in
@@ -347,6 +283,7 @@ BASE_EXPORT bool IsStringUTF8AllowingNoncharacters(StringPiece str);
// does not leave early if it is not the case.
BASE_EXPORT bool IsStringASCII(StringPiece str);
BASE_EXPORT bool IsStringASCII(StringPiece16 str);
+
#if defined(WCHAR_T_IS_UTF32)
BASE_EXPORT bool IsStringASCII(WStringPiece str);
#endif
@@ -488,8 +425,8 @@ BASE_EXPORT void ReplaceSubstringsAfterOffset(
BASE_EXPORT char* WriteInto(std::string* str, size_t length_with_null);
BASE_EXPORT char16* WriteInto(string16* str, size_t length_with_null);
-// Joins a vector or list of strings into a single string, inserting |separator|
-// (which may be empty) in between all elements.
+// Joins a list of strings into a single string, inserting |separator| (which
+// may be empty) in between all elements.
//
// Note this is inverse of SplitString()/SplitStringPiece() defined in
// string_split.h.
@@ -501,13 +438,13 @@ BASE_EXPORT char16* WriteInto(string16* str, size_t length_with_null);
// copies of those strings are created until the final join operation.
//
// Use StrCat (in base/strings/strcat.h) if you don't need a separator.
-BASE_EXPORT std::string JoinString(const std::vector<std::string>& parts,
+BASE_EXPORT std::string JoinString(span<const std::string> parts,
StringPiece separator);
-BASE_EXPORT string16 JoinString(const std::vector<string16>& parts,
+BASE_EXPORT string16 JoinString(span<const string16> parts,
StringPiece16 separator);
-BASE_EXPORT std::string JoinString(const std::vector<StringPiece>& parts,
+BASE_EXPORT std::string JoinString(span<const StringPiece> parts,
StringPiece separator);
-BASE_EXPORT string16 JoinString(const std::vector<StringPiece16>& parts,
+BASE_EXPORT string16 JoinString(span<const StringPiece16> parts,
StringPiece16 separator);
// Explicit initializer_list overloads are required to break ambiguity when used
// with a literal initializer list (otherwise the compiler would not be able to
@@ -521,10 +458,10 @@ BASE_EXPORT string16 JoinString(std::initializer_list<StringPiece16> parts,
// Additionally, any number of consecutive '$' characters is replaced by that
// number less one. Eg $$->$, $$$->$$, etc. The offsets parameter here can be
// NULL. This only allows you to use up to nine replacements.
-BASE_EXPORT string16 ReplaceStringPlaceholders(
- const string16& format_string,
- const std::vector<string16>& subst,
- std::vector<size_t>* offsets);
+BASE_EXPORT string16
+ReplaceStringPlaceholders(StringPiece16 format_string,
+ const std::vector<string16>& subst,
+ std::vector<size_t>* offsets);
BASE_EXPORT std::string ReplaceStringPlaceholders(
StringPiece format_string,
@@ -536,25 +473,6 @@ BASE_EXPORT string16 ReplaceStringPlaceholders(const string16& format_string,
const string16& a,
size_t* offset);
-#if defined(OS_WIN) && defined(BASE_STRING16_IS_STD_U16STRING)
-BASE_EXPORT TrimPositions TrimWhitespace(WStringPiece input,
- TrimPositions positions,
- std::wstring* output);
-
-BASE_EXPORT WStringPiece TrimWhitespace(WStringPiece input,
- TrimPositions positions);
-
-BASE_EXPORT bool TrimString(WStringPiece input,
- WStringPiece trim_chars,
- std::wstring* output);
-
-BASE_EXPORT WStringPiece TrimString(WStringPiece input,
- WStringPiece trim_chars,
- TrimPositions positions);
-
-BASE_EXPORT wchar_t* WriteInto(std::wstring* str, size_t length_with_null);
-#endif
-
} // namespace base
#if defined(OS_WIN)
diff --git a/chromium/base/strings/string_util_internal.h b/chromium/base/strings/string_util_internal.h
new file mode 100644
index 00000000000..da3fb07cc30
--- /dev/null
+++ b/chromium/base/strings/string_util_internal.h
@@ -0,0 +1,625 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_STRINGS_STRING_UTIL_INTERNAL_H_
+#define BASE_STRINGS_STRING_UTIL_INTERNAL_H_
+
+#include "base/logging.h"
+#include "base/notreached.h"
+#include "base/strings/string_piece.h"
+#include "base/third_party/icu/icu_utf.h"
+
+namespace base {
+
+namespace internal {
+
+// Used by ReplaceStringPlaceholders to track the position in the string of
+// replaced parameters.
+struct ReplacementOffset {
+ ReplacementOffset(uintptr_t parameter, size_t offset)
+ : parameter(parameter), offset(offset) {}
+
+ // Index of the parameter.
+ uintptr_t parameter;
+
+ // Starting position in the string.
+ size_t offset;
+};
+
+static bool CompareParameter(const ReplacementOffset& elem1,
+ const ReplacementOffset& elem2) {
+ return elem1.parameter < elem2.parameter;
+}
+
+// Assuming that a pointer is the size of a "machine word", then
+// uintptr_t is an integer type that is also a machine word.
+using MachineWord = uintptr_t;
+
+inline bool IsMachineWordAligned(const void* pointer) {
+ return !(reinterpret_cast<MachineWord>(pointer) & (sizeof(MachineWord) - 1));
+}
+
+template <typename StringType>
+StringType ToLowerASCIIImpl(BasicStringPiece<StringType> str) {
+ StringType ret;
+ ret.reserve(str.size());
+ for (size_t i = 0; i < str.size(); i++)
+ ret.push_back(ToLowerASCII(str[i]));
+ return ret;
+}
+
+template <typename StringType>
+StringType ToUpperASCIIImpl(BasicStringPiece<StringType> str) {
+ StringType ret;
+ ret.reserve(str.size());
+ for (size_t i = 0; i < str.size(); i++)
+ ret.push_back(ToUpperASCII(str[i]));
+ return ret;
+}
+
+template <class StringType>
+int CompareCaseInsensitiveASCIIT(BasicStringPiece<StringType> a,
+ BasicStringPiece<StringType> b) {
+ // Find the first characters that aren't equal and compare them. If the end
+ // of one of the strings is found before a nonequal character, the lengths
+ // of the strings are compared.
+ size_t i = 0;
+ while (i < a.length() && i < b.length()) {
+ typename StringType::value_type lower_a = ToLowerASCII(a[i]);
+ typename StringType::value_type lower_b = ToLowerASCII(b[i]);
+ if (lower_a < lower_b)
+ return -1;
+ if (lower_a > lower_b)
+ return 1;
+ i++;
+ }
+
+ // End of one string hit before finding a different character. Expect the
+ // common case to be "strings equal" at this point so check that first.
+ if (a.length() == b.length())
+ return 0;
+
+ if (a.length() < b.length())
+ return -1;
+ return 1;
+}
+
+template <typename Str>
+TrimPositions TrimStringT(BasicStringPiece<Str> input,
+ BasicStringPiece<Str> trim_chars,
+ TrimPositions positions,
+ Str* output) {
+ // Find the edges of leading/trailing whitespace as desired. Need to use
+ // a StringPiece version of input to be able to call find* on it with the
+ // StringPiece version of trim_chars (normally the trim_chars will be a
+ // constant so avoid making a copy).
+ const size_t last_char = input.length() - 1;
+ const size_t first_good_char =
+ (positions & TRIM_LEADING) ? input.find_first_not_of(trim_chars) : 0;
+ const size_t last_good_char = (positions & TRIM_TRAILING)
+ ? input.find_last_not_of(trim_chars)
+ : last_char;
+
+ // When the string was all trimmed, report that we stripped off characters
+ // from whichever position the caller was interested in. For empty input, we
+ // stripped no characters, but we still need to clear |output|.
+ if (input.empty() || first_good_char == Str::npos ||
+ last_good_char == Str::npos) {
+ bool input_was_empty = input.empty(); // in case output == &input
+ output->clear();
+ return input_was_empty ? TRIM_NONE : positions;
+ }
+
+ // Trim.
+ output->assign(input.data() + first_good_char,
+ last_good_char - first_good_char + 1);
+
+ // Return where we trimmed from.
+ return static_cast<TrimPositions>(
+ (first_good_char == 0 ? TRIM_NONE : TRIM_LEADING) |
+ (last_good_char == last_char ? TRIM_NONE : TRIM_TRAILING));
+}
+
+template <typename Str>
+BasicStringPiece<Str> TrimStringPieceT(BasicStringPiece<Str> input,
+ BasicStringPiece<Str> trim_chars,
+ TrimPositions positions) {
+ size_t begin =
+ (positions & TRIM_LEADING) ? input.find_first_not_of(trim_chars) : 0;
+ size_t end = (positions & TRIM_TRAILING)
+ ? input.find_last_not_of(trim_chars) + 1
+ : input.size();
+ return input.substr(begin, end - begin);
+}
+
+template <typename STR>
+STR CollapseWhitespaceT(BasicStringPiece<STR> text,
+ bool trim_sequences_with_line_breaks) {
+ STR result;
+ result.resize(text.size());
+
+ // Set flags to pretend we're already in a trimmed whitespace sequence, so we
+ // will trim any leading whitespace.
+ bool in_whitespace = true;
+ bool already_trimmed = true;
+
+ int chars_written = 0;
+ for (auto c : text) {
+ if (IsUnicodeWhitespace(c)) {
+ if (!in_whitespace) {
+ // Reduce all whitespace sequences to a single space.
+ in_whitespace = true;
+ result[chars_written++] = L' ';
+ }
+ if (trim_sequences_with_line_breaks && !already_trimmed &&
+ ((c == '\n') || (c == '\r'))) {
+ // Whitespace sequences containing CR or LF are eliminated entirely.
+ already_trimmed = true;
+ --chars_written;
+ }
+ } else {
+ // Non-whitespace characters are copied straight across.
+ in_whitespace = false;
+ already_trimmed = false;
+ result[chars_written++] = c;
+ }
+ }
+
+ if (in_whitespace && !already_trimmed) {
+ // Any trailing whitespace is eliminated.
+ --chars_written;
+ }
+
+ result.resize(chars_written);
+ return result;
+}
+
+template <class Char>
+bool DoIsStringASCII(const Char* characters, size_t length) {
+ // Bitmasks to detect non ASCII characters for character sizes of 8, 16 and 32
+ // bits.
+ constexpr MachineWord NonASCIIMasks[] = {
+ 0, MachineWord(0x8080808080808080ULL), MachineWord(0xFF80FF80FF80FF80ULL),
+ 0, MachineWord(0xFFFFFF80FFFFFF80ULL),
+ };
+
+ if (!length)
+ return true;
+ constexpr MachineWord non_ascii_bit_mask = NonASCIIMasks[sizeof(Char)];
+ static_assert(non_ascii_bit_mask, "Error: Invalid Mask");
+ MachineWord all_char_bits = 0;
+ const Char* end = characters + length;
+
+ // Prologue: align the input.
+ while (!IsMachineWordAligned(characters) && characters < end)
+ all_char_bits |= *characters++;
+ if (all_char_bits & non_ascii_bit_mask)
+ return false;
+
+ // Compare the values of CPU word size.
+ constexpr size_t chars_per_word = sizeof(MachineWord) / sizeof(Char);
+ constexpr int batch_count = 16;
+ while (characters <= end - batch_count * chars_per_word) {
+ all_char_bits = 0;
+ for (int i = 0; i < batch_count; ++i) {
+ all_char_bits |= *(reinterpret_cast<const MachineWord*>(characters));
+ characters += chars_per_word;
+ }
+ if (all_char_bits & non_ascii_bit_mask)
+ return false;
+ }
+
+ // Process the remaining words.
+ all_char_bits = 0;
+ while (characters <= end - chars_per_word) {
+ all_char_bits |= *(reinterpret_cast<const MachineWord*>(characters));
+ characters += chars_per_word;
+ }
+
+ // Process the remaining bytes.
+ while (characters < end)
+ all_char_bits |= *characters++;
+
+ return !(all_char_bits & non_ascii_bit_mask);
+}
+
+template <bool (*Validator)(uint32_t)>
+inline static bool DoIsStringUTF8(StringPiece str) {
+ const char* src = str.data();
+ int32_t src_len = static_cast<int32_t>(str.length());
+ int32_t char_index = 0;
+
+ while (char_index < src_len) {
+ int32_t code_point;
+ CBU8_NEXT(src, char_index, src_len, code_point);
+ if (!Validator(code_point))
+ return false;
+ }
+ return true;
+}
+
+// Implementation note: Normally this function will be called with a hardcoded
+// constant for the lowercase_ascii parameter. Constructing a StringPiece from
+// a C constant requires running strlen, so the result will be two passes
+// through the buffers, one to find the length of lowercase_ascii, and one to
+// compare each letter.
+//
+// This function could have taken a const char* to avoid this and only do one
+// pass through the string. But the strlen is faster than the case-insensitive
+// compares and lets us early-exit in the case that the strings are different
+// lengths (will often be the case for non-matches). So whether one approach or
+// the other will be faster depends on the case.
+//
+// The hardcoded strings are typically very short so it doesn't matter, and the
+// string piece gives additional flexibility for the caller (doesn't have to be
+// null terminated) so we choose the StringPiece route.
+template <typename Str>
+static inline bool DoLowerCaseEqualsASCII(BasicStringPiece<Str> str,
+ StringPiece lowercase_ascii) {
+ return std::equal(
+ str.begin(), str.end(), lowercase_ascii.begin(), lowercase_ascii.end(),
+ [](auto lhs, auto rhs) { return ToLowerASCII(lhs) == rhs; });
+}
+
+template <typename Str>
+bool StartsWithT(BasicStringPiece<Str> str,
+ BasicStringPiece<Str> search_for,
+ CompareCase case_sensitivity) {
+ if (search_for.size() > str.size())
+ return false;
+
+ BasicStringPiece<Str> source = str.substr(0, search_for.size());
+
+ switch (case_sensitivity) {
+ case CompareCase::SENSITIVE:
+ return source == search_for;
+
+ case CompareCase::INSENSITIVE_ASCII:
+ return std::equal(
+ search_for.begin(), search_for.end(), source.begin(),
+ CaseInsensitiveCompareASCII<typename Str::value_type>());
+
+ default:
+ NOTREACHED();
+ return false;
+ }
+}
+
+template <typename Str>
+bool EndsWithT(BasicStringPiece<Str> str,
+ BasicStringPiece<Str> search_for,
+ CompareCase case_sensitivity) {
+ if (search_for.size() > str.size())
+ return false;
+
+ BasicStringPiece<Str> source =
+ str.substr(str.size() - search_for.size(), search_for.size());
+
+ switch (case_sensitivity) {
+ case CompareCase::SENSITIVE:
+ return source == search_for;
+
+ case CompareCase::INSENSITIVE_ASCII:
+ return std::equal(
+ source.begin(), source.end(), search_for.begin(),
+ CaseInsensitiveCompareASCII<typename Str::value_type>());
+
+ default:
+ NOTREACHED();
+ return false;
+ }
+}
+
+// A Matcher for DoReplaceMatchesAfterOffset() that matches substrings.
+template <class StringType>
+struct SubstringMatcher {
+ BasicStringPiece<StringType> find_this;
+
+ size_t Find(const StringType& input, size_t pos) {
+ return input.find(find_this.data(), pos, find_this.length());
+ }
+ size_t MatchSize() { return find_this.length(); }
+};
+
+// A Matcher for DoReplaceMatchesAfterOffset() that matches single characters.
+template <class StringType>
+struct CharacterMatcher {
+ BasicStringPiece<StringType> find_any_of_these;
+
+ size_t Find(const StringType& input, size_t pos) {
+ return input.find_first_of(find_any_of_these.data(), pos,
+ find_any_of_these.length());
+ }
+ constexpr size_t MatchSize() { return 1; }
+};
+
+enum class ReplaceType { REPLACE_ALL, REPLACE_FIRST };
+
+// Runs in O(n) time in the length of |str|, and transforms the string without
+// reallocating when possible. Returns |true| if any matches were found.
+//
+// This is parameterized on a |Matcher| traits type, so that it can be the
+// implementation for both ReplaceChars() and ReplaceSubstringsAfterOffset().
+template <class StringType, class Matcher>
+bool DoReplaceMatchesAfterOffset(StringType* str,
+ size_t initial_offset,
+ Matcher matcher,
+ BasicStringPiece<StringType> replace_with,
+ ReplaceType replace_type) {
+ using CharTraits = typename StringType::traits_type;
+
+ const size_t find_length = matcher.MatchSize();
+ if (!find_length)
+ return false;
+
+ // If the find string doesn't appear, there's nothing to do.
+ size_t first_match = matcher.Find(*str, initial_offset);
+ if (first_match == StringType::npos)
+ return false;
+
+ // If we're only replacing one instance, there's no need to do anything
+ // complicated.
+ const size_t replace_length = replace_with.length();
+ if (replace_type == ReplaceType::REPLACE_FIRST) {
+ str->replace(first_match, find_length, replace_with.data(), replace_length);
+ return true;
+ }
+
+ // If the find and replace strings are the same length, we can simply use
+ // replace() on each instance, and finish the entire operation in O(n) time.
+ if (find_length == replace_length) {
+ auto* buffer = &((*str)[0]);
+ for (size_t offset = first_match; offset != StringType::npos;
+ offset = matcher.Find(*str, offset + replace_length)) {
+ CharTraits::copy(buffer + offset, replace_with.data(), replace_length);
+ }
+ return true;
+ }
+
+ // Since the find and replace strings aren't the same length, a loop like the
+ // one above would be O(n^2) in the worst case, as replace() will shift the
+ // entire remaining string each time. We need to be more clever to keep things
+ // O(n).
+ //
+ // When the string is being shortened, it's possible to just shift the matches
+ // down in one pass while finding, and truncate the length at the end of the
+ // search.
+ //
+ // If the string is being lengthened, more work is required. The strategy used
+ // here is to make two find() passes through the string. The first pass counts
+ // the number of matches to determine the new size. The second pass will
+ // either construct the new string into a new buffer (if the existing buffer
+ // lacked capacity), or else -- if there is room -- create a region of scratch
+ // space after |first_match| by shifting the tail of the string to a higher
+ // index, and doing in-place moves from the tail to lower indices thereafter.
+ size_t str_length = str->length();
+ size_t expansion = 0;
+ if (replace_length > find_length) {
+ // This operation lengthens the string; determine the new length by counting
+ // matches.
+ const size_t expansion_per_match = (replace_length - find_length);
+ size_t num_matches = 0;
+ for (size_t match = first_match; match != StringType::npos;
+ match = matcher.Find(*str, match + find_length)) {
+ expansion += expansion_per_match;
+ ++num_matches;
+ }
+ const size_t final_length = str_length + expansion;
+
+ if (str->capacity() < final_length) {
+ // If we'd have to allocate a new buffer to grow the string, build the
+ // result directly into the new allocation via append().
+ StringType src(str->get_allocator());
+ str->swap(src);
+ str->reserve(final_length);
+
+ size_t pos = 0;
+ for (size_t match = first_match;; match = matcher.Find(src, pos)) {
+ str->append(src, pos, match - pos);
+ str->append(replace_with.data(), replace_length);
+ pos = match + find_length;
+
+ // A mid-loop test/break enables skipping the final Find() call; the
+ // number of matches is known, so don't search past the last one.
+ if (!--num_matches)
+ break;
+ }
+
+ // Handle substring after the final match.
+ str->append(src, pos, str_length - pos);
+ return true;
+ }
+
+ // Prepare for the copy/move loop below -- expand the string to its final
+ // size by shifting the data after the first match to the end of the resized
+ // string.
+ size_t shift_src = first_match + find_length;
+ size_t shift_dst = shift_src + expansion;
+
+ // Big |expansion| factors (relative to |str_length|) require padding up to
+ // |shift_dst|.
+ if (shift_dst > str_length)
+ str->resize(shift_dst);
+
+ str->replace(shift_dst, str_length - shift_src, *str, shift_src,
+ str_length - shift_src);
+ str_length = final_length;
+ }
+
+ // We can alternate replacement and move operations. This won't overwrite the
+ // unsearched region of the string so long as |write_offset| <= |read_offset|;
+ // that condition is always satisfied because:
+ //
+ // (a) If the string is being shortened, |expansion| is zero and
+ // |write_offset| grows slower than |read_offset|.
+ //
+ // (b) If the string is being lengthened, |write_offset| grows faster than
+ // |read_offset|, but |expansion| is big enough so that |write_offset|
+ // will only catch up to |read_offset| at the point of the last match.
+ auto* buffer = &((*str)[0]);
+ size_t write_offset = first_match;
+ size_t read_offset = first_match + expansion;
+ do {
+ if (replace_length) {
+ CharTraits::copy(buffer + write_offset, replace_with.data(),
+ replace_length);
+ write_offset += replace_length;
+ }
+ read_offset += find_length;
+
+ // min() clamps StringType::npos (the largest unsigned value) to str_length.
+ size_t match = std::min(matcher.Find(*str, read_offset), str_length);
+
+ size_t length = match - read_offset;
+ if (length) {
+ CharTraits::move(buffer + write_offset, buffer + read_offset, length);
+ write_offset += length;
+ read_offset += length;
+ }
+ } while (read_offset < str_length);
+
+ // If we're shortening the string, truncate it now.
+ str->resize(write_offset);
+ return true;
+}
+
+template <class StringType>
+bool ReplaceCharsT(BasicStringPiece<StringType> input,
+ BasicStringPiece<StringType> find_any_of_these,
+ BasicStringPiece<StringType> replace_with,
+ StringType* output) {
+ // Commonly, this is called with output and input being the same string; in
+ // that case, skip the copy.
+ if (input.data() != output->data() || input.size() != output->size())
+ output->assign(input.data(), input.size());
+
+ return DoReplaceMatchesAfterOffset(
+ output, 0, CharacterMatcher<StringType>{find_any_of_these}, replace_with,
+ ReplaceType::REPLACE_ALL);
+}
+
+template <class string_type>
+inline typename string_type::value_type* WriteIntoT(string_type* str,
+ size_t length_with_null) {
+ DCHECK_GE(length_with_null, 1u);
+ str->reserve(length_with_null);
+ str->resize(length_with_null - 1);
+ return &((*str)[0]);
+}
+
+// Generic version for all JoinString overloads. |list_type| must be a sequence
+// (base::span or std::initializer_list) of strings/StringPieces (std::string,
+// string16, StringPiece or StringPiece16). |string_type| is either std::string
+// or string16.
+template <typename list_type, typename string_type>
+static string_type JoinStringT(list_type parts,
+ BasicStringPiece<string_type> sep) {
+ if (base::empty(parts))
+ return string_type();
+
+ // Pre-allocate the eventual size of the string. Start with the size of all of
+ // the separators (note that this *assumes* parts.size() > 0).
+ size_t total_size = (parts.size() - 1) * sep.size();
+ for (const auto& part : parts)
+ total_size += part.size();
+ string_type result;
+ result.reserve(total_size);
+
+ auto iter = parts.begin();
+ DCHECK(iter != parts.end());
+ result.append(iter->data(), iter->size());
+ ++iter;
+
+ for (; iter != parts.end(); ++iter) {
+ result.append(sep.data(), sep.size());
+ result.append(iter->data(), iter->size());
+ }
+
+ // Sanity-check that we pre-allocated correctly.
+ DCHECK_EQ(total_size, result.size());
+
+ return result;
+}
+
+template <class StringType>
+StringType DoReplaceStringPlaceholders(
+ BasicStringPiece<StringType> format_string,
+ const std::vector<StringType>& subst,
+ std::vector<size_t>* offsets) {
+ size_t substitutions = subst.size();
+ DCHECK_LT(substitutions, 10U);
+
+ size_t sub_length = 0;
+ for (const auto& cur : subst)
+ sub_length += cur.length();
+
+ StringType formatted;
+ formatted.reserve(format_string.length() + sub_length);
+
+ std::vector<ReplacementOffset> r_offsets;
+ for (auto i = format_string.begin(); i != format_string.end(); ++i) {
+ if ('$' == *i) {
+ if (i + 1 != format_string.end()) {
+ ++i;
+ if ('$' == *i) {
+ while (i != format_string.end() && '$' == *i) {
+ formatted.push_back('$');
+ ++i;
+ }
+ --i;
+ } else {
+ if (*i < '1' || *i > '9') {
+ DLOG(ERROR) << "Invalid placeholder: $" << *i;
+ continue;
+ }
+ uintptr_t index = *i - '1';
+ if (offsets) {
+ ReplacementOffset r_offset(index,
+ static_cast<int>(formatted.size()));
+ r_offsets.insert(
+ std::upper_bound(r_offsets.begin(), r_offsets.end(), r_offset,
+ &CompareParameter),
+ r_offset);
+ }
+ if (index < substitutions)
+ formatted.append(subst.at(index));
+ }
+ }
+ } else {
+ formatted.push_back(*i);
+ }
+ }
+ if (offsets) {
+ for (const auto& cur : r_offsets)
+ offsets->push_back(cur.offset);
+ }
+ return formatted;
+}
+
+// The following code is compatible with the OpenBSD lcpy interface. See:
+// http://www.gratisoft.us/todd/papers/strlcpy.html
+// ftp://ftp.openbsd.org/pub/OpenBSD/src/lib/libc/string/{wcs,str}lcpy.c
+
+template <typename CHAR>
+size_t lcpyT(CHAR* dst, const CHAR* src, size_t dst_size) {
+ for (size_t i = 0; i < dst_size; ++i) {
+ if ((dst[i] = src[i]) == 0) // We hit and copied the terminating NULL.
+ return i;
+ }
+
+ // We were left off at dst_size. We over copied 1 byte. Null terminate.
+ if (dst_size != 0)
+ dst[dst_size - 1] = 0;
+
+  // Count the rest of the |src|, and return its length in characters.
+ while (src[dst_size])
+ ++dst_size;
+ return dst_size;
+}
+
+} // namespace internal
+
+} // namespace base
+
+#endif // BASE_STRINGS_STRING_UTIL_INTERNAL_H_
diff --git a/chromium/base/strings/string_util_posix.h b/chromium/base/strings/string_util_posix.h
index 8299118e106..c869df78e58 100644
--- a/chromium/base/strings/string_util_posix.h
+++ b/chromium/base/strings/string_util_posix.h
@@ -11,7 +11,7 @@
#include <string.h>
#include <wchar.h>
-#include "base/logging.h"
+#include "base/check.h"
namespace base {
diff --git a/chromium/base/strings/string_util_unittest.cc b/chromium/base/strings/string_util_unittest.cc
index f1132b9be4e..b9be7fbfa84 100644
--- a/chromium/base/strings/string_util_unittest.cc
+++ b/chromium/base/strings/string_util_unittest.cc
@@ -12,6 +12,7 @@
#include <algorithm>
#include <type_traits>
+#include "base/bits.h"
#include "base/stl_util.h"
#include "base/strings/string16.h"
#include "base/strings/utf_string_conversions.h"
@@ -678,28 +679,28 @@ TEST(StringUtilTest, FormatBytesUnlocalized) {
int64_t bytes;
const char* expected;
} cases[] = {
- // Expected behavior: we show one post-decimal digit when we have
- // under two pre-decimal digits, except in cases where it makes no
- // sense (zero or bytes).
- // Since we switch units once we cross the 1000 mark, this keeps
- // the display of file sizes or bytes consistently around three
- // digits.
- {0, "0 B"},
- {512, "512 B"},
- {1024*1024, "1.0 MB"},
- {1024*1024*1024, "1.0 GB"},
- {10LL*1024*1024*1024, "10.0 GB"},
- {99LL*1024*1024*1024, "99.0 GB"},
- {105LL*1024*1024*1024, "105 GB"},
- {105LL*1024*1024*1024 + 500LL*1024*1024, "105 GB"},
- {~(1LL << 63), "8192 PB"},
-
- {99*1024 + 103, "99.1 kB"},
- {1024*1024 + 103, "1.0 MB"},
- {1024*1024 + 205 * 1024, "1.2 MB"},
- {1024*1024*1024 + (927 * 1024*1024), "1.9 GB"},
- {10LL*1024*1024*1024, "10.0 GB"},
- {100LL*1024*1024*1024, "100 GB"},
+ // Expected behavior: we show one post-decimal digit when we have
+ // under two pre-decimal digits, except in cases where it makes no
+ // sense (zero or bytes).
+ // Since we switch units once we cross the 1000 mark, this keeps
+ // the display of file sizes or bytes consistently around three
+ // digits.
+ {0, "0 B"},
+ {512, "512 B"},
+ {1024 * 1024, "1.0 MB"},
+ {1024 * 1024 * 1024, "1.0 GB"},
+ {10LL * 1024 * 1024 * 1024, "10.0 GB"},
+ {99LL * 1024 * 1024 * 1024, "99.0 GB"},
+ {105LL * 1024 * 1024 * 1024, "105 GB"},
+ {105LL * 1024 * 1024 * 1024 + 500LL * 1024 * 1024, "105 GB"},
+ {~(bits::LeftmostBit<int64_t>()), "8192 PB"},
+
+ {99 * 1024 + 103, "99.1 kB"},
+ {1024 * 1024 + 103, "1.0 MB"},
+ {1024 * 1024 + 205 * 1024, "1.2 MB"},
+ {1024 * 1024 * 1024 + (927 * 1024 * 1024), "1.9 GB"},
+ {10LL * 1024 * 1024 * 1024, "10.0 GB"},
+ {100LL * 1024 * 1024 * 1024, "100 GB"},
};
for (const auto& i : cases) {
diff --git a/chromium/base/strings/string_util_win.cc b/chromium/base/strings/string_util_win.cc
new file mode 100644
index 00000000000..2c19729e0a5
--- /dev/null
+++ b/chromium/base/strings/string_util_win.cc
@@ -0,0 +1,145 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/string_util_win.h"
+
+#include "base/strings/string_util_internal.h"
+
+namespace base {
+
+#if defined(BASE_STRING16_IS_STD_U16STRING)
+bool IsStringASCII(WStringPiece str) {
+ return internal::DoIsStringASCII(str.data(), str.length());
+}
+
+std::wstring ToLowerASCII(WStringPiece str) {
+ return internal::ToLowerASCIIImpl(str);
+}
+
+std::wstring ToUpperASCII(WStringPiece str) {
+ return internal::ToUpperASCIIImpl(str);
+}
+
+int CompareCaseInsensitiveASCII(WStringPiece a, WStringPiece b) {
+ return internal::CompareCaseInsensitiveASCIIT(a, b);
+}
+
+bool EqualsCaseInsensitiveASCII(WStringPiece a, WStringPiece b) {
+ return a.size() == b.size() &&
+ internal::CompareCaseInsensitiveASCIIT(a, b) == 0;
+}
+
+bool RemoveChars(WStringPiece input,
+ WStringPiece remove_chars,
+ std::wstring* output) {
+ return internal::ReplaceCharsT(input, remove_chars, WStringPiece(), output);
+}
+
+bool ReplaceChars(WStringPiece input,
+ WStringPiece replace_chars,
+ WStringPiece replace_with,
+ std::wstring* output) {
+ return internal::ReplaceCharsT(input, replace_chars, replace_with, output);
+}
+
+bool TrimString(WStringPiece input,
+ WStringPiece trim_chars,
+ std::wstring* output) {
+ return internal::TrimStringT(input, trim_chars, TRIM_ALL, output) !=
+ TRIM_NONE;
+}
+
+WStringPiece TrimString(WStringPiece input,
+ WStringPiece trim_chars,
+ TrimPositions positions) {
+ return internal::TrimStringPieceT(input, trim_chars, positions);
+}
+
+TrimPositions TrimWhitespace(WStringPiece input,
+ TrimPositions positions,
+ std::wstring* output) {
+ return internal::TrimStringT(input, WStringPiece(kWhitespaceWide), positions,
+ output);
+}
+
+WStringPiece TrimWhitespace(WStringPiece input, TrimPositions positions) {
+ return internal::TrimStringPieceT(input, WStringPiece(kWhitespaceWide),
+ positions);
+}
+
+std::wstring CollapseWhitespace(WStringPiece text,
+ bool trim_sequences_with_line_breaks) {
+ return internal::CollapseWhitespaceT(text, trim_sequences_with_line_breaks);
+}
+
+bool ContainsOnlyChars(WStringPiece input, WStringPiece characters) {
+ return input.find_first_not_of(characters) == StringPiece::npos;
+}
+
+bool LowerCaseEqualsASCII(WStringPiece str, StringPiece lowercase_ascii) {
+ return internal::DoLowerCaseEqualsASCII(str, lowercase_ascii);
+}
+
+bool EqualsASCII(WStringPiece str, StringPiece ascii) {
+ return std::equal(ascii.begin(), ascii.end(), str.begin(), str.end());
+}
+
+bool StartsWith(WStringPiece str,
+ WStringPiece search_for,
+ CompareCase case_sensitivity) {
+ return internal::StartsWithT(str, search_for, case_sensitivity);
+}
+
+bool EndsWith(WStringPiece str,
+ WStringPiece search_for,
+ CompareCase case_sensitivity) {
+ return internal::EndsWithT(str, search_for, case_sensitivity);
+}
+
+void ReplaceFirstSubstringAfterOffset(std::wstring* str,
+ size_t start_offset,
+ WStringPiece find_this,
+ WStringPiece replace_with) {
+ internal::DoReplaceMatchesAfterOffset(
+ str, start_offset, internal::SubstringMatcher<std::wstring>{find_this},
+ replace_with, internal::ReplaceType::REPLACE_FIRST);
+}
+
+void ReplaceSubstringsAfterOffset(std::wstring* str,
+ size_t start_offset,
+ WStringPiece find_this,
+ WStringPiece replace_with) {
+ internal::DoReplaceMatchesAfterOffset(
+ str, start_offset, internal::SubstringMatcher<std::wstring>{find_this},
+ replace_with, internal::ReplaceType::REPLACE_ALL);
+}
+
+wchar_t* WriteInto(std::wstring* str, size_t length_with_null) {
+ return internal::WriteIntoT(str, length_with_null);
+}
+
+std::wstring JoinString(span<const std::wstring> parts,
+ WStringPiece separator) {
+ return internal::JoinStringT(parts, separator);
+}
+
+std::wstring JoinString(span<const WStringPiece> parts,
+ WStringPiece separator) {
+ return internal::JoinStringT(parts, separator);
+}
+
+std::wstring JoinString(std::initializer_list<WStringPiece> parts,
+ WStringPiece separator) {
+ return internal::JoinStringT(parts, separator);
+}
+
+std::wstring ReplaceStringPlaceholders(WStringPiece format_string,
+ const std::vector<std::wstring>& subst,
+ std::vector<size_t>* offsets) {
+ return internal::DoReplaceStringPlaceholders(format_string, subst, offsets);
+}
+
+#endif
+
+} // namespace base
diff --git a/chromium/base/strings/string_util_win.h b/chromium/base/strings/string_util_win.h
index 7f260bfc8b4..c39133d923c 100644
--- a/chromium/base/strings/string_util_win.h
+++ b/chromium/base/strings/string_util_win.h
@@ -11,7 +11,14 @@
#include <string.h>
#include <wchar.h>
-#include "base/logging.h"
+#include <string>
+#include <vector>
+
+#include "base/check.h"
+#include "base/containers/span.h"
+#include "base/strings/string16.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/string_util.h"
namespace base {
@@ -39,6 +46,154 @@ inline int vswprintf(wchar_t* buffer, size_t size,
return length;
}
+// Utility functions to access the underlying string buffer as a wide char
+// pointer.
+//
+// Note: These functions violate strict aliasing when char16 and wchar_t are
+// unrelated types. We thus pass -fno-strict-aliasing to the compiler on
+// non-Windows platforms [1], and rely on it being off in Clang's CL mode [2].
+//
+// [1] https://crrev.com/b9a0976622/build/config/compiler/BUILD.gn#244
+// [2]
+// https://github.com/llvm/llvm-project/blob/1e28a66/clang/lib/Driver/ToolChains/Clang.cpp#L3949
+inline wchar_t* as_writable_wcstr(char16* str) {
+ return reinterpret_cast<wchar_t*>(str);
+}
+
+inline wchar_t* as_writable_wcstr(string16& str) {
+ return reinterpret_cast<wchar_t*>(data(str));
+}
+
+inline const wchar_t* as_wcstr(const char16* str) {
+ return reinterpret_cast<const wchar_t*>(str);
+}
+
+inline const wchar_t* as_wcstr(StringPiece16 str) {
+ return reinterpret_cast<const wchar_t*>(str.data());
+}
+
+// Utility functions to access the underlying string buffer as a char16 pointer.
+inline char16* as_writable_u16cstr(wchar_t* str) {
+ return reinterpret_cast<char16*>(str);
+}
+
+inline char16* as_writable_u16cstr(std::wstring& str) {
+ return reinterpret_cast<char16*>(data(str));
+}
+
+inline const char16* as_u16cstr(const wchar_t* str) {
+ return reinterpret_cast<const char16*>(str);
+}
+
+inline const char16* as_u16cstr(WStringPiece str) {
+ return reinterpret_cast<const char16*>(str.data());
+}
+
+// Utility functions to convert between base::WStringPiece and
+// base::StringPiece16.
+inline WStringPiece AsWStringPiece(StringPiece16 str) {
+ return WStringPiece(as_wcstr(str.data()), str.size());
+}
+
+inline StringPiece16 AsStringPiece16(WStringPiece str) {
+ return StringPiece16(as_u16cstr(str.data()), str.size());
+}
+
+inline std::wstring AsWString(StringPiece16 str) {
+ return std::wstring(as_wcstr(str.data()), str.size());
+}
+
+inline string16 AsString16(WStringPiece str) {
+ return string16(as_u16cstr(str.data()), str.size());
+}
+
+// The following section contains overloads of the cross-platform APIs for
+// std::wstring and base::WStringPiece. These are only enabled if std::wstring
+// and base::string16 are distinct types, as otherwise this would result in an
+// ODR violation.
+// TODO(crbug.com/911896): Remove those guards once base::string16 is
+// std::u16string.
+#if defined(BASE_STRING16_IS_STD_U16STRING)
+BASE_EXPORT bool IsStringASCII(WStringPiece str);
+
+BASE_EXPORT std::wstring ToLowerASCII(WStringPiece str);
+
+BASE_EXPORT std::wstring ToUpperASCII(WStringPiece str);
+
+BASE_EXPORT int CompareCaseInsensitiveASCII(WStringPiece a, WStringPiece b);
+
+BASE_EXPORT bool EqualsCaseInsensitiveASCII(WStringPiece a, WStringPiece b);
+
+BASE_EXPORT bool RemoveChars(WStringPiece input,
+ WStringPiece remove_chars,
+ std::wstring* output);
+
+BASE_EXPORT bool ReplaceChars(WStringPiece input,
+ WStringPiece replace_chars,
+ WStringPiece replace_with,
+ std::wstring* output);
+
+BASE_EXPORT bool TrimString(WStringPiece input,
+ WStringPiece trim_chars,
+                            std::wstring* output);
+
+BASE_EXPORT WStringPiece TrimString(WStringPiece input,
+ WStringPiece trim_chars,
+ TrimPositions positions);
+
+BASE_EXPORT TrimPositions TrimWhitespace(WStringPiece input,
+ TrimPositions positions,
+ std::wstring* output);
+
+BASE_EXPORT WStringPiece TrimWhitespace(WStringPiece input,
+ TrimPositions positions);
+
+BASE_EXPORT std::wstring CollapseWhitespace(
+ WStringPiece text,
+ bool trim_sequences_with_line_breaks);
+
+BASE_EXPORT bool ContainsOnlyChars(WStringPiece input, WStringPiece characters);
+
+BASE_EXPORT bool LowerCaseEqualsASCII(WStringPiece str,
+                                      StringPiece lowercase_ascii);
+
+BASE_EXPORT bool EqualsASCII(WStringPiece str, StringPiece ascii);
+
+BASE_EXPORT bool StartsWith(WStringPiece str,
+ WStringPiece search_for,
+ CompareCase case_sensitivity);
+
+BASE_EXPORT bool EndsWith(WStringPiece str,
+ WStringPiece search_for,
+ CompareCase case_sensitivity);
+
+BASE_EXPORT void ReplaceFirstSubstringAfterOffset(std::wstring* str,
+ size_t start_offset,
+ WStringPiece find_this,
+ WStringPiece replace_with);
+
+BASE_EXPORT void ReplaceSubstringsAfterOffset(std::wstring* str,
+ size_t start_offset,
+ WStringPiece find_this,
+ WStringPiece replace_with);
+
+BASE_EXPORT wchar_t* WriteInto(std::wstring* str, size_t length_with_null);
+
+BASE_EXPORT std::wstring JoinString(span<const std::wstring> parts,
+ WStringPiece separator);
+
+BASE_EXPORT std::wstring JoinString(span<const WStringPiece> parts,
+ WStringPiece separator);
+
+BASE_EXPORT std::wstring JoinString(std::initializer_list<WStringPiece> parts,
+ WStringPiece separator);
+
+BASE_EXPORT std::wstring ReplaceStringPlaceholders(
+ WStringPiece format_string,
+ const std::vector<string16>& subst,
+ std::vector<size_t>* offsets);
+#endif
+
} // namespace base
#endif // BASE_STRINGS_STRING_UTIL_WIN_H_
diff --git a/chromium/base/strings/utf_string_conversions.cc b/chromium/base/strings/utf_string_conversions.cc
index 9a79889159e..0b55cd9e59d 100644
--- a/chromium/base/strings/utf_string_conversions.cc
+++ b/chromium/base/strings/utf_string_conversions.cc
@@ -339,4 +339,16 @@ std::string UTF16ToASCII(StringPiece16 utf16) {
return std::string(utf16.begin(), utf16.end());
}
+#if defined(WCHAR_T_IS_UTF16)
+std::wstring ASCIIToWide(StringPiece ascii) {
+ DCHECK(IsStringASCII(ascii)) << ascii;
+ return std::wstring(ascii.begin(), ascii.end());
+}
+
+std::string WideToASCII(WStringPiece wide) {
+ DCHECK(IsStringASCII(wide)) << wide;
+ return std::string(wide.begin(), wide.end());
+}
+#endif // defined(WCHAR_T_IS_UTF16)
+
} // namespace base
diff --git a/chromium/base/strings/utf_string_conversions.h b/chromium/base/strings/utf_string_conversions.h
index f780fb4f4f8..9ee91453a02 100644
--- a/chromium/base/strings/utf_string_conversions.h
+++ b/chromium/base/strings/utf_string_conversions.h
@@ -12,6 +12,7 @@
#include "base/base_export.h"
#include "base/strings/string16.h"
#include "base/strings/string_piece.h"
+#include "build/build_config.h"
namespace base {
@@ -49,6 +50,16 @@ BASE_EXPORT string16 ASCIIToUTF16(StringPiece ascii) WARN_UNUSED_RESULT;
// beforehand.
BASE_EXPORT std::string UTF16ToASCII(StringPiece16 utf16) WARN_UNUSED_RESULT;
+#if defined(WCHAR_T_IS_UTF16)
+// This converts an ASCII string, typically a hardcoded constant, to a wide
+// string.
+BASE_EXPORT std::wstring ASCIIToWide(StringPiece ascii) WARN_UNUSED_RESULT;
+
+// Converts to 7-bit ASCII by truncating. The result must be known to be ASCII
+// beforehand.
+BASE_EXPORT std::string WideToASCII(WStringPiece wide) WARN_UNUSED_RESULT;
+#endif // defined(WCHAR_T_IS_UTF16)
+
} // namespace base
#endif // BASE_STRINGS_UTF_STRING_CONVERSIONS_H_
diff --git a/chromium/base/synchronization/condition_variable.h b/chromium/base/synchronization/condition_variable.h
index d92b738081d..f57ed132829 100644
--- a/chromium/base/synchronization/condition_variable.h
+++ b/chromium/base/synchronization/condition_variable.h
@@ -67,7 +67,7 @@
#endif
#include "base/base_export.h"
-#include "base/logging.h"
+#include "base/check_op.h"
#include "base/macros.h"
#include "base/synchronization/lock.h"
#include "build/build_config.h"
diff --git a/chromium/base/synchronization/lock.h b/chromium/base/synchronization/lock.h
index e89db4a56db..d9cfbb758f1 100644
--- a/chromium/base/synchronization/lock.h
+++ b/chromium/base/synchronization/lock.h
@@ -6,7 +6,7 @@
#define BASE_SYNCHRONIZATION_LOCK_H_
#include "base/base_export.h"
-#include "base/logging.h"
+#include "base/check_op.h"
#include "base/macros.h"
#include "base/synchronization/lock_impl.h"
#include "base/thread_annotations.h"
diff --git a/chromium/base/synchronization/lock_impl.h b/chromium/base/synchronization/lock_impl.h
index 66f4b32696a..773bd1bcd58 100644
--- a/chromium/base/synchronization/lock_impl.h
+++ b/chromium/base/synchronization/lock_impl.h
@@ -6,7 +6,7 @@
#define BASE_SYNCHRONIZATION_LOCK_IMPL_H_
#include "base/base_export.h"
-#include "base/logging.h"
+#include "base/check_op.h"
#include "base/macros.h"
#include "base/thread_annotations.h"
#include "build/build_config.h"
diff --git a/chromium/base/system/sys_info.h b/chromium/base/system/sys_info.h
index 057b0dc45e8..ae3fef01811 100644
--- a/chromium/base/system/sys_info.h
+++ b/chromium/base/system/sys_info.h
@@ -160,6 +160,10 @@ class BASE_EXPORT SysInfo {
// Returns the kernel version of the host operating system.
static std::string KernelVersion();
+
+ // Crashes if running on Chrome OS non-test image. Use only for really
+ // sensitive and risky use cases.
+ static void CrashIfChromeOSNonTestImage();
#endif // defined(OS_CHROMEOS)
#if defined(OS_ANDROID)
diff --git a/chromium/base/system/sys_info_chromeos.cc b/chromium/base/system/sys_info_chromeos.cc
index 1d688f13491..6be670fb0d3 100644
--- a/chromium/base/system/sys_info_chromeos.cc
+++ b/chromium/base/system/sys_info_chromeos.cc
@@ -223,4 +223,21 @@ void SysInfo::SetChromeOSVersionInfoForTest(const std::string& lsb_release,
g_chrome_os_version_info.Get().Parse();
}
+// static
+void SysInfo::CrashIfChromeOSNonTestImage() {
+ if (!IsRunningOnChromeOS())
+ return;
+
+ // On the test images etc/lsb-release has a line:
+ // CHROMEOS_RELEASE_TRACK=testimage-channel.
+ const char kChromeOSReleaseTrack[] = "CHROMEOS_RELEASE_TRACK";
+ const char kTestImageRelease[] = "testimage-channel";
+
+ std::string track;
+ CHECK(SysInfo::GetLsbReleaseValue(kChromeOSReleaseTrack, &track));
+
+ // Crash if can't find test-image marker in the release track.
+ CHECK_NE(track.find(kTestImageRelease), std::string::npos);
+}
+
} // namespace base
diff --git a/chromium/base/system/sys_info_linux.cc b/chromium/base/system/sys_info_linux.cc
index d9bfa496fde..f69569fb51c 100644
--- a/chromium/base/system/sys_info_linux.cc
+++ b/chromium/base/system/sys_info_linux.cc
@@ -8,6 +8,7 @@
#include <stdint.h>
#include <limits>
+#include <sstream>
#include "base/check.h"
#include "base/files/file_util.h"
diff --git a/chromium/base/system/sys_info_unittest.cc b/chromium/base/system/sys_info_unittest.cc
index 68add20e022..6c9c226a92e 100644
--- a/chromium/base/system/sys_info_unittest.cc
+++ b/chromium/base/system/sys_info_unittest.cc
@@ -21,6 +21,7 @@
#include "base/threading/platform_thread.h"
#include "base/time/time.h"
#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest-death-test.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "testing/platform_test.h"
@@ -268,6 +269,34 @@ TEST_F(SysInfoTest, IsRunningOnChromeOS) {
EXPECT_TRUE(SysInfo::IsRunningOnChromeOS());
}
+TEST_F(SysInfoTest, CrashOnBaseImage) {
+ const char kLsbRelease2[] =
+ "CHROMEOS_RELEASE_NAME=Chrome OS\n"
+ "CHROMEOS_RELEASE_VERSION=1.2.3.4\n"
+ "CHROMEOS_RELEASE_TRACK=stable-channel\n";
+ SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease2, Time());
+ EXPECT_TRUE(SysInfo::IsRunningOnChromeOS());
+ EXPECT_DEATH_IF_SUPPORTED({ SysInfo::CrashIfChromeOSNonTestImage(); }, "");
+}
+
+TEST_F(SysInfoTest, NoCrashOnTestImage) {
+ const char kLsbRelease2[] =
+ "CHROMEOS_RELEASE_NAME=Chrome OS\n"
+ "CHROMEOS_RELEASE_VERSION=1.2.3.4\n"
+ "CHROMEOS_RELEASE_TRACK=testimage-channel\n";
+ SysInfo::SetChromeOSVersionInfoForTest(kLsbRelease2, Time());
+ EXPECT_TRUE(SysInfo::IsRunningOnChromeOS());
+ // Should not crash.
+ SysInfo::CrashIfChromeOSNonTestImage();
+}
+
+TEST_F(SysInfoTest, NoCrashOnLinuxBuild) {
+ SysInfo::SetChromeOSVersionInfoForTest("", Time());
+ EXPECT_FALSE(SysInfo::IsRunningOnChromeOS());
+ // Should not crash.
+ SysInfo::CrashIfChromeOSNonTestImage();
+}
+
#endif // OS_CHROMEOS
} // namespace base
diff --git a/chromium/base/task/common/checked_lock.h b/chromium/base/task/common/checked_lock.h
index 29ce5735b61..4399ec477a5 100644
--- a/chromium/base/task/common/checked_lock.h
+++ b/chromium/base/task/common/checked_lock.h
@@ -31,18 +31,27 @@ namespace internal {
// CheckedLock(const CheckedLock* predecessor)
// Constructor that specifies an allowed predecessor for that lock.
// DCHECKs
-// On Construction if |predecessor| forms a predecessor lock cycle.
+// On Construction if |predecessor| forms a predecessor lock cycle or
+// is a universal successor.
// On Acquisition if the previous lock acquired on the thread is not
// either |predecessor| or a universal predecessor. Okay if there
// was no previous lock acquired.
//
// CheckedLock(UniversalPredecessor universal_predecessor)
// Constructor for a lock that will allow the acquisition of any lock after
-// it, without needing to explicitly be named a predecessor. Can only be
-// acquired if no locks are currently held by this thread.
-// DCHECKs
+// it, without needing to explicitly be named a predecessor (e.g. a root in
+// a lock chain). Can only be acquired if no locks are currently held by
+// this thread. DCHECKs
// On Acquisition if any CheckedLock is acquired on this thread.
//
+// CheckedLock(UniversalSuccessor universal_successor)
+// Constructor for a lock that will allow its acquisition after any other
+// lock, without needing to explicitly name its predecessor (e.g. a leaf in
+// a lock chain). Can not be acquired after another UniversalSuccessor lock.
+// DCHECKs
+// On Acquisition if there was a previously acquired lock on the thread
+// and it was also a universal successor.
+//
// void Acquire()
// Acquires the lock.
//
@@ -63,6 +72,8 @@ class LOCKABLE CheckedLock : public CheckedLockImpl {
: CheckedLockImpl(predecessor) {}
explicit CheckedLock(UniversalPredecessor universal_predecessor)
: CheckedLockImpl(universal_predecessor) {}
+ explicit CheckedLock(UniversalSuccessor universal_successor)
+ : CheckedLockImpl(universal_successor) {}
};
#else // DCHECK_IS_ON()
class LOCKABLE CheckedLock : public Lock {
@@ -70,6 +81,7 @@ class LOCKABLE CheckedLock : public Lock {
CheckedLock() = default;
explicit CheckedLock(const CheckedLock*) {}
explicit CheckedLock(UniversalPredecessor) {}
+ explicit CheckedLock(UniversalSuccessor) {}
static void AssertNoLockHeldOnCurrentThread() {}
std::unique_ptr<ConditionVariable> CreateConditionVariable() {
diff --git a/chromium/base/task/common/checked_lock_impl.cc b/chromium/base/task/common/checked_lock_impl.cc
index 698886e1615..8b41e95cf8c 100644
--- a/chromium/base/task/common/checked_lock_impl.cc
+++ b/chromium/base/task/common/checked_lock_impl.cc
@@ -81,7 +81,12 @@ class SafeAcquisitionTracker {
// Using at() is exception-safe here as |lock| was registered already.
const CheckedLockImpl* allowed_predecessor =
allowed_predecessor_map_.at(lock);
- DCHECK_EQ(previous_lock, allowed_predecessor);
+ if (lock->is_universal_successor()) {
+ DCHECK(!previous_lock->is_universal_successor());
+ return;
+ } else {
+ DCHECK_EQ(previous_lock, allowed_predecessor);
+ }
}
// Asserts that |lock|'s registered predecessor is safe. Because
@@ -134,12 +139,18 @@ CheckedLockImpl::CheckedLockImpl() : CheckedLockImpl(nullptr) {}
CheckedLockImpl::CheckedLockImpl(const CheckedLockImpl* predecessor)
: is_universal_predecessor_(false) {
+ DCHECK(predecessor == nullptr || !predecessor->is_universal_successor_);
g_safe_acquisition_tracker.Get().RegisterLock(this, predecessor);
}
CheckedLockImpl::CheckedLockImpl(UniversalPredecessor)
: is_universal_predecessor_(true) {}
+CheckedLockImpl::CheckedLockImpl(UniversalSuccessor)
+ : is_universal_successor_(true) {
+ g_safe_acquisition_tracker.Get().RegisterLock(this, nullptr);
+}
+
CheckedLockImpl::~CheckedLockImpl() {
g_safe_acquisition_tracker.Get().UnregisterLock(this);
}
diff --git a/chromium/base/task/common/checked_lock_impl.h b/chromium/base/task/common/checked_lock_impl.h
index acb1d133753..88aba042aad 100644
--- a/chromium/base/task/common/checked_lock_impl.h
+++ b/chromium/base/task/common/checked_lock_impl.h
@@ -18,6 +18,7 @@ class ConditionVariable;
namespace internal {
struct UniversalPredecessor {};
+struct UniversalSuccessor {};
// A regular lock with simple deadlock correctness checking.
// This lock tracks all of the available locks to make sure that any locks are
@@ -28,6 +29,7 @@ class BASE_EXPORT CheckedLockImpl {
CheckedLockImpl();
explicit CheckedLockImpl(const CheckedLockImpl* predecessor);
explicit CheckedLockImpl(UniversalPredecessor);
+ explicit CheckedLockImpl(UniversalSuccessor);
~CheckedLockImpl();
static void AssertNoLockHeldOnCurrentThread();
@@ -40,10 +42,12 @@ class BASE_EXPORT CheckedLockImpl {
std::unique_ptr<ConditionVariable> CreateConditionVariable();
bool is_universal_predecessor() const { return is_universal_predecessor_; }
+ bool is_universal_successor() const { return is_universal_successor_; }
private:
Lock lock_;
- const bool is_universal_predecessor_;
+ const bool is_universal_predecessor_ = false;
+ const bool is_universal_successor_ = false;
DISALLOW_COPY_AND_ASSIGN(CheckedLockImpl);
};
diff --git a/chromium/base/task/common/checked_lock_unittest.cc b/chromium/base/task/common/checked_lock_unittest.cc
index 54b74c50391..2e21eace50b 100644
--- a/chromium/base/task/common/checked_lock_unittest.cc
+++ b/chromium/base/task/common/checked_lock_unittest.cc
@@ -307,7 +307,7 @@ TEST(CheckedLockTest, AcquireMultipleLocksAfterUniversalPredecessor)
NO_THREAD_SAFETY_ANALYSIS {
// Acquisition of a universal-predecessor lock does not affect acquisition
// rules for locks beyond the one acquired directly after it.
- CheckedLock universal_predecessor((UniversalPredecessor()));
+ CheckedLock universal_predecessor{UniversalPredecessor()};
CheckedLock lock;
CheckedLock lock2(&lock);
CheckedLock lock3;
@@ -329,7 +329,7 @@ NO_THREAD_SAFETY_ANALYSIS {
TEST(CheckedLockTest, AcquireUniversalPredecessorAfterLock)
NO_THREAD_SAFETY_ANALYSIS {
// A universal-predecessor lock may not be acquired after any other lock.
- CheckedLock universal_predecessor((UniversalPredecessor()));
+ CheckedLock universal_predecessor{UniversalPredecessor()};
CheckedLock lock;
EXPECT_DCHECK_DEATH({
@@ -342,8 +342,8 @@ TEST(CheckedLockTest, AcquireUniversalPredecessorAfterUniversalPredecessor)
NO_THREAD_SAFETY_ANALYSIS {
// A universal-predecessor lock may not be acquired after any other lock, not
// even another universal predecessor.
- CheckedLock universal_predecessor((UniversalPredecessor()));
- CheckedLock universal_predecessor2((UniversalPredecessor()));
+ CheckedLock universal_predecessor{UniversalPredecessor()};
+ CheckedLock universal_predecessor2{UniversalPredecessor()};
EXPECT_DCHECK_DEATH({
universal_predecessor.Acquire();
@@ -351,6 +351,70 @@ NO_THREAD_SAFETY_ANALYSIS {
});
}
+TEST(CheckedLockTest, AcquireLockBeforeUniversalSuccessor) {
+ // Acquisition of a universal-successor lock should be allowed
+ // after any other acquisition.
+ CheckedLock universal_successor{UniversalSuccessor()};
+ CheckedLock lock;
+
+ lock.Acquire();
+ universal_successor.Acquire();
+ universal_successor.Release();
+ lock.Release();
+}
+
+TEST(CheckedLockTest, AcquireMultipleLocksBeforeAndAfterUniversalSuccessor)
+NO_THREAD_SAFETY_ANALYSIS {
+ // Acquisition of a universal-successor lock does not affect acquisition
+ // rules for locks beyond the one acquired directly after it.
+ CheckedLock lock;
+ CheckedLock universal_successor{UniversalSuccessor()};
+ CheckedLock lock2;
+
+ lock.Acquire();
+ universal_successor.Acquire();
+ universal_successor.Release();
+ lock.Release();
+
+ EXPECT_DCHECK_DEATH({
+ universal_successor.Acquire();
+ lock2.Acquire();
+ });
+}
+
+TEST(CheckedLockTest, AcquireUniversalSuccessorBeforeLock)
+NO_THREAD_SAFETY_ANALYSIS {
+ // A universal-successor lock may not be acquired before any other lock.
+ CheckedLock universal_successor{UniversalSuccessor()};
+ CheckedLock lock;
+
+ EXPECT_DCHECK_DEATH({
+ universal_successor.Acquire();
+ lock.Acquire();
+ });
+}
+
+TEST(CheckedLockTest, AcquireUniversalSuccessorAfterUniversalSuccessor)
+NO_THREAD_SAFETY_ANALYSIS {
+ // A universal-successor lock may not be acquired before any other lock, not
+ // even another universal successor.
+ CheckedLock universal_successor{UniversalSuccessor()};
+ CheckedLock universal_successor2{UniversalSuccessor()};
+
+ EXPECT_DCHECK_DEATH({
+ universal_successor.Acquire();
+ universal_successor2.Acquire();
+ });
+}
+
+TEST(CheckedLockTest, UniversalSuccessorAsPredecessor)
+NO_THREAD_SAFETY_ANALYSIS {
+ // A universal-successor lock cannot be declared as a predecessor to
+ // any other lock.
+ CheckedLock universal_successor{UniversalSuccessor()};
+ EXPECT_DCHECK_DEATH({ CheckedLock banned_successor(&universal_successor); });
+}
+
TEST(CheckedLockTest, AssertNoLockHeldOnCurrentThread) {
// AssertNoLockHeldOnCurrentThread() shouldn't fail when no lock is acquired.
CheckedLock::AssertNoLockHeldOnCurrentThread();
diff --git a/chromium/base/task/common/task_annotator.cc b/chromium/base/task/common/task_annotator.cc
index 1001359c712..505a55b0e49 100644
--- a/chromium/base/task/common/task_annotator.cc
+++ b/chromium/base/task/common/task_annotator.cc
@@ -10,7 +10,7 @@
#include "base/debug/alias.h"
#include "base/no_destructor.h"
#include "base/threading/thread_local.h"
-#include "base/trace_event/trace_event.h"
+#include "base/trace_event/base_tracing.h"
namespace base {
@@ -64,10 +64,10 @@ void TaskAnnotator::WillQueueTask(const char* trace_event_name,
DCHECK(trace_event_name);
DCHECK(pending_task);
DCHECK(task_queue_name);
- TRACE_EVENT_WITH_FLOW1(
- TRACE_DISABLED_BY_DEFAULT("toplevel.flow"), trace_event_name,
- TRACE_ID_LOCAL(GetTaskTraceID(*pending_task)), TRACE_EVENT_FLAG_FLOW_OUT,
- "task_queue_name", task_queue_name);
+ TRACE_EVENT_WITH_FLOW1("toplevel.flow", trace_event_name,
+ TRACE_ID_LOCAL(GetTaskTraceID(*pending_task)),
+ TRACE_EVENT_FLAG_FLOW_OUT, "task_queue_name",
+ task_queue_name);
DCHECK(!pending_task->task_backtrace[0])
<< "Task backtrace was already set, task posted twice??";
@@ -98,9 +98,9 @@ void TaskAnnotator::RunTask(const char* trace_event_name,
TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("toplevel.ipc"),
"TaskAnnotator::RunTask", "ipc_hash", pending_task->ipc_hash);
- TRACE_EVENT_WITH_FLOW0(
- TRACE_DISABLED_BY_DEFAULT("toplevel.flow"), trace_event_name,
- TRACE_ID_LOCAL(GetTaskTraceID(*pending_task)), TRACE_EVENT_FLAG_FLOW_IN);
+ TRACE_EVENT_WITH_FLOW0("toplevel.flow", trace_event_name,
+ TRACE_ID_LOCAL(GetTaskTraceID(*pending_task)),
+ TRACE_EVENT_FLAG_FLOW_IN);
// Before running the task, store the IPC context and the task backtrace with
// the chain of PostTasks that resulted in this call and deliberately alias it
diff --git a/chromium/base/task/post_job.h b/chromium/base/task/post_job.h
index 1d396f1fb11..3ae31867e2b 100644
--- a/chromium/base/task/post_job.h
+++ b/chromium/base/task/post_job.h
@@ -7,8 +7,8 @@
#include "base/base_export.h"
#include "base/callback.h"
+#include "base/check_op.h"
#include "base/location.h"
-#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/task/task_traits.h"
diff --git a/chromium/base/task/post_task.cc b/chromium/base/task/post_task.cc
index 12218599e48..744d0043520 100644
--- a/chromium/base/task/post_task.cc
+++ b/chromium/base/task/post_task.cc
@@ -62,19 +62,6 @@ TaskExecutor* GetTaskExecutorForTraits(const TaskTraits& traits) {
} // namespace
-bool PostTask(const Location& from_here, OnceClosure task) {
- // TODO(skyostil): Make task traits required here too.
- return PostDelayedTask(from_here, {ThreadPool()}, std::move(task),
- TimeDelta());
-}
-
-bool PostTaskAndReply(const Location& from_here,
- OnceClosure task,
- OnceClosure reply) {
- return PostTaskAndReply(from_here, {ThreadPool()}, std::move(task),
- std::move(reply));
-}
-
bool PostTask(const Location& from_here,
const TaskTraits& traits,
OnceClosure task) {
diff --git a/chromium/base/task/post_task.h b/chromium/base/task/post_task.h
index 8bcd24fad1c..84b959771b1 100644
--- a/chromium/base/task/post_task.h
+++ b/chromium/base/task/post_task.h
@@ -99,27 +99,6 @@ namespace base {
// have to worry about this. You will encounter DCHECKs or nullptr dereferences
// if this is violated. For tests, prefer base::test::TaskEnvironment.
-// Equivalent to calling PostTask with default TaskTraits.
-BASE_EXPORT bool PostTask(const Location& from_here, OnceClosure task);
-inline bool PostTask(OnceClosure task,
- const Location& from_here = Location::Current()) {
- return PostTask(from_here, std::move(task));
-}
-
-// Equivalent to calling PostTaskAndReply with default TaskTraits.
-BASE_EXPORT bool PostTaskAndReply(const Location& from_here,
- OnceClosure task,
- OnceClosure reply);
-
-// Equivalent to calling PostTaskAndReplyWithResult with default TaskTraits.
-template <typename TaskReturnType, typename ReplyArgType>
-bool PostTaskAndReplyWithResult(const Location& from_here,
- OnceCallback<TaskReturnType()> task,
- OnceCallback<void(ReplyArgType)> reply) {
- return PostTaskAndReplyWithResult(from_here, {ThreadPool()}, std::move(task),
- std::move(reply));
-}
-
// Posts |task| with specific |traits|. Returns false if the task definitely
// won't run because of current shutdown state.
BASE_EXPORT bool PostTask(const Location& from_here,
diff --git a/chromium/base/task/post_task_unittest.cc b/chromium/base/task/post_task_unittest.cc
index cbbbe666b58..b54872791a4 100644
--- a/chromium/base/task/post_task_unittest.cc
+++ b/chromium/base/task/post_task_unittest.cc
@@ -99,10 +99,6 @@ class PostTaskTestWithExecutor : public ::testing::Test {
};
TEST_F(PostTaskTestWithExecutor, PostTaskToThreadPool) {
- // Tasks without extension should not go to the TestTaskExecutor.
- EXPECT_TRUE(PostTask(FROM_HERE, DoNothing()));
- EXPECT_FALSE(executor_.runner()->HasPendingTask());
-
EXPECT_TRUE(PostTask(FROM_HERE, {ThreadPool(), MayBlock()}, DoNothing()));
EXPECT_FALSE(executor_.runner()->HasPendingTask());
diff --git a/chromium/base/task/sequence_manager/lazily_deallocated_deque.h b/chromium/base/task/sequence_manager/lazily_deallocated_deque.h
index b7d1b428afa..e439b73c457 100644
--- a/chromium/base/task/sequence_manager/lazily_deallocated_deque.h
+++ b/chromium/base/task/sequence_manager/lazily_deallocated_deque.h
@@ -10,9 +10,9 @@
#include <memory>
#include <vector>
+#include "base/check_op.h"
#include "base/debug/alias.h"
#include "base/gtest_prod_util.h"
-#include "base/logging.h"
#include "base/macros.h"
#include "base/time/time.h"
diff --git a/chromium/base/task/sequence_manager/sequence_manager_impl.cc b/chromium/base/task/sequence_manager/sequence_manager_impl.cc
index 3262cadd9a3..31db9535321 100644
--- a/chromium/base/task/sequence_manager/sequence_manager_impl.cc
+++ b/chromium/base/task/sequence_manager/sequence_manager_impl.cc
@@ -13,6 +13,7 @@
#include "base/debug/crash_logging.h"
#include "base/debug/stack_trace.h"
#include "base/json/json_writer.h"
+#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/message_loop/message_loop_current.h"
#include "base/no_destructor.h"
@@ -28,7 +29,7 @@
#include "base/threading/thread_local.h"
#include "base/time/default_tick_clock.h"
#include "base/time/tick_clock.h"
-#include "base/trace_event/trace_event.h"
+#include "base/trace_event/base_tracing.h"
#include "build/build_config.h"
namespace base {
@@ -42,6 +43,25 @@ GetTLSSequenceManagerImpl() {
return lazy_tls_ptr.get();
}
+class TracedBaseValue : public trace_event::ConvertableToTraceFormat {
+ public:
+ explicit TracedBaseValue(Value value) : value_(std::move(value)) {}
+ ~TracedBaseValue() override = default;
+
+ void AppendAsTraceFormat(std::string* out) const override {
+ if (!value_.is_none()) {
+ std::string tmp;
+ JSONWriter::Write(value_, &tmp);
+ *out += tmp;
+ } else {
+ *out += "{}";
+ }
+ }
+
+ private:
+ base::Value value_;
+};
+
} // namespace
std::unique_ptr<SequenceManager> CreateSequenceManagerOnCurrentThread(
@@ -484,8 +504,8 @@ const char* RunTaskTraceNameForPriority(TaskQueue::QueuePriority priority) {
} // namespace
-Task* SequenceManagerImpl::SelectNextTask() {
- Task* task = SelectNextTaskImpl();
+Task* SequenceManagerImpl::SelectNextTask(SelectTaskOption option) {
+ Task* task = SelectNextTaskImpl(option);
if (!task)
return nullptr;
@@ -557,7 +577,7 @@ void SequenceManagerImpl::LogTaskDebugInfo(
}
#endif // DCHECK_IS_ON() && !defined(OS_NACL)
-Task* SequenceManagerImpl::SelectNextTaskImpl() {
+Task* SequenceManagerImpl::SelectNextTaskImpl(SelectTaskOption option) {
CHECK(Validate());
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
@@ -577,10 +597,12 @@ Task* SequenceManagerImpl::SelectNextTaskImpl() {
while (true) {
internal::WorkQueue* work_queue =
- main_thread_only().selector.SelectWorkQueueToService();
+ main_thread_only().selector.SelectWorkQueueToService(option);
TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
TRACE_DISABLED_BY_DEFAULT("sequence_manager.debug"), "SequenceManager",
- this, AsValueWithSelectorResult(work_queue, /* force_verbose */ false));
+ this,
+ AsValueWithSelectorResultForTracing(work_queue,
+ /* force_verbose */ false));
if (!work_queue)
return nullptr;
@@ -648,15 +670,18 @@ void SequenceManagerImpl::DidRunTask() {
CleanUpQueues();
}
-TimeDelta SequenceManagerImpl::DelayTillNextTask(LazyNow* lazy_now) const {
+TimeDelta SequenceManagerImpl::DelayTillNextTask(
+ LazyNow* lazy_now,
+ SelectTaskOption option) const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
- if (auto priority = main_thread_only().selector.GetHighestPendingPriority()) {
+ if (auto priority =
+ main_thread_only().selector.GetHighestPendingPriority(option)) {
// If the selector has non-empty queues we trivially know there is immediate
// work to be done. However we may want to yield to native work if it is
// more important.
if (UNLIKELY(!ShouldRunTaskOfPriority(*priority)))
- return GetDelayTillNextDelayedTask(lazy_now);
+ return GetDelayTillNextDelayedTask(lazy_now, option);
return TimeDelta();
}
@@ -664,9 +689,11 @@ TimeDelta SequenceManagerImpl::DelayTillNextTask(LazyNow* lazy_now) const {
// NB ReloadEmptyWorkQueues involves a memory barrier, so it's fastest to not
// do this always.
ReloadEmptyWorkQueues();
- if (auto priority = main_thread_only().selector.GetHighestPendingPriority()) {
+
+ if (auto priority =
+ main_thread_only().selector.GetHighestPendingPriority(option)) {
if (UNLIKELY(!ShouldRunTaskOfPriority(*priority)))
- return GetDelayTillNextDelayedTask(lazy_now);
+ return GetDelayTillNextDelayedTask(lazy_now, option);
return TimeDelta();
}
@@ -674,13 +701,17 @@ TimeDelta SequenceManagerImpl::DelayTillNextTask(LazyNow* lazy_now) const {
// call MoveReadyDelayedTasksToWorkQueues because it's assumed
// DelayTillNextTask will return TimeDelta>() if the delayed task is due to
// run now.
- return GetDelayTillNextDelayedTask(lazy_now);
+ return GetDelayTillNextDelayedTask(lazy_now, option);
}
TimeDelta SequenceManagerImpl::GetDelayTillNextDelayedTask(
- LazyNow* lazy_now) const {
+ LazyNow* lazy_now,
+ SelectTaskOption option) const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
+ if (option == SelectTaskOption::kSkipDelayedTask)
+ return TimeDelta::Max();
+
TimeDelta delay_till_next_task = TimeDelta::Max();
for (TimeDomain* time_domain : main_thread_only().time_domains) {
Optional<TimeDelta> delay = time_domain->DelayTillNextTask(lazy_now);
@@ -895,49 +926,45 @@ EnqueueOrder SequenceManagerImpl::GetNextSequenceNumber() {
}
std::unique_ptr<trace_event::ConvertableToTraceFormat>
-SequenceManagerImpl::AsValueWithSelectorResult(
+SequenceManagerImpl::AsValueWithSelectorResultForTracing(
internal::WorkQueue* selected_work_queue,
bool force_verbose) const {
- auto state = std::make_unique<trace_event::TracedValue>();
- AsValueWithSelectorResultInto(state.get(), selected_work_queue,
- force_verbose);
- return std::move(state);
+ return std::make_unique<TracedBaseValue>(
+ AsValueWithSelectorResult(selected_work_queue, force_verbose));
}
-void SequenceManagerImpl::AsValueWithSelectorResultInto(
- trace_event::TracedValue* state,
+Value SequenceManagerImpl::AsValueWithSelectorResult(
internal::WorkQueue* selected_work_queue,
bool force_verbose) const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
TimeTicks now = NowTicks();
- state->BeginArray("active_queues");
+ Value state(Value::Type::DICTIONARY);
+ Value active_queues(Value::Type::LIST);
for (auto* const queue : main_thread_only().active_queues)
- queue->AsValueInto(now, state, force_verbose);
- state->EndArray();
- state->BeginArray("queues_to_gracefully_shutdown");
+ active_queues.Append(queue->AsValue(now, force_verbose));
+ state.SetKey("active_queues", std::move(active_queues));
+ Value shutdown_queues(Value::Type::LIST);
for (const auto& pair : main_thread_only().queues_to_gracefully_shutdown)
- pair.first->AsValueInto(now, state, force_verbose);
- state->EndArray();
- state->BeginArray("queues_to_delete");
+ shutdown_queues.Append(pair.first->AsValue(now, force_verbose));
+ state.SetKey("queues_to_gracefully_shutdown", std::move(shutdown_queues));
+ Value queues_to_delete(Value::Type::LIST);
for (const auto& pair : main_thread_only().queues_to_delete)
- pair.first->AsValueInto(now, state, force_verbose);
- state->EndArray();
- state->BeginDictionary("selector");
- main_thread_only().selector.AsValueInto(state);
- state->EndDictionary();
+ queues_to_delete.Append(pair.first->AsValue(now, force_verbose));
+ state.SetKey("queues_to_delete", std::move(queues_to_delete));
+ state.SetKey("selector", main_thread_only().selector.AsValue());
if (selected_work_queue) {
- state->SetString("selected_queue",
- selected_work_queue->task_queue()->GetName());
- state->SetString("work_queue_name", selected_work_queue->name());
+ state.SetStringKey("selected_queue",
+ selected_work_queue->task_queue()->GetName());
+ state.SetStringKey("work_queue_name", selected_work_queue->name());
}
- state->SetString("native_work_priority",
- TaskQueue::PriorityToString(
- *main_thread_only().pending_native_work.begin()));
-
- state->BeginArray("time_domains");
+ state.SetStringKey("native_work_priority",
+ TaskQueue::PriorityToString(
+ *main_thread_only().pending_native_work.begin()));
+ Value time_domains(Value::Type::LIST);
for (auto* time_domain : main_thread_only().time_domains)
- time_domain->AsValueInto(state);
- state->EndArray();
+ time_domains.Append(time_domain->AsValue());
+ state.SetKey("time_domains", std::move(time_domains));
+ return state;
}
void SequenceManagerImpl::OnTaskQueueEnabled(internal::TaskQueueImpl* queue) {
@@ -1092,9 +1119,10 @@ scoped_refptr<TaskQueue> SequenceManagerImpl::CreateTaskQueue(
}
std::string SequenceManagerImpl::DescribeAllPendingTasks() const {
- trace_event::TracedValueJSON value;
- AsValueWithSelectorResultInto(&value, nullptr, /* force_verbose */ true);
- return value.ToJSON();
+ Value value = AsValueWithSelectorResult(nullptr, /* force_verbose */ true);
+ std::string result;
+ JSONWriter::Write(value, &result);
+ return result;
}
std::unique_ptr<NativeWorkHandle> SequenceManagerImpl::OnNativeWorkPending(
diff --git a/chromium/base/task/sequence_manager/sequence_manager_impl.h b/chromium/base/task/sequence_manager/sequence_manager_impl.h
index cf22672ecca..10fd729b6db 100644
--- a/chromium/base/task/sequence_manager/sequence_manager_impl.h
+++ b/chromium/base/task/sequence_manager/sequence_manager_impl.h
@@ -38,6 +38,7 @@
#include "base/task/sequence_manager/thread_controller.h"
#include "base/threading/thread_checker.h"
#include "base/time/default_tick_clock.h"
+#include "base/values.h"
#include "build/build_config.h"
namespace base {
@@ -125,9 +126,12 @@ class BASE_EXPORT SequenceManagerImpl
void RemoveTaskObserver(TaskObserver* task_observer) override;
// SequencedTaskSource implementation:
- Task* SelectNextTask() override;
+ Task* SelectNextTask(
+ SelectTaskOption option = SelectTaskOption::kDefault) override;
void DidRunTask() override;
- TimeDelta DelayTillNextTask(LazyNow* lazy_now) const override;
+ TimeDelta DelayTillNextTask(
+ LazyNow* lazy_now,
+ SelectTaskOption option = SelectTaskOption::kDefault) const override;
bool HasPendingHighResolutionTasks() override;
bool OnSystemIdle() override;
@@ -342,11 +346,10 @@ class BASE_EXPORT SequenceManagerImpl
bool GetAddQueueTimeToTasks();
std::unique_ptr<trace_event::ConvertableToTraceFormat>
- AsValueWithSelectorResult(internal::WorkQueue* selected_work_queue,
- bool force_verbose) const;
- void AsValueWithSelectorResultInto(trace_event::TracedValue*,
- internal::WorkQueue* selected_work_queue,
- bool force_verbose) const;
+ AsValueWithSelectorResultForTracing(internal::WorkQueue* selected_work_queue,
+ bool force_verbose) const;
+ Value AsValueWithSelectorResult(internal::WorkQueue* selected_work_queue,
+ bool force_verbose) const;
// Used in construction of TaskQueueImpl to obtain an AtomicFlag which it can
// use to request reload by ReloadEmptyWorkQueues. The lifetime of
@@ -379,14 +382,15 @@ class BASE_EXPORT SequenceManagerImpl
// Helper to terminate all scoped trace events to allow starting new ones
// in SelectNextTask().
- Task* SelectNextTaskImpl();
+ Task* SelectNextTaskImpl(SelectTaskOption option);
// Check if a task of priority |priority| should run given the pending set of
// native work.
bool ShouldRunTaskOfPriority(TaskQueue::QueuePriority priority) const;
// Ignores any immediate work.
- TimeDelta GetDelayTillNextDelayedTask(LazyNow* lazy_now) const;
+ TimeDelta GetDelayTillNextDelayedTask(LazyNow* lazy_now,
+ SelectTaskOption option) const;
#if DCHECK_IS_ON()
void LogTaskDebugInfo(const internal::WorkQueue* work_queue) const;
diff --git a/chromium/base/task/sequence_manager/sequence_manager_impl_unittest.cc b/chromium/base/task/sequence_manager/sequence_manager_impl_unittest.cc
index 584691fe3b0..baf3d6fbdb9 100644
--- a/chromium/base/task/sequence_manager/sequence_manager_impl_unittest.cc
+++ b/chromium/base/task/sequence_manager/sequence_manager_impl_unittest.cc
@@ -48,14 +48,18 @@
#include "base/test/task_environment.h"
#include "base/test/test_mock_time_task_runner.h"
#include "base/test/test_simple_task_runner.h"
-#include "base/test/trace_event_analyzer.h"
#include "base/threading/thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
-#include "base/trace_event/blame_context.h"
+#include "base/trace_event/base_tracing.h"
+#include "base/tracing_buildflags.h"
#include "build/build_config.h"
#include "testing/gmock/include/gmock/gmock.h"
+#if BUILDFLAG(ENABLE_BASE_TRACING)
+#include "base/test/trace_event_analyzer.h"
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
+
using base::sequence_manager::EnqueueOrder;
using testing::_;
using testing::AnyNumber;
@@ -2719,6 +2723,7 @@ TEST_P(SequenceManagerTest, CurrentlyExecutingTaskQueue_NestedLoop) {
EXPECT_EQ(nullptr, sequence_manager()->currently_executing_task_queue());
}
+#if BUILDFLAG(ENABLE_BASE_TRACING)
TEST_P(SequenceManagerTest, BlameContextAttribution) {
if (GetUnderlyingRunnerType() == TestType::kMessagePump)
return;
@@ -2744,6 +2749,7 @@ TEST_P(SequenceManagerTest, BlameContextAttribution) {
EXPECT_EQ(2u, events.size());
}
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
TEST_P(SequenceManagerTest, NoWakeUpsForCanceledDelayedTasks) {
auto queue = CreateTaskQueue();
@@ -2993,6 +2999,181 @@ TEST_P(SequenceManagerTest, SweepCanceledDelayedTasks_ManyTasks) {
}
}
+TEST_P(SequenceManagerTest, DelayedTasksNotSelected) {
+ auto queue = CreateTaskQueue();
+ constexpr TimeDelta kDelay(TimeDelta::FromMilliseconds(10));
+ LazyNow lazy_now(mock_tick_clock());
+ EXPECT_EQ(TimeDelta::Max(), sequence_manager()->DelayTillNextTask(&lazy_now));
+ EXPECT_EQ(
+ TimeDelta::Max(),
+ sequence_manager()->DelayTillNextTask(
+ &lazy_now, SequencedTaskSource::SelectTaskOption::kSkipDelayedTask));
+
+ queue->task_runner()->PostDelayedTask(FROM_HERE, BindOnce(&NopTask), kDelay);
+
+ // No task should be ready to execute.
+ EXPECT_FALSE(sequence_manager()->SelectNextTask(
+ SequencedTaskSource::SelectTaskOption::kDefault));
+ EXPECT_FALSE(sequence_manager()->SelectNextTask(
+ SequencedTaskSource::SelectTaskOption::kSkipDelayedTask));
+
+ EXPECT_EQ(kDelay, sequence_manager()->DelayTillNextTask(&lazy_now));
+ EXPECT_EQ(
+ TimeDelta::Max(),
+ sequence_manager()->DelayTillNextTask(
+ &lazy_now, SequencedTaskSource::SelectTaskOption::kSkipDelayedTask));
+
+ AdvanceMockTickClock(kDelay);
+ LazyNow lazy_now2(mock_tick_clock());
+
+ // Delayed task is ready to be executed. Consider it only if not in power
+ // suspend state.
+ EXPECT_FALSE(sequence_manager()->SelectNextTask(
+ SequencedTaskSource::SelectTaskOption::kSkipDelayedTask));
+ EXPECT_EQ(
+ TimeDelta::Max(),
+ sequence_manager()->DelayTillNextTask(
+ &lazy_now2, SequencedTaskSource::SelectTaskOption::kSkipDelayedTask));
+
+ // Execute the delayed task.
+ EXPECT_TRUE(sequence_manager()->SelectNextTask(
+ SequencedTaskSource::SelectTaskOption::kDefault));
+ sequence_manager()->DidRunTask();
+ EXPECT_EQ(TimeDelta::Max(),
+ sequence_manager()->DelayTillNextTask(&lazy_now2));
+
+ // Tidy up.
+ queue->ShutdownTaskQueue();
+}
+
+TEST_P(SequenceManagerTest, DelayedTasksNotSelectedWithImmediateTask) {
+ auto queue = CreateTaskQueue();
+ constexpr TimeDelta kDelay(TimeDelta::FromMilliseconds(10));
+ LazyNow lazy_now(mock_tick_clock());
+
+ EXPECT_EQ(TimeDelta::Max(), sequence_manager()->DelayTillNextTask(&lazy_now));
+ EXPECT_EQ(
+ TimeDelta::Max(),
+ sequence_manager()->DelayTillNextTask(
+ &lazy_now, SequencedTaskSource::SelectTaskOption::kSkipDelayedTask));
+
+ // Post an immediate task.
+ queue->task_runner()->PostTask(FROM_HERE, BindOnce(&NopTask));
+ queue->task_runner()->PostDelayedTask(FROM_HERE, BindOnce(&NopTask), kDelay);
+
+ EXPECT_EQ(TimeDelta(), sequence_manager()->DelayTillNextTask(&lazy_now));
+ EXPECT_EQ(
+ TimeDelta(),
+ sequence_manager()->DelayTillNextTask(
+ &lazy_now, SequencedTaskSource::SelectTaskOption::kSkipDelayedTask));
+
+ AdvanceMockTickClock(kDelay);
+ LazyNow lazy_now2(mock_tick_clock());
+
+ // An immediate task is present, even if we skip the delayed tasks.
+ EXPECT_EQ(
+ TimeDelta(),
+ sequence_manager()->DelayTillNextTask(
+ &lazy_now2, SequencedTaskSource::SelectTaskOption::kSkipDelayedTask));
+
+ // Immediate task should be ready to execute, execute it.
+ EXPECT_TRUE(sequence_manager()->SelectNextTask(
+ SequencedTaskSource::SelectTaskOption::kSkipDelayedTask));
+ sequence_manager()->DidRunTask();
+
+ // Delayed task is ready to be executed. Consider it only if not in power
+ // suspend state. This test differs from
+ // SequenceManagerTest.DelayedTasksNotSelected as it confirms that delayed
+ // tasks are ignored even if they're already in the ready queue (per having
+ // performed task selection already before running the immediate task above).
+ EXPECT_FALSE(sequence_manager()->SelectNextTask(
+ SequencedTaskSource::SelectTaskOption::kSkipDelayedTask));
+ EXPECT_EQ(
+ TimeDelta::Max(),
+ sequence_manager()->DelayTillNextTask(
+ &lazy_now2, SequencedTaskSource::SelectTaskOption::kSkipDelayedTask));
+
+ // Execute the delayed task.
+ EXPECT_TRUE(sequence_manager()->SelectNextTask(
+ SequencedTaskSource::SelectTaskOption::kDefault));
+ EXPECT_EQ(
+ TimeDelta::Max(),
+ sequence_manager()->DelayTillNextTask(
+ &lazy_now2, SequencedTaskSource::SelectTaskOption::kSkipDelayedTask));
+ sequence_manager()->DidRunTask();
+
+ // Tidy up.
+ queue->ShutdownTaskQueue();
+}
+
+TEST_P(SequenceManagerTest,
+ DelayedTasksNotSelectedWithImmediateTaskWithPriority) {
+ auto queues = CreateTaskQueues(4u);
+ queues[0]->SetQueuePriority(TaskQueue::QueuePriority::kLowPriority);
+ queues[1]->SetQueuePriority(TaskQueue::QueuePriority::kNormalPriority);
+ queues[2]->SetQueuePriority(TaskQueue::QueuePriority::kHighPriority);
+ queues[3]->SetQueuePriority(TaskQueue::QueuePriority::kVeryHighPriority);
+
+ // Post immediate tasks.
+ queues[0]->task_runner()->PostTask(FROM_HERE, BindOnce(&NopTask));
+ queues[2]->task_runner()->PostTask(FROM_HERE, BindOnce(&NopTask));
+
+ // Post delayed tasks.
+ constexpr TimeDelta kDelay(TimeDelta::FromMilliseconds(10));
+ queues[1]->task_runner()->PostDelayedTask(FROM_HERE, BindOnce(&NopTask),
+ kDelay);
+ queues[3]->task_runner()->PostDelayedTask(FROM_HERE, BindOnce(&NopTask),
+ kDelay);
+
+ LazyNow lazy_now(mock_tick_clock());
+
+ EXPECT_EQ(
+ TimeDelta(),
+ sequence_manager()->DelayTillNextTask(
+ &lazy_now, SequencedTaskSource::SelectTaskOption::kSkipDelayedTask));
+
+ AdvanceMockTickClock(kDelay);
+ LazyNow lazy_now2(mock_tick_clock());
+
+ EXPECT_EQ(
+ TimeDelta(),
+ sequence_manager()->DelayTillNextTask(
+ &lazy_now2, SequencedTaskSource::SelectTaskOption::kSkipDelayedTask));
+
+ // Immediate tasks should be ready to execute, execute them.
+ EXPECT_TRUE(sequence_manager()->SelectNextTask(
+ SequencedTaskSource::SelectTaskOption::kSkipDelayedTask));
+ sequence_manager()->DidRunTask();
+ EXPECT_TRUE(sequence_manager()->SelectNextTask(
+ SequencedTaskSource::SelectTaskOption::kSkipDelayedTask));
+ sequence_manager()->DidRunTask();
+
+ // No immediate tasks can be executed anymore.
+ EXPECT_FALSE(sequence_manager()->SelectNextTask(
+ SequencedTaskSource::SelectTaskOption::kSkipDelayedTask));
+ EXPECT_EQ(
+ TimeDelta::Max(),
+ sequence_manager()->DelayTillNextTask(
+ &lazy_now2, SequencedTaskSource::SelectTaskOption::kSkipDelayedTask));
+
+ // Execute delayed tasks.
+ EXPECT_TRUE(sequence_manager()->SelectNextTask());
+ sequence_manager()->DidRunTask();
+ EXPECT_TRUE(sequence_manager()->SelectNextTask());
+ sequence_manager()->DidRunTask();
+
+ // No delayed tasks can be executed anymore.
+ EXPECT_FALSE(sequence_manager()->SelectNextTask());
+ EXPECT_EQ(TimeDelta::Max(),
+ sequence_manager()->DelayTillNextTask(&lazy_now2));
+
+ // Tidy up.
+ queues[0]->ShutdownTaskQueue();
+ queues[1]->ShutdownTaskQueue();
+ queues[2]->ShutdownTaskQueue();
+ queues[3]->ShutdownTaskQueue();
+}
+
TEST_P(SequenceManagerTest, DelayTillNextTask) {
auto queues = CreateTaskQueues(2u);
@@ -4159,8 +4340,6 @@ class MockTimeDomain : public TimeDomain {
MOCK_METHOD1(MaybeFastForwardToNextTask, bool(bool quit_when_idle_requested));
- void AsValueIntoInternal(trace_event::TracedValue* state) const override {}
-
const char* GetName() const override { return "Test"; }
void SetNextDelayedDoWork(LazyNow* lazy_now, TimeTicks run_time) override {}
diff --git a/chromium/base/task/sequence_manager/sequence_manager_perftest.cc b/chromium/base/task/sequence_manager/sequence_manager_perftest.cc
index 463f82bf2f3..5ea530c2cce 100644
--- a/chromium/base/task/sequence_manager/sequence_manager_perftest.cc
+++ b/chromium/base/task/sequence_manager/sequence_manager_perftest.cc
@@ -8,6 +8,7 @@
#include <memory>
#include "base/bind.h"
+#include "base/logging.h"
#include "base/message_loop/message_pump_default.h"
#include "base/message_loop/message_pump_type.h"
#include "base/run_loop.h"
diff --git a/chromium/base/task/sequence_manager/sequenced_task_source.h b/chromium/base/task/sequence_manager/sequenced_task_source.h
index 5ea8874ab5e..7fea4d213b3 100644
--- a/chromium/base/task/sequence_manager/sequenced_task_source.h
+++ b/chromium/base/task/sequence_manager/sequenced_task_source.h
@@ -17,20 +17,27 @@ namespace internal {
// Interface to pass tasks to ThreadController.
class SequencedTaskSource {
public:
+ enum class SelectTaskOption { kDefault, kSkipDelayedTask };
+
virtual ~SequencedTaskSource() = default;
// Returns the next task to run from this source or nullptr if
// there're no more tasks ready to run. If a task is returned,
// DidRunTask() must be invoked before the next call to SelectNextTask().
- virtual Task* SelectNextTask() = 0;
+ // |option| allows control on which kind of tasks can be selected.
+ virtual Task* SelectNextTask(
+ SelectTaskOption option = SelectTaskOption::kDefault) = 0;
// Notifies this source that the task previously obtained
// from SelectNextTask() has been completed.
virtual void DidRunTask() = 0;
// Returns the delay till the next task or TimeDelta::Max()
- // if there are no tasks left.
- virtual TimeDelta DelayTillNextTask(LazyNow* lazy_now) const = 0;
+ // if there are no tasks left. |option| allows control on which kind of tasks
+ // can be selected.
+ virtual TimeDelta DelayTillNextTask(
+ LazyNow* lazy_now,
+ SelectTaskOption option = SelectTaskOption::kDefault) const = 0;
// Return true if there are any pending tasks in the task source which require
// high resolution timing.
diff --git a/chromium/base/task/sequence_manager/task_queue_impl.cc b/chromium/base/task/sequence_manager/task_queue_impl.cc
index 88305f84345..2a71aabf793 100644
--- a/chromium/base/task/sequence_manager/task_queue_impl.cc
+++ b/chromium/base/task/sequence_manager/task_queue_impl.cc
@@ -4,9 +4,12 @@
#include "base/task/sequence_manager/task_queue_impl.h"
+#include <inttypes.h>
+
#include <memory>
#include <utility>
+#include "base/logging.h"
#include "base/strings/stringprintf.h"
#include "base/task/common/scoped_defer_task_posting.h"
#include "base/task/sequence_manager/sequence_manager_impl.h"
@@ -15,8 +18,7 @@
#include "base/task/task_observer.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
-#include "base/trace_event/blame_context.h"
-#include "base/trace_event/common/trace_event_common.h"
+#include "base/trace_event/base_tracing.h"
#include "build/build_config.h"
namespace base {
@@ -650,55 +652,52 @@ TaskQueue::QueuePriority TaskQueueImpl::GetQueuePriority() const {
return static_cast<TaskQueue::QueuePriority>(set_index);
}
-void TaskQueueImpl::AsValueInto(TimeTicks now,
- trace_event::TracedValue* state,
- bool force_verbose) const {
+Value TaskQueueImpl::AsValue(TimeTicks now, bool force_verbose) const {
base::internal::CheckedAutoLock lock(any_thread_lock_);
- state->BeginDictionary();
- state->SetString("name", GetName());
+ Value state(Value::Type::DICTIONARY);
+ state.SetStringKey("name", GetName());
if (any_thread_.unregistered) {
- state->SetBoolean("unregistered", true);
- state->EndDictionary();
- return;
+ state.SetBoolKey("unregistered", true);
+ return state;
}
DCHECK(main_thread_only().time_domain);
DCHECK(main_thread_only().delayed_work_queue);
DCHECK(main_thread_only().immediate_work_queue);
- state->SetString(
+ state.SetStringKey(
"task_queue_id",
StringPrintf("0x%" PRIx64,
static_cast<uint64_t>(reinterpret_cast<uintptr_t>(this))));
- state->SetBoolean("enabled", IsQueueEnabled());
- state->SetString("time_domain_name",
- main_thread_only().time_domain->GetName());
- state->SetInteger("any_thread_.immediate_incoming_queuesize",
- any_thread_.immediate_incoming_queue.size());
- state->SetInteger("delayed_incoming_queue_size",
- main_thread_only().delayed_incoming_queue.size());
- state->SetInteger("immediate_work_queue_size",
- main_thread_only().immediate_work_queue->Size());
- state->SetInteger("delayed_work_queue_size",
- main_thread_only().delayed_work_queue->Size());
-
- state->SetInteger("any_thread_.immediate_incoming_queuecapacity",
- any_thread_.immediate_incoming_queue.capacity());
- state->SetInteger("immediate_work_queue_capacity",
- immediate_work_queue()->Capacity());
- state->SetInteger("delayed_work_queue_capacity",
- delayed_work_queue()->Capacity());
+ state.SetBoolKey("enabled", IsQueueEnabled());
+ state.SetStringKey("time_domain_name",
+ main_thread_only().time_domain->GetName());
+ state.SetIntKey("any_thread_.immediate_incoming_queuesize",
+ any_thread_.immediate_incoming_queue.size());
+ state.SetIntKey("delayed_incoming_queue_size",
+ main_thread_only().delayed_incoming_queue.size());
+ state.SetIntKey("immediate_work_queue_size",
+ main_thread_only().immediate_work_queue->Size());
+ state.SetIntKey("delayed_work_queue_size",
+ main_thread_only().delayed_work_queue->Size());
+
+ state.SetIntKey("any_thread_.immediate_incoming_queuecapacity",
+ any_thread_.immediate_incoming_queue.capacity());
+ state.SetIntKey("immediate_work_queue_capacity",
+ immediate_work_queue()->Capacity());
+ state.SetIntKey("delayed_work_queue_capacity",
+ delayed_work_queue()->Capacity());
if (!main_thread_only().delayed_incoming_queue.empty()) {
TimeDelta delay_to_next_task =
(main_thread_only().delayed_incoming_queue.top().delayed_run_time -
main_thread_only().time_domain->CreateLazyNow().Now());
- state->SetDouble("delay_to_next_task_ms",
- delay_to_next_task.InMillisecondsF());
+ state.SetDoubleKey("delay_to_next_task_ms",
+ delay_to_next_task.InMillisecondsF());
}
if (main_thread_only().current_fence)
- state->SetInteger("current_fence", main_thread_only().current_fence);
+ state.SetIntKey("current_fence", main_thread_only().current_fence);
if (main_thread_only().delayed_fence) {
- state->SetDouble(
+ state.SetDoubleKey(
"delayed_fence_seconds_from_now",
(main_thread_only().delayed_fence.value() - now).InSecondsF());
}
@@ -709,21 +708,18 @@ void TaskQueueImpl::AsValueInto(TimeTicks now,
&verbose);
if (verbose || force_verbose) {
- state->BeginArray("immediate_incoming_queue");
- QueueAsValueInto(any_thread_.immediate_incoming_queue, now, state);
- state->EndArray();
- state->BeginArray("delayed_work_queue");
- main_thread_only().delayed_work_queue->AsValueInto(now, state);
- state->EndArray();
- state->BeginArray("immediate_work_queue");
- main_thread_only().immediate_work_queue->AsValueInto(now, state);
- state->EndArray();
- state->BeginArray("delayed_incoming_queue");
- main_thread_only().delayed_incoming_queue.AsValueInto(now, state);
- state->EndArray();
+ state.SetKey("immediate_incoming_queue",
+ QueueAsValue(any_thread_.immediate_incoming_queue, now));
+ state.SetKey("delayed_work_queue",
+ main_thread_only().delayed_work_queue->AsValue(now));
+ state.SetKey("immediate_work_queue",
+ main_thread_only().immediate_work_queue->AsValue(now));
+ state.SetKey("delayed_incoming_queue",
+ main_thread_only().delayed_incoming_queue.AsValue(now));
}
- state->SetString("priority", TaskQueue::PriorityToString(GetQueuePriority()));
- state->EndDictionary();
+ state.SetStringKey("priority",
+ TaskQueue::PriorityToString(GetQueuePriority()));
+ return state;
}
void TaskQueueImpl::AddTaskObserver(TaskObserver* task_observer) {
@@ -913,34 +909,31 @@ bool TaskQueueImpl::WasBlockedOrLowPriority(EnqueueOrder enqueue_order) const {
}
// static
-void TaskQueueImpl::QueueAsValueInto(const TaskDeque& queue,
- TimeTicks now,
- trace_event::TracedValue* state) {
- for (const Task& task : queue) {
- TaskAsValueInto(task, now, state);
- }
+Value TaskQueueImpl::QueueAsValue(const TaskDeque& queue, TimeTicks now) {
+ Value state(Value::Type::LIST);
+ for (const Task& task : queue)
+ state.Append(TaskAsValue(task, now));
+ return state;
}
// static
-void TaskQueueImpl::TaskAsValueInto(const Task& task,
- TimeTicks now,
- trace_event::TracedValue* state) {
- state->BeginDictionary();
- state->SetString("posted_from", task.posted_from.ToString());
+Value TaskQueueImpl::TaskAsValue(const Task& task, TimeTicks now) {
+ Value state(Value::Type::DICTIONARY);
+ state.SetStringKey("posted_from", task.posted_from.ToString());
if (task.enqueue_order_set())
- state->SetInteger("enqueue_order", task.enqueue_order());
- state->SetInteger("sequence_num", task.sequence_num);
- state->SetBoolean("nestable", task.nestable == Nestable::kNestable);
- state->SetBoolean("is_high_res", task.is_high_res);
- state->SetBoolean("is_cancelled", task.task.IsCancelled());
- state->SetDouble("delayed_run_time",
- (task.delayed_run_time - TimeTicks()).InMillisecondsF());
+ state.SetIntKey("enqueue_order", task.enqueue_order());
+ state.SetIntKey("sequence_num", task.sequence_num);
+ state.SetBoolKey("nestable", task.nestable == Nestable::kNestable);
+ state.SetBoolKey("is_high_res", task.is_high_res);
+ state.SetBoolKey("is_cancelled", task.task.IsCancelled());
+ state.SetDoubleKey("delayed_run_time",
+ (task.delayed_run_time - TimeTicks()).InMillisecondsF());
const TimeDelta delayed_run_time_milliseconds_from_now =
task.delayed_run_time.is_null() ? TimeDelta()
: (task.delayed_run_time - now);
- state->SetDouble("delayed_run_time_milliseconds_from_now",
- delayed_run_time_milliseconds_from_now.InMillisecondsF());
- state->EndDictionary();
+ state.SetDoubleKey("delayed_run_time_milliseconds_from_now",
+ delayed_run_time_milliseconds_from_now.InMillisecondsF());
+ return state;
}
bool TaskQueueImpl::IsQueueEnabled() const {
@@ -1426,12 +1419,11 @@ void TaskQueueImpl::DelayedIncomingQueue::SweepCancelledTasks() {
std::make_heap(queue_.c.begin(), queue_.c.end(), queue_.comp);
}
-void TaskQueueImpl::DelayedIncomingQueue::AsValueInto(
- TimeTicks now,
- trace_event::TracedValue* state) const {
- for (const Task& task : queue_.c) {
- TaskAsValueInto(task, now, state);
- }
+Value TaskQueueImpl::DelayedIncomingQueue::AsValue(TimeTicks now) const {
+ Value state(Value::Type::LIST);
+ for (const Task& task : queue_.c)
+ state.Append(TaskAsValue(task, now));
+ return state;
}
} // namespace internal
diff --git a/chromium/base/task/sequence_manager/task_queue_impl.h b/chromium/base/task/sequence_manager/task_queue_impl.h
index aa382fb1490..b781bdb2b33 100644
--- a/chromium/base/task/sequence_manager/task_queue_impl.h
+++ b/chromium/base/task/sequence_manager/task_queue_impl.h
@@ -14,6 +14,7 @@
#include "base/callback.h"
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
+#include "base/observer_list.h"
#include "base/pending_task.h"
#include "base/task/common/checked_lock.h"
#include "base/task/common/intrusive_heap.h"
@@ -25,8 +26,9 @@
#include "base/task/sequence_manager/sequenced_task_source.h"
#include "base/task/sequence_manager/task_queue.h"
#include "base/threading/thread_checker.h"
-#include "base/trace_event/trace_event.h"
-#include "base/trace_event/traced_value.h"
+#include "base/time/time_override.h"
+#include "base/trace_event/base_tracing.h"
+#include "base/values.h"
namespace base {
namespace sequence_manager {
@@ -141,9 +143,7 @@ class BASE_EXPORT TaskQueueImpl {
// Must only be called from the thread this task queue was created on.
void ReloadEmptyImmediateWorkQueue();
- void AsValueInto(TimeTicks now,
- trace_event::TracedValue* state,
- bool force_verbose) const;
+ Value AsValue(TimeTicks now, bool force_verbose) const;
bool GetQuiescenceMonitored() const { return should_monitor_quiescence_; }
bool GetShouldNotifyObservers() const { return should_notify_observers_; }
@@ -322,7 +322,7 @@ class BASE_EXPORT TaskQueueImpl {
void SweepCancelledTasks();
std::priority_queue<Task> TakeTasks() { return std::move(queue_); }
- void AsValueInto(TimeTicks now, trace_event::TracedValue* state) const;
+ Value AsValue(TimeTicks now) const;
private:
struct PQueue : public std::priority_queue<Task> {
@@ -428,15 +428,8 @@ class BASE_EXPORT TaskQueueImpl {
void TakeImmediateIncomingQueueTasks(TaskDeque* queue);
void TraceQueueSize() const;
- static void QueueAsValueInto(const TaskDeque& queue,
- TimeTicks now,
- trace_event::TracedValue* state);
- static void QueueAsValueInto(const std::priority_queue<Task>& queue,
- TimeTicks now,
- trace_event::TracedValue* state);
- static void TaskAsValueInto(const Task& task,
- TimeTicks now,
- trace_event::TracedValue* state);
+ static Value QueueAsValue(const TaskDeque& queue, TimeTicks now);
+ static Value TaskAsValue(const Task& task, TimeTicks now);
// Schedules delayed work on time domain and calls the observer.
void UpdateDelayedWakeUp(LazyNow* lazy_now);
diff --git a/chromium/base/task/sequence_manager/task_queue_selector.cc b/chromium/base/task/sequence_manager/task_queue_selector.cc
index 3b4f59d1efc..3bacdc49e08 100644
--- a/chromium/base/task/sequence_manager/task_queue_selector.cc
+++ b/chromium/base/task/sequence_manager/task_queue_selector.cc
@@ -12,7 +12,7 @@
#include "base/task/sequence_manager/task_queue_impl.h"
#include "base/task/sequence_manager/work_queue.h"
#include "base/threading/thread_checker.h"
-#include "base/trace_event/traced_value.h"
+#include "base/trace_event/base_tracing.h"
namespace base {
namespace sequence_manager {
@@ -167,17 +167,34 @@ bool TaskQueueSelector::CheckContainsQueueForTest(
}
#endif
-WorkQueue* TaskQueueSelector::SelectWorkQueueToService() {
+WorkQueue* TaskQueueSelector::SelectWorkQueueToService(
+ SelectTaskOption option) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
- if (!active_priority_tracker_.HasActivePriority())
+ auto highest_priority = GetHighestPendingPriority(option);
+ if (!highest_priority.has_value())
return nullptr;
// Select the priority from which we will select a task. Usually this is
// the highest priority for which we have work, unless we are starving a lower
// priority.
- TaskQueue::QueuePriority priority =
- active_priority_tracker_.HighestActivePriority();
+ TaskQueue::QueuePriority priority = highest_priority.value();
+
+ // For selecting an immediate queue only, the highest priority can be used as
+ // a starting priority, but it is required to check work at other priorities.
+ // For the case where a delayed task is at a higher priority than an immediate
+ // task, HighestActivePriority(...) returns the priority of the delayed task
+ // but the resulting queue must be the lower one.
+ if (option == SelectTaskOption::kSkipDelayedTask) {
+ WorkQueue* queue =
+#if DCHECK_IS_ON()
+ random_task_selection_
+ ? ChooseImmediateOnlyWithPriority<SetOperationRandom>(priority)
+ :
+#endif
+ ChooseImmediateOnlyWithPriority<SetOperationOldest>(priority);
+ return queue;
+ }
WorkQueue* queue =
#if DCHECK_IS_ON()
@@ -197,21 +214,37 @@ WorkQueue* TaskQueueSelector::SelectWorkQueueToService() {
return queue;
}
-void TaskQueueSelector::AsValueInto(trace_event::TracedValue* state) const {
+Value TaskQueueSelector::AsValue() const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
- state->SetInteger("immediate_starvation_count", immediate_starvation_count_);
+ Value state(Value::Type::DICTIONARY);
+ state.SetIntKey("immediate_starvation_count", immediate_starvation_count_);
+ return state;
}
void TaskQueueSelector::SetTaskQueueSelectorObserver(Observer* observer) {
task_queue_selector_observer_ = observer;
}
-Optional<TaskQueue::QueuePriority>
-TaskQueueSelector::GetHighestPendingPriority() const {
+Optional<TaskQueue::QueuePriority> TaskQueueSelector::GetHighestPendingPriority(
+ SelectTaskOption option) const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!active_priority_tracker_.HasActivePriority())
return nullopt;
- return active_priority_tracker_.HighestActivePriority();
+
+ TaskQueue::QueuePriority highest_priority =
+ active_priority_tracker_.HighestActivePriority();
+ if (option != SelectTaskOption::kSkipDelayedTask)
+ return highest_priority;
+
+ for (; highest_priority != TaskQueue::kQueuePriorityCount;
+ highest_priority = NextPriority(highest_priority)) {
+ if (active_priority_tracker_.IsActive(highest_priority) &&
+ !immediate_work_queue_sets_.IsSetEmpty(highest_priority)) {
+ return highest_priority;
+ }
+ }
+
+ return nullopt;
}
void TaskQueueSelector::SetImmediateStarvationCountForTest(
@@ -220,7 +253,7 @@ void TaskQueueSelector::SetImmediateStarvationCountForTest(
}
bool TaskQueueSelector::HasTasksWithPriority(
- TaskQueue::QueuePriority priority) {
+ TaskQueue::QueuePriority priority) const {
return !delayed_work_queue_sets_.IsSetEmpty(priority) ||
!immediate_work_queue_sets_.IsSetEmpty(priority);
}
diff --git a/chromium/base/task/sequence_manager/task_queue_selector.h b/chromium/base/task/sequence_manager/task_queue_selector.h
index 9df9ac8a32a..5ad4d8f462e 100644
--- a/chromium/base/task/sequence_manager/task_queue_selector.h
+++ b/chromium/base/task/sequence_manager/task_queue_selector.h
@@ -11,8 +11,10 @@
#include "base/macros.h"
#include "base/pending_task.h"
#include "base/task/sequence_manager/sequence_manager.h"
+#include "base/task/sequence_manager/sequenced_task_source.h"
#include "base/task/sequence_manager/task_queue_selector_logic.h"
#include "base/task/sequence_manager/work_queue_sets.h"
+#include "base/values.h"
namespace base {
namespace sequence_manager {
@@ -24,6 +26,8 @@ class AssociatedThreadId;
// of particular task queues.
class BASE_EXPORT TaskQueueSelector : public WorkQueueSets::Observer {
public:
+ using SelectTaskOption = SequencedTaskSource::SelectTaskOption;
+
TaskQueueSelector(scoped_refptr<AssociatedThreadId> associated_thread,
const SequenceManager::Settings& settings);
@@ -51,10 +55,11 @@ class BASE_EXPORT TaskQueueSelector : public WorkQueueSets::Observer {
// Called to choose the work queue from which the next task should be taken
// and run. Return the queue to service if there is one or null otherwise.
// This function is called on the main thread.
- WorkQueue* SelectWorkQueueToService();
+ WorkQueue* SelectWorkQueueToService(
+ SelectTaskOption option = SelectTaskOption::kDefault);
- // Serialize the selector state for tracing.
- void AsValueInto(trace_event::TracedValue* state) const;
+ // Serialize the selector state for tracing/debugging.
+ Value AsValue() const;
class BASE_EXPORT Observer {
public:
@@ -70,7 +75,8 @@ class BASE_EXPORT TaskQueueSelector : public WorkQueueSets::Observer {
// Returns the priority of the most important pending task if one exists.
// O(1).
- Optional<TaskQueue::QueuePriority> GetHighestPendingPriority() const;
+ Optional<TaskQueue::QueuePriority> GetHighestPendingPriority(
+ SelectTaskOption option = SelectTaskOption::kDefault) const;
// WorkQueueSets::Observer implementation:
void WorkQueueSetBecameEmpty(size_t set_index) override;
@@ -172,7 +178,7 @@ class BASE_EXPORT TaskQueueSelector : public WorkQueueSets::Observer {
// Select an immediate work queue if we are starving immediate tasks.
if (immediate_starvation_count_ >= kMaxDelayedStarvationTasks) {
WorkQueue* queue =
- SetOperation::GetWithPriority(immediate_work_queue_sets_, priority);
+ ChooseImmediateOnlyWithPriority<SetOperation>(priority);
if (queue)
return queue;
return SetOperation::GetWithPriority(delayed_work_queue_sets_, priority);
@@ -180,6 +186,12 @@ class BASE_EXPORT TaskQueueSelector : public WorkQueueSets::Observer {
return ChooseImmediateOrDelayedTaskWithPriority<SetOperation>(priority);
}
+ template <typename SetOperation>
+ WorkQueue* ChooseImmediateOnlyWithPriority(
+ TaskQueue::QueuePriority priority) const {
+ return SetOperation::GetWithPriority(immediate_work_queue_sets_, priority);
+ }
+
private:
void ChangeSetIndex(internal::TaskQueueImpl* queue,
TaskQueue::QueuePriority priority);
@@ -218,7 +230,7 @@ class BASE_EXPORT TaskQueueSelector : public WorkQueueSets::Observer {
TaskQueue::QueuePriority priority);
// Returns true if there are pending tasks with priority |priority|.
- bool HasTasksWithPriority(TaskQueue::QueuePriority priority);
+ bool HasTasksWithPriority(TaskQueue::QueuePriority priority) const;
scoped_refptr<AssociatedThreadId> associated_thread_;
diff --git a/chromium/base/task/sequence_manager/task_queue_selector_unittest.cc b/chromium/base/task/sequence_manager/task_queue_selector_unittest.cc
index 1ec6bdc3795..90413d29080 100644
--- a/chromium/base/task/sequence_manager/task_queue_selector_unittest.cc
+++ b/chromium/base/task/sequence_manager/task_queue_selector_unittest.cc
@@ -396,6 +396,75 @@ TEST_F(TaskQueueSelectorTest, ChooseWithPriority_OnlyImmediate) {
TaskQueue::kNormalPriority));
}
+TEST_F(TaskQueueSelectorTest,
+ SelectWorkQueueToServiceImmediateOnlyWithoutImmediateTask) {
+ task_queues_[0]->delayed_work_queue()->Push(
+ Task(PostedTask(nullptr, test_closure_, FROM_HERE), TimeTicks(),
+ EnqueueOrder(), EnqueueOrder::FromIntForTesting(2)));
+
+ EXPECT_EQ(nullptr,
+ selector_.SelectWorkQueueToService(
+ TaskQueueSelector::SelectTaskOption::kSkipDelayedTask));
+ EXPECT_EQ(task_queues_[0]->delayed_work_queue(),
+ selector_.SelectWorkQueueToService());
+}
+
+TEST_F(TaskQueueSelectorTest,
+ SelectWorkQueueToServiceImmediateOnlyWithDelayedTasks) {
+ task_queues_[0]->delayed_work_queue()->Push(
+ Task(PostedTask(nullptr, test_closure_, FROM_HERE), TimeTicks(),
+ EnqueueOrder(), EnqueueOrder::FromIntForTesting(1)));
+ task_queues_[0]->immediate_work_queue()->Push(
+ Task(PostedTask(nullptr, test_closure_, FROM_HERE), TimeTicks(),
+ EnqueueOrder(), EnqueueOrder::FromIntForTesting(2)));
+
+ EXPECT_EQ(task_queues_[0]->immediate_work_queue(),
+ selector_.SelectWorkQueueToService(
+ TaskQueueSelector::SelectTaskOption::kSkipDelayedTask));
+ EXPECT_EQ(task_queues_[0]->delayed_work_queue(),
+ selector_.SelectWorkQueueToService());
+}
+
+TEST_F(TaskQueueSelectorTest,
+ SelectWorkQueueToServiceImmediateOnlyWithDisabledQueues) {
+ task_queues_[0]->delayed_work_queue()->Push(
+ Task(PostedTask(nullptr, test_closure_, FROM_HERE), TimeTicks(),
+ EnqueueOrder(), EnqueueOrder::FromIntForTesting(1)));
+ task_queues_[0]->immediate_work_queue()->Push(
+ Task(PostedTask(nullptr, test_closure_, FROM_HERE), TimeTicks(),
+ EnqueueOrder(), EnqueueOrder::FromIntForTesting(2)));
+ task_queues_[1]->delayed_work_queue()->Push(
+ Task(PostedTask(nullptr, test_closure_, FROM_HERE), TimeTicks(),
+ EnqueueOrder(), EnqueueOrder::FromIntForTesting(3)));
+ task_queues_[2]->immediate_work_queue()->Push(
+ Task(PostedTask(nullptr, test_closure_, FROM_HERE), TimeTicks(),
+ EnqueueOrder(), EnqueueOrder::FromIntForTesting(4)));
+
+ EXPECT_EQ(task_queues_[0]->delayed_work_queue(),
+ selector_.SelectWorkQueueToService());
+ EXPECT_EQ(task_queues_[0]->immediate_work_queue(),
+ selector_.SelectWorkQueueToService(
+ TaskQueueSelector::SelectTaskOption::kSkipDelayedTask));
+
+ task_queues_[0]->SetQueueEnabled(false);
+ selector_.DisableQueue(task_queues_[0].get());
+
+ EXPECT_EQ(task_queues_[1]->delayed_work_queue(),
+ selector_.SelectWorkQueueToService());
+ EXPECT_EQ(task_queues_[2]->immediate_work_queue(),
+ selector_.SelectWorkQueueToService(
+ TaskQueueSelector::SelectTaskOption::kSkipDelayedTask));
+
+ task_queues_[1]->SetQueueEnabled(false);
+ selector_.DisableQueue(task_queues_[1].get());
+
+ EXPECT_EQ(task_queues_[2]->immediate_work_queue(),
+ selector_.SelectWorkQueueToService(
+ TaskQueueSelector::SelectTaskOption::kSkipDelayedTask));
+ EXPECT_EQ(task_queues_[2]->immediate_work_queue(),
+ selector_.SelectWorkQueueToService());
+}
+
TEST_F(TaskQueueSelectorTest, TestObserverWithOneBlockedQueue) {
TaskQueueSelectorForTest selector(associated_thread_);
MockObserver mock_observer;
diff --git a/chromium/base/task/sequence_manager/thread_controller_impl.cc b/chromium/base/task/sequence_manager/thread_controller_impl.cc
index ab55a0cc91e..15b9ae60306 100644
--- a/chromium/base/task/sequence_manager/thread_controller_impl.cc
+++ b/chromium/base/task/sequence_manager/thread_controller_impl.cc
@@ -13,7 +13,7 @@
#include "base/task/sequence_manager/lazy_now.h"
#include "base/task/sequence_manager/sequence_manager_impl.h"
#include "base/task/sequence_manager/sequenced_task_source.h"
-#include "base/trace_event/trace_event.h"
+#include "base/trace_event/base_tracing.h"
namespace base {
namespace sequence_manager {
diff --git a/chromium/base/task/sequence_manager/thread_controller_power_monitor.cc b/chromium/base/task/sequence_manager/thread_controller_power_monitor.cc
new file mode 100644
index 00000000000..12dcb126110
--- /dev/null
+++ b/chromium/base/task/sequence_manager/thread_controller_power_monitor.cc
@@ -0,0 +1,91 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/thread_controller_power_monitor.h"
+
+#include "base/feature_list.h"
+#include "base/power_monitor/power_monitor.h"
+#include "base/trace_event/base_tracing.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+namespace {
+
+// Activate the power management events that affect task scheduling.
+const Feature kUsePowerMonitorWithThreadController{
+ "UsePowerMonitorWithThreadController", FEATURE_DISABLED_BY_DEFAULT};
+
+// TODO(1074332): Remove this when the experiment becomes the default.
+bool g_use_thread_controller_power_monitor_ = false;
+
+} // namespace
+
+ThreadControllerPowerMonitor::ThreadControllerPowerMonitor() = default;
+
+ThreadControllerPowerMonitor::~ThreadControllerPowerMonitor() {
+ PowerMonitor::RemoveObserver(this);
+}
+
+void ThreadControllerPowerMonitor::BindToCurrentThread() {
+ // Occasionally registration happens twice (i.e. when the deprecated
+ // ThreadController::SetDefaultTaskRunner() re-initializes the
+ // ThreadController).
+ if (is_observer_registered_)
+ PowerMonitor::RemoveObserver(this);
+
+ // Register the observer to deliver notifications on the current thread.
+ PowerMonitor::AddObserver(this);
+ is_observer_registered_ = true;
+}
+
+bool ThreadControllerPowerMonitor::IsProcessInPowerSuspendState() {
+ return is_power_suspended_;
+}
+
+// static
+void ThreadControllerPowerMonitor::InitializeOnMainThread() {
+ DCHECK(!g_use_thread_controller_power_monitor_);
+ g_use_thread_controller_power_monitor_ =
+ FeatureList::IsEnabled(kUsePowerMonitorWithThreadController);
+}
+
+// static
+void ThreadControllerPowerMonitor::OverrideUsePowerMonitorForTesting(
+ bool use_power_monitor) {
+ g_use_thread_controller_power_monitor_ = use_power_monitor;
+}
+
+// static
+void ThreadControllerPowerMonitor::ResetForTesting() {
+ g_use_thread_controller_power_monitor_ = false;
+}
+
+void ThreadControllerPowerMonitor::OnSuspend() {
+ if (!g_use_thread_controller_power_monitor_)
+ return;
+ DCHECK(!is_power_suspended_);
+
+ TRACE_EVENT_NESTABLE_ASYNC_BEGIN0("base", "ThreadController::Suspended",
+ this);
+ is_power_suspended_ = true;
+}
+
+void ThreadControllerPowerMonitor::OnResume() {
+ if (!g_use_thread_controller_power_monitor_)
+ return;
+
+ // It is possible a suspend was already happening before the observer was
+ // added to the power monitor. Ignoring the resume notification in that case.
+ if (is_power_suspended_) {
+ TRACE_EVENT_NESTABLE_ASYNC_END0("base", "ThreadController::Suspended",
+ this);
+ is_power_suspended_ = false;
+ }
+}
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
diff --git a/chromium/base/task/sequence_manager/thread_controller_power_monitor.h b/chromium/base/task/sequence_manager/thread_controller_power_monitor.h
new file mode 100644
index 00000000000..46b44c8d85f
--- /dev/null
+++ b/chromium/base/task/sequence_manager/thread_controller_power_monitor.h
@@ -0,0 +1,56 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SEQUENCE_MANAGER_THREAD_CONTROLLER_POWER_MONITOR_H_
+#define BASE_TASK_SEQUENCE_MANAGER_THREAD_CONTROLLER_POWER_MONITOR_H_
+
+#include "base/power_monitor/power_observer.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+// A helper class that keeps track of the power state and handles power
+// notifications. The class register itself to the PowerMonitor and receives
+// notifications on the bound thread (see BindToCurrentThread(...)).
+class BASE_EXPORT ThreadControllerPowerMonitor : public PowerObserver {
+ public:
+ ThreadControllerPowerMonitor();
+ ~ThreadControllerPowerMonitor() override;
+ ThreadControllerPowerMonitor(const ThreadControllerPowerMonitor&) = delete;
+ ThreadControllerPowerMonitor& operator=(const ThreadControllerPowerMonitor&) =
+ delete;
+
+ // Register this class to the power monitor to receive notifications on this
+ // thread. It is safe to call this before PowerMonitor is initialized.
+ void BindToCurrentThread();
+
+ // Returns whether the process is between power suspend and resume
+ // notifications.
+ bool IsProcessInPowerSuspendState();
+
+ // Initialize the ThreadControllerPowerMonitor. Must be called once on the
+ // main thread during startup while single-threaded.
+ static void InitializeOnMainThread();
+
+ static void OverrideUsePowerMonitorForTesting(bool use_power_monitor);
+ static void ResetForTesting();
+
+ // base::PowerObserver:
+ void OnSuspend() override;
+ void OnResume() override;
+
+ private:
+ // Power state based on notifications delivered to this observer.
+ bool is_power_suspended_ = false;
+
+ // Whether PowerMonitor observer is registered.
+ bool is_observer_registered_ = false;
+};
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
+
+#endif // BASE_TASK_SEQUENCE_MANAGER_THREAD_CONTROLLER_POWER_MONITOR_H_
diff --git a/chromium/base/task/sequence_manager/thread_controller_power_monitor_unittest.cc b/chromium/base/task/sequence_manager/thread_controller_power_monitor_unittest.cc
new file mode 100644
index 00000000000..72f91ad39d5
--- /dev/null
+++ b/chromium/base/task/sequence_manager/thread_controller_power_monitor_unittest.cc
@@ -0,0 +1,69 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task/sequence_manager/thread_controller_power_monitor.h"
+
+#include "base/power_monitor/power_monitor.h"
+#include "base/power_monitor/power_monitor_source.h"
+#include "base/test/power_monitor_test_base.h"
+#include "base/test/task_environment.h"
+
+#include "base/test/mock_callback.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace sequence_manager {
+namespace internal {
+
+class ThreadControllerPowerMonitorTest : public testing::Test {
+ public:
+ void SetUp() override {
+ power_monitor_source_ = new PowerMonitorTestSource();
+ PowerMonitor::Initialize(
+ std::unique_ptr<PowerMonitorSource>(power_monitor_source_));
+ thread_controller_power_monitor_ =
+ std::make_unique<ThreadControllerPowerMonitor>();
+ internal::ThreadControllerPowerMonitor::OverrideUsePowerMonitorForTesting(
+ true);
+ }
+
+ void TearDown() override {
+ thread_controller_power_monitor_.reset();
+ internal::ThreadControllerPowerMonitor::ResetForTesting();
+ PowerMonitor::ShutdownForTesting();
+ }
+
+ protected:
+ base::test::SingleThreadTaskEnvironment task_environment_;
+ PowerMonitorTestSource* power_monitor_source_ = nullptr;
+ std::unique_ptr<ThreadControllerPowerMonitor>
+ thread_controller_power_monitor_;
+};
+
+TEST_F(ThreadControllerPowerMonitorTest, IsProcessInPowerSuspendState) {
+ EXPECT_FALSE(
+ thread_controller_power_monitor_->IsProcessInPowerSuspendState());
+
+ // Before the monitor is bound to the thread, the notifications are not
+ // received.
+ power_monitor_source_->GenerateSuspendEvent();
+ EXPECT_FALSE(
+ thread_controller_power_monitor_->IsProcessInPowerSuspendState());
+ power_monitor_source_->GenerateResumeEvent();
+ EXPECT_FALSE(
+ thread_controller_power_monitor_->IsProcessInPowerSuspendState());
+
+ thread_controller_power_monitor_->BindToCurrentThread();
+
+ // Ensures notifications are processed.
+ power_monitor_source_->GenerateSuspendEvent();
+ EXPECT_TRUE(thread_controller_power_monitor_->IsProcessInPowerSuspendState());
+ power_monitor_source_->GenerateResumeEvent();
+ EXPECT_FALSE(
+ thread_controller_power_monitor_->IsProcessInPowerSuspendState());
+}
+
+} // namespace internal
+} // namespace sequence_manager
+} // namespace base
diff --git a/chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl.cc b/chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl.cc
index f225da8b584..590e8297807 100644
--- a/chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl.cc
+++ b/chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl.cc
@@ -4,14 +4,16 @@
#include "base/task/sequence_manager/thread_controller_with_message_pump_impl.h"
+#include <algorithm>
+#include <utility>
+
#include "base/auto_reset.h"
-#include "base/feature_list.h"
+#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/message_loop/message_pump.h"
-#include "base/power_monitor/power_monitor.h"
#include "base/threading/hang_watcher.h"
#include "base/time/tick_clock.h"
-#include "base/trace_event/trace_event.h"
+#include "base/trace_event/base_tracing.h"
#include "build/build_config.h"
#if defined(OS_IOS)
@@ -25,12 +27,6 @@ namespace sequence_manager {
namespace internal {
namespace {
-// Activate the power management events that affect the tasks scheduling.
-const Feature kUsePowerMonitorWithThreadController{
- "UsePowerMonitorWithThreadController", FEATURE_DISABLED_BY_DEFAULT};
-
-bool g_use_power_monitor_with_thread_controller = false;
-
// Returns |next_run_time| capped at 1 day from |lazy_now|. This is used to
// mitigate https://crbug.com/850450 where some platforms are unhappy with
// delays > 100,000,000 seconds. In practice, a diagnosis metric showed that no
@@ -183,6 +179,9 @@ void ThreadControllerWithMessagePumpImpl::InitializeThreadTaskRunnerHandle() {
main_thread_only().thread_task_runner_handle.reset();
main_thread_only().thread_task_runner_handle =
std::make_unique<ThreadTaskRunnerHandle>(task_runner_);
+ // When the task runner is known, bind the power manager. Power notifications
+ // are received through that sequence.
+ power_monitor_.BindToCurrentThread();
}
scoped_refptr<SingleThreadTaskRunner>
@@ -306,7 +305,12 @@ TimeDelta ThreadControllerWithMessagePumpImpl::DoWorkImpl(
DCHECK(main_thread_only().task_source);
for (int i = 0; i < main_thread_only().work_batch_size; i++) {
- Task* task = main_thread_only().task_source->SelectNextTask();
+ const SequencedTaskSource::SelectTaskOption select_task_option =
+ power_monitor_.IsProcessInPowerSuspendState()
+ ? SequencedTaskSource::SelectTaskOption::kSkipDelayedTask
+ : SequencedTaskSource::SelectTaskOption::kDefault;
+ Task* task =
+ main_thread_only().task_source->SelectNextTask(select_task_option);
if (!task)
break;
@@ -351,8 +355,14 @@ TimeDelta ThreadControllerWithMessagePumpImpl::DoWorkImpl(
work_deduplicator_.WillCheckForMoreWork();
- TimeDelta do_work_delay =
- main_thread_only().task_source->DelayTillNextTask(continuation_lazy_now);
+ // Re-check the state of the power after running tasks. An executed task may
+ // have been a power change notification.
+ const SequencedTaskSource::SelectTaskOption select_task_option =
+ power_monitor_.IsProcessInPowerSuspendState()
+ ? SequencedTaskSource::SelectTaskOption::kSkipDelayedTask
+ : SequencedTaskSource::SelectTaskOption::kDefault;
+ TimeDelta do_work_delay = main_thread_only().task_source->DelayTillNextTask(
+ continuation_lazy_now, select_task_option);
DCHECK_GE(do_work_delay, TimeDelta());
return do_work_delay;
}
@@ -368,8 +378,7 @@ bool ThreadControllerWithMessagePumpImpl::DoIdleWork() {
work_id_provider_->IncrementWorkId();
#if defined(OS_WIN)
- if (!g_use_power_monitor_with_thread_controller ||
- !base::PowerMonitor::IsProcessSuspended()) {
+ if (!power_monitor_.IsProcessInPowerSuspendState()) {
// Avoid calling Time::ActivateHighResolutionTimer() between
// suspend/resume as the system hangs if we do (crbug.com/1074028).
// OnResume() will generate a task on this thread per the
@@ -532,11 +541,5 @@ bool ThreadControllerWithMessagePumpImpl::ShouldQuitRunLoopWhenIdle() {
}
} // namespace internal
-
-void PostFieldTrialInitialization() {
- internal::g_use_power_monitor_with_thread_controller =
- FeatureList::IsEnabled(internal::kUsePowerMonitorWithThreadController);
-}
-
} // namespace sequence_manager
} // namespace base
diff --git a/chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl.h b/chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl.h
index 0dbf946f9ea..7a153d44485 100644
--- a/chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl.h
+++ b/chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl.h
@@ -17,6 +17,7 @@
#include "base/task/sequence_manager/sequence_manager_impl.h"
#include "base/task/sequence_manager/sequenced_task_source.h"
#include "base/task/sequence_manager/thread_controller.h"
+#include "base/task/sequence_manager/thread_controller_power_monitor.h"
#include "base/task/sequence_manager/work_deduplicator.h"
#include "base/thread_annotations.h"
#include "base/threading/hang_watcher.h"
@@ -94,17 +95,6 @@ class BASE_EXPORT ThreadControllerWithMessagePumpImpl
void Quit() override;
void EnsureWorkScheduled() override;
- private:
- friend class DoWorkScope;
- friend class RunScope;
-
- // Returns the delay till the next task. If there's no delay TimeDelta::Max()
- // will be returned.
- TimeDelta DoWorkImpl(LazyNow* continuation_lazy_now);
-
- void InitializeThreadTaskRunnerHandle()
- EXCLUSIVE_LOCKS_REQUIRED(task_runner_lock_);
-
struct MainThreadOnly {
MainThreadOnly();
~MainThreadOnly();
@@ -134,6 +124,25 @@ class BASE_EXPORT ThreadControllerWithMessagePumpImpl
bool task_execution_allowed = true;
};
+ const MainThreadOnly& MainThreadOnlyForTesting() const {
+ return main_thread_only_;
+ }
+
+ ThreadControllerPowerMonitor* ThreadControllerPowerMonitorForTesting() {
+ return &power_monitor_;
+ }
+
+ private:
+ friend class DoWorkScope;
+ friend class RunScope;
+
+ // Returns the delay till the next task. If there's no delay TimeDelta::Max()
+ // will be returned.
+ TimeDelta DoWorkImpl(LazyNow* continuation_lazy_now);
+
+ void InitializeThreadTaskRunnerHandle()
+ EXCLUSIVE_LOCKS_REQUIRED(task_runner_lock_);
+
MainThreadOnly& main_thread_only() {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
return main_thread_only_;
@@ -154,6 +163,8 @@ class BASE_EXPORT ThreadControllerWithMessagePumpImpl
WorkDeduplicator work_deduplicator_;
+ ThreadControllerPowerMonitor power_monitor_;
+
// Can only be set once (just before calling
// work_deduplicator_.BindToCurrentThread()). After that only read access is
// allowed.
@@ -187,11 +198,6 @@ class BASE_EXPORT ThreadControllerWithMessagePumpImpl
};
} // namespace internal
-
-// Initialize ThreadController features. Called after FeatureList is available
-// when the process is still single-threaded.
-BASE_EXPORT void PostFieldTrialInitialization();
-
} // namespace sequence_manager
} // namespace base
diff --git a/chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl_unittest.cc b/chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl_unittest.cc
index b5e252fd800..8fcda55a02a 100644
--- a/chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl_unittest.cc
+++ b/chromium/base/task/sequence_manager/thread_controller_with_message_pump_impl_unittest.cc
@@ -4,19 +4,24 @@
#include "base/task/sequence_manager/thread_controller_with_message_pump_impl.h"
+#include <queue>
+#include <string>
+#include <utility>
+#include <vector>
+
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/memory/scoped_refptr.h"
#include "base/single_thread_task_runner.h"
+#include "base/task/sequence_manager/thread_controller_power_monitor.h"
#include "base/test/bind_test_util.h"
#include "base/test/mock_callback.h"
#include "base/test/simple_test_tick_clock.h"
#include "base/threading/thread_task_runner_handle.h"
+#include "build/build_config.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
-#include <queue>
-
using testing::_;
using testing::Invoke;
using testing::ElementsAre;
@@ -30,7 +35,7 @@ class ThreadControllerForTest
: public internal::ThreadControllerWithMessagePumpImpl {
public:
ThreadControllerForTest(std::unique_ptr<MessagePump> pump,
- SequenceManager::Settings& settings)
+ const SequenceManager::Settings& settings)
: ThreadControllerWithMessagePumpImpl(std::move(pump), settings) {}
using ThreadControllerWithMessagePumpImpl::DoIdleWork;
@@ -38,6 +43,10 @@ class ThreadControllerForTest
using ThreadControllerWithMessagePumpImpl::EnsureWorkScheduled;
using ThreadControllerWithMessagePumpImpl::Quit;
using ThreadControllerWithMessagePumpImpl::Run;
+
+ using ThreadControllerWithMessagePumpImpl::MainThreadOnlyForTesting;
+ using ThreadControllerWithMessagePumpImpl::
+ ThreadControllerPowerMonitorForTesting;
};
class MockMessagePump : public MessagePump {
@@ -78,11 +87,15 @@ class FakeSequencedTaskSource : public internal::SequencedTaskSource {
explicit FakeSequencedTaskSource(TickClock* clock) : clock_(clock) {}
~FakeSequencedTaskSource() override = default;
- Task* SelectNextTask() override {
+ Task* SelectNextTask(SelectTaskOption option) override {
if (tasks_.empty())
return nullptr;
if (tasks_.front().delayed_run_time > clock_->NowTicks())
return nullptr;
+ if (option == SequencedTaskSource::SelectTaskOption::kSkipDelayedTask &&
+ !tasks_.front().delayed_run_time.is_null()) {
+ return nullptr;
+ }
running_stack_.push_back(std::move(tasks_.front()));
tasks_.pop();
return &running_stack_.back();
@@ -90,9 +103,14 @@ class FakeSequencedTaskSource : public internal::SequencedTaskSource {
void DidRunTask() override { running_stack_.pop_back(); }
- TimeDelta DelayTillNextTask(LazyNow* lazy_now) const override {
+ TimeDelta DelayTillNextTask(LazyNow* lazy_now,
+ SelectTaskOption option) const override {
if (tasks_.empty())
return TimeDelta::Max();
+ if (option == SequencedTaskSource::SelectTaskOption::kSkipDelayedTask &&
+ !tasks_.front().delayed_run_time.is_null()) {
+ return TimeDelta::Max();
+ }
if (tasks_.front().delayed_run_time.is_null())
return TimeDelta();
if (lazy_now->Now() > tasks_.front().delayed_run_time)
@@ -110,7 +128,13 @@ class FakeSequencedTaskSource : public internal::SequencedTaskSource {
delayed_run_time, EnqueueOrder::FromIntForTesting(13)));
}
- bool HasPendingHighResolutionTasks() override { return false; }
+ bool HasPendingHighResolutionTasks() override {
+ return has_pending_high_resolution_tasks;
+ }
+
+ void SetHasPendingHighResolutionTasks(bool state) {
+ has_pending_high_resolution_tasks = state;
+ }
bool OnSystemIdle() override { return false; }
@@ -118,6 +142,7 @@ class FakeSequencedTaskSource : public internal::SequencedTaskSource {
TickClock* clock_;
std::queue<Task> tasks_;
std::vector<Task> running_stack_;
+ bool has_pending_high_resolution_tasks = false;
};
TimeTicks Seconds(int seconds) {
@@ -143,6 +168,15 @@ class ThreadControllerWithMessagePumpTest : public testing::Test {
thread_controller_.SetSequencedTaskSource(&task_source_);
}
+ void SetUp() override {
+ internal::ThreadControllerPowerMonitor::OverrideUsePowerMonitorForTesting(
+ true);
+ }
+
+ void TearDown() override {
+ internal::ThreadControllerPowerMonitor::ResetForTesting();
+ }
+
protected:
MockMessagePump* message_pump_;
SequenceManager::Settings settings_;
@@ -578,5 +612,131 @@ TEST_F(ThreadControllerWithMessagePumpTest, RunWithTimeout) {
thread_controller_.Run(true, TimeDelta::FromSeconds(15));
}
+#if defined(OS_WIN)
+TEST_F(ThreadControllerWithMessagePumpTest, SetHighResolutionTimer) {
+ MockCallback<OnceClosure> task;
+ task_source_.AddTask(FROM_HERE, task.Get(), Seconds(5));
+
+ ThreadTaskRunnerHandle handle(MakeRefCounted<FakeTaskRunner>());
+
+ EXPECT_CALL(*message_pump_, Run(_))
+ .WillOnce(Invoke([&](MessagePump::Delegate* delegate) {
+ // Should initially not be in high resolution.
+ EXPECT_FALSE(
+ thread_controller_.MainThreadOnlyForTesting().in_high_res_mode);
+
+ // Ensures timer resolution is set to high resolution.
+ task_source_.SetHasPendingHighResolutionTasks(true);
+ EXPECT_FALSE(delegate->DoIdleWork());
+ EXPECT_TRUE(
+ thread_controller_.MainThreadOnlyForTesting().in_high_res_mode);
+
+ // Ensures time resolution is set back to low resolution.
+ task_source_.SetHasPendingHighResolutionTasks(false);
+ EXPECT_FALSE(delegate->DoIdleWork());
+ EXPECT_FALSE(
+ thread_controller_.MainThreadOnlyForTesting().in_high_res_mode);
+
+ EXPECT_CALL(*message_pump_, Quit());
+ thread_controller_.Quit();
+ }));
+
+ RunLoop run_loop;
+ run_loop.Run();
+}
+#endif // OS_WIN
+
+#if defined(OS_WIN)
+TEST_F(ThreadControllerWithMessagePumpTest,
+ SetHighResolutionTimerWithPowerSuspend) {
+ MockCallback<OnceClosure> task;
+ task_source_.AddTask(FROM_HERE, task.Get(), Seconds(5));
+
+ ThreadTaskRunnerHandle handle(MakeRefCounted<FakeTaskRunner>());
+
+ EXPECT_CALL(*message_pump_, Run(_))
+ .WillOnce(Invoke([&](MessagePump::Delegate* delegate) {
+ // Should initially not be in high resolution.
+ EXPECT_FALSE(
+ thread_controller_.MainThreadOnlyForTesting().in_high_res_mode);
+
+ // The power suspend notification is sent.
+ thread_controller_.ThreadControllerPowerMonitorForTesting()
+ ->OnSuspend();
+
+ // The timer resolution should NOT be updated during power suspend.
+ task_source_.SetHasPendingHighResolutionTasks(true);
+ EXPECT_FALSE(delegate->DoIdleWork());
+ EXPECT_FALSE(
+ thread_controller_.MainThreadOnlyForTesting().in_high_res_mode);
+
+ // The power resume notification is sent.
+ thread_controller_.ThreadControllerPowerMonitorForTesting()->OnResume();
+
+ // Ensures timer resolution is set to high resolution.
+ EXPECT_FALSE(delegate->DoIdleWork());
+ EXPECT_TRUE(
+ thread_controller_.MainThreadOnlyForTesting().in_high_res_mode);
+
+ EXPECT_CALL(*message_pump_, Quit());
+ thread_controller_.Quit();
+ }));
+
+ RunLoop run_loop;
+ run_loop.Run();
+}
+#endif // OS_WIN
+
+TEST_F(ThreadControllerWithMessagePumpTest,
+ ScheduleDelayedWorkWithPowerSuspend) {
+ ThreadTaskRunnerHandle handle(MakeRefCounted<FakeTaskRunner>());
+
+ MockCallback<OnceClosure> task1;
+ task_source_.AddTask(FROM_HERE, task1.Get(), Seconds(10));
+ MockCallback<OnceClosure> task2;
+ task_source_.AddTask(FROM_HERE, task2.Get(), Seconds(15));
+
+ clock_.SetNowTicks(Seconds(5));
+
+ // Call a no-op DoWork. Expect that it doesn't do any work.
+ EXPECT_CALL(task1, Run()).Times(0);
+ EXPECT_CALL(task2, Run()).Times(0);
+ EXPECT_EQ(thread_controller_.DoWork().delayed_run_time, Seconds(10));
+ testing::Mock::VerifyAndClearExpectations(&task1);
+ testing::Mock::VerifyAndClearExpectations(&task2);
+
+ // Simulate a power suspend.
+ thread_controller_.ThreadControllerPowerMonitorForTesting()->OnSuspend();
+
+ // Delayed task is not yet ready to be executed.
+ EXPECT_CALL(task1, Run()).Times(0);
+ EXPECT_CALL(task2, Run()).Times(0);
+ EXPECT_EQ(thread_controller_.DoWork().delayed_run_time, TimeTicks::Max());
+ testing::Mock::VerifyAndClearExpectations(&task1);
+ testing::Mock::VerifyAndClearExpectations(&task2);
+
+ // Move time after the expiration delay of tasks.
+ clock_.SetNowTicks(Seconds(17));
+
+ // Should not process delayed tasks. The process is still in suspended power
+ // state.
+ EXPECT_CALL(task1, Run()).Times(0);
+ EXPECT_CALL(task2, Run()).Times(0);
+ EXPECT_EQ(thread_controller_.DoWork().delayed_run_time, TimeTicks::Max());
+ testing::Mock::VerifyAndClearExpectations(&task1);
+ testing::Mock::VerifyAndClearExpectations(&task2);
+
+ // Simulate a power resume.
+ thread_controller_.ThreadControllerPowerMonitorForTesting()->OnResume();
+
+ // No longer in suspended state. Controller should process both delayed tasks.
+ EXPECT_CALL(task1, Run()).Times(1);
+ EXPECT_CALL(task2, Run()).Times(1);
+ EXPECT_TRUE(thread_controller_.DoWork().is_immediate());
+ EXPECT_EQ(thread_controller_.DoWork().delayed_run_time, TimeTicks::Max());
+ testing::Mock::VerifyAndClearExpectations(&task1);
+ testing::Mock::VerifyAndClearExpectations(&task2);
+}
+
} // namespace sequence_manager
} // namespace base
diff --git a/chromium/base/task/sequence_manager/time_domain.cc b/chromium/base/task/sequence_manager/time_domain.cc
index 1df52f3d131..2a31f8b8143 100644
--- a/chromium/base/task/sequence_manager/time_domain.cc
+++ b/chromium/base/task/sequence_manager/time_domain.cc
@@ -140,20 +140,15 @@ Optional<TimeTicks> TimeDomain::NextScheduledRunTime() const {
return delayed_wake_up_queue_.Min().wake_up.time;
}
-void TimeDomain::AsValueInto(trace_event::TracedValue* state) const {
- state->BeginDictionary();
- state->SetString("name", GetName());
- state->SetInteger("registered_delay_count", delayed_wake_up_queue_.size());
+Value TimeDomain::AsValue() const {
+ Value state(Value::Type::DICTIONARY);
+ state.SetStringKey("name", GetName());
+ state.SetIntKey("registered_delay_count", delayed_wake_up_queue_.size());
if (!delayed_wake_up_queue_.empty()) {
TimeDelta delay = delayed_wake_up_queue_.Min().wake_up.time - Now();
- state->SetDouble("next_delay_ms", delay.InMillisecondsF());
+ state.SetDoubleKey("next_delay_ms", delay.InMillisecondsF());
}
- AsValueIntoInternal(state);
- state->EndDictionary();
-}
-
-void TimeDomain::AsValueIntoInternal(trace_event::TracedValue* state) const {
- // Can be overriden to trace some additional state.
+ return state;
}
} // namespace sequence_manager
diff --git a/chromium/base/task/sequence_manager/time_domain.h b/chromium/base/task/sequence_manager/time_domain.h
index ddbbc54bd96..6c3319bf0ee 100644
--- a/chromium/base/task/sequence_manager/time_domain.h
+++ b/chromium/base/task/sequence_manager/time_domain.h
@@ -8,12 +8,13 @@
#include <map>
#include "base/callback.h"
-#include "base/logging.h"
+#include "base/check.h"
#include "base/macros.h"
#include "base/task/common/intrusive_heap.h"
#include "base/task/sequence_manager/lazy_now.h"
#include "base/task/sequence_manager/task_queue_impl.h"
#include "base/time/time.h"
+#include "base/values.h"
namespace base {
namespace sequence_manager {
@@ -56,7 +57,7 @@ class BASE_EXPORT TimeDomain {
// NOTE: |lazy_now| and the return value are in the SequenceManager's time.
virtual Optional<TimeDelta> DelayTillNextTask(LazyNow* lazy_now) = 0;
- void AsValueInto(trace_event::TracedValue* state) const;
+ Value AsValue() const;
bool has_pending_high_resolution_tasks() const {
return pending_high_res_wake_up_count_;
@@ -91,9 +92,6 @@ class BASE_EXPORT TimeDomain {
// May be overriden to control wake ups manually.
virtual void RequestDoWork();
- // For implementation-specific tracing.
- virtual void AsValueIntoInternal(trace_event::TracedValue* state) const;
-
virtual const char* GetName() const = 0;
// Called when the TimeDomain is registered. |sequence_manager| is expected to
diff --git a/chromium/base/task/sequence_manager/time_domain_unittest.cc b/chromium/base/task/sequence_manager/time_domain_unittest.cc
index 2096520fc16..8a5c16b4464 100644
--- a/chromium/base/task/sequence_manager/time_domain_unittest.cc
+++ b/chromium/base/task/sequence_manager/time_domain_unittest.cc
@@ -57,7 +57,6 @@ class TestTimeDomain : public TimeDomain {
return false;
}
- void AsValueIntoInternal(trace_event::TracedValue* state) const override {}
const char* GetName() const override { return "Test"; }
internal::TaskQueueImpl* NextScheduledTaskQueue() const {
diff --git a/chromium/base/task/sequence_manager/work_queue.cc b/chromium/base/task/sequence_manager/work_queue.cc
index 836f00034b9..b3667285ad1 100644
--- a/chromium/base/task/sequence_manager/work_queue.cc
+++ b/chromium/base/task/sequence_manager/work_queue.cc
@@ -18,11 +18,11 @@ WorkQueue::WorkQueue(TaskQueueImpl* task_queue,
QueueType queue_type)
: task_queue_(task_queue), name_(name), queue_type_(queue_type) {}
-void WorkQueue::AsValueInto(TimeTicks now,
- trace_event::TracedValue* state) const {
- for (const Task& task : tasks_) {
- TaskQueueImpl::TaskAsValueInto(task, now, state);
- }
+Value WorkQueue::AsValue(TimeTicks now) const {
+ Value state(Value::Type::LIST);
+ for (const Task& task : tasks_)
+ state.Append(TaskQueueImpl::TaskAsValue(task, now));
+ return state;
}
WorkQueue::~WorkQueue() {
diff --git a/chromium/base/task/sequence_manager/work_queue.h b/chromium/base/task/sequence_manager/work_queue.h
index 65fdee4ca28..77bdc127520 100644
--- a/chromium/base/task/sequence_manager/work_queue.h
+++ b/chromium/base/task/sequence_manager/work_queue.h
@@ -10,8 +10,7 @@
#include "base/task/sequence_manager/enqueue_order.h"
#include "base/task/sequence_manager/sequenced_task_source.h"
#include "base/task/sequence_manager/task_queue_impl.h"
-#include "base/trace_event/trace_event.h"
-#include "base/trace_event/traced_value.h"
+#include "base/values.h"
namespace base {
namespace sequence_manager {
@@ -43,7 +42,7 @@ class BASE_EXPORT WorkQueue {
// Assigns the current set index.
void AssignSetIndex(size_t work_queue_set_index);
- void AsValueInto(TimeTicks now, trace_event::TracedValue* state) const;
+ Value AsValue(TimeTicks now) const;
// Returns true if the |tasks_| is empty. This method ignores any fences.
bool Empty() const { return tasks_.empty(); }
diff --git a/chromium/base/task/sequence_manager/work_queue_sets.h b/chromium/base/task/sequence_manager/work_queue_sets.h
index f128c62c369..626debe0075 100644
--- a/chromium/base/task/sequence_manager/work_queue_sets.h
+++ b/chromium/base/task/sequence_manager/work_queue_sets.h
@@ -9,13 +9,13 @@
#include <map>
#include "base/base_export.h"
-#include "base/logging.h"
+#include "base/check_op.h"
#include "base/macros.h"
#include "base/task/common/intrusive_heap.h"
#include "base/task/sequence_manager/sequence_manager.h"
#include "base/task/sequence_manager/task_queue_impl.h"
#include "base/task/sequence_manager/work_queue.h"
-#include "base/trace_event/traced_value.h"
+#include "base/trace_event/base_tracing.h"
namespace base {
namespace sequence_manager {
diff --git a/chromium/base/task/single_thread_task_executor_unittest.cc b/chromium/base/task/single_thread_task_executor_unittest.cc
index 3e4d44c24f1..df9162063c5 100644
--- a/chromium/base/task/single_thread_task_executor_unittest.cc
+++ b/chromium/base/task/single_thread_task_executor_unittest.cc
@@ -1,20 +1,68 @@
-// Copyright 2019 The Chromium Authors. All rights reserved.
+// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/single_thread_task_executor.h"
+#include <stddef.h>
+#include <stdint.h>
+
+#include <vector>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+#include "base/compiler_specific.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/memory/ref_counted.h"
+#include "base/message_loop/message_loop_current.h"
+#include "base/message_loop/message_pump_for_io.h"
+#include "base/message_loop/message_pump_type.h"
+#include "base/pending_task.h"
+#include "base/posix/eintr_wrapper.h"
#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/waitable_event.h"
#include "base/task/post_task.h"
+#include "base/task/task_observer.h"
+#include "base/task/thread_pool/thread_pool_instance.h"
#include "base/test/bind_test_util.h"
+#include "base/test/gtest_util.h"
+#include "base/test/metrics/histogram_tester.h"
+#include "base/test/test_simple_task_runner.h"
+#include "base/test/test_timeouts.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/sequence_local_storage_slot.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "build/build_config.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
+#if defined(OS_ANDROID)
+#include "base/android/java_handler_thread.h"
+#include "base/android/jni_android.h"
+#include "base/test/android/java_handler_thread_helpers.h"
+#endif
+
+#if defined(OS_WIN)
+#include "base/message_loop/message_pump_win.h"
+#include "base/process/memory.h"
+#include "base/strings/string16.h"
+#include "base/win/current_module.h"
+#include "base/win/message_window.h"
+#include "base/win/scoped_handle.h"
+#endif
+
using ::testing::IsNull;
using ::testing::NotNull;
namespace base {
+// TODO(darin): Platform-specific MessageLoop tests should be grouped together
+// to avoid chopping this file up with so many #ifdefs.
+
TEST(SingleThreadTaskExecutorTest, GetTaskExecutorForCurrentThread) {
EXPECT_THAT(GetTaskExecutorForCurrentThread(), IsNull());
@@ -43,4 +91,2111 @@ TEST(SingleThreadTaskExecutorTest,
run_loop.Run();
}
+namespace {
+
+class Foo : public RefCounted<Foo> {
+ public:
+ Foo() : test_count_(0) {}
+
+ void Test0() { ++test_count_; }
+
+ void Test1ConstRef(const std::string& a) {
+ ++test_count_;
+ result_.append(a);
+ }
+
+ void Test1Ptr(std::string* a) {
+ ++test_count_;
+ result_.append(*a);
+ }
+
+ void Test1Int(int a) { test_count_ += a; }
+
+ void Test2Ptr(std::string* a, std::string* b) {
+ ++test_count_;
+ result_.append(*a);
+ result_.append(*b);
+ }
+
+ void Test2Mixed(const std::string& a, std::string* b) {
+ ++test_count_;
+ result_.append(a);
+ result_.append(*b);
+ }
+
+ int test_count() const { return test_count_; }
+ const std::string& result() const { return result_; }
+
+ private:
+ friend class RefCounted<Foo>;
+
+ ~Foo() = default;
+
+ int test_count_;
+ std::string result_;
+
+ DISALLOW_COPY_AND_ASSIGN(Foo);
+};
+
+// This function runs slowly to simulate a large amount of work being done.
+static void SlowFunc(TimeDelta pause, int* quit_counter) {
+ PlatformThread::Sleep(pause);
+ if (--(*quit_counter) == 0)
+ RunLoop::QuitCurrentWhenIdleDeprecated();
+}
+
+// This function records the time when Run was called in a Time object, which is
+// useful for building a variety of SingleThreadTaskExecutor tests.
+static void RecordRunTimeFunc(TimeTicks* run_time, int* quit_counter) {
+ *run_time = TimeTicks::Now();
+
+ // Cause our Run function to take some time to execute. As a result we can
+ // count on subsequent RecordRunTimeFunc()s running at a future time,
+ // without worry about the resolution of our system clock being an issue.
+ SlowFunc(TimeDelta::FromMilliseconds(10), quit_counter);
+}
+
+enum TaskType {
+ MESSAGEBOX,
+ ENDDIALOG,
+ RECURSIVE,
+ TIMEDMESSAGELOOP,
+ QUITMESSAGELOOP,
+ ORDERED,
+ PUMPS,
+ SLEEP,
+ RUNS,
+};
+
+// Saves the order in which the tasks executed.
+struct TaskItem {
+ TaskItem(TaskType t, int c, bool s) : type(t), cookie(c), start(s) {}
+
+ TaskType type;
+ int cookie;
+ bool start;
+
+ bool operator==(const TaskItem& other) const {
+ return type == other.type && cookie == other.cookie && start == other.start;
+ }
+};
+
+std::ostream& operator<<(std::ostream& os, TaskType type) {
+ switch (type) {
+ case MESSAGEBOX:
+ os << "MESSAGEBOX";
+ break;
+ case ENDDIALOG:
+ os << "ENDDIALOG";
+ break;
+ case RECURSIVE:
+ os << "RECURSIVE";
+ break;
+ case TIMEDMESSAGELOOP:
+ os << "TIMEDMESSAGELOOP";
+ break;
+ case QUITMESSAGELOOP:
+ os << "QUITMESSAGELOOP";
+ break;
+ case ORDERED:
+ os << "ORDERED";
+ break;
+ case PUMPS:
+ os << "PUMPS";
+ break;
+ case SLEEP:
+ os << "SLEEP";
+ break;
+ default:
+ NOTREACHED();
+ os << "Unknown TaskType";
+ break;
+ }
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const TaskItem& item) {
+ if (item.start)
+ return os << item.type << " " << item.cookie << " starts";
+ return os << item.type << " " << item.cookie << " ends";
+}
+
+class TaskList {
+ public:
+ void RecordStart(TaskType type, int cookie) {
+ TaskItem item(type, cookie, true);
+ DVLOG(1) << item;
+ task_list_.push_back(item);
+ }
+
+ void RecordEnd(TaskType type, int cookie) {
+ TaskItem item(type, cookie, false);
+ DVLOG(1) << item;
+ task_list_.push_back(item);
+ }
+
+ size_t Size() { return task_list_.size(); }
+
+ TaskItem Get(int n) { return task_list_[n]; }
+
+ private:
+ std::vector<TaskItem> task_list_;
+};
+
+class DummyTaskObserver : public TaskObserver {
+ public:
+ explicit DummyTaskObserver(int num_tasks)
+ : num_tasks_started_(0), num_tasks_processed_(0), num_tasks_(num_tasks) {}
+
+ DummyTaskObserver(int num_tasks, int num_tasks_started)
+ : num_tasks_started_(num_tasks_started),
+ num_tasks_processed_(0),
+ num_tasks_(num_tasks) {}
+
+ ~DummyTaskObserver() override = default;
+
+ void WillProcessTask(const PendingTask& pending_task,
+ bool /* was_blocked_or_low_priority */) override {
+ num_tasks_started_++;
+ EXPECT_LE(num_tasks_started_, num_tasks_);
+ EXPECT_EQ(num_tasks_started_, num_tasks_processed_ + 1);
+ }
+
+ void DidProcessTask(const PendingTask& pending_task) override {
+ num_tasks_processed_++;
+ EXPECT_LE(num_tasks_started_, num_tasks_);
+ EXPECT_EQ(num_tasks_started_, num_tasks_processed_);
+ }
+
+ int num_tasks_started() const { return num_tasks_started_; }
+ int num_tasks_processed() const { return num_tasks_processed_; }
+
+ private:
+ int num_tasks_started_;
+ int num_tasks_processed_;
+ const int num_tasks_;
+
+ DISALLOW_COPY_AND_ASSIGN(DummyTaskObserver);
+};
+
+// A method which reposts itself |depth| times.
+void RecursiveFunc(TaskList* order, int cookie, int depth) {
+ order->RecordStart(RECURSIVE, cookie);
+ if (depth > 0) {
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, BindOnce(&RecursiveFunc, order, cookie, depth - 1));
+ }
+ order->RecordEnd(RECURSIVE, cookie);
+}
+
+void QuitFunc(TaskList* order, int cookie) {
+ order->RecordStart(QUITMESSAGELOOP, cookie);
+ RunLoop::QuitCurrentWhenIdleDeprecated();
+ order->RecordEnd(QUITMESSAGELOOP, cookie);
+}
+
+void PostNTasks(int posts_remaining) {
+ if (posts_remaining > 1) {
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, BindOnce(&PostNTasks, posts_remaining - 1));
+ }
+}
+
+#if defined(OS_WIN)
+
+void SubPumpFunc(OnceClosure on_done) {
+ MessageLoopCurrent::ScopedAllowApplicationTasksInNativeNestedLoop
+ allow_nestable_tasks;
+ MSG msg;
+ while (::GetMessage(&msg, NULL, 0, 0)) {
+ ::TranslateMessage(&msg);
+ ::DispatchMessage(&msg);
+ }
+ std::move(on_done).Run();
+}
+
+const wchar_t kMessageBoxTitle[] = L"SingleThreadTaskExecutor Unit Test";
+
+// SingleThreadTaskExecutor implicitly start a "modal message loop". Modal
+// dialog boxes, common controls (like OpenFile) and StartDoc printing function
+// can cause implicit message loops.
+void MessageBoxFunc(TaskList* order, int cookie, bool is_reentrant) {
+ order->RecordStart(MESSAGEBOX, cookie);
+ Optional<MessageLoopCurrent::ScopedAllowApplicationTasksInNativeNestedLoop>
+ maybe_allow_nesting;
+ if (is_reentrant)
+ maybe_allow_nesting.emplace();
+ ::MessageBox(NULL, L"Please wait...", kMessageBoxTitle, MB_OK);
+ order->RecordEnd(MESSAGEBOX, cookie);
+}
+
+// Will end the MessageBox.
+void EndDialogFunc(TaskList* order, int cookie) {
+ order->RecordStart(ENDDIALOG, cookie);
+ HWND window = GetActiveWindow();
+ if (window != NULL) {
+ EXPECT_NE(::EndDialog(window, IDCONTINUE), 0);
+ // Cheap way to signal that the window wasn't found if RunEnd() isn't
+ // called.
+ order->RecordEnd(ENDDIALOG, cookie);
+ }
+}
+
+// A method which posts a RecursiveFunc that will want to run while
+// ::MessageBox() is active.
+void RecursiveFuncWin(scoped_refptr<SingleThreadTaskRunner> task_runner,
+ HANDLE event,
+ bool expect_window,
+ TaskList* order,
+ bool message_box_is_reentrant) {
+ task_runner->PostTask(FROM_HERE, BindOnce(&RecursiveFunc, order, 1, 2));
+ task_runner->PostTask(
+ FROM_HERE, BindOnce(&MessageBoxFunc, order, 2, message_box_is_reentrant));
+ task_runner->PostTask(FROM_HERE, BindOnce(&RecursiveFunc, order, 3, 2));
+ // The trick here is that for nested task processing, this task will be
+ // ran _inside_ the MessageBox message loop, dismissing the MessageBox
+ // without a chance.
+ // For non-nested task processing, this will be executed _after_ the
+ // MessageBox will have been dismissed by the code below, where
+ // expect_window_ is true.
+ task_runner->PostTask(FROM_HERE, BindOnce(&EndDialogFunc, order, 4));
+ task_runner->PostTask(FROM_HERE, BindOnce(&QuitFunc, order, 5));
+
+ // Enforce that every tasks are sent before starting to run the main thread
+ // message loop.
+ ASSERT_TRUE(SetEvent(event));
+
+ // Poll for the MessageBox. Don't do this at home! At the speed we do it,
+ // you will never realize one MessageBox was shown.
+ for (; expect_window;) {
+ HWND window = ::FindWindow(L"#32770", kMessageBoxTitle);
+ if (window) {
+ // Dismiss it.
+ for (;;) {
+ HWND button = ::FindWindowEx(window, NULL, L"Button", NULL);
+ if (button != NULL) {
+ EXPECT_EQ(0, ::SendMessage(button, WM_LBUTTONDOWN, 0, 0));
+ EXPECT_EQ(0, ::SendMessage(button, WM_LBUTTONUP, 0, 0));
+ break;
+ }
+ }
+ break;
+ }
+ }
+}
+
+#endif // defined(OS_WIN)
+
+void PostNTasksThenQuit(int posts_remaining) {
+ if (posts_remaining > 1) {
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, BindOnce(&PostNTasksThenQuit, posts_remaining - 1));
+ } else {
+ RunLoop::QuitCurrentWhenIdleDeprecated();
+ }
+}
+
+#if defined(OS_WIN)
+
+class TestIOHandler : public MessagePumpForIO::IOHandler {
+ public:
+ TestIOHandler(const wchar_t* name, HANDLE signal, bool wait);
+
+ void OnIOCompleted(MessagePumpForIO::IOContext* context,
+ DWORD bytes_transfered,
+ DWORD error) override;
+
+ void Init();
+ void WaitForIO();
+ OVERLAPPED* context() { return &context_.overlapped; }
+ DWORD size() { return sizeof(buffer_); }
+
+ private:
+ char buffer_[48];
+ MessagePumpForIO::IOContext context_;
+ HANDLE signal_;
+ win::ScopedHandle file_;
+ bool wait_;
+};
+
+TestIOHandler::TestIOHandler(const wchar_t* name, HANDLE signal, bool wait)
+ : MessagePumpForIO::IOHandler(FROM_HERE), signal_(signal), wait_(wait) {
+ memset(buffer_, 0, sizeof(buffer_));
+
+ file_.Set(CreateFile(name, GENERIC_READ, 0, NULL, OPEN_EXISTING,
+ FILE_FLAG_OVERLAPPED, NULL));
+ EXPECT_TRUE(file_.IsValid());
+}
+
+void TestIOHandler::Init() {
+ MessageLoopCurrentForIO::Get()->RegisterIOHandler(file_.Get(), this);
+
+ DWORD read;
+ EXPECT_FALSE(ReadFile(file_.Get(), buffer_, size(), &read, context()));
+ EXPECT_EQ(static_cast<DWORD>(ERROR_IO_PENDING), GetLastError());
+ if (wait_)
+ WaitForIO();
+}
+
+void TestIOHandler::OnIOCompleted(MessagePumpForIO::IOContext* context,
+ DWORD bytes_transfered,
+ DWORD error) {
+ ASSERT_TRUE(context == &context_);
+ ASSERT_TRUE(SetEvent(signal_));
+}
+
+void TestIOHandler::WaitForIO() {
+ EXPECT_TRUE(MessageLoopCurrentForIO::Get()->WaitForIOCompletion(300, this));
+ EXPECT_TRUE(MessageLoopCurrentForIO::Get()->WaitForIOCompletion(400, this));
+}
+
+void RunTest_IOHandler() {
+ win::ScopedHandle callback_called(CreateEvent(NULL, TRUE, FALSE, NULL));
+ ASSERT_TRUE(callback_called.IsValid());
+
+ const wchar_t* kPipeName = L"\\\\.\\pipe\\iohandler_pipe";
+ win::ScopedHandle server(
+ CreateNamedPipe(kPipeName, PIPE_ACCESS_OUTBOUND, 0, 1, 0, 0, 0, NULL));
+ ASSERT_TRUE(server.IsValid());
+
+ Thread thread("IOHandler test");
+ Thread::Options options;
+ options.message_pump_type = MessagePumpType::IO;
+ ASSERT_TRUE(thread.StartWithOptions(options));
+
+ TestIOHandler handler(kPipeName, callback_called.Get(), false);
+ thread.task_runner()->PostTask(
+ FROM_HERE, BindOnce(&TestIOHandler::Init, Unretained(&handler)));
+ // Make sure the thread runs and sleeps for lack of work.
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(100));
+
+ const char buffer[] = "Hello there!";
+ DWORD written;
+ EXPECT_TRUE(WriteFile(server.Get(), buffer, sizeof(buffer), &written, NULL));
+
+ DWORD result = WaitForSingleObject(callback_called.Get(), 1000);
+ EXPECT_EQ(WAIT_OBJECT_0, result);
+
+ thread.Stop();
+}
+
+void RunTest_WaitForIO() {
+ win::ScopedHandle callback1_called(CreateEvent(NULL, TRUE, FALSE, NULL));
+ win::ScopedHandle callback2_called(CreateEvent(NULL, TRUE, FALSE, NULL));
+ ASSERT_TRUE(callback1_called.IsValid());
+ ASSERT_TRUE(callback2_called.IsValid());
+
+ const wchar_t* kPipeName1 = L"\\\\.\\pipe\\iohandler_pipe1";
+ const wchar_t* kPipeName2 = L"\\\\.\\pipe\\iohandler_pipe2";
+ win::ScopedHandle server1(
+ CreateNamedPipe(kPipeName1, PIPE_ACCESS_OUTBOUND, 0, 1, 0, 0, 0, NULL));
+ win::ScopedHandle server2(
+ CreateNamedPipe(kPipeName2, PIPE_ACCESS_OUTBOUND, 0, 1, 0, 0, 0, NULL));
+ ASSERT_TRUE(server1.IsValid());
+ ASSERT_TRUE(server2.IsValid());
+
+ Thread thread("IOHandler test");
+ Thread::Options options;
+ options.message_pump_type = MessagePumpType::IO;
+ ASSERT_TRUE(thread.StartWithOptions(options));
+
+ TestIOHandler handler1(kPipeName1, callback1_called.Get(), false);
+ TestIOHandler handler2(kPipeName2, callback2_called.Get(), true);
+ thread.task_runner()->PostTask(
+ FROM_HERE, BindOnce(&TestIOHandler::Init, Unretained(&handler1)));
+ // TODO(ajwong): Do we really need such long Sleeps in this function?
+ // Make sure the thread runs and sleeps for lack of work.
+ TimeDelta delay = TimeDelta::FromMilliseconds(100);
+ PlatformThread::Sleep(delay);
+ thread.task_runner()->PostTask(
+ FROM_HERE, BindOnce(&TestIOHandler::Init, Unretained(&handler2)));
+ PlatformThread::Sleep(delay);
+
+ // At this time handler1 is waiting to be called, and the thread is waiting
+ // on the Init method of handler2, filtering only handler2 callbacks.
+
+ const char buffer[] = "Hello there!";
+ DWORD written;
+ EXPECT_TRUE(WriteFile(server1.Get(), buffer, sizeof(buffer), &written, NULL));
+ PlatformThread::Sleep(2 * delay);
+ EXPECT_EQ(static_cast<DWORD>(WAIT_TIMEOUT),
+ WaitForSingleObject(callback1_called.Get(), 0))
+ << "handler1 has not been called";
+
+ EXPECT_TRUE(WriteFile(server2.Get(), buffer, sizeof(buffer), &written, NULL));
+
+ HANDLE objects[2] = {callback1_called.Get(), callback2_called.Get()};
+ DWORD result = WaitForMultipleObjects(2, objects, TRUE, 1000);
+ EXPECT_EQ(WAIT_OBJECT_0, result);
+
+ thread.Stop();
+}
+
+#endif // defined(OS_WIN)
+
+} // namespace
+
+//-----------------------------------------------------------------------------
+// Each test is run against each type of SingleThreadTaskExecutor. That way we
+// are sure that SingleThreadTaskExecutor works properly in all configurations.
+// Of course, in some cases, a unit test may only be for a particular type of
+// loop.
+
+class SingleThreadTaskExecutorTypedTest
+ : public ::testing::TestWithParam<MessagePumpType> {
+ public:
+ SingleThreadTaskExecutorTypedTest() = default;
+ ~SingleThreadTaskExecutorTypedTest() = default;
+
+ static std::string ParamInfoToString(
+ ::testing::TestParamInfo<MessagePumpType> param_info) {
+ switch (param_info.param) {
+ case MessagePumpType::DEFAULT:
+ return "default_pump";
+ case MessagePumpType::IO:
+ return "IO_pump";
+ case MessagePumpType::UI:
+ return "UI_pump";
+ case MessagePumpType::CUSTOM:
+ break;
+#if defined(OS_ANDROID)
+ case MessagePumpType::JAVA:
+ break;
+#endif // defined(OS_ANDROID)
+#if defined(OS_MACOSX)
+ case MessagePumpType::NS_RUNLOOP:
+ break;
+#endif // defined(OS_MACOSX)
+#if defined(OS_WIN)
+ case MessagePumpType::UI_WITH_WM_QUIT_SUPPORT:
+ break;
+#endif // defined(OS_WIN)
+ }
+ NOTREACHED();
+ return "";
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(SingleThreadTaskExecutorTypedTest);
+};
+
+TEST_P(SingleThreadTaskExecutorTypedTest, PostTask) {
+ SingleThreadTaskExecutor executor(GetParam());
+ // Add tests to message loop
+ scoped_refptr<Foo> foo(new Foo());
+ std::string a("a"), b("b"), c("c"), d("d");
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ BindOnce(&Foo::Test0, foo));
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, BindOnce(&Foo::Test1ConstRef, foo, a));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ BindOnce(&Foo::Test1Ptr, foo, &b));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ BindOnce(&Foo::Test1Int, foo, 100));
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, BindOnce(&Foo::Test2Ptr, foo, &a, &c));
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, BindOnce(&Foo::Test2Mixed, foo, a, &d));
+ // After all tests, post a message that will shut down the message loop
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, BindOnce(&RunLoop::QuitCurrentWhenIdleDeprecated));
+
+ // Now kick things off
+ RunLoop().Run();
+
+ EXPECT_EQ(foo->test_count(), 105);
+ EXPECT_EQ(foo->result(), "abacad");
+}
+
+TEST_P(SingleThreadTaskExecutorTypedTest, PostDelayedTask_Basic) {
+ SingleThreadTaskExecutor executor(GetParam());
+
+ // Test that PostDelayedTask results in a delayed task.
+
+ const TimeDelta kDelay = TimeDelta::FromMilliseconds(100);
+
+ int num_tasks = 1;
+ TimeTicks run_time;
+
+ TimeTicks time_before_run = TimeTicks::Now();
+ executor.task_runner()->PostDelayedTask(
+ FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time, &num_tasks), kDelay);
+ RunLoop().Run();
+ TimeTicks time_after_run = TimeTicks::Now();
+
+ EXPECT_EQ(0, num_tasks);
+ EXPECT_LT(kDelay, time_after_run - time_before_run);
+}
+
+TEST_P(SingleThreadTaskExecutorTypedTest, PostDelayedTask_InDelayOrder) {
+ SingleThreadTaskExecutor executor(GetParam());
+
+ // Test that two tasks with different delays run in the right order.
+ int num_tasks = 2;
+ TimeTicks run_time1, run_time2;
+
+ executor.task_runner()->PostDelayedTask(
+ FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time1, &num_tasks),
+ TimeDelta::FromMilliseconds(200));
+ // If we get a large pause in execution (due to a context switch) here, this
+ // test could fail.
+ executor.task_runner()->PostDelayedTask(
+ FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time2, &num_tasks),
+ TimeDelta::FromMilliseconds(10));
+
+ RunLoop().Run();
+ EXPECT_EQ(0, num_tasks);
+
+ EXPECT_TRUE(run_time2 < run_time1);
+}
+
+TEST_P(SingleThreadTaskExecutorTypedTest, PostDelayedTask_InPostOrder) {
+ SingleThreadTaskExecutor executor(GetParam());
+
+ // Test that two tasks with the same delay run in the order in which they
+ // were posted.
+ //
+ // NOTE: This is actually an approximate test since the API only takes a
+ // "delay" parameter, so we are not exactly simulating two tasks that get
+ // posted at the exact same time. It would be nice if the API allowed us to
+ // specify the desired run time.
+
+ const TimeDelta kDelay = TimeDelta::FromMilliseconds(100);
+
+ int num_tasks = 2;
+ TimeTicks run_time1, run_time2;
+
+ executor.task_runner()->PostDelayedTask(
+ FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time1, &num_tasks), kDelay);
+ executor.task_runner()->PostDelayedTask(
+ FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time2, &num_tasks), kDelay);
+
+ RunLoop().Run();
+ EXPECT_EQ(0, num_tasks);
+
+ EXPECT_TRUE(run_time1 < run_time2);
+}
+
+TEST_P(SingleThreadTaskExecutorTypedTest, PostDelayedTask_InPostOrder_2) {
+ SingleThreadTaskExecutor executor(GetParam());
+
+ // Test that a delayed task still runs after a normal tasks even if the
+ // normal tasks take a long time to run.
+
+ const TimeDelta kPause = TimeDelta::FromMilliseconds(50);
+
+ int num_tasks = 2;
+ TimeTicks run_time;
+
+ executor.task_runner()->PostTask(FROM_HERE,
+ BindOnce(&SlowFunc, kPause, &num_tasks));
+ executor.task_runner()->PostDelayedTask(
+ FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time, &num_tasks),
+ TimeDelta::FromMilliseconds(10));
+
+ TimeTicks time_before_run = TimeTicks::Now();
+ RunLoop().Run();
+ TimeTicks time_after_run = TimeTicks::Now();
+
+ EXPECT_EQ(0, num_tasks);
+
+ EXPECT_LT(kPause, time_after_run - time_before_run);
+}
+
+TEST_P(SingleThreadTaskExecutorTypedTest, PostDelayedTask_InPostOrder_3) {
+ SingleThreadTaskExecutor executor(GetParam());
+
+ // Test that a delayed task still runs after a pile of normal tasks. The key
+ // difference between this test and the previous one is that here we return
+ // the SingleThreadTaskExecutor a lot so we give the SingleThreadTaskExecutor
+ // plenty of opportunities to maybe run the delayed task. It should know not
+ // to do so until the delayed task's delay has passed.
+
+ int num_tasks = 11;
+ TimeTicks run_time1, run_time2;
+
+ // Clutter the ML with tasks.
+ for (int i = 1; i < num_tasks; ++i)
+ executor.task_runner()->PostTask(
+ FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time1, &num_tasks));
+
+ executor.task_runner()->PostDelayedTask(
+ FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time2, &num_tasks),
+ TimeDelta::FromMilliseconds(1));
+
+ RunLoop().Run();
+ EXPECT_EQ(0, num_tasks);
+
+ EXPECT_TRUE(run_time2 > run_time1);
+}
+
+TEST_P(SingleThreadTaskExecutorTypedTest, PostDelayedTask_SharedTimer) {
+ SingleThreadTaskExecutor executor(GetParam());
+
+ // Test that the interval of the timer, used to run the next delayed task, is
+ // set to a value corresponding to when the next delayed task should run.
+
+ // By setting num_tasks to 1, we ensure that the first task to run causes the
+ // run loop to exit.
+ int num_tasks = 1;
+ TimeTicks run_time1, run_time2;
+
+ executor.task_runner()->PostDelayedTask(
+ FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time1, &num_tasks),
+ TimeDelta::FromSeconds(1000));
+ executor.task_runner()->PostDelayedTask(
+ FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time2, &num_tasks),
+ TimeDelta::FromMilliseconds(10));
+
+ TimeTicks start_time = TimeTicks::Now();
+
+ RunLoop().Run();
+ EXPECT_EQ(0, num_tasks);
+
+ // Ensure that we ran in far less time than the slower timer.
+ TimeDelta total_time = TimeTicks::Now() - start_time;
+ EXPECT_GT(5000, total_time.InMilliseconds());
+
+ // In case both timers somehow run at nearly the same time, sleep a little
+ // and then run all pending to force them both to have run. This is just
+ // encouraging flakiness if there is any.
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(100));
+ RunLoop().RunUntilIdle();
+
+ EXPECT_TRUE(run_time1.is_null());
+ EXPECT_FALSE(run_time2.is_null());
+}
+
+namespace {
+
+// This is used to inject a test point for recording the destructor calls for
+// Closure objects send to MessageLoop::PostTask(). It is awkward usage since we
+// are trying to hook the actual destruction, which is not a common operation.
+class RecordDeletionProbe : public RefCounted<RecordDeletionProbe> {
+ public:
+ RecordDeletionProbe(RecordDeletionProbe* post_on_delete, bool* was_deleted)
+ : post_on_delete_(post_on_delete), was_deleted_(was_deleted) {}
+ void Run() {}
+
+ private:
+ friend class RefCounted<RecordDeletionProbe>;
+
+ ~RecordDeletionProbe() {
+ *was_deleted_ = true;
+ if (post_on_delete_.get())
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, BindOnce(&RecordDeletionProbe::Run, post_on_delete_));
+ }
+
+ scoped_refptr<RecordDeletionProbe> post_on_delete_;
+ bool* was_deleted_;
+};
+
+} // namespace
+
+/* TODO(darin): SingleThreadTaskExecutor does not support deleting all tasks in
+ */
+/* the destructor. */
+/* Fails, http://crbug.com/50272. */
+TEST_P(SingleThreadTaskExecutorTypedTest, DISABLED_EnsureDeletion) {
+ bool a_was_deleted = false;
+ bool b_was_deleted = false;
+ {
+ SingleThreadTaskExecutor executor(GetParam());
+ executor.task_runner()->PostTask(
+ FROM_HERE, BindOnce(&RecordDeletionProbe::Run,
+ new RecordDeletionProbe(nullptr, &a_was_deleted)));
+ // TODO(ajwong): Do we really need 1000ms here?
+ executor.task_runner()->PostDelayedTask(
+ FROM_HERE,
+ BindOnce(&RecordDeletionProbe::Run,
+ new RecordDeletionProbe(nullptr, &b_was_deleted)),
+ TimeDelta::FromMilliseconds(1000));
+ }
+ EXPECT_TRUE(a_was_deleted);
+ EXPECT_TRUE(b_was_deleted);
+}
+
+/* TODO(darin): SingleThreadTaskExecutor does not support deleting all tasks in
+ */
+/* the destructor. */
+/* Fails, http://crbug.com/50272. */
+TEST_P(SingleThreadTaskExecutorTypedTest, DISABLED_EnsureDeletion_Chain) {
+ bool a_was_deleted = false;
+ bool b_was_deleted = false;
+ bool c_was_deleted = false;
+ {
+ SingleThreadTaskExecutor executor(GetParam());
+ // The scoped_refptr for each of the below is held either by the chained
+ // RecordDeletionProbe, or the bound RecordDeletionProbe::Run() callback.
+ RecordDeletionProbe* a = new RecordDeletionProbe(nullptr, &a_was_deleted);
+ RecordDeletionProbe* b = new RecordDeletionProbe(a, &b_was_deleted);
+ RecordDeletionProbe* c = new RecordDeletionProbe(b, &c_was_deleted);
+ executor.task_runner()->PostTask(FROM_HERE,
+ BindOnce(&RecordDeletionProbe::Run, c));
+ }
+ EXPECT_TRUE(a_was_deleted);
+ EXPECT_TRUE(b_was_deleted);
+ EXPECT_TRUE(c_was_deleted);
+}
+
+namespace {
+
+void NestingFunc(int* depth) {
+ if (*depth > 0) {
+ *depth -= 1;
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ BindOnce(&NestingFunc, depth));
+
+ RunLoop(RunLoop::Type::kNestableTasksAllowed).Run();
+ }
+ base::RunLoop::QuitCurrentWhenIdleDeprecated();
+}
+
+} // namespace
+
+TEST_P(SingleThreadTaskExecutorTypedTest, Nesting) {
+ SingleThreadTaskExecutor executor(GetParam());
+
+ int depth = 50;
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ BindOnce(&NestingFunc, &depth));
+ RunLoop().Run();
+ EXPECT_EQ(depth, 0);
+}
+
+TEST_P(SingleThreadTaskExecutorTypedTest, Recursive) {
+ SingleThreadTaskExecutor executor(GetParam());
+
+ TaskList order;
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, BindOnce(&RecursiveFunc, &order, 1, 2));
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, BindOnce(&RecursiveFunc, &order, 2, 2));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ BindOnce(&QuitFunc, &order, 3));
+
+ RunLoop().Run();
+
+ // FIFO order.
+ ASSERT_EQ(14U, order.Size());
+ EXPECT_EQ(order.Get(0), TaskItem(RECURSIVE, 1, true));
+ EXPECT_EQ(order.Get(1), TaskItem(RECURSIVE, 1, false));
+ EXPECT_EQ(order.Get(2), TaskItem(RECURSIVE, 2, true));
+ EXPECT_EQ(order.Get(3), TaskItem(RECURSIVE, 2, false));
+ EXPECT_EQ(order.Get(4), TaskItem(QUITMESSAGELOOP, 3, true));
+ EXPECT_EQ(order.Get(5), TaskItem(QUITMESSAGELOOP, 3, false));
+ EXPECT_EQ(order.Get(6), TaskItem(RECURSIVE, 1, true));
+ EXPECT_EQ(order.Get(7), TaskItem(RECURSIVE, 1, false));
+ EXPECT_EQ(order.Get(8), TaskItem(RECURSIVE, 2, true));
+ EXPECT_EQ(order.Get(9), TaskItem(RECURSIVE, 2, false));
+ EXPECT_EQ(order.Get(10), TaskItem(RECURSIVE, 1, true));
+ EXPECT_EQ(order.Get(11), TaskItem(RECURSIVE, 1, false));
+ EXPECT_EQ(order.Get(12), TaskItem(RECURSIVE, 2, true));
+ EXPECT_EQ(order.Get(13), TaskItem(RECURSIVE, 2, false));
+}
+
+namespace {
+
+void OrderedFunc(TaskList* order, int cookie) {
+ order->RecordStart(ORDERED, cookie);
+ order->RecordEnd(ORDERED, cookie);
+}
+
+} // namespace
+
+// Tests that non nestable tasks run in FIFO if there are no nested loops.
+TEST_P(SingleThreadTaskExecutorTypedTest, NonNestableWithNoNesting) {
+ SingleThreadTaskExecutor executor(GetParam());
+
+ TaskList order;
+
+ ThreadTaskRunnerHandle::Get()->PostNonNestableTask(
+ FROM_HERE, BindOnce(&OrderedFunc, &order, 1));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ BindOnce(&OrderedFunc, &order, 2));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ BindOnce(&QuitFunc, &order, 3));
+ RunLoop().Run();
+
+ // FIFO order.
+ ASSERT_EQ(6U, order.Size());
+ EXPECT_EQ(order.Get(0), TaskItem(ORDERED, 1, true));
+ EXPECT_EQ(order.Get(1), TaskItem(ORDERED, 1, false));
+ EXPECT_EQ(order.Get(2), TaskItem(ORDERED, 2, true));
+ EXPECT_EQ(order.Get(3), TaskItem(ORDERED, 2, false));
+ EXPECT_EQ(order.Get(4), TaskItem(QUITMESSAGELOOP, 3, true));
+ EXPECT_EQ(order.Get(5), TaskItem(QUITMESSAGELOOP, 3, false));
+}
+
+namespace {
+
+void FuncThatPumps(TaskList* order, int cookie) {
+ order->RecordStart(PUMPS, cookie);
+ RunLoop(RunLoop::Type::kNestableTasksAllowed).RunUntilIdle();
+ order->RecordEnd(PUMPS, cookie);
+}
+
+void SleepFunc(TaskList* order, int cookie, TimeDelta delay) {
+ order->RecordStart(SLEEP, cookie);
+ PlatformThread::Sleep(delay);
+ order->RecordEnd(SLEEP, cookie);
+}
+
+} // namespace
+
+// Tests that non nestable tasks don't run when there's code in the call stack.
+TEST_P(SingleThreadTaskExecutorTypedTest, NonNestableDelayedInNestedLoop) {
+ SingleThreadTaskExecutor executor(GetParam());
+
+ TaskList order;
+
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ BindOnce(&FuncThatPumps, &order, 1));
+ ThreadTaskRunnerHandle::Get()->PostNonNestableTask(
+ FROM_HERE, BindOnce(&OrderedFunc, &order, 2));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ BindOnce(&OrderedFunc, &order, 3));
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE,
+ BindOnce(&SleepFunc, &order, 4, TimeDelta::FromMilliseconds(50)));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ BindOnce(&OrderedFunc, &order, 5));
+ ThreadTaskRunnerHandle::Get()->PostNonNestableTask(
+ FROM_HERE, BindOnce(&QuitFunc, &order, 6));
+
+ RunLoop().Run();
+
+ // FIFO order.
+ ASSERT_EQ(12U, order.Size());
+ EXPECT_EQ(order.Get(0), TaskItem(PUMPS, 1, true));
+ EXPECT_EQ(order.Get(1), TaskItem(ORDERED, 3, true));
+ EXPECT_EQ(order.Get(2), TaskItem(ORDERED, 3, false));
+ EXPECT_EQ(order.Get(3), TaskItem(SLEEP, 4, true));
+ EXPECT_EQ(order.Get(4), TaskItem(SLEEP, 4, false));
+ EXPECT_EQ(order.Get(5), TaskItem(ORDERED, 5, true));
+ EXPECT_EQ(order.Get(6), TaskItem(ORDERED, 5, false));
+ EXPECT_EQ(order.Get(7), TaskItem(PUMPS, 1, false));
+ EXPECT_EQ(order.Get(8), TaskItem(ORDERED, 2, true));
+ EXPECT_EQ(order.Get(9), TaskItem(ORDERED, 2, false));
+ EXPECT_EQ(order.Get(10), TaskItem(QUITMESSAGELOOP, 6, true));
+ EXPECT_EQ(order.Get(11), TaskItem(QUITMESSAGELOOP, 6, false));
+}
+
+namespace {
+
+void FuncThatRuns(TaskList* order, int cookie, RunLoop* run_loop) {
+ order->RecordStart(RUNS, cookie);
+ run_loop->Run();
+ order->RecordEnd(RUNS, cookie);
+}
+
+void FuncThatQuitsNow() {
+ base::RunLoop::QuitCurrentDeprecated();
+}
+
+} // namespace
+
+// Tests RunLoopQuit only quits the corresponding MessageLoop::Run.
+TEST_P(SingleThreadTaskExecutorTypedTest, QuitNow) {
+ SingleThreadTaskExecutor executor(GetParam());
+
+ TaskList order;
+
+ RunLoop nested_run_loop(RunLoop::Type::kNestableTasksAllowed);
+
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE,
+ BindOnce(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ BindOnce(&OrderedFunc, &order, 2));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ BindOnce(&FuncThatQuitsNow));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ BindOnce(&OrderedFunc, &order, 3));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ BindOnce(&FuncThatQuitsNow));
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, BindOnce(&OrderedFunc, &order, 4)); // never runs
+
+ RunLoop().Run();
+
+ ASSERT_EQ(6U, order.Size());
+ int task_index = 0;
+ EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, false));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, false));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 3, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 3, false));
+ EXPECT_EQ(static_cast<size_t>(task_index), order.Size());
+}
+
+// Tests that RunLoop::Quit only quits the corresponding RunLoop::Run: quitting
+// the outer loop while a nested loop is running does not tear down the nested
+// loop; the nested loop keeps pumping until its own QuitClosure runs.
+TEST_P(SingleThreadTaskExecutorTypedTest, RunLoopQuitTop) {
+ SingleThreadTaskExecutor executor(GetParam());
+
+ TaskList order;
+
+ RunLoop outer_run_loop;
+ RunLoop nested_run_loop(RunLoop::Type::kNestableTasksAllowed);
+
+ // Task 1 enters the nested loop; the remaining tasks (outer quit, task 2,
+ // nested quit) are all processed from inside that nested loop.
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE,
+ BindOnce(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ outer_run_loop.QuitClosure());
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ BindOnce(&OrderedFunc, &order, 2));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ nested_run_loop.QuitClosure());
+
+ outer_run_loop.Run();
+
+ // Task 2 runs inside the nested loop even though the outer loop was already
+ // quit; the nested loop only unwinds at its own quit.
+ ASSERT_EQ(4U, order.Size());
+ int task_index = 0;
+ EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, false));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, false));
+ EXPECT_EQ(static_cast<size_t>(task_index), order.Size());
+}
+
+// Tests that RunLoop::Quit only quits the corresponding RunLoop::Run: quitting
+// the nested loop does not quit the outer one — the outer loop continues and
+// runs the remaining tasks at its own level.
+TEST_P(SingleThreadTaskExecutorTypedTest, RunLoopQuitNested) {
+ SingleThreadTaskExecutor executor(GetParam());
+
+ TaskList order;
+
+ RunLoop outer_run_loop;
+ RunLoop nested_run_loop(RunLoop::Type::kNestableTasksAllowed);
+
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE,
+ BindOnce(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
+ // The nested quit unwinds the nested loop immediately, so task 2 and the
+ // outer quit run at the outer loop's level.
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ nested_run_loop.QuitClosure());
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ BindOnce(&OrderedFunc, &order, 2));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ outer_run_loop.QuitClosure());
+
+ outer_run_loop.Run();
+
+ ASSERT_EQ(4U, order.Size());
+ int task_index = 0;
+ EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, false));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, false));
+ EXPECT_EQ(static_cast<size_t>(task_index), order.Size());
+}
+
+// Quits the current (outer) loop and immediately runs a nested loop.
+// Records start/end markers around the whole sequence so tests can observe
+// that the nested loop ran to completion even though the outer loop had
+// already been quit.
+void QuitAndRunNestedLoop(TaskList* order,
+ int cookie,
+ RunLoop* outer_run_loop,
+ RunLoop* nested_run_loop) {
+ order->RecordStart(RUNS, cookie);
+ outer_run_loop->Quit();
+ nested_run_loop->Run();
+ order->RecordEnd(RUNS, cookie);
+}
+
+// Test that we can run a nested loop after quitting the current one.
+TEST_P(SingleThreadTaskExecutorTypedTest, RunLoopNestedAfterQuit) {
+ SingleThreadTaskExecutor executor(GetParam());
+
+ TaskList order;
+
+ RunLoop outer_run_loop;
+ RunLoop nested_run_loop;
+
+ // The nested quit is posted first so that nested_run_loop.Run() inside
+ // QuitAndRunNestedLoop() exits once it processes this closure.
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ nested_run_loop.QuitClosure());
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, BindOnce(&QuitAndRunNestedLoop, &order, 1, &outer_run_loop,
+ &nested_run_loop));
+
+ outer_run_loop.Run();
+
+ ASSERT_EQ(2U, order.Size());
+ int task_index = 0;
+ EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, false));
+ EXPECT_EQ(static_cast<size_t>(task_index), order.Size());
+}
+
+// Tests that RunLoop::Quit only quits the corresponding RunLoop::Run: quitting
+// an unrelated ("bogus") RunLoop that is not running has no effect on the
+// loops that are.
+TEST_P(SingleThreadTaskExecutorTypedTest, RunLoopQuitBogus) {
+ SingleThreadTaskExecutor executor(GetParam());
+
+ TaskList order;
+
+ RunLoop outer_run_loop;
+ RunLoop nested_run_loop(RunLoop::Type::kNestableTasksAllowed);
+ // Never Run() — its QuitClosure below must be a no-op for the other loops.
+ RunLoop bogus_run_loop;
+
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE,
+ BindOnce(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ bogus_run_loop.QuitClosure());
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ BindOnce(&OrderedFunc, &order, 2));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ outer_run_loop.QuitClosure());
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ nested_run_loop.QuitClosure());
+
+ outer_run_loop.Run();
+
+ ASSERT_EQ(4U, order.Size());
+ int task_index = 0;
+ EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, false));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, false));
+ EXPECT_EQ(static_cast<size_t>(task_index), order.Size());
+}
+
+// Tests that RunLoop::Quit only quits the corresponding RunLoop::Run: quit
+// closures for outer loops take effect only once the deeper nested loops have
+// unwound, so tasks keep running in the innermost loop in the meantime.
+TEST_P(SingleThreadTaskExecutorTypedTest, RunLoopQuitDeep) {
+ SingleThreadTaskExecutor executor(GetParam());
+
+ TaskList order;
+
+ RunLoop outer_run_loop;
+ RunLoop nested_loop1(RunLoop::Type::kNestableTasksAllowed);
+ RunLoop nested_loop2(RunLoop::Type::kNestableTasksAllowed);
+ RunLoop nested_loop3(RunLoop::Type::kNestableTasksAllowed);
+ RunLoop nested_loop4(RunLoop::Type::kNestableTasksAllowed);
+
+ // Tasks 1-4 stack four levels of nested loops; everything from task 5 on is
+ // processed by the innermost loop (nested_loop4).
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, BindOnce(&FuncThatRuns, &order, 1, Unretained(&nested_loop1)));
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, BindOnce(&FuncThatRuns, &order, 2, Unretained(&nested_loop2)));
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, BindOnce(&FuncThatRuns, &order, 3, Unretained(&nested_loop3)));
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, BindOnce(&FuncThatRuns, &order, 4, Unretained(&nested_loop4)));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ BindOnce(&OrderedFunc, &order, 5));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ outer_run_loop.QuitClosure());
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ BindOnce(&OrderedFunc, &order, 6));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ nested_loop1.QuitClosure());
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ BindOnce(&OrderedFunc, &order, 7));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ nested_loop2.QuitClosure());
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ BindOnce(&OrderedFunc, &order, 8));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ nested_loop3.QuitClosure());
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ BindOnce(&OrderedFunc, &order, 9));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ nested_loop4.QuitClosure());
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ BindOnce(&OrderedFunc, &order, 10));
+
+ outer_run_loop.Run();
+
+ // Tasks 5-9 all run in the innermost loop; the loops then unwind in reverse
+ // order (4, 3, 2, 1) as each pending quit takes effect. Task 10 never runs.
+ ASSERT_EQ(18U, order.Size());
+ int task_index = 0;
+ EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 2, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 3, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 4, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 5, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 5, false));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 6, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 6, false));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 7, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 7, false));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 8, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 8, false));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 9, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 9, false));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 4, false));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 3, false));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 2, false));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, false));
+ EXPECT_EQ(static_cast<size_t>(task_index), order.Size());
+}
+
+// Tests that Quit() called before Run() makes Run() return immediately,
+// without running any posted tasks.
+TEST_P(SingleThreadTaskExecutorTypedTest, RunLoopQuitOrderBefore) {
+ SingleThreadTaskExecutor executor(GetParam());
+
+ TaskList order;
+
+ RunLoop run_loop;
+
+ // Quit before Run(): the loop below must exit without processing anything.
+ run_loop.Quit();
+
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, BindOnce(&OrderedFunc, &order, 1)); // never runs
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, BindOnce(&FuncThatQuitsNow)); // never runs
+
+ run_loop.Run();
+
+ ASSERT_EQ(0U, order.Size());
+}
+
+// Tests that Quit() invoked from a task during Run() stops the loop before
+// any subsequently posted tasks are processed.
+TEST_P(SingleThreadTaskExecutorTypedTest, RunLoopQuitOrderDuring) {
+ SingleThreadTaskExecutor executor(GetParam());
+
+ TaskList order;
+
+ RunLoop run_loop;
+
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ BindOnce(&OrderedFunc, &order, 1));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE, run_loop.QuitClosure());
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, BindOnce(&OrderedFunc, &order, 2)); // never runs
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, BindOnce(&FuncThatQuitsNow)); // never runs
+
+ run_loop.Run();
+
+ ASSERT_EQ(2U, order.Size());
+ int task_index = 0;
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 1, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 1, false));
+ EXPECT_EQ(static_cast<size_t>(task_index), order.Size());
+}
+
+// Tests that a QuitClosure for a loop that has already finished running is a
+// no-op for the loops that run afterwards.
+TEST_P(SingleThreadTaskExecutorTypedTest, RunLoopQuitOrderAfter) {
+ SingleThreadTaskExecutor executor(GetParam());
+
+ TaskList order;
+
+ RunLoop nested_run_loop(RunLoop::Type::kNestableTasksAllowed);
+
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE,
+ BindOnce(&FuncThatRuns, &order, 1, Unretained(&nested_run_loop)));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ BindOnce(&OrderedFunc, &order, 2));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ BindOnce(&FuncThatQuitsNow));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ BindOnce(&OrderedFunc, &order, 3));
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, nested_run_loop.QuitClosure()); // has no effect
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ BindOnce(&OrderedFunc, &order, 4));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ BindOnce(&FuncThatQuitsNow));
+
+ // FuncThatQuitsNow uses RunLoop::QuitCurrentDeprecated(), which requires an
+ // explicit opt-in on the running loop.
+ nested_run_loop.allow_quit_current_deprecated_ = true;
+
+ RunLoop outer_run_loop;
+ outer_run_loop.Run();
+
+ ASSERT_EQ(8U, order.Size());
+ int task_index = 0;
+ EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 2, false));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(RUNS, 1, false));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 3, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 3, false));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 4, true));
+ EXPECT_EQ(order.Get(task_index++), TaskItem(ORDERED, 4, false));
+ EXPECT_EQ(static_cast<size_t>(task_index), order.Size());
+}
+
+// There was a bug in the MessagePumpGLib where posting tasks recursively
+// caused the message loop to hang, due to the buffer of the internal pipe
+// becoming full. Test all SingleThreadTaskExecutor types to ensure this issue
+// does not exist in other MessagePumps.
+//
+// On Linux, the pipe buffer size is 64KiB by default. The bug caused one
+// byte accumulated in the pipe per two posts, so we should repeat 128K
+// times to reproduce the bug.
+#if defined(OS_FUCHSIA)
+// TODO(crbug.com/810077): This is flaky on Fuchsia.
+#define MAYBE_RecursivePosts DISABLED_RecursivePosts
+#else
+#define MAYBE_RecursivePosts RecursivePosts
+#endif
+TEST_P(SingleThreadTaskExecutorTypedTest, MAYBE_RecursivePosts) {
+ // 128K recursive posts — see the pipe-buffer analysis above. Success is
+ // simply not hanging in Run().
+ const int kNumTimes = 1 << 17;
+ SingleThreadTaskExecutor executor(GetParam());
+ executor.task_runner()->PostTask(FROM_HERE,
+ BindOnce(&PostNTasksThenQuit, kNumTimes));
+ RunLoop().Run();
+}
+
+// At the top level (no nested loop running), nestable tasks are allowed.
+TEST_P(SingleThreadTaskExecutorTypedTest, NestableTasksAllowedAtTopLevel) {
+ SingleThreadTaskExecutor executor(GetParam());
+ EXPECT_TRUE(MessageLoopCurrent::Get()->NestableTasksAllowed());
+}
+
+// Nestable tasks shouldn't be allowed to run reentrantly by default (regression
+// test for https://crbug.com/754112).
+TEST_P(SingleThreadTaskExecutorTypedTest, NestableTasksDisallowedByDefault) {
+ SingleThreadTaskExecutor executor(GetParam());
+ RunLoop run_loop;
+ // Checked from inside a running task: one level of application-task nesting
+ // is active here, so further nestable tasks must be disallowed.
+ executor.task_runner()->PostTask(
+ FROM_HERE,
+ BindOnce(
+ [](RunLoop* run_loop) {
+ EXPECT_FALSE(MessageLoopCurrent::Get()->NestableTasksAllowed());
+ run_loop->Quit();
+ },
+ Unretained(&run_loop)));
+ run_loop.Run();
+}
+
+// A RunLoop of Type::kNestableTasksAllowed processes application tasks while
+// nested, but that allowance does not propagate to deeper nesting levels.
+TEST_P(SingleThreadTaskExecutorTypedTest,
+ NestableTasksProcessedWhenRunLoopAllows) {
+ SingleThreadTaskExecutor executor(GetParam());
+ RunLoop run_loop;
+ executor.task_runner()->PostTask(
+ FROM_HERE,
+ BindOnce(
+ [](RunLoop* run_loop) {
+ // This test would hang if this RunLoop wasn't of type
+ // kNestableTasksAllowed (i.e. this is testing that this is
+ // processed and doesn't hang).
+ RunLoop nested_run_loop(RunLoop::Type::kNestableTasksAllowed);
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE,
+ BindOnce(
+ [](RunLoop* nested_run_loop) {
+ // Each additional layer of application task nesting
+ // requires its own allowance. The kNestableTasksAllowed
+ // RunLoop allowed this task to be processed but further
+ // nestable tasks are by default disallowed from this
+ // layer.
+ EXPECT_FALSE(
+ MessageLoopCurrent::Get()->NestableTasksAllowed());
+ nested_run_loop->Quit();
+ },
+ Unretained(&nested_run_loop)));
+ nested_run_loop.Run();
+
+ run_loop->Quit();
+ },
+ Unretained(&run_loop)));
+ run_loop.Run();
+}
+
+// ScopedAllowApplicationTasksInNativeNestedLoop enables nestable tasks only
+// for the duration of its scope; the allowance reverts on destruction.
+TEST_P(SingleThreadTaskExecutorTypedTest,
+ NestableTasksAllowedExplicitlyInScope) {
+ SingleThreadTaskExecutor executor(GetParam());
+ RunLoop run_loop;
+ executor.task_runner()->PostTask(
+ FROM_HERE,
+ BindOnce(
+ [](RunLoop* run_loop) {
+ {
+ // Allowed only inside this inner scope.
+ MessageLoopCurrent::ScopedAllowApplicationTasksInNativeNestedLoop
+ allow_nestable_tasks;
+ EXPECT_TRUE(MessageLoopCurrent::Get()->NestableTasksAllowed());
+ }
+ // Back to the default (disallowed) once the scoper is destroyed.
+ EXPECT_FALSE(MessageLoopCurrent::Get()->NestableTasksAllowed());
+ run_loop->Quit();
+ },
+ Unretained(&run_loop)));
+ run_loop.Run();
+}
+
+// IsIdleForTesting() reflects pending immediate work: false while an
+// immediate task is queued, true again once RunUntilIdle drains it.
+TEST_P(SingleThreadTaskExecutorTypedTest, IsIdleForTesting) {
+ SingleThreadTaskExecutor executor(GetParam());
+ EXPECT_TRUE(MessageLoopCurrent::Get()->IsIdleForTesting());
+ executor.task_runner()->PostTask(FROM_HERE, BindOnce([]() {}));
+ executor.task_runner()->PostDelayedTask(FROM_HERE, BindOnce([]() {}),
+ TimeDelta::FromMilliseconds(10));
+ EXPECT_FALSE(MessageLoopCurrent::Get()->IsIdleForTesting());
+ RunLoop().RunUntilIdle();
+ EXPECT_TRUE(MessageLoopCurrent::Get()->IsIdleForTesting());
+
+ // NOTE(review): still reported idle even though the 10ms delayed task is
+ // now ripe after the 20ms sleep — ripe-but-unswept delayed tasks evidently
+ // don't count as pending work here.
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(20));
+ EXPECT_TRUE(MessageLoopCurrent::Get()->IsIdleForTesting());
+}
+
+// A non-nestable task cannot run inside a nested loop; it stays queued (and
+// the loop is not "idle") until control returns to the outer loop.
+TEST_P(SingleThreadTaskExecutorTypedTest, IsIdleForTestingNonNestableTask) {
+ SingleThreadTaskExecutor executor(GetParam());
+ RunLoop run_loop;
+ EXPECT_TRUE(MessageLoopCurrent::Get()->IsIdleForTesting());
+ bool nested_task_run = false;
+ executor.task_runner()->PostTask(
+ FROM_HERE, BindLambdaForTesting([&]() {
+ RunLoop nested_run_loop(RunLoop::Type::kNestableTasksAllowed);
+
+ // Posted non-nestable: must NOT run inside nested_run_loop below.
+ executor.task_runner()->PostNonNestableTask(
+ FROM_HERE, BindLambdaForTesting([&]() { nested_task_run = true; }));
+
+ executor.task_runner()->PostTask(
+ FROM_HERE, BindLambdaForTesting([&]() {
+ EXPECT_FALSE(nested_task_run);
+ EXPECT_TRUE(MessageLoopCurrent::Get()->IsIdleForTesting());
+ }));
+
+ nested_run_loop.RunUntilIdle();
+ EXPECT_FALSE(nested_task_run);
+ // Not idle: the deferred non-nestable task is still pending.
+ EXPECT_FALSE(MessageLoopCurrent::Get()->IsIdleForTesting());
+ }));
+
+ run_loop.RunUntilIdle();
+
+ // The non-nestable task finally ran at the outer level.
+ EXPECT_TRUE(nested_task_run);
+ EXPECT_TRUE(MessageLoopCurrent::Get()->IsIdleForTesting());
+}
+
+// Run every typed test above against all three message pump types.
+INSTANTIATE_TEST_SUITE_P(All,
+ SingleThreadTaskExecutorTypedTest,
+ ::testing::Values(MessagePumpType::DEFAULT,
+ MessagePumpType::UI,
+ MessagePumpType::IO),
+ SingleThreadTaskExecutorTypedTest::ParamInfoToString);
+
+#if defined(OS_WIN)
+
+// Verifies that the SingleThreadTaskExecutor ignores WM_QUIT, rather than
+// quitting. Users of SingleThreadTaskExecutor typically expect to control when
+// their RunLoops stop Run()ning explicitly, via QuitClosure() etc (see
+// https://crbug.com/720078).
+TEST(SingleThreadTaskExecutorTest, WmQuitIsIgnored) {
+ SingleThreadTaskExecutor executor(MessagePumpType::UI);
+
+ // Post a WM_QUIT message to the current thread.
+ ::PostQuitMessage(0);
+
+ // Post a task to the current thread, with a small delay to make it less
+ // likely that we process the posted task before looking for WM_* messages.
+ bool task_was_run = false;
+ RunLoop run_loop;
+ executor.task_runner()->PostDelayedTask(
+ FROM_HERE,
+ BindOnce(
+ [](bool* flag, OnceClosure closure) {
+ *flag = true;
+ std::move(closure).Run();
+ },
+ &task_was_run, run_loop.QuitClosure()),
+ TestTimeouts::tiny_timeout());
+
+ // Run the loop, and ensure that the posted task is processed before we quit.
+ run_loop.Run();
+ EXPECT_TRUE(task_was_run);
+}
+
+TEST(SingleThreadTaskExecutorTest, PostDelayedTask_SharedTimer_SubPump) {
+ SingleThreadTaskExecutor executor(MessagePumpType::UI);
+
+ // Test that the interval of the timer, used to run the next delayed task, is
+ // set to a value corresponding to when the next delayed task should run.
+
+ // By setting num_tasks to 1, we ensure that the first task to run causes the
+ // run loop to exit.
+ int num_tasks = 1;
+ TimeTicks run_time;
+
+ RunLoop run_loop;
+
+ executor.task_runner()->PostTask(
+ FROM_HERE, BindOnce(&SubPumpFunc, run_loop.QuitClosure()));
+
+ // This very delayed task should never run.
+ executor.task_runner()->PostDelayedTask(
+ FROM_HERE, BindOnce(&RecordRunTimeFunc, &run_time, &num_tasks),
+ TimeDelta::FromSeconds(1000));
+
+ // This slightly delayed task should run from within SubPumpFunc.
+ executor.task_runner()->PostDelayedTask(FROM_HERE,
+ BindOnce(&::PostQuitMessage, 0),
+ TimeDelta::FromMilliseconds(10));
+
+ Time start_time = Time::Now();
+
+ run_loop.Run();
+ EXPECT_EQ(1, num_tasks);
+
+ // Ensure that we ran in far less time than the slower timer.
+ // (EXPECT_GT(a, b) asserts a > b, i.e. total elapsed ms stayed below 5000.)
+ TimeDelta total_time = Time::Now() - start_time;
+ EXPECT_GT(5000, total_time.InMilliseconds());
+
+ // In case both timers somehow run at nearly the same time, sleep a little
+ // and then run all pending to force them both to have run. This is just
+ // encouraging flakiness if there is any.
+ PlatformThread::Sleep(TimeDelta::FromMilliseconds(100));
+ RunLoop().RunUntilIdle();
+
+ // run_time would be set only if the 1000s task had (incorrectly) run.
+ EXPECT_TRUE(run_time.is_null());
+}
+
+namespace {
+
+// MessageWindow handler: when the associated native WM_TIMER fires, posts an
+// application task that quits the native loop via ::PostQuitMessage.
+bool QuitOnSystemTimer(UINT message,
+ WPARAM wparam,
+ LPARAM lparam,
+ LRESULT* result) {
+ if (message == static_cast<UINT>(WM_TIMER)) {
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ BindOnce(&::PostQuitMessage, 0));
+ }
+ *result = 0;
+ return true;
+}
+
+// Same as QuitOnSystemTimer(), but posts the quit as a *delayed* application
+// task, exercising the delayed-work scheduling path of the pump.
+bool DelayedQuitOnSystemTimer(UINT message,
+ WPARAM wparam,
+ LPARAM lparam,
+ LRESULT* result) {
+ if (message == static_cast<UINT>(WM_TIMER)) {
+ ThreadTaskRunnerHandle::Get()->PostDelayedTask(
+ FROM_HERE, BindOnce(&::PostQuitMessage, 0),
+ TimeDelta::FromMilliseconds(10));
+ }
+ *result = 0;
+ return true;
+}
+
+} // namespace
+
+// This is a regression test for
+// https://crrev.com/c/1455266/9/base/message_loop/message_pump_win.cc#125
+// See below for the delayed task version.
+TEST(SingleThreadTaskExecutorTest, PostImmediateTaskFromSystemPump) {
+ SingleThreadTaskExecutor executor(MessagePumpType::UI);
+
+ RunLoop run_loop;
+
+ // A native message window to generate a system message which invokes
+ // QuitOnSystemTimer() when the native timer fires.
+ win::MessageWindow local_message_window;
+ local_message_window.Create(BindRepeating(&QuitOnSystemTimer));
+ ASSERT_TRUE(::SetTimer(local_message_window.hwnd(), 0, 20, nullptr));
+
+ // The first task will enter a native message loop. This test then verifies
+ // that the pump is able to run an immediate application task after the native
+ // pump went idle.
+ executor.task_runner()->PostTask(
+ FROM_HERE, BindOnce(&SubPumpFunc, run_loop.QuitClosure()));
+
+ // Test success is determined by not hanging in this Run() call.
+ run_loop.Run();
+}
+
+// This is a regression test for
+// https://crrev.com/c/1455266/9/base/message_loop/message_pump_win.cc#125 This
+// is the delayed task equivalent of the above PostImmediateTaskFromSystemPump
+// test.
+//
+// As a reminder of how this works, here's the sequence of events in this test:
+// (NOTE(review): step 8 is missing from this upstream trace; the "!!" note
+// below effectively describes it.)
+// 1) Test start:
+// work_deduplicator.cc(24): BindToCurrentThread
+// work_deduplicator.cc(34): OnWorkRequested
+// thread_controller_with_message_pump_impl.cc(237) : DoWork
+// work_deduplicator.cc(50): OnWorkStarted
+// 2) SubPumpFunc entered:
+// message_loop_unittest.cc(278): SubPumpFunc
+// 3) ScopedAllowApplicationTasksInNativeNestedLoop triggers nested
+// ScheduleWork: work_deduplicator.cc(34): OnWorkRequested
+// 4) Nested system loop starts and pumps internal kMsgHaveWork:
+// message_loop_unittest.cc(282): SubPumpFunc : Got Message
+// message_pump_win.cc(302): HandleWorkMessage
+// thread_controller_with_message_pump_impl.cc(237) : DoWork
+// 5) Attempt to DoWork(), there's nothing to do, NextWorkInfo indicates delay.
+// work_deduplicator.cc(50): OnWorkStarted
+// work_deduplicator.cc(58): WillCheckForMoreWork
+// work_deduplicator.cc(67): DidCheckForMoreWork
+// 6) Return control to HandleWorkMessage() which schedules native timer
+// and goes to sleep (no kMsgHaveWork in native queue).
+// message_pump_win.cc(328): HandleWorkMessage ScheduleNativeTimer
+// 7) Native timer fires and posts the delayed application task:
+// message_loop_unittest.cc(282): SubPumpFunc : Got Message
+// message_loop_unittest.cc(1581): DelayedQuitOnSystemTimer
+// !! This is the critical step verified by this test. Since the
+// ThreadController is idle after (6), it won't be invoked again and thus
+// won't get a chance to return a NextWorkInfo that indicates the next
+// delay. A native timer is thus required to have SubPumpFunc handle it.
+// work_deduplicator.cc(42): OnDelayedWorkRequested
+// message_pump_win.cc(129): ScheduleDelayedWork
+// 9) The scheduled native timer fires and runs application task binding
+// ::PostQuitMessage :
+// message_loop_unittest.cc(282) SubPumpFunc : Got Message
+// work_deduplicator.cc(50): OnWorkStarted
+// thread_controller_with_message_pump_impl.cc(237) : DoWork
+// 10) SequenceManager updates delay to none and notifies
+// (TODO(scheduler-dev): Could remove this step but WorkDeduplicator knows
+// to ignore at least):
+// work_deduplicator.cc(42): OnDelayedWorkRequested
+// 11) Nested application task completes and SubPumpFunc unwinds:
+// work_deduplicator.cc(58): WillCheckForMoreWork
+// work_deduplicator.cc(67): DidCheckForMoreWork
+// 12) ~ScopedAllowApplicationTasksInNativeNestedLoop() makes sure
+// WorkDeduplicator knows we're back in DoWork() (not relevant in this test
+// but important overall). work_deduplicator.cc(50): OnWorkStarted
+// 13) Application task which ran SubPumpFunc completes and test finishes.
+// work_deduplicator.cc(67): DidCheckForMoreWork
+TEST(SingleThreadTaskExecutorTest, PostDelayedTaskFromSystemPump) {
+ SingleThreadTaskExecutor executor(MessagePumpType::UI);
+
+ RunLoop run_loop;
+
+ // A native message window to generate a system message which invokes
+ // DelayedQuitOnSystemTimer() when the native timer fires.
+ win::MessageWindow local_message_window;
+ local_message_window.Create(BindRepeating(&DelayedQuitOnSystemTimer));
+ ASSERT_TRUE(::SetTimer(local_message_window.hwnd(), 0, 20, nullptr));
+
+ // The first task will enter a native message loop. This test then verifies
+ // that the pump is able to run a delayed application task after the native
+ // pump went idle.
+ executor.task_runner()->PostTask(
+ FROM_HERE, BindOnce(&SubPumpFunc, run_loop.QuitClosure()));
+
+ // Test success is determined by not hanging in this Run() call.
+ run_loop.Run();
+}
+
+TEST(SingleThreadTaskExecutorTest, WmQuitIsVisibleToSubPump) {
+ SingleThreadTaskExecutor executor(MessagePumpType::UI);
+
+ // Regression test for https://crbug.com/888559. When processing a
+ // kMsgHaveWork we peek and remove the next message and dispatch that ourself,
+ // to minimize impact of these messages on message-queue processing. If we
+ // received kMsgHaveWork dispatched by a nested pump (e.g. ::GetMessage()
+ // loop) then there is a risk that the next message is that loop's WM_QUIT
+ // message, which must be processed directly by ::GetMessage() for the loop to
+ // actually quit. This test verifies that the WM_QUIT exit works as expected
+ // even if it happens to immediately follow a kMsgHaveWork in the queue.
+
+ RunLoop run_loop;
+
+ // This application task will enter the subpump.
+ executor.task_runner()->PostTask(
+ FROM_HERE, BindOnce(&SubPumpFunc, run_loop.QuitClosure()));
+
+ // This application task will post a native WM_QUIT.
+ executor.task_runner()->PostTask(FROM_HERE, BindOnce(&::PostQuitMessage, 0));
+
+ // The presence of this application task means that the pump will see a
+ // non-empty queue after processing the previous application task (which
+ // posted the WM_QUIT) and hence will repost a kMsgHaveWork message in the
+ // native event queue. Without the fix to https://crbug.com/888559, this would
+ // previously result in the subpump processing kMsgHaveWork and it stealing
+ // the WM_QUIT message, leaving the test hung in the subpump.
+ executor.task_runner()->PostTask(FROM_HERE, DoNothing());
+
+ // Test success is determined by not hanging in this Run() call.
+ run_loop.Run();
+}
+
+TEST(SingleThreadTaskExecutorTest,
+ RepostingWmQuitDoesntStarveUpcomingNativeLoop) {
+ SingleThreadTaskExecutor executor(MessagePumpType::UI);
+
+ // This test ensures that application tasks are being processed by the native
+ // subpump despite the kMsgHaveWork event having already been consumed by the
+ // time the subpump is entered. This is subtly enforced by
+ // MessageLoopCurrent::ScopedAllowApplicationTasksInNativeNestedLoop which
+ // will ScheduleWork() upon construction (and if it's absent, the
+ // SingleThreadTaskExecutor shouldn't process application tasks so
+ // kMsgHaveWork is irrelevant). Note: This test also fails prior to the fix
+ // for https://crbug.com/888559 (in fact, the last two tasks are sufficient as
+ // a regression test), probably because of a dangling kMsgHaveWork recreating
+ // the effect from
+ // SingleThreadTaskExecutorTest.NativeMsgProcessingDoesntStealWmQuit.
+
+ RunLoop run_loop;
+
+ // This application task will post a native WM_QUIT which will be ignored
+ // by the main message pump.
+ executor.task_runner()->PostTask(FROM_HERE, BindOnce(&::PostQuitMessage, 0));
+
+ // Make sure the pump does a few extra cycles and processes (ignores) the
+ // WM_QUIT.
+ executor.task_runner()->PostTask(FROM_HERE, DoNothing());
+ executor.task_runner()->PostTask(FROM_HERE, DoNothing());
+
+ // This application task will enter the subpump.
+ executor.task_runner()->PostTask(
+ FROM_HERE, BindOnce(&SubPumpFunc, run_loop.QuitClosure()));
+
+ // Post an application task that will post WM_QUIT to the nested loop. The
+ // test will hang if the subpump doesn't process application tasks as it
+ // should.
+ executor.task_runner()->PostTask(FROM_HERE, BindOnce(&::PostQuitMessage, 0));
+
+ // Test success is determined by not hanging in this Run() call.
+ run_loop.Run();
+}
+
+// TODO(https://crbug.com/890016): Enable once multiple layers of nested loops
+// works.
+TEST(SingleThreadTaskExecutorTest,
+ DISABLED_UnwindingMultipleSubPumpsDoesntStarveApplicationTasks) {
+ SingleThreadTaskExecutor executor(MessagePumpType::UI);
+
+ // Regression test for https://crbug.com/890016.
+ // Tests that the subpump is still processing application tasks after
+ // unwinding from nested subpumps (i.e. that they didn't consume the last
+ // kMsgHaveWork).
+
+ RunLoop run_loop;
+
+ // Enter multiple levels of nested subpumps.
+ executor.task_runner()->PostTask(
+ FROM_HERE, BindOnce(&SubPumpFunc, run_loop.QuitClosure()));
+ executor.task_runner()->PostTask(FROM_HERE,
+ BindOnce(&SubPumpFunc, DoNothing::Once()));
+ executor.task_runner()->PostTask(FROM_HERE,
+ BindOnce(&SubPumpFunc, DoNothing::Once()));
+
+ // Quit two layers (with tasks in between to allow each quit to be handled
+ // before continuing -- ::PostQuitMessage() sets a bit, it's not a real queued
+ // message :
+ // https://blogs.msdn.microsoft.com/oldnewthing/20051104-33/?p=33453).
+ executor.task_runner()->PostTask(FROM_HERE, BindOnce(&::PostQuitMessage, 0));
+ executor.task_runner()->PostTask(FROM_HERE, DoNothing());
+ executor.task_runner()->PostTask(FROM_HERE, DoNothing());
+ executor.task_runner()->PostTask(FROM_HERE, BindOnce(&::PostQuitMessage, 0));
+ executor.task_runner()->PostTask(FROM_HERE, DoNothing());
+ executor.task_runner()->PostTask(FROM_HERE, DoNothing());
+
+ // This task must still be processed by the first (outermost) subpump after
+ // the two inner subpumps have unwound.
+ bool last_task_ran = false;
+ executor.task_runner()->PostTask(
+ FROM_HERE, BindOnce([](bool* to_set) { *to_set = true; },
+ Unretained(&last_task_ran)));
+
+ executor.task_runner()->PostTask(FROM_HERE, BindOnce(&::PostQuitMessage, 0));
+
+ run_loop.Run();
+
+ EXPECT_TRUE(last_task_ran);
+}
+
+namespace {
+
+// A side effect of this test is the generation of a beep. Sorry.
+// Drives RecursiveFuncWin from a worker thread with is_reentrant=false and
+// verifies the resulting task interleaving on the main thread.
+void RunTest_NestingDenial2(MessagePumpType message_pump_type) {
+ SingleThreadTaskExecutor executor(message_pump_type);
+
+ Thread worker("NestingDenial2_worker");
+ Thread::Options options;
+ options.message_pump_type = message_pump_type;
+ ASSERT_EQ(true, worker.StartWithOptions(options));
+ TaskList order;
+ win::ScopedHandle event(CreateEvent(NULL, FALSE, FALSE, NULL));
+ worker.task_runner()->PostTask(
+ FROM_HERE, BindOnce(&RecursiveFuncWin, ThreadTaskRunnerHandle::Get(),
+ event.Get(), true, &order, false));
+ // Let the other thread execute.
+ WaitForSingleObject(event.Get(), INFINITE);
+ RunLoop().Run();
+
+ ASSERT_EQ(17u, order.Size());
+ EXPECT_EQ(order.Get(0), TaskItem(RECURSIVE, 1, true));
+ EXPECT_EQ(order.Get(1), TaskItem(RECURSIVE, 1, false));
+ EXPECT_EQ(order.Get(2), TaskItem(MESSAGEBOX, 2, true));
+ EXPECT_EQ(order.Get(3), TaskItem(MESSAGEBOX, 2, false));
+ EXPECT_EQ(order.Get(4), TaskItem(RECURSIVE, 3, true));
+ EXPECT_EQ(order.Get(5), TaskItem(RECURSIVE, 3, false));
+ // When EndDialogFunc is processed, the window is already dismissed, hence no
+ // "end" entry.
+ EXPECT_EQ(order.Get(6), TaskItem(ENDDIALOG, 4, true));
+ EXPECT_EQ(order.Get(7), TaskItem(QUITMESSAGELOOP, 5, true));
+ EXPECT_EQ(order.Get(8), TaskItem(QUITMESSAGELOOP, 5, false));
+ EXPECT_EQ(order.Get(9), TaskItem(RECURSIVE, 1, true));
+ EXPECT_EQ(order.Get(10), TaskItem(RECURSIVE, 1, false));
+ EXPECT_EQ(order.Get(11), TaskItem(RECURSIVE, 3, true));
+ EXPECT_EQ(order.Get(12), TaskItem(RECURSIVE, 3, false));
+ EXPECT_EQ(order.Get(13), TaskItem(RECURSIVE, 1, true));
+ EXPECT_EQ(order.Get(14), TaskItem(RECURSIVE, 1, false));
+ EXPECT_EQ(order.Get(15), TaskItem(RECURSIVE, 3, true));
+ EXPECT_EQ(order.Get(16), TaskItem(RECURSIVE, 3, false));
+}
+
+} // namespace
+
+// This test occasionally hangs, would need to be turned into an
+// interactive_ui_test, see http://crbug.com/44567.
+TEST(SingleThreadTaskExecutorTest, DISABLED_NestingDenial2) {
+ // Exercise all three pump types through the shared helper above.
+ RunTest_NestingDenial2(MessagePumpType::DEFAULT);
+ RunTest_NestingDenial2(MessagePumpType::UI);
+ RunTest_NestingDenial2(MessagePumpType::IO);
+}
+
+// A side effect of this test is the generation of a beep. Sorry. This test
+// also needs to process windows messages on the current thread.
+TEST(SingleThreadTaskExecutorTest, NestingSupport2) {
+ SingleThreadTaskExecutor executor(MessagePumpType::UI);
+
+ Thread worker("NestingSupport2_worker");
+ Thread::Options options;
+ options.message_pump_type = MessagePumpType::UI;
+ ASSERT_EQ(true, worker.StartWithOptions(options));
+ TaskList order;
+ win::ScopedHandle event(CreateEvent(NULL, FALSE, FALSE, NULL));
+ // Unlike NestingDenial2, is_reentrant is true here, so recursive tasks may
+ // run inside the MessageBox modal loop.
+ worker.task_runner()->PostTask(
+ FROM_HERE, BindOnce(&RecursiveFuncWin, ThreadTaskRunnerHandle::Get(),
+ event.Get(), false, &order, true));
+ // Let the other thread execute.
+ WaitForSingleObject(event.Get(), INFINITE);
+ RunLoop().Run();
+
+ ASSERT_EQ(18u, order.Size());
+ EXPECT_EQ(order.Get(0), TaskItem(RECURSIVE, 1, true));
+ EXPECT_EQ(order.Get(1), TaskItem(RECURSIVE, 1, false));
+ EXPECT_EQ(order.Get(2), TaskItem(MESSAGEBOX, 2, true));
+ // Note that this executes in the MessageBox modal loop.
+ EXPECT_EQ(order.Get(3), TaskItem(RECURSIVE, 3, true));
+ EXPECT_EQ(order.Get(4), TaskItem(RECURSIVE, 3, false));
+ EXPECT_EQ(order.Get(5), TaskItem(ENDDIALOG, 4, true));
+ EXPECT_EQ(order.Get(6), TaskItem(ENDDIALOG, 4, false));
+ EXPECT_EQ(order.Get(7), TaskItem(MESSAGEBOX, 2, false));
+ /* The order can subtly change here. The reason is that when RecursiveFunc(1)
+ is called in the main thread, if it is faster than getting to the
+ PostTask(FROM_HERE, BindOnce(&QuitFunc) execution, the order of task
+ execution can change. We don't care anyway that the order isn't correct.
+ EXPECT_EQ(order.Get(8), TaskItem(QUITMESSAGELOOP, 5, true));
+ EXPECT_EQ(order.Get(9), TaskItem(QUITMESSAGELOOP, 5, false));
+ EXPECT_EQ(order.Get(10), TaskItem(RECURSIVE, 1, true));
+ EXPECT_EQ(order.Get(11), TaskItem(RECURSIVE, 1, false));
+ */
+ EXPECT_EQ(order.Get(12), TaskItem(RECURSIVE, 3, true));
+ EXPECT_EQ(order.Get(13), TaskItem(RECURSIVE, 3, false));
+ EXPECT_EQ(order.Get(14), TaskItem(RECURSIVE, 1, true));
+ EXPECT_EQ(order.Get(15), TaskItem(RECURSIVE, 1, false));
+ EXPECT_EQ(order.Get(16), TaskItem(RECURSIVE, 3, true));
+ EXPECT_EQ(order.Get(17), TaskItem(RECURSIVE, 3, false));
+}
+
+#endif // defined(OS_WIN)
+
+#if defined(OS_WIN)
+TEST(SingleThreadTaskExecutorTest, IOHandler) {
+ RunTest_IOHandler();
+}
+
+TEST(SingleThreadTaskExecutorTest, WaitForIO) {
+ RunTest_WaitForIO();
+}
+
+TEST(SingleThreadTaskExecutorTest, HighResolutionTimer) {
+ SingleThreadTaskExecutor executor;
+ Time::EnableHighResolutionTimer(true);
+
+ constexpr TimeDelta kFastTimer = TimeDelta::FromMilliseconds(5);
+ constexpr TimeDelta kSlowTimer = TimeDelta::FromMilliseconds(100);
+
+ {
+ // Post a fast task to enable the high resolution timers.
+ RunLoop run_loop;
+ executor.task_runner()->PostDelayedTask(
+ FROM_HERE,
+ BindOnce(
+ [](RunLoop* run_loop) {
+ EXPECT_TRUE(Time::IsHighResolutionTimerInUse());
+ run_loop->QuitWhenIdle();
+ },
+ &run_loop),
+ kFastTimer);
+ run_loop.Run();
+ }
+ EXPECT_FALSE(Time::IsHighResolutionTimerInUse());
+ {
+ // Check that a slow task does not trigger the high resolution logic.
+ RunLoop run_loop;
+ executor.task_runner()->PostDelayedTask(
+ FROM_HERE,
+ BindOnce(
+ [](RunLoop* run_loop) {
+ EXPECT_FALSE(Time::IsHighResolutionTimerInUse());
+ run_loop->QuitWhenIdle();
+ },
+ &run_loop),
+ kSlowTimer);
+ run_loop.Run();
+ }
+ Time::EnableHighResolutionTimer(false);
+ Time::ResetHighResolutionTimerUsage();
+}
+
+#endif // defined(OS_WIN)
+
+namespace {
+// Inject a test point for recording the destructor calls for Closure objects
+// sent to MessageLoop::PostTask(). It is awkward usage since we are trying to
+// hook the actual destruction, which is not a common operation.
+class DestructionObserverProbe : public RefCounted<DestructionObserverProbe> {
+ public:
+ DestructionObserverProbe(bool* task_destroyed,
+ bool* destruction_observer_called)
+ : task_destroyed_(task_destroyed),
+ destruction_observer_called_(destruction_observer_called) {}
+ virtual void Run() {
+ // This task should never run.
+ ADD_FAILURE();
+ }
+
+ private:
+ friend class RefCounted<DestructionObserverProbe>;
+
+ virtual ~DestructionObserverProbe() {
+ EXPECT_FALSE(*destruction_observer_called_);
+ *task_destroyed_ = true;
+ }
+
+ bool* task_destroyed_;
+ bool* destruction_observer_called_;
+};
+
+class MLDestructionObserver : public MessageLoopCurrent::DestructionObserver {
+ public:
+ MLDestructionObserver(bool* task_destroyed, bool* destruction_observer_called)
+ : task_destroyed_(task_destroyed),
+ destruction_observer_called_(destruction_observer_called),
+ task_destroyed_before_message_loop_(false) {}
+ void WillDestroyCurrentMessageLoop() override {
+ task_destroyed_before_message_loop_ = *task_destroyed_;
+ *destruction_observer_called_ = true;
+ }
+ bool task_destroyed_before_message_loop() const {
+ return task_destroyed_before_message_loop_;
+ }
+
+ private:
+ bool* task_destroyed_;
+ bool* destruction_observer_called_;
+ bool task_destroyed_before_message_loop_;
+};
+
+} // namespace
+
+TEST(SingleThreadTaskExecutorTest, DestructionObserverTest) {
+ // Verify that the destruction observer gets called at the very end (after
+ // all the pending tasks have been destroyed).
+ auto executor = std::make_unique<SingleThreadTaskExecutor>();
+ const TimeDelta kDelay = TimeDelta::FromMilliseconds(100);
+
+ bool task_destroyed = false;
+ bool destruction_observer_called = false;
+
+ MLDestructionObserver observer(&task_destroyed, &destruction_observer_called);
+ MessageLoopCurrent::Get()->AddDestructionObserver(&observer);
+ executor->task_runner()->PostDelayedTask(
+ FROM_HERE,
+ BindOnce(&DestructionObserverProbe::Run,
+ base::MakeRefCounted<DestructionObserverProbe>(
+ &task_destroyed, &destruction_observer_called)),
+ kDelay);
+ executor.reset();
+ EXPECT_TRUE(observer.task_destroyed_before_message_loop());
+ // The task should have been destroyed when we deleted the loop.
+ EXPECT_TRUE(task_destroyed);
+ EXPECT_TRUE(destruction_observer_called);
+}
+
+// Verify that SingleThreadTaskExecutor sets ThreadMainTaskRunner::current() and
+// it posts tasks on that message loop.
+TEST(SingleThreadTaskExecutorTest, ThreadMainTaskRunner) {
+ SingleThreadTaskExecutor executor;
+
+ scoped_refptr<Foo> foo(new Foo());
+ std::string a("a");
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, BindOnce(&Foo::Test1ConstRef, foo, a));
+
+ // Post quit task.
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, BindOnce(&RunLoop::QuitCurrentWhenIdleDeprecated));
+
+ // Now kick things off
+ RunLoop().Run();
+
+ EXPECT_EQ(foo->test_count(), 1);
+ EXPECT_EQ(foo->result(), "a");
+}
+
+TEST(SingleThreadTaskExecutorTest, type) {
+ SingleThreadTaskExecutor executor(MessagePumpType::UI);
+ EXPECT_EQ(executor.type(), MessagePumpType::UI);
+}
+
+#if defined(OS_WIN)
+void EmptyFunction() {}
+
+void PostMultipleTasks() {
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ base::BindOnce(&EmptyFunction));
+ ThreadTaskRunnerHandle::Get()->PostTask(FROM_HERE,
+ base::BindOnce(&EmptyFunction));
+}
+
+static const int kSignalMsg = WM_USER + 2;
+
+void PostWindowsMessage(HWND message_hwnd) {
+ PostMessage(message_hwnd, kSignalMsg, 0, 2);
+}
+
+void EndTest(bool* did_run, HWND hwnd) {
+ *did_run = true;
+ PostMessage(hwnd, WM_CLOSE, 0, 0);
+}
+
+int kMyMessageFilterCode = 0x5002;
+
+LRESULT CALLBACK TestWndProcThunk(HWND hwnd,
+ UINT message,
+ WPARAM wparam,
+ LPARAM lparam) {
+ if (message == WM_CLOSE)
+ EXPECT_TRUE(DestroyWindow(hwnd));
+ if (message != kSignalMsg)
+ return DefWindowProc(hwnd, message, wparam, lparam);
+
+ switch (lparam) {
+ case 1:
+ // First, we post a task that will post multiple no-op tasks to make sure
+ // that the pump's incoming task queue does not become empty during the
+ // test.
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, base::BindOnce(&PostMultipleTasks));
+ // Next, we post a task that posts a windows message to trigger the second
+ // stage of the test.
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, base::BindOnce(&PostWindowsMessage, hwnd));
+ break;
+ case 2:
+ // Since we're about to enter a modal loop, tell the message loop that we
+ // intend to nest tasks.
+ MessageLoopCurrent::ScopedAllowApplicationTasksInNativeNestedLoop
+ allow_nestable_tasks;
+ bool did_run = false;
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, base::BindOnce(&EndTest, &did_run, hwnd));
+ // Run a nested windows-style message loop and verify that our task runs.
+ // If it doesn't, then we'll loop here until the test times out.
+ MSG msg;
+ while (GetMessage(&msg, 0, 0, 0)) {
+ if (!CallMsgFilter(&msg, kMyMessageFilterCode))
+ DispatchMessage(&msg);
+ // If this message is a WM_CLOSE, explicitly exit the modal loop.
+ // Posting a WM_QUIT should handle this, but unfortunately
+ // MessagePumpWin eats WM_QUIT messages even when running inside a modal
+ // loop.
+ if (msg.message == WM_CLOSE)
+ break;
+ }
+ EXPECT_TRUE(did_run);
+ RunLoop::QuitCurrentWhenIdleDeprecated();
+ break;
+ }
+ return 0;
+}
+
+TEST(SingleThreadTaskExecutorTest, AlwaysHaveUserMessageWhenNesting) {
+ SingleThreadTaskExecutor executor(MessagePumpType::UI);
+ HINSTANCE instance = CURRENT_MODULE();
+ WNDCLASSEX wc = {0};
+ wc.cbSize = sizeof(wc);
+ wc.lpfnWndProc = TestWndProcThunk;
+ wc.hInstance = instance;
+ wc.lpszClassName = L"SingleThreadTaskExecutorTest_HWND";
+ ATOM atom = RegisterClassEx(&wc);
+ ASSERT_TRUE(atom);
+
+ HWND message_hwnd = CreateWindow(MAKEINTATOM(atom), 0, 0, 0, 0, 0, 0,
+ HWND_MESSAGE, 0, instance, 0);
+ ASSERT_TRUE(message_hwnd) << GetLastError();
+
+ ASSERT_TRUE(PostMessage(message_hwnd, kSignalMsg, 0, 1));
+
+ RunLoop().Run();
+
+ ASSERT_TRUE(UnregisterClass(MAKEINTATOM(atom), instance));
+}
+#endif // defined(OS_WIN)
+
+// Verify that tasks posted to and code running in the scope of the same
+// SingleThreadTaskExecutor access the same SequenceLocalStorage values.
+TEST(SingleThreadTaskExecutorTest, SequenceLocalStorageSetGet) {
+ SingleThreadTaskExecutor executor;
+
+ SequenceLocalStorageSlot<int> slot;
+
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, BindLambdaForTesting([&]() { slot.emplace(11); }));
+
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, BindLambdaForTesting([&]() { EXPECT_EQ(*slot, 11); }));
+
+ RunLoop().RunUntilIdle();
+ EXPECT_EQ(*slot, 11);
+}
+
+// Verify that tasks posted to and code running in different MessageLoops access
+// different SequenceLocalStorage values.
+TEST(SingleThreadTaskExecutorTest, SequenceLocalStorageDifferentMessageLoops) {
+ SequenceLocalStorageSlot<int> slot;
+
+ {
+ SingleThreadTaskExecutor executor;
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, BindLambdaForTesting([&]() { slot.emplace(11); }));
+
+ RunLoop().RunUntilIdle();
+ EXPECT_EQ(*slot, 11);
+ }
+
+ SingleThreadTaskExecutor executor;
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, BindLambdaForTesting([&]() { EXPECT_FALSE(slot); }));
+
+ RunLoop().RunUntilIdle();
+ EXPECT_NE(slot.GetOrCreateValue(), 11);
+}
+
+namespace {
+
+class PostTaskOnDestroy {
+ public:
+ PostTaskOnDestroy(int times) : times_remaining_(times) {}
+ ~PostTaskOnDestroy() { PostTaskWithPostingDestructor(times_remaining_); }
+
+ // Post a task that will repost itself on destruction |times| times.
+ static void PostTaskWithPostingDestructor(int times) {
+ if (times > 0) {
+ ThreadTaskRunnerHandle::Get()->PostTask(
+ FROM_HERE, BindOnce([](std::unique_ptr<PostTaskOnDestroy>) {},
+ std::make_unique<PostTaskOnDestroy>(times - 1)));
+ }
+ }
+
+ private:
+ const int times_remaining_;
+
+ DISALLOW_COPY_AND_ASSIGN(PostTaskOnDestroy);
+};
+
+} // namespace
+
+// Test that SingleThreadTaskExecutor destruction handles a task's destructor
+// posting another task.
+TEST(SingleThreadTaskExecutorDestructionTest,
+ DestroysFineWithPostTaskOnDestroy) {
+ SingleThreadTaskExecutor executor;
+
+ PostTaskOnDestroy::PostTaskWithPostingDestructor(10);
+}
+
} // namespace base
diff --git a/chromium/base/task/task_traits.h b/chromium/base/task/task_traits.h
index ac2e3a89a2d..67b8cc23ae6 100644
--- a/chromium/base/task/task_traits.h
+++ b/chromium/base/task/task_traits.h
@@ -13,7 +13,7 @@
#include <utility>
#include "base/base_export.h"
-#include "base/logging.h"
+#include "base/check_op.h"
#include "base/task/task_traits_extension.h"
#include "base/traits_bag.h"
#include "build/build_config.h"
diff --git a/chromium/base/task/thread_pool/job_task_source.cc b/chromium/base/task/thread_pool/job_task_source.cc
index c86e3e0f118..5ff698c7b78 100644
--- a/chromium/base/task/thread_pool/job_task_source.cc
+++ b/chromium/base/task/thread_pool/job_task_source.cc
@@ -117,9 +117,7 @@ JobTaskSource::JoinFlag::JoinFlag() = default;
JobTaskSource::JoinFlag::~JoinFlag() = default;
void JobTaskSource::JoinFlag::SetWaiting() {
- const auto previous_value =
- value_.exchange(kWaitingForWorkerToYield, std::memory_order_relaxed);
- DCHECK(previous_value == kNotWaiting);
+ value_.store(kWaitingForWorkerToYield, std::memory_order_relaxed);
}
bool JobTaskSource::JoinFlag::ShouldWorkerYield() {
@@ -215,6 +213,7 @@ void JobTaskSource::Cancel(TaskSource::Transaction* transaction) {
bool JobTaskSource::WaitForParticipationOpportunity() {
CheckedAutoLock auto_lock(lock_);
+ DCHECK(!join_flag_.IsWaiting());
// std::memory_order_relaxed is sufficient because no other state is
// synchronized with |state_| outside of |lock_|.
diff --git a/chromium/base/task/thread_pool/job_task_source.h b/chromium/base/task/thread_pool/job_task_source.h
index e7e578db590..b043f3269a1 100644
--- a/chromium/base/task/thread_pool/job_task_source.h
+++ b/chromium/base/task/thread_pool/job_task_source.h
@@ -150,6 +150,12 @@ class BASE_EXPORT JobTaskSource : public TaskSource {
JoinFlag();
~JoinFlag();
+ // Returns true if the status is not kNotWaiting, using
+ // std::memory_order_relaxed.
+ bool IsWaiting() {
+ return value_.load(std::memory_order_relaxed) != kNotWaiting;
+ }
+
// Sets the status as kWaitingForWorkerToYield using
// std::memory_order_relaxed.
void SetWaiting();
diff --git a/chromium/base/task/thread_pool/service_thread_unittest.cc b/chromium/base/task/thread_pool/service_thread_unittest.cc
index 7b3d3c21031..d5b536a684f 100644
--- a/chromium/base/task/thread_pool/service_thread_unittest.cc
+++ b/chromium/base/task/thread_pool/service_thread_unittest.cc
@@ -8,6 +8,7 @@
#include "base/bind.h"
#include "base/debug/stack_trace.h"
+#include "base/logging.h"
#include "base/task/thread_pool/thread_pool_impl.h"
#include "base/task/thread_pool/thread_pool_instance.h"
#include "base/test/metrics/histogram_tester.h"
diff --git a/chromium/base/task/thread_pool/task_tracker.cc b/chromium/base/task/thread_pool/task_tracker.cc
index 1b02bf0f75a..06311fc3c96 100644
--- a/chromium/base/task/thread_pool/task_tracker.cc
+++ b/chromium/base/task/thread_pool/task_tracker.cc
@@ -13,10 +13,12 @@
#include "base/command_line.h"
#include "base/compiler_specific.h"
#include "base/json/json_writer.h"
+#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/histogram_macros.h"
#include "base/optional.h"
#include "base/sequence_token.h"
+#include "base/strings/string_util.h"
#include "base/synchronization/condition_variable.h"
#include "base/task/scoped_set_task_priority_for_current_thread.h"
#include "base/task/task_executor.h"
@@ -25,7 +27,7 @@
#include "base/threading/thread_restrictions.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/time/time.h"
-#include "base/trace_event/trace_event.h"
+#include "base/trace_event/base_tracing.h"
#include "base/values.h"
#include "build/build_config.h"
diff --git a/chromium/base/task/thread_pool/task_tracker.h b/chromium/base/task/thread_pool/task_tracker.h
index ab19ad3bb03..eebd7adde42 100644
--- a/chromium/base/task/thread_pool/task_tracker.h
+++ b/chromium/base/task/thread_pool/task_tracker.h
@@ -14,7 +14,6 @@
#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/callback_forward.h"
-#include "base/logging.h"
#include "base/macros.h"
#include "base/metrics/histogram_base.h"
#include "base/sequence_checker.h"
diff --git a/chromium/base/task/thread_pool/task_tracker_posix.h b/chromium/base/task/thread_pool/task_tracker_posix.h
index 8f59d5368f3..c507004932b 100644
--- a/chromium/base/task/thread_pool/task_tracker_posix.h
+++ b/chromium/base/task/thread_pool/task_tracker_posix.h
@@ -8,7 +8,6 @@
#include <memory>
#include "base/base_export.h"
-#include "base/logging.h"
#include "base/macros.h"
#include "base/message_loop/message_pump_type.h"
#include "base/task/thread_pool/task_tracker.h"
diff --git a/chromium/base/task/thread_pool/thread_group_impl.h b/chromium/base/task/thread_pool/thread_group_impl.h
index 26f7da99dc3..3431be2f298 100644
--- a/chromium/base/task/thread_pool/thread_group_impl.h
+++ b/chromium/base/task/thread_pool/thread_group_impl.h
@@ -12,10 +12,10 @@
#include <vector>
#include "base/base_export.h"
+#include "base/check_op.h"
#include "base/compiler_specific.h"
#include "base/containers/stack.h"
#include "base/gtest_prod_util.h"
-#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/optional.h"
diff --git a/chromium/base/task/thread_pool/thread_pool_impl.h b/chromium/base/task/thread_pool/thread_pool_impl.h
index 57a23fd0e0e..b7b4ac73f67 100644
--- a/chromium/base/task/thread_pool/thread_pool_impl.h
+++ b/chromium/base/task/thread_pool/thread_pool_impl.h
@@ -10,7 +10,7 @@
#include "base/base_export.h"
#include "base/callback.h"
-#include "base/logging.h"
+#include "base/check_op.h"
#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "base/memory/ref_counted.h"
diff --git a/chromium/base/task/thread_pool/tracked_ref.h b/chromium/base/task/thread_pool/tracked_ref.h
index 3b398f1a63e..b36e8e71430 100644
--- a/chromium/base/task/thread_pool/tracked_ref.h
+++ b/chromium/base/task/thread_pool/tracked_ref.h
@@ -8,8 +8,8 @@
#include <memory>
#include "base/atomic_ref_count.h"
+#include "base/check.h"
#include "base/gtest_prod_util.h"
-#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "base/synchronization/waitable_event.h"
diff --git a/chromium/base/task/thread_pool/worker_thread.cc b/chromium/base/task/thread_pool/worker_thread.cc
index 79e5105b933..429838cf7df 100644
--- a/chromium/base/task/thread_pool/worker_thread.cc
+++ b/chromium/base/task/thread_pool/worker_thread.cc
@@ -16,7 +16,7 @@
#include "base/task/thread_pool/worker_thread_observer.h"
#include "base/threading/hang_watcher.h"
#include "base/time/time_override.h"
-#include "base/trace_event/trace_event.h"
+#include "base/trace_event/base_tracing.h"
#if defined(OS_MACOSX)
#include "base/mac/scoped_nsautorelease_pool.h"
@@ -293,9 +293,8 @@ NOINLINE void WorkerThread::RunBackgroundDedicatedCOMWorker() {
void WorkerThread::RunWorker() {
DCHECK_EQ(self_, this);
- TRACE_EVENT_INSTANT0("thread_pool", "WorkerThreadThread born",
- TRACE_EVENT_SCOPE_THREAD);
- TRACE_EVENT_BEGIN0("thread_pool", "WorkerThreadThread active");
+ TRACE_EVENT_INSTANT0("base", "WorkerThread born", TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_BEGIN0("base", "WorkerThread active");
if (worker_thread_observer_)
worker_thread_observer_->OnWorkerThreadMainEntry();
@@ -317,9 +316,9 @@ void WorkerThread::RunWorker() {
// A WorkerThread starts out waiting for work.
{
- TRACE_EVENT_END0("thread_pool", "WorkerThreadThread active");
+ TRACE_EVENT_END0("base", "WorkerThread active");
delegate_->WaitForWork(&wake_up_event_);
- TRACE_EVENT_BEGIN0("thread_pool", "WorkerThreadThread active");
+ TRACE_EVENT_BEGIN0("base", "WorkerThread active");
}
while (!ShouldExit()) {
@@ -339,10 +338,10 @@ void WorkerThread::RunWorker() {
if (ShouldExit())
break;
- TRACE_EVENT_END0("thread_pool", "WorkerThreadThread active");
+ TRACE_EVENT_END0("base", "WorkerThread active");
hang_watch_scope.reset();
delegate_->WaitForWork(&wake_up_event_);
- TRACE_EVENT_BEGIN0("thread_pool", "WorkerThreadThread active");
+ TRACE_EVENT_BEGIN0("base", "WorkerThread active");
continue;
}
@@ -370,9 +369,8 @@ void WorkerThread::RunWorker() {
// and as such no more member accesses should be made after this point.
self_ = nullptr;
- TRACE_EVENT_END0("thread_pool", "WorkerThreadThread active");
- TRACE_EVENT_INSTANT0("thread_pool", "WorkerThreadThread dead",
- TRACE_EVENT_SCOPE_THREAD);
+ TRACE_EVENT_END0("base", "WorkerThread active");
+ TRACE_EVENT_INSTANT0("base", "WorkerThread dead", TRACE_EVENT_SCOPE_THREAD);
}
} // namespace internal
diff --git a/chromium/base/task_runner.h b/chromium/base/task_runner.h
index b1556d0ede3..d1fdc4b1fd7 100644
--- a/chromium/base/task_runner.h
+++ b/chromium/base/task_runner.h
@@ -10,8 +10,8 @@
#include "base/base_export.h"
#include "base/bind.h"
#include "base/callback.h"
+#include "base/check.h"
#include "base/location.h"
-#include "base/logging.h"
#include "base/memory/ref_counted.h"
#include "base/post_task_and_reply_with_result_internal.h"
#include "base/time/time.h"
diff --git a/chromium/base/task_runner_util.h b/chromium/base/task_runner_util.h
index 79e86a3f60f..cb96b28f2c0 100644
--- a/chromium/base/task_runner_util.h
+++ b/chromium/base/task_runner_util.h
@@ -10,7 +10,7 @@
#include "base/bind.h"
#include "base/callback.h"
-#include "base/logging.h"
+#include "base/check.h"
#include "base/post_task_and_reply_with_result_internal.h"
#include "base/task_runner.h"
diff --git a/chromium/base/test/BUILD.gn b/chromium/base/test/BUILD.gn
index 86b11128e51..4d8906aa2dd 100644
--- a/chromium/base/test/BUILD.gn
+++ b/chromium/base/test/BUILD.gn
@@ -5,6 +5,7 @@
import("//build/compiled_action.gni")
import("//build/config/nacl/config.gni")
import("//build/config/ui.gni")
+import("//build_overrides/build.gni")
# Reset sources_assignment_filter for the BUILD.gn file to prevent
# regression during the migration of Chromium away from the feature.
@@ -46,13 +47,14 @@ static_library("test_support") {
"../task/sequence_manager/test/test_task_time_observer.h",
"../timer/mock_timer.cc",
"../timer/mock_timer.h",
- "../trace_event/trace_config_memory_test_util.h",
"bind_test_util.cc",
"bind_test_util.h",
"copy_only_int.cc",
"copy_only_int.h",
"gmock_callback_support.h",
"gmock_move_support.h",
+ "gtest_links.cc",
+ "gtest_links.h",
"gtest_util.cc",
"gtest_util.h",
"gtest_xml_unittest_result_printer.cc",
@@ -138,10 +140,6 @@ static_library("test_support") {
"test_waitable_event.h",
"thread_test_helper.cc",
"thread_test_helper.h",
- "trace_event_analyzer.cc",
- "trace_event_analyzer.h",
- "trace_to_file.cc",
- "trace_to_file.h",
"values_test_util.cc",
"values_test_util.h",
"with_feature_override.cc",
@@ -281,8 +279,6 @@ static_library("test_support") {
"test_file_util_posix.cc",
"test_suite.cc",
"test_suite.h",
- "trace_to_file.cc",
- "trace_to_file.h",
]
public_deps -= [ "//base:i18n" ]
deps -= [
@@ -303,6 +299,20 @@ static_library("test_support") {
"multiprocess_test.cc",
]
}
+
+ if (enable_base_tracing) {
+ sources += [
+ "../trace_event/trace_config_memory_test_util.h",
+ "trace_event_analyzer.cc",
+ "trace_event_analyzer.h",
+ ]
+ if (!is_nacl_nonsfi) {
+ sources += [
+ "trace_to_file.cc",
+ "trace_to_file.h",
+ ]
+ }
+ }
}
config("base_test_implementation") {
@@ -429,10 +439,7 @@ if (is_linux) {
# be listed in deps, not data_deps (https://crbug.com/919422).
deps = [ "//third_party/test_fonts" ]
args = []
- outputs = [
- "$root_out_dir/fontconfig_caches/fb5c91b2895aa445d23aebf7f9e2189c-le64.cache-7",
- "$root_out_dir/test_fonts/.uuid",
- ]
+ outputs = [ "$root_out_dir/fontconfig_caches/fb5c91b2895aa445d23aebf7f9e2189c-le64.cache-7" ]
}
}
}
diff --git a/chromium/base/test/OWNERS b/chromium/base/test/OWNERS
index 08d2b4c340d..11bba8aa90f 100644
--- a/chromium/base/test/OWNERS
+++ b/chromium/base/test/OWNERS
@@ -11,5 +11,9 @@ per-file *_win*=file://base/win/OWNERS
per-file *android*=file://base/test/android/OWNERS
per-file BUILD.gn=file://base/test/android/OWNERS
+# For iOS-specific changes:
+per-file *ios*=file://base/test/ios/OWNERS
+per-file BUILD.gn=file://base/test/ios/OWNERS
+
# Linux fontconfig changes
per-file *fontconfig*=file://base/nix/OWNERS
diff --git a/chromium/base/test/generate_fontconfig_caches.cc b/chromium/base/test/generate_fontconfig_caches.cc
index cd01d551ef0..84eac2b5c1c 100644
--- a/chromium/base/test/generate_fontconfig_caches.cc
+++ b/chromium/base/test/generate_fontconfig_caches.cc
@@ -25,16 +25,14 @@
// determinism. We have no way of guaranteeing that this produces correct
// results, or even has the intended effect.
int main() {
- // fontconfig generates a random uuid and uses it to match font folders with
- // the font cache. Rather than letting fontconfig generate a random uuid,
- // which introduces build non-determinism, we place a fixed uuid in the font
- // folder, which fontconfig will use to generate the cache.
base::FilePath dir_module;
base::PathService::Get(base::DIR_MODULE, &dir_module);
- base::FilePath uuid_file_path =
- dir_module.Append("test_fonts").Append(".uuid");
- const char uuid[] = "fb5c91b2895aa445d23aebf7f9e2189c";
- WriteFile(uuid_file_path, uuid);
+
+ // This is the MD5 hash of "/test_fonts", which is used as the key of the
+ // fontconfig cache.
+ // $ echo -n /test_fonts | md5sum
+ // fb5c91b2895aa445d23aebf7f9e2189c -
+ static const char kCacheKey[] = "fb5c91b2895aa445d23aebf7f9e2189c";
// fontconfig writes the mtime of the test_fonts directory into the cache. It
// presumably checks this later to ensure that the cache is still up to date.
@@ -61,6 +59,6 @@ int main() {
// Check existence of intended fontconfig cache file.
CHECK(base::PathExists(
- fontconfig_caches.Append(base::StrCat({uuid, "-le64.cache-7"}))));
+ fontconfig_caches.Append(base::StrCat({kCacheKey, "-le64.cache-7"}))));
return 0;
}
diff --git a/chromium/base/test/gtest_links.cc b/chromium/base/test/gtest_links.cc
new file mode 100644
index 00000000000..2174f464744
--- /dev/null
+++ b/chromium/base/test/gtest_links.cc
@@ -0,0 +1,44 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/gtest_links.h"
+
+#include "base/check.h"
+#include "base/strings/string_util.h"
+#include "base/test/gtest_xml_unittest_result_printer.h"
+
+namespace base {
+namespace {
+
+bool IsValidUrl(const std::string& url) {
+ // https://www.ietf.org/rfc/rfc3986.txt
+ std::set<char> valid_characters{'-', '.', '_', '~', ':', '/', '?', '#',
+ '[', ']', '@', '!', '$', '&', '\'', '(',
+ ')', '*', '+', ',', ';', '%', '='};
+ for (const char& c : url) {
+ if (!((c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
+ (c >= '0' && c <= '9') ||
+ valid_characters.find(c) != valid_characters.end()))
+ return false;
+ }
+ return true;
+}
+
+bool IsValidName(const std::string& name) {
+ for (const char& c : name) {
+ if (!(IsAsciiAlpha(c) || IsAsciiDigit(c) || c == '/' || c == '_'))
+ return false;
+ }
+ return true;
+}
+
+} // namespace
+
+void AddLinkToTestResult(const std::string& name, const std::string& url) {
+ DCHECK(IsValidName(name)) << name << " is not a valid name";
+ DCHECK(IsValidUrl(url)) << url << " is not a valid link";
+ XmlUnitTestResultPrinter::Get()->AddLink(name, url);
+}
+
+} // namespace base
diff --git a/chromium/base/test/gtest_links.h b/chromium/base/test/gtest_links.h
new file mode 100644
index 00000000000..bece31ea023
--- /dev/null
+++ b/chromium/base/test/gtest_links.h
@@ -0,0 +1,28 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TEST_GTEST_LINKS_H_
+#define BASE_TEST_GTEST_LINKS_H_
+
+#include <string>
+
+namespace base {
+
+// Add a link in the gtest xml output.
+// Only call this from a gtest test body, on the same thread as the test.
+// Only works on desktop.
+// A test can call this function when the test generates a link and save it
+// as part of the test result.
+// Example: AddLinkToTestResult("image_link",
+// "https://example_googlestorage/test.png") can mean a test generates an image
+// with the url.
+// |name| is the link name. It should be unique in one test case. Name will
+// be displayed on the test result page (Milo). |name| should only contain
+// ascii-letters, ascii-digits, '/' and '_'.
+// |url| the actual url.
+void AddLinkToTestResult(const std::string& name, const std::string& url);
+
+} // namespace base
+
+#endif // BASE_TEST_GTEST_LINKS_H_
diff --git a/chromium/base/test/gtest_links_unittest.cc b/chromium/base/test/gtest_links_unittest.cc
new file mode 100644
index 00000000000..1810ae8de6a
--- /dev/null
+++ b/chromium/base/test/gtest_links_unittest.cc
@@ -0,0 +1,24 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/gtest_links.h"
+
+#include "base/test/gtest_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(GtestLinksTest, AddInvalidLink) {
+ EXPECT_DCHECK_DEATH(AddLinkToTestResult("unique_link", "invalid`"));
+}
+
+TEST(GtestLinksTest, AddInvalidName) {
+ EXPECT_DCHECK_DEATH(AddLinkToTestResult("invalid-name", "http://google.com"));
+}
+
+TEST(GtestLinksTest, AddValidLink) {
+ AddLinkToTestResult("name", "http://google.com");
+}
+
+} // namespace base
diff --git a/chromium/base/test/gtest_util.h b/chromium/base/test/gtest_util.h
index 1db1fae1e2d..443fa1ad829 100644
--- a/chromium/base/test/gtest_util.h
+++ b/chromium/base/test/gtest_util.h
@@ -9,8 +9,8 @@
#include <utility>
#include <vector>
+#include "base/check_op.h"
#include "base/compiler_specific.h"
-#include "base/logging.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -27,15 +27,8 @@
// is part of the error message), but intentionally do not expose the gtest
// death test's full |regex| parameter to avoid users having to verify the exact
// syntax of the error message produced by the DCHECK.
-
-// Official builds will eat stream parameters, so don't check the error message.
-#if defined(OFFICIAL_BUILD) && defined(NDEBUG)
-#define EXPECT_DCHECK_DEATH(statement) EXPECT_DEATH(statement, "")
-#define ASSERT_DCHECK_DEATH(statement) ASSERT_DEATH(statement, "")
-#else
#define EXPECT_DCHECK_DEATH(statement) EXPECT_DEATH(statement, "Check failed")
#define ASSERT_DCHECK_DEATH(statement) ASSERT_DEATH(statement, "Check failed")
-#endif // defined(OFFICIAL_BUILD) && defined(NDEBUG)
#else
// DCHECK_IS_ON() && defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
diff --git a/chromium/base/test/gtest_xml_unittest_result_printer.cc b/chromium/base/test/gtest_xml_unittest_result_printer.cc
index 709450b5329..c7f29ce701d 100644
--- a/chromium/base/test/gtest_xml_unittest_result_printer.cc
+++ b/chromium/base/test/gtest_xml_unittest_result_printer.cc
@@ -8,7 +8,9 @@
#include "base/check.h"
#include "base/command_line.h"
#include "base/files/file_util.h"
+#include "base/strings/string_util.h"
#include "base/test/test_switches.h"
+#include "base/threading/thread_checker.h"
#include "base/time/time.h"
namespace base {
@@ -19,12 +21,29 @@ const int kDefaultTestPartResultsLimit = 10;
const char kTestPartLesultsLimitExceeded[] =
"Test part results limit exceeded. Use --test-launcher-test-part-limit to "
"increase or disable limit.";
+
+std::string EscapeUrl(const std::string& url) {
+ std::string escaped_url;
+ ReplaceChars(url, "&", "&amp;", &escaped_url);
+ ReplaceChars(escaped_url, "<", "&lt;", &escaped_url);
+ ReplaceChars(escaped_url, ">", "&gt;", &escaped_url);
+ ReplaceChars(escaped_url, "'", "&apos;", &escaped_url);
+ ReplaceChars(escaped_url, "\"", "&quot;", &escaped_url);
+ return escaped_url;
+}
+
} // namespace
-XmlUnitTestResultPrinter::XmlUnitTestResultPrinter()
- : output_file_(nullptr), open_failed_(false) {}
+XmlUnitTestResultPrinter* XmlUnitTestResultPrinter::instance_ = nullptr;
+
+XmlUnitTestResultPrinter::XmlUnitTestResultPrinter() {
+ DCHECK_EQ(instance_, nullptr);
+ instance_ = this;
+}
XmlUnitTestResultPrinter::~XmlUnitTestResultPrinter() {
+ DCHECK_EQ(instance_, this);
+ instance_ = nullptr;
if (output_file_ && !open_failed_) {
fprintf(output_file_, "</testsuites>\n");
fflush(output_file_);
@@ -32,6 +51,34 @@ XmlUnitTestResultPrinter::~XmlUnitTestResultPrinter() {
}
}
+XmlUnitTestResultPrinter* XmlUnitTestResultPrinter::Get() {
+ DCHECK(instance_);
+ DCHECK(instance_->thread_checker_.CalledOnValidThread());
+ return instance_;
+}
+
+void XmlUnitTestResultPrinter::AddLink(const std::string& name,
+ const std::string& url) {
+ DCHECK(output_file_);
+ DCHECK(!open_failed_);
+ // Escape the url so it's safe to save in xml file.
+ const std::string escaped_url = EscapeUrl(url);
+ const testing::TestInfo* info =
+ testing::UnitTest::GetInstance()->current_test_info();
+ // When this function is not called from a gtest test body, current_test_info()
+ // will return null, e.g. when called from Chromium itself or the test launcher.
+ // But when that happens, the previous two DCHECKs won't pass, so in
+ // theory it should not be possible to reach here with a null |info|.
+ DCHECK(info);
+
+ fprintf(output_file_,
+ " <link name=\"%s\" classname=\"%s\" "
+ "link_name=\"%s\">%s</link>\n",
+ info->name(), info->test_case_name(), name.c_str(),
+ escaped_url.c_str());
+ fflush(output_file_);
+}
+
bool XmlUnitTestResultPrinter::Initialize(const FilePath& output_file_path) {
DCHECK(!output_file_);
output_file_ = OpenFile(output_file_path, "w");
diff --git a/chromium/base/test/gtest_xml_unittest_result_printer.h b/chromium/base/test/gtest_xml_unittest_result_printer.h
index 93403822cfa..39d7d6d4339 100644
--- a/chromium/base/test/gtest_xml_unittest_result_printer.h
+++ b/chromium/base/test/gtest_xml_unittest_result_printer.h
@@ -9,6 +9,7 @@
#include "base/compiler_specific.h"
#include "base/macros.h"
+#include "base/threading/thread_checker.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
@@ -22,6 +23,13 @@ class XmlUnitTestResultPrinter : public testing::EmptyTestEventListener {
XmlUnitTestResultPrinter();
~XmlUnitTestResultPrinter() override;
+ static XmlUnitTestResultPrinter* Get();
+
+ // Add link in the gtest xml output.
+ // Please see AddLinkToTestResult in gtest_links.h for detailed
+ // explanation and usage.
+ void AddLink(const std::string& name, const std::string& url);
+
// Must be called before adding as a listener. Returns true on success.
bool Initialize(const FilePath& output_file_path) WARN_UNUSED_RESULT;
@@ -44,8 +52,10 @@ class XmlUnitTestResultPrinter : public testing::EmptyTestEventListener {
const std::string& summary,
const std::string& message);
- FILE* output_file_;
- bool open_failed_;
+ static XmlUnitTestResultPrinter* instance_;
+ FILE* output_file_{nullptr};
+ bool open_failed_{false};
+ ThreadChecker thread_checker_;
DISALLOW_COPY_AND_ASSIGN(XmlUnitTestResultPrinter);
};
diff --git a/chromium/base/test/gtest_xml_unittest_result_printer_unittest.cc b/chromium/base/test/gtest_xml_unittest_result_printer_unittest.cc
new file mode 100644
index 00000000000..7743ee4fde8
--- /dev/null
+++ b/chromium/base/test/gtest_xml_unittest_result_printer_unittest.cc
@@ -0,0 +1,51 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/test/gtest_xml_unittest_result_printer.h"
+
+#include "base/base64.h"
+#include "base/command_line.h"
+#include "base/files/file_util.h"
+#include "base/strings/strcat.h"
+#include "base/test/test_switches.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(XmlUnitTestResultPrinterTest, LinkInXmlFile) {
+ XmlUnitTestResultPrinter::Get()->AddLink("unique_link", "http://google.com");
+ std::string file_path =
+ base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
+ switches::kTestLauncherOutput);
+ std::string content;
+ ASSERT_TRUE(
+ base::ReadFileToString(FilePath::FromUTF8Unsafe(file_path), &content));
+ std::string expected_content =
+ base::StrCat({"<link name=\"LinkInXmlFile\" "
+ "classname=\"XmlUnitTestResultPrinterTest\" "
+ "link_name=\"unique_link\">",
+ "http://google.com", "</link>"});
+ EXPECT_TRUE(content.find(expected_content) != std::string::npos)
+ << expected_content << " not found in " << content;
+}
+
+TEST(XmlUnitTestResultPrinterTest, EscapedLinkInXmlFile) {
+ XmlUnitTestResultPrinter::Get()->AddLink(
+ "unique_link", "http://google.com/path?id=\"'<>&\"");
+ std::string file_path =
+ base::CommandLine::ForCurrentProcess()->GetSwitchValueASCII(
+ switches::kTestLauncherOutput);
+ std::string content;
+ ASSERT_TRUE(
+ base::ReadFileToString(FilePath::FromUTF8Unsafe(file_path), &content));
+ std::string expected_content = base::StrCat(
+ {"<link name=\"EscapedLinkInXmlFile\" "
+ "classname=\"XmlUnitTestResultPrinterTest\" "
+ "link_name=\"unique_link\">",
+ "http://google.com/path?id=&quot;&apos;&lt;&gt;&amp;&quot;", "</link>"});
+ EXPECT_TRUE(content.find(expected_content) != std::string::npos)
+ << expected_content << " not found in " << content;
+}
+
+} // namespace base
diff --git a/chromium/base/test/gtest_xml_util.cc b/chromium/base/test/gtest_xml_util.cc
index 1bac5a6b1d2..620d9bafcee 100644
--- a/chromium/base/test/gtest_xml_util.cc
+++ b/chromium/base/test/gtest_xml_util.cc
@@ -32,6 +32,17 @@ static void XmlErrorFunc(void *context, const char *message, ...) {
} // namespace
+struct Link {
+  // The name of the test the link belongs to.
+ std::string name;
+  // The name of the test case (classname) the link belongs to.
+ std::string classname;
+ // The name of the link.
+ std::string link_name;
+ // The actual link.
+ std::string link;
+};
+
bool ProcessGTestOutput(const base::FilePath& output_file,
std::vector<TestResult>* results,
bool* crashed) {
@@ -58,6 +69,8 @@ bool ProcessGTestOutput(const base::FilePath& output_file,
STATE_END,
} state = STATE_INIT;
+ std::vector<Link> links;
+
while (xml_reader.Read()) {
xml_reader.SkipToElement();
std::string node_name(xml_reader.NodeName());
@@ -137,7 +150,26 @@ bool ProcessGTestOutput(const base::FilePath& output_file,
results->pop_back();
}
+ for (const Link& link : links) {
+ if (link.name == test_name && link.classname == test_case_name) {
+ result.AddLink(link.link_name, link.link);
+ }
+ }
+ links.clear();
results->push_back(result);
+ } else if (node_name == "link" && !xml_reader.IsClosingElement()) {
+ Link link;
+ if (!xml_reader.NodeAttribute("name", &link.name))
+ return false;
+ if (!xml_reader.NodeAttribute("classname", &link.classname))
+ return false;
+ if (!xml_reader.NodeAttribute("link_name", &link.link_name))
+ return false;
+ if (!xml_reader.ReadElementContent(&link.link))
+ return false;
+ links.push_back(link);
+ } else if (node_name == "link" && xml_reader.IsClosingElement()) {
+ // Deliberately empty.
} else if (node_name == "failure" && !xml_reader.IsClosingElement()) {
std::string failure_message;
if (!xml_reader.NodeAttribute("message", &failure_message))
diff --git a/chromium/base/test/power_monitor_test_base.cc b/chromium/base/test/power_monitor_test_base.cc
index f37fb579688..6dbc3aef745 100644
--- a/chromium/base/test/power_monitor_test_base.cc
+++ b/chromium/base/test/power_monitor_test_base.cc
@@ -11,14 +11,18 @@
namespace base {
-PowerMonitorTestSource::PowerMonitorTestSource()
- : test_on_battery_power_(false) {
+PowerMonitorTestSource::PowerMonitorTestSource() {
DCHECK(MessageLoopCurrent::Get())
<< "PowerMonitorTestSource requires a MessageLoop.";
}
PowerMonitorTestSource::~PowerMonitorTestSource() = default;
+PowerObserver::DeviceThermalState
+PowerMonitorTestSource::GetCurrentThermalState() {
+ return current_thermal_state_;
+}
+
void PowerMonitorTestSource::GeneratePowerStateEvent(bool on_battery_power) {
test_on_battery_power_ = on_battery_power;
ProcessPowerEvent(POWER_STATE_EVENT);
@@ -39,6 +43,13 @@ bool PowerMonitorTestSource::IsOnBatteryPowerImpl() {
return test_on_battery_power_;
}
+void PowerMonitorTestSource::GenerateThermalThrottlingEvent(
+ PowerObserver::DeviceThermalState new_thermal_state) {
+ ProcessThermalEvent(new_thermal_state);
+ current_thermal_state_ = new_thermal_state;
+ RunLoop().RunUntilIdle();
+}
+
PowerMonitorTestObserver::PowerMonitorTestObserver()
: last_power_state_(false),
power_state_changes_(0),
@@ -62,4 +73,9 @@ void PowerMonitorTestObserver::OnResume() {
resumes_++;
}
+void PowerMonitorTestObserver::OnThermalStateChange(
+ PowerObserver::DeviceThermalState new_state) {
+ last_thermal_state_ = new_state;
+}
+
} // namespace base
diff --git a/chromium/base/test/power_monitor_test_base.h b/chromium/base/test/power_monitor_test_base.h
index 3086bb87496..ac104ff570a 100644
--- a/chromium/base/test/power_monitor_test_base.h
+++ b/chromium/base/test/power_monitor_test_base.h
@@ -14,15 +14,20 @@ class PowerMonitorTestSource : public PowerMonitorSource {
public:
PowerMonitorTestSource();
~PowerMonitorTestSource() override;
+ PowerObserver::DeviceThermalState GetCurrentThermalState() override;
void GeneratePowerStateEvent(bool on_battery_power);
void GenerateSuspendEvent();
void GenerateResumeEvent();
+ void GenerateThermalThrottlingEvent(
+ PowerObserver::DeviceThermalState new_thermal_state);
protected:
bool IsOnBatteryPowerImpl() override;
- bool test_on_battery_power_;
+ bool test_on_battery_power_ = false;
+ PowerObserver::DeviceThermalState current_thermal_state_ =
+ PowerObserver::DeviceThermalState::kUnknown;
};
class PowerMonitorTestObserver : public PowerObserver {
@@ -34,18 +39,24 @@ class PowerMonitorTestObserver : public PowerObserver {
void OnPowerStateChange(bool on_battery_power) override;
void OnSuspend() override;
void OnResume() override;
+ void OnThermalStateChange(
+ PowerObserver::DeviceThermalState new_state) override;
// Test status counts.
bool last_power_state() const { return last_power_state_; }
int power_state_changes() const { return power_state_changes_; }
int suspends() const { return suspends_; }
int resumes() const { return resumes_; }
+ PowerObserver::DeviceThermalState last_thermal_state() const {
+ return last_thermal_state_;
+ }
private:
- bool last_power_state_; // Last power state we were notified of.
+ bool last_power_state_; // Last power state we were notified of.
int power_state_changes_; // Count of OnPowerStateChange notifications.
- int suspends_; // Count of OnSuspend notifications.
- int resumes_; // Count of OnResume notifications.
+ int suspends_; // Count of OnSuspend notifications.
+ int resumes_; // Count of OnResume notifications.
+ PowerObserver::DeviceThermalState last_thermal_state_;
};
} // namespace base
diff --git a/chromium/base/test/scoped_feature_list.cc b/chromium/base/test/scoped_feature_list.cc
index 0e5afdb2e36..dd3dd0ed349 100644
--- a/chromium/base/test/scoped_feature_list.cc
+++ b/chromium/base/test/scoped_feature_list.cc
@@ -271,7 +271,7 @@ void ScopedFeatureList::InitWithFeaturesImpl(
// Restore other field trials. Note: We don't need to do anything for params
// here because the param associator already has the right state, which has
// been backed up via |original_params_| to be restored later.
- FieldTrialList::CreateTrialsFromString(existing_trial_state, {});
+ FieldTrialList::CreateTrialsFromString(existing_trial_state);
OverrideFeatures(current_enabled_features,
FeatureList::OverrideState::OVERRIDE_ENABLE_FEATURE,
diff --git a/chromium/base/test/scoped_run_loop_timeout.cc b/chromium/base/test/scoped_run_loop_timeout.cc
index 5158c5c4c84..cf548973923 100644
--- a/chromium/base/test/scoped_run_loop_timeout.cc
+++ b/chromium/base/test/scoped_run_loop_timeout.cc
@@ -7,6 +7,7 @@
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/location.h"
+#include "base/logging.h"
#include "base/strings/strcat.h"
#include "base/time/time.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/chromium/base/test/test_file_util_win.cc b/chromium/base/test/test_file_util_win.cc
index d2a861b23aa..15f724ea981 100644
--- a/chromium/base/test/test_file_util_win.cc
+++ b/chromium/base/test/test_file_util_win.cc
@@ -36,16 +36,16 @@ struct PermissionInfo {
// |length| is the length of the blob. Zero on failure.
// Returns the blob pointer, or NULL on failure.
void* GetPermissionInfo(const FilePath& path, size_t* length) {
- DCHECK(length != NULL);
+ DCHECK(length);
*length = 0;
- PACL dacl = NULL;
+ PACL dacl = nullptr;
PSECURITY_DESCRIPTOR security_descriptor;
if (GetNamedSecurityInfo(path.value().c_str(), SE_FILE_OBJECT,
- DACL_SECURITY_INFORMATION, NULL, NULL, &dacl, NULL,
- &security_descriptor) != ERROR_SUCCESS) {
- return NULL;
+ DACL_SECURITY_INFORMATION, nullptr, nullptr, &dacl,
+ nullptr, &security_descriptor) != ERROR_SUCCESS) {
+ return nullptr;
}
- DCHECK(dacl != NULL);
+ DCHECK(dacl);
*length = sizeof(PSECURITY_DESCRIPTOR) + dacl->AclSize;
PermissionInfo* info = reinterpret_cast<PermissionInfo*>(new char[*length]);
@@ -68,7 +68,7 @@ bool RestorePermissionInfo(const FilePath& path, void* info, size_t length) {
DWORD rc = SetNamedSecurityInfo(const_cast<wchar_t*>(path.value().c_str()),
SE_FILE_OBJECT, DACL_SECURITY_INFORMATION,
- NULL, NULL, &perm->dacl, NULL);
+ nullptr, nullptr, &perm->dacl, nullptr);
LocalFree(perm->security_descriptor);
char* char_array = reinterpret_cast<char*>(info);
@@ -113,8 +113,8 @@ void SyncPageCacheToDisk() {
bool EvictFileFromSystemCache(const FilePath& file) {
win::ScopedHandle file_handle(
- CreateFile(file.value().c_str(), GENERIC_READ | GENERIC_WRITE, 0, NULL,
- OPEN_EXISTING, FILE_FLAG_NO_BUFFERING, NULL));
+ CreateFile(file.value().c_str(), GENERIC_READ | GENERIC_WRITE, 0, nullptr,
+ OPEN_EXISTING, FILE_FLAG_NO_BUFFERING, nullptr));
if (!file_handle.IsValid())
return false;
@@ -175,9 +175,9 @@ bool MakeFileUnwritable(const FilePath& path) {
}
FilePermissionRestorer::FilePermissionRestorer(const FilePath& path)
- : path_(path), info_(NULL), length_(0) {
+ : path_(path), info_(nullptr), length_(0) {
info_ = GetPermissionInfo(path_, &length_);
- DCHECK(info_ != NULL);
+ DCHECK(info_);
DCHECK_NE(0u, length_);
}
diff --git a/chromium/base/test/test_pending_task.h b/chromium/base/test/test_pending_task.h
index dc8eea1fa2f..d85b209e026 100644
--- a/chromium/base/test/test_pending_task.h
+++ b/chromium/base/test/test_pending_task.h
@@ -10,7 +10,7 @@
#include "base/callback.h"
#include "base/location.h"
#include "base/time/time.h"
-#include "base/trace_event/traced_value.h"
+#include "base/trace_event/base_tracing.h"
namespace base {
diff --git a/chromium/base/test/test_pending_task_unittest.cc b/chromium/base/test/test_pending_task_unittest.cc
index ad7723941e4..9771e236fec 100644
--- a/chromium/base/test/test_pending_task_unittest.cc
+++ b/chromium/base/test/test_pending_task_unittest.cc
@@ -5,13 +5,15 @@
#include "base/test/test_pending_task.h"
#include "base/bind.h"
-#include "base/trace_event/trace_event.h"
+#include "base/trace_event/base_tracing.h"
+#include "base/tracing_buildflags.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest-spi.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
+#if BUILDFLAG(ENABLE_BASE_TRACING)
TEST(TestPendingTaskTest, TraceSupport) {
base::TestPendingTask task;
@@ -23,6 +25,7 @@ TEST(TestPendingTaskTest, TraceSupport) {
task.AsValueInto(&task_value);
EXPECT_THAT(task_value.ToJSON(), ::testing::HasSubstr("post_time"));
}
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
TEST(TestPendingTaskTest, ToString) {
base::TestPendingTask task;
diff --git a/chromium/base/test/test_suite.cc b/chromium/base/test/test_suite.cc
index 7aefd46640a..89046d3f070 100644
--- a/chromium/base/test/test_suite.cc
+++ b/chromium/base/test/test_suite.cc
@@ -42,6 +42,7 @@
#include "base/test/test_timeouts.h"
#include "base/threading/platform_thread.h"
#include "base/time/time.h"
+#include "base/tracing_buildflags.h"
#include "build/build_config.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -590,6 +591,16 @@ void TestSuite::Initialize() {
}
#endif
+#if defined(DCHECK_IS_CONFIGURABLE)
+ // Default the configurable DCHECK level to FATAL when running death tests'
+ // child process, so that they behave as expected.
+ // TODO(crbug.com/1057995): Remove this in favor of the codepath in
+ // FeatureList::SetInstance() when/if OnTestStart() TestEventListeners
+ // are fixed to be invoked in the child process as expected.
+ if (command_line->HasSwitch("gtest_internal_run_death_test"))
+ logging::LOG_DCHECK = logging::LOG_FATAL;
+#endif
+
#if defined(OS_IOS)
InitIOSTestMessageLoop();
#endif // OS_IOS
@@ -650,7 +661,9 @@ void TestSuite::Initialize() {
TestTimeouts::Initialize();
+#if BUILDFLAG(ENABLE_BASE_TRACING)
trace_to_file_.BeginTracingFromCommandLineOptions();
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
debug::StartProfiling(GetProfileName());
diff --git a/chromium/base/test/test_suite.h b/chromium/base/test/test_suite.h
index 372c5f58a8a..ffad1c77d8b 100644
--- a/chromium/base/test/test_suite.h
+++ b/chromium/base/test/test_suite.h
@@ -13,11 +13,19 @@
#include <string>
#include "base/at_exit.h"
-#include "base/logging.h"
+#include "base/check.h"
#include "base/macros.h"
-#include "base/test/trace_to_file.h"
+#include "base/tracing_buildflags.h"
#include "build/build_config.h"
+#if BUILDFLAG(ENABLE_BASE_TRACING)
+#include "base/test/trace_to_file.h"
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
+
+namespace logging {
+class ScopedLogAssertHandler;
+}
+
namespace testing {
class TestInfo;
}
@@ -88,7 +96,9 @@ class TestSuite {
// Basic initialization for the test suite happens here.
void PreInitialize();
+#if BUILDFLAG(ENABLE_BASE_TRACING)
test::TraceToFile trace_to_file_;
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
bool initialized_command_line_ = false;
diff --git a/chromium/base/test/test_switches.cc b/chromium/base/test/test_switches.cc
index ec022ced181..bfbf30cfe50 100644
--- a/chromium/base/test/test_switches.cc
+++ b/chromium/base/test/test_switches.cc
@@ -101,4 +101,7 @@ const char switches::kUiTestActionMaxTimeout[] = "ui-test-action-max-timeout";
// If enabled, runs unittests using the XCTest test runner.
const char switches::kEnableRunIOSUnittestsWithXCTest[] =
"enable-run-ios-unittests-with-xctest";
+// Write the compiled tests json file to a writable location.
+const char switches::kWriteCompiledTestsJsonToWritablePath[] =
+ "write-compiled-tests-json-to-writable-path";
#endif
diff --git a/chromium/base/test/test_switches.h b/chromium/base/test/test_switches.h
index 9e2e627e407..d753deecfdb 100644
--- a/chromium/base/test/test_switches.h
+++ b/chromium/base/test/test_switches.h
@@ -39,6 +39,7 @@ extern const char kUiTestActionMaxTimeout[];
#if defined(OS_IOS)
extern const char kEnableRunIOSUnittestsWithXCTest[];
+extern const char kWriteCompiledTestsJsonToWritablePath[];
#endif
} // namespace switches
diff --git a/chromium/base/test/test_timeouts.cc b/chromium/base/test/test_timeouts.cc
index e77f17569c0..8310f0841c4 100644
--- a/chromium/base/test/test_timeouts.cc
+++ b/chromium/base/test/test_timeouts.cc
@@ -23,16 +23,21 @@ namespace {
// 2) min_value.
// 3) the numerical value given by switch_name on the command line multiplied
// by kTimeoutMultiplier.
-void InitializeTimeout(const char* switch_name, int min_value, int* value) {
+void InitializeTimeout(const char* switch_name,
+ base::TimeDelta min_value,
+ base::TimeDelta* value) {
DCHECK(value);
- int command_line_timeout = 0;
+ base::TimeDelta command_line_timeout;
if (base::CommandLine::ForCurrentProcess()->HasSwitch(switch_name)) {
std::string string_value(base::CommandLine::ForCurrentProcess()->
GetSwitchValueASCII(switch_name));
- if (!base::StringToInt(string_value, &command_line_timeout)) {
+ int command_line_timeout_ms = 0;
+ if (!base::StringToInt(string_value, &command_line_timeout_ms)) {
LOG(FATAL) << "Timeout value \"" << string_value << "\" was parsed as "
- << command_line_timeout;
+ << command_line_timeout_ms;
}
+ command_line_timeout =
+ base::TimeDelta::FromMilliseconds(command_line_timeout_ms);
}
#if defined(MEMORY_SANITIZER)
@@ -80,10 +85,10 @@ bool TestTimeouts::initialized_ = false;
// The timeout values should increase in the order they appear in this block.
// static
-int TestTimeouts::tiny_timeout_ms_ = 100;
-int TestTimeouts::action_timeout_ms_ = 10000;
-int TestTimeouts::action_max_timeout_ms_ = 30000;
-int TestTimeouts::test_launcher_timeout_ms_ = 45000;
+auto TestTimeouts::tiny_timeout_ = base::TimeDelta::FromMilliseconds(100);
+auto TestTimeouts::action_timeout_ = base::TimeDelta::FromSeconds(10);
+auto TestTimeouts::action_max_timeout_ = base::TimeDelta::FromSeconds(30);
+auto TestTimeouts::test_launcher_timeout_ = base::TimeDelta::FromSeconds(45);
// static
void TestTimeouts::Initialize() {
@@ -99,7 +104,8 @@ void TestTimeouts::Initialize() {
// Note that these timeouts MUST be initialized in the correct order as
// per the CHECKS below.
- InitializeTimeout(switches::kTestTinyTimeout, 0, &tiny_timeout_ms_);
+ InitializeTimeout(switches::kTestTinyTimeout, base::TimeDelta(),
+ &tiny_timeout_);
// All timeouts other than the "tiny" one should be set to very large values
// when in a debugger or when run interactively, so that tests will not get
@@ -109,24 +115,23 @@ void TestTimeouts::Initialize() {
// hang (because it's used as a task-posting delay). In particular this
// causes problems for some iOS device tests, which are always run inside a
// debugger (thus BeingDebugged() is true even on the bots).
- int min_ui_test_action_timeout = tiny_timeout_ms_;
+ base::TimeDelta min_ui_test_action_timeout = tiny_timeout_;
if (being_debugged || base::CommandLine::ForCurrentProcess()->HasSwitch(
switches::kTestLauncherInteractive)) {
- constexpr int kVeryLargeTimeoutMs = 100'000'000;
- min_ui_test_action_timeout = kVeryLargeTimeoutMs;
+ min_ui_test_action_timeout = base::TimeDelta::FromDays(1);
}
InitializeTimeout(switches::kUiTestActionTimeout, min_ui_test_action_timeout,
- &action_timeout_ms_);
- InitializeTimeout(switches::kUiTestActionMaxTimeout, action_timeout_ms_,
- &action_max_timeout_ms_);
+ &action_timeout_);
+ InitializeTimeout(switches::kUiTestActionMaxTimeout, action_timeout_,
+ &action_max_timeout_);
// Test launcher timeout is independent from anything above action timeout.
- InitializeTimeout(switches::kTestLauncherTimeout, action_timeout_ms_,
- &test_launcher_timeout_ms_);
+ InitializeTimeout(switches::kTestLauncherTimeout, action_timeout_,
+ &test_launcher_timeout_);
// The timeout values should be increasing in the right order.
- CHECK_LE(tiny_timeout_ms_, action_timeout_ms_);
- CHECK_LE(action_timeout_ms_, action_max_timeout_ms_);
- CHECK_LE(action_timeout_ms_, test_launcher_timeout_ms_);
+ CHECK_LE(tiny_timeout_, action_timeout_);
+ CHECK_LE(action_timeout_, action_max_timeout_);
+ CHECK_LE(action_timeout_, test_launcher_timeout_);
}
diff --git a/chromium/base/test/test_timeouts.h b/chromium/base/test/test_timeouts.h
index 1bdda2a157b..1a068adbc77 100644
--- a/chromium/base/test/test_timeouts.h
+++ b/chromium/base/test/test_timeouts.h
@@ -5,7 +5,7 @@
#ifndef BASE_TEST_TEST_TIMEOUTS_H_
#define BASE_TEST_TEST_TIMEOUTS_H_
-#include "base/logging.h"
+#include "base/check.h"
#include "base/macros.h"
#include "base/time/time.h"
@@ -22,14 +22,14 @@ class TestTimeouts {
// like a delay value than a timeout.
static base::TimeDelta tiny_timeout() {
DCHECK(initialized_);
- return base::TimeDelta::FromMilliseconds(tiny_timeout_ms_);
+ return tiny_timeout_;
}
// Timeout to wait for something to happen. If you are not sure
// which timeout to use, this is the one you want.
static base::TimeDelta action_timeout() {
DCHECK(initialized_);
- return base::TimeDelta::FromMilliseconds(action_timeout_ms_);
+ return action_timeout_;
}
// Timeout longer than the above, suitable to wait on success conditions which
@@ -39,23 +39,23 @@ class TestTimeouts {
// actions are compounded in the same test.
static base::TimeDelta action_max_timeout() {
DCHECK(initialized_);
- return base::TimeDelta::FromMilliseconds(action_max_timeout_ms_);
+ return action_max_timeout_;
}
// Timeout for a single test launched used built-in test launcher.
// Do not use outside of the test launcher.
static base::TimeDelta test_launcher_timeout() {
DCHECK(initialized_);
- return base::TimeDelta::FromMilliseconds(test_launcher_timeout_ms_);
+ return test_launcher_timeout_;
}
private:
static bool initialized_;
- static int tiny_timeout_ms_;
- static int action_timeout_ms_;
- static int action_max_timeout_ms_;
- static int test_launcher_timeout_ms_;
+ static base::TimeDelta tiny_timeout_;
+ static base::TimeDelta action_timeout_;
+ static base::TimeDelta action_max_timeout_;
+ static base::TimeDelta test_launcher_timeout_;
DISALLOW_IMPLICIT_CONSTRUCTORS(TestTimeouts);
};
diff --git a/chromium/base/test/trace_event_analyzer.cc b/chromium/base/test/trace_event_analyzer.cc
index 78a6b9b4cae..13af95dd6de 100644
--- a/chromium/base/test/trace_event_analyzer.cc
+++ b/chromium/base/test/trace_event_analyzer.cc
@@ -11,6 +11,7 @@
#include "base/bind.h"
#include "base/json/json_reader.h"
+#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/memory/ref_counted_memory.h"
#include "base/run_loop.h"
diff --git a/chromium/base/test/trace_event_analyzer.h b/chromium/base/test/trace_event_analyzer.h
index dcdd2e4b5ec..25a5a5372f3 100644
--- a/chromium/base/test/trace_event_analyzer.h
+++ b/chromium/base/test/trace_event_analyzer.h
@@ -100,7 +100,7 @@
#include "base/macros.h"
#include "base/memory/ref_counted.h"
-#include "base/trace_event/trace_event.h"
+#include "base/trace_event/base_tracing.h"
namespace base {
class Value;
diff --git a/chromium/base/test/with_feature_override.h b/chromium/base/test/with_feature_override.h
index 9a88253c30c..deafd1823f9 100644
--- a/chromium/base/test/with_feature_override.h
+++ b/chromium/base/test/with_feature_override.h
@@ -16,15 +16,18 @@ namespace test {
#define INSTANTIATE_FEATURE_OVERRIDE_TEST_SUITE(test_name) \
INSTANTIATE_TEST_SUITE_P(All, test_name, testing::Values(false, true))
-// Base class for a test fixture that must run with a feature enabled and
-// disabled. Must be the first base class of the test fixture to take effect
-// during the construction of the test fixture itself.
+// Base class for a test fixture that enables running tests twice, once with a
+// feature enabled and once with it disabled. Must be the first base class of
+// the test fixture to take effect during its construction. If
+// WithFeatureOverride is added as a parent to an existing test fixture
+// all of its existing tests need to be migrated to TEST_P.
//
// Example usage:
//
-// class MyTest : public base::WithFeatureOverride, public testing::Test {
+// class MyTest : public base::test::WithFeatureOverride, public testing::Test
+// {
// public:
-// MyTest() : WithFeatureOverride(kMyFeature){}
+// MyTest() : base::test::WithFeatureOverride(kMyFeature){}
// };
//
// TEST_P(MyTest, FooBar) {
@@ -41,8 +44,8 @@ class WithFeatureOverride : public testing::WithParamInterface<bool> {
WithFeatureOverride(const WithFeatureOverride&) = delete;
WithFeatureOverride& operator=(const WithFeatureOverride&) = delete;
- // Use to know if the configured feature provided in the ctor is enabled or
- // not.
+ // Use to know if the configured feature provided in the constructor is
+ // enabled or not.
bool IsParamFeatureEnabled();
private:
diff --git a/chromium/base/third_party/nspr/prtime.cc b/chromium/base/third_party/nspr/prtime.cc
index fe96724ac9a..f7ee352f794 100644
--- a/chromium/base/third_party/nspr/prtime.cc
+++ b/chromium/base/third_party/nspr/prtime.cc
@@ -65,13 +65,14 @@
* Unit tests are in base/time/pr_time_unittest.cc.
*/
-#include <limits.h>
-
-#include "base/logging.h"
#include "base/third_party/nspr/prtime.h"
+
+#include "base/check.h"
#include "build/build_config.h"
+#include <ctype.h>
#include <errno.h> /* for EINVAL */
+#include <limits.h>
#include <string.h>
#include <time.h>
diff --git a/chromium/base/thread_annotations.h b/chromium/base/thread_annotations.h
index fdd32f84906..ca4daac7d49 100644
--- a/chromium/base/thread_annotations.h
+++ b/chromium/base/thread_annotations.h
@@ -32,7 +32,7 @@
#ifndef BASE_THREAD_ANNOTATIONS_H_
#define BASE_THREAD_ANNOTATIONS_H_
-#include "base/logging.h"
+#include "base/check_op.h"
#include "build/build_config.h"
#if defined(__clang__)
diff --git a/chromium/base/threading/hang_watcher.cc b/chromium/base/threading/hang_watcher.cc
index 9a4cddea653..ccd1ea215c8 100644
--- a/chromium/base/threading/hang_watcher.cc
+++ b/chromium/base/threading/hang_watcher.cc
@@ -118,7 +118,6 @@ HangWatcher::HangWatcher()
DCHECK(!g_instance);
g_instance = this;
- Start();
}
HangWatcher::~HangWatcher() {
@@ -147,22 +146,21 @@ void HangWatcher::Wait() {
while (true) {
// Amount by which the actual time spent sleeping can deviate from
// the target time and still be considered timely.
- constexpr base::TimeDelta wait_drift_tolerance =
+ constexpr base::TimeDelta kWaitDriftTolerance =
base::TimeDelta::FromMilliseconds(100);
- base::TimeTicks time_before_wait = tick_clock_->NowTicks();
+ const base::TimeTicks time_before_wait = tick_clock_->NowTicks();
// Sleep until next scheduled monitoring or until signaled.
- bool was_signaled = should_monitor_.TimedWait(monitor_period_);
+ const bool was_signaled = should_monitor_.TimedWait(monitor_period_);
- if (after_wait_callback_) {
+ if (after_wait_callback_)
after_wait_callback_.Run(time_before_wait);
- }
- base::TimeTicks time_after_wait = tick_clock_->NowTicks();
- base::TimeDelta wait_time = time_after_wait - time_before_wait;
- bool wait_was_normal =
- wait_time <= (monitor_period_ + wait_drift_tolerance);
+ const base::TimeTicks time_after_wait = tick_clock_->NowTicks();
+ const base::TimeDelta wait_time = time_after_wait - time_before_wait;
+ const bool wait_was_normal =
+ wait_time <= (monitor_period_ + kWaitDriftTolerance);
if (!wait_was_normal) {
// If the time spent waiting was too high it might indicate the machine is
@@ -190,9 +188,8 @@ void HangWatcher::Wait() {
}
// Stop waiting.
- if (wait_was_normal || was_signaled) {
+ if (wait_was_normal || was_signaled)
return;
- }
}
}
@@ -202,20 +199,15 @@ void HangWatcher::Run() {
DCHECK_CALLED_ON_VALID_THREAD(hang_watcher_thread_checker_);
while (keep_monitoring_.load(std::memory_order_relaxed)) {
- // If there is nothing to watch sleep until there is.
- if (IsWatchListEmpty()) {
- should_monitor_.Wait();
- } else {
- Monitor();
+ Wait();
+ if (!IsWatchListEmpty() &&
+ keep_monitoring_.load(std::memory_order_relaxed)) {
+ Monitor();
if (after_monitor_closure_for_testing_) {
after_monitor_closure_for_testing_.Run();
}
}
-
- if (keep_monitoring_.load(std::memory_order_relaxed)) {
- Wait();
- }
}
}
@@ -238,11 +230,6 @@ ScopedClosureRunner HangWatcher::RegisterThread() {
watch_states_.push_back(
internal::HangWatchState::CreateHangWatchStateForCurrentThread());
- // Now that there is a thread to monitor we wake the HangWatcher thread.
- if (watch_states_.size() == 1) {
- should_monitor_.Signal();
- }
-
return ScopedClosureRunner(BindOnce(&HangWatcher::UnregisterThread,
Unretained(HangWatcher::GetInstance())));
}
diff --git a/chromium/base/threading/hang_watcher.h b/chromium/base/threading/hang_watcher.h
index dd496c79f7d..7d848da4475 100644
--- a/chromium/base/threading/hang_watcher.h
+++ b/chromium/base/threading/hang_watcher.h
@@ -150,6 +150,9 @@ class BASE_EXPORT HangWatcher : public DelegateSimpleThread::Delegate {
// non-actionable stack trace in the crash recorded.
void BlockIfCaptureInProgress();
+ // Begin executing the monitoring loop on the HangWatcher thread.
+ void Start();
+
private:
// Use to assert that functions are called on the monitoring thread.
THREAD_CHECKER(hang_watcher_thread_checker_);
@@ -211,9 +214,6 @@ class BASE_EXPORT HangWatcher : public DelegateSimpleThread::Delegate {
void CaptureHang(base::TimeTicks capture_time)
EXCLUSIVE_LOCKS_REQUIRED(watch_state_lock_) LOCKS_EXCLUDED(capture_lock_);
- // Call Run() on the HangWatcher thread.
- void Start();
-
// Stop all monitoring and join the HangWatcher thread.
void Stop();
diff --git a/chromium/base/threading/hang_watcher_unittest.cc b/chromium/base/threading/hang_watcher_unittest.cc
index 4a6033fe235..0849b98ec2c 100644
--- a/chromium/base/threading/hang_watcher_unittest.cc
+++ b/chromium/base/threading/hang_watcher_unittest.cc
@@ -95,6 +95,9 @@ class HangWatcherTest : public testing::Test {
// We're not testing the monitoring loop behavior in this test so we want to
// trigger monitoring manually.
hang_watcher_.SetMonitoringPeriodForTesting(base::TimeDelta::Max());
+
+ // Start the monitoring loop.
+ hang_watcher_.Start();
}
HangWatcherTest(const HangWatcherTest& other) = delete;
@@ -169,19 +172,6 @@ class HangWatcherBlockingThreadTest : public HangWatcherTest {
};
} // namespace
-TEST_F(HangWatcherTest, NoRegisteredThreads) {
- ASSERT_FALSE(monitor_event_.IsSignaled());
-
- // Signal to advance the Run() loop.
- base::HangWatcher::GetInstance()->SignalMonitorEventForTesting();
-
- // Monitoring should just not happen when there are no registered threads.
- // Wait a while to make sure it does not.
- ASSERT_FALSE(monitor_event_.TimedWait(base::TimeDelta::FromSeconds(1)));
-
- ASSERT_FALSE(hang_event_.IsSignaled());
-}
-
TEST_F(HangWatcherTest, NestedScopes) {
// Create a state object for the test thread since this test is single
// threaded.
@@ -307,7 +297,7 @@ class HangWatcherSnapshotTest : public testing::Test {
} // namespace
// TODO(crbug.com/2193655): Test flaky on iPad.
-TEST_F(HangWatcherSnapshotTest, DISABLED_HungThreadIDs) {
+TEST_F(HangWatcherSnapshotTest, HungThreadIDs) {
// During hang capture the list of hung threads should be populated.
hang_watcher_.SetOnHangClosureForTesting(base::BindLambdaForTesting([this]() {
EXPECT_EQ(hang_watcher_.GrabWatchStateSnapshotForTesting()
@@ -325,6 +315,8 @@ TEST_F(HangWatcherSnapshotTest, DISABLED_HungThreadIDs) {
monitor_event_.Signal();
}));
+ hang_watcher_.Start();
+
// Register the main test thread for hang watching.
auto unregister_thread_closure_ = hang_watcher_.RegisterThread();
@@ -437,6 +429,8 @@ TEST_F(HangWatcherPeriodicMonitoringTest,
// wrong reasons.
InstallAfterWaitCallback(kMonitoringPeriod);
+ hang_watcher_.Start();
+
// Unblock the test thread. No thread ever registered after the HangWatcher
// was created in the test's constructor. No monitoring should have taken
// place.
@@ -475,7 +469,9 @@ TEST_F(HangWatcherPeriodicMonitoringTest, PeriodicCallsTakePlace) {
// Monitor(). This would inhibit monitoring.
InstallAfterWaitCallback(kMonitoringPeriod);
- // Register a thread, kicks off monitoring.
+ hang_watcher_.Start();
+
+ // Register a thread,
unregister_thread_closure_ = hang_watcher_.RegisterThread();
run_loop.Run();
@@ -487,7 +483,7 @@ TEST_F(HangWatcherPeriodicMonitoringTest, PeriodicCallsTakePlace) {
// If the HangWatcher detects it slept for longer than expected it will not
// monitor.
// TODO(crbug.com/1081654): Test flaky on ChromeOS.
-TEST_F(HangWatcherPeriodicMonitoringTest, DISABLED_NoMonitorOnOverSleep) {
+TEST_F(HangWatcherPeriodicMonitoringTest, NoMonitorOnOverSleep) {
RunLoop run_loop;
// If a call to HangWatcher::Monitor() takes place the test will instantly
@@ -502,7 +498,9 @@ TEST_F(HangWatcherPeriodicMonitoringTest, DISABLED_NoMonitorOnOverSleep) {
// detect oversleeping every time. This will keep it from monitoring.
InstallAfterWaitCallback(base::TimeDelta::FromMinutes(1));
- // Register a thread, kicks off monitoring.
+ hang_watcher_.Start();
+
+ // Register a thread.
unregister_thread_closure_ = hang_watcher_.RegisterThread();
// Unblock the test thread. All waits were perceived as oversleeping so all
@@ -538,6 +536,8 @@ class HangWatchScopeBlockingTest : public testing::Test {
// Make sure no periodic monitoring takes place.
hang_watcher_.SetMonitoringPeriodForTesting(base::TimeDelta::Max());
+ hang_watcher_.Start();
+
// Register the test main thread for hang watching.
unregister_thread_closure_ = hang_watcher_.RegisterThread();
}
diff --git a/chromium/base/threading/platform_thread_win.cc b/chromium/base/threading/platform_thread_win.cc
index 9b624757165..c87b5c08d90 100644
--- a/chromium/base/threading/platform_thread_win.cc
+++ b/chromium/base/threading/platform_thread_win.cc
@@ -16,6 +16,7 @@
#include "base/strings/string_number_conversions.h"
#include "base/strings/utf_string_conversions.h"
#include "base/threading/scoped_blocking_call.h"
+#include "base/threading/scoped_thread_priority.h"
#include "base/threading/thread_id_name_manager.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time_override.h"
@@ -115,6 +116,13 @@ DWORD __stdcall ThreadFunc(void* params) {
PlatformThread::CurrentId());
}
+ // Ensure thread priority is at least NORMAL before initiating thread
+ // destruction. Thread destruction on Windows holds the LdrLock while
+ // performing TLS destruction which causes hangs if performed at background
+ // priority (priority inversion) (see: http://crbug.com/1096203).
+ if (PlatformThread::GetCurrentThreadPriority() < ThreadPriority::NORMAL)
+ PlatformThread::SetCurrentThreadPriority(ThreadPriority::NORMAL);
+
return 0;
}
diff --git a/chromium/base/threading/scoped_blocking_call.cc b/chromium/base/threading/scoped_blocking_call.cc
index 62edcd2af48..e8b2900874b 100644
--- a/chromium/base/threading/scoped_blocking_call.cc
+++ b/chromium/base/threading/scoped_blocking_call.cc
@@ -8,7 +8,7 @@
#include "base/threading/thread_local.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
-#include "base/trace_event/trace_event.h"
+#include "base/trace_event/base_tracing.h"
#include "build/build_config.h"
namespace base {
diff --git a/chromium/base/threading/scoped_thread_priority.cc b/chromium/base/threading/scoped_thread_priority.cc
index 396071906f2..e33c76f525f 100644
--- a/chromium/base/threading/scoped_thread_priority.cc
+++ b/chromium/base/threading/scoped_thread_priority.cc
@@ -6,7 +6,7 @@
#include "base/location.h"
#include "base/threading/platform_thread.h"
-#include "base/trace_event/trace_event.h"
+#include "base/trace_event/base_tracing.h"
namespace base {
namespace internal {
diff --git a/chromium/base/threading/sequence_bound.h b/chromium/base/threading/sequence_bound.h
index b235fe221c3..3ee8ef6e4c9 100644
--- a/chromium/base/threading/sequence_bound.h
+++ b/chromium/base/threading/sequence_bound.h
@@ -94,26 +94,6 @@ namespace base {
// SequenceBound<MyDerivedClass>(main_task_runner, ctor args);
// auto c = new SomeConsumer(std::move(widget)); // upcasts to MyClass
-namespace internal {
-
-// If we can't cast |Base*| into |Derived*|, then it's a virtual base if and
-// only if |Base| is actually a base class of |Derived|. Otherwise (including
-// unrelated types), it isn't. We default to Derived* so that the
-// specialization below will apply when the cast to |Derived*| is valid.
-template <typename Base, typename Derived, typename = Derived*>
-struct is_virtual_base_of : public std::is_base_of<Base, Derived> {};
-
-// If we can cast |Base*| into |Derived*|, then it's definitely not a virtual
-// base. When this happens, we'll match the default third template argument.
-template <typename Base, typename Derived>
-struct is_virtual_base_of<Base,
- Derived,
- decltype(static_cast<Derived*>(
- static_cast<Base*>(nullptr)))> : std::false_type {
-};
-
-} // namespace internal
-
template <typename T>
class SequenceBound {
public:
@@ -134,7 +114,9 @@ class SequenceBound {
Args&&... args)
: impl_task_runner_(std::move(task_runner)) {
// Allocate space for but do not construct an instance of |T|.
- storage_ = AlignedAlloc(sizeof(T), alignof(T));
+ // AlignedAlloc() requires alignment be a multiple of sizeof(void*).
+ storage_ = AlignedAlloc(
+ sizeof(T), sizeof(void*) > alignof(T) ? sizeof(void*) : alignof(T));
t_ = reinterpret_cast<T*>(storage_);
// Post construction to the impl thread.
diff --git a/chromium/base/threading/sequence_bound_unittest.cc b/chromium/base/threading/sequence_bound_unittest.cc
index dfaae6c832b..ecf0e3543e8 100644
--- a/chromium/base/threading/sequence_bound_unittest.cc
+++ b/chromium/base/threading/sequence_bound_unittest.cc
@@ -333,23 +333,6 @@ TEST_F(SequenceBoundTest, ResetOnNullObjectWorks) {
derived.Reset();
}
-TEST_F(SequenceBoundTest, IsVirtualBaseClassOf) {
- // Check that is_virtual_base_of<> works properly.
-
- // Neither |Base| nor |Derived| is a virtual base of the other.
- static_assert(!internal::is_virtual_base_of<Base, Derived>::value,
- "|Base| shouldn't be a virtual base of |Derived|");
- static_assert(!internal::is_virtual_base_of<Derived, Base>::value,
- "|Derived| shouldn't be a virtual base of |Base|");
-
- // |Base| should be a virtual base class of |VirtuallyDerived|, but not the
- // other way.
- static_assert(internal::is_virtual_base_of<Base, VirtuallyDerived>::value,
- "|Base| should be a virtual base of |VirtuallyDerived|");
- static_assert(!internal::is_virtual_base_of<VirtuallyDerived, Base>::value,
- "|VirtuallyDerived shouldn't be a virtual base of |Base|");
-}
-
TEST_F(SequenceBoundTest, LvalueConstructionParameter) {
// Note here that |value_ptr| is an lvalue, while |&value| would be an rvalue.
Value value = kInitialValue;
@@ -408,4 +391,10 @@ TEST_F(SequenceBoundTest, ResetWithCallbackAfterDestruction) {
loop.Run();
}
+TEST_F(SequenceBoundTest, SmallObject) {
+ class EmptyClass {};
+ SequenceBound<EmptyClass> value(task_runner_);
+ // Test passes if SequenceBound constructor does not crash in AlignedAlloc().
+}
+
} // namespace base
diff --git a/chromium/base/threading/sequenced_task_runner_handle_unittest.cc b/chromium/base/threading/sequenced_task_runner_handle_unittest.cc
index fa7dec1c734..8f4933e1114 100644
--- a/chromium/base/threading/sequenced_task_runner_handle_unittest.cc
+++ b/chromium/base/threading/sequenced_task_runner_handle_unittest.cc
@@ -67,7 +67,7 @@ TEST_F(SequencedTaskRunnerHandleTest, FromThreadPoolSequencedTask) {
}
TEST_F(SequencedTaskRunnerHandleTest, NoHandleFromUnsequencedTask) {
- base::PostTask(base::BindOnce(
+ base::ThreadPool::PostTask(base::BindOnce(
[]() { EXPECT_FALSE(SequencedTaskRunnerHandle::IsSet()); }));
task_environment_.RunUntilIdle();
}
diff --git a/chromium/base/threading/thread.cc b/chromium/base/threading/thread.cc
index d264c1d2602..8659724616a 100644
--- a/chromium/base/threading/thread.cc
+++ b/chromium/base/threading/thread.cc
@@ -278,9 +278,11 @@ void Thread::DetachFromSequence() {
}
PlatformThreadId Thread::GetThreadId() const {
- // If the thread is created but not started yet, wait for |id_| being ready.
- base::ScopedAllowBaseSyncPrimitivesOutsideBlockingScope allow_wait;
- id_event_.Wait();
+ if (!id_event_.IsSignaled()) {
+ // If the thread is created but not started yet, wait for |id_| being ready.
+ base::ScopedAllowBaseSyncPrimitivesOutsideBlockingScope allow_wait;
+ id_event_.Wait();
+ }
return id_;
}
diff --git a/chromium/base/threading/thread_checker.h b/chromium/base/threading/thread_checker.h
index e1495344f22..48646623ab8 100644
--- a/chromium/base/threading/thread_checker.h
+++ b/chromium/base/threading/thread_checker.h
@@ -5,8 +5,8 @@
#ifndef BASE_THREADING_THREAD_CHECKER_H_
#define BASE_THREADING_THREAD_CHECKER_H_
+#include "base/check.h"
#include "base/compiler_specific.h"
-#include "base/logging.h"
#include "base/strings/string_piece.h"
#include "base/thread_annotations.h"
#include "base/threading/thread_checker_impl.h"
@@ -84,7 +84,7 @@
#define DETACH_FROM_THREAD(name) (name).DetachFromThread()
#else // DCHECK_IS_ON()
#define THREAD_CHECKER(name) static_assert(true, "")
-#define DCHECK_CALLED_ON_VALID_THREAD(name, ...) EAT_STREAM_PARAMETERS
+#define DCHECK_CALLED_ON_VALID_THREAD(name, ...) EAT_CHECK_STREAM_PARAMS()
#define DETACH_FROM_THREAD(name)
#endif // DCHECK_IS_ON()
diff --git a/chromium/base/threading/thread_checker_unittest.cc b/chromium/base/threading/thread_checker_unittest.cc
index d1958896b0d..b6d4b9fb7fb 100644
--- a/chromium/base/threading/thread_checker_unittest.cc
+++ b/chromium/base/threading/thread_checker_unittest.cc
@@ -15,6 +15,7 @@
#include "base/test/gtest_util.h"
#include "base/test/test_simple_task_runner.h"
#include "base/threading/simple_thread.h"
+#include "base/threading/thread_local.h"
#include "base/threading/thread_task_runner_handle.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/chromium/base/threading/thread_local.h b/chromium/base/threading/thread_local.h
index f9762050b68..d1ab40a152c 100644
--- a/chromium/base/threading/thread_local.h
+++ b/chromium/base/threading/thread_local.h
@@ -51,7 +51,7 @@
#include <memory>
-#include "base/logging.h"
+#include "base/check_op.h"
#include "base/macros.h"
#include "base/threading/thread_local_internal.h"
#include "base/threading/thread_local_storage.h"
diff --git a/chromium/base/threading/thread_restrictions.cc b/chromium/base/threading/thread_restrictions.cc
index c7f07d26646..16e70800f64 100644
--- a/chromium/base/threading/thread_restrictions.cc
+++ b/chromium/base/threading/thread_restrictions.cc
@@ -4,7 +4,7 @@
#include "base/threading/thread_restrictions.h"
-#include "base/trace_event/trace_event.h"
+#include "base/trace_event/base_tracing.h"
#if DCHECK_IS_ON()
diff --git a/chromium/base/threading/thread_restrictions.h b/chromium/base/threading/thread_restrictions.h
index 33df64f30e8..29a68a6b15b 100644
--- a/chromium/base/threading/thread_restrictions.h
+++ b/chromium/base/threading/thread_restrictions.h
@@ -6,9 +6,9 @@
#define BASE_THREADING_THREAD_RESTRICTIONS_H_
#include "base/base_export.h"
+#include "base/check_op.h"
#include "base/gtest_prod_util.h"
#include "base/location.h"
-#include "base/logging.h"
#include "base/macros.h"
// -----------------------------------------------------------------------------
@@ -112,6 +112,7 @@ namespace audio {
class OutputDevice;
}
namespace blink {
+class DiskDataAllocator;
class RTCVideoDecoderAdapter;
class RTCVideoEncoder;
class SourceStream;
@@ -195,6 +196,9 @@ class PaintCanvasVideoRenderer;
namespace memory_instrumentation {
class OSMetrics;
}
+namespace metrics {
+class AndroidMetricsServiceClient;
+}
namespace midi {
class TaskService; // https://crbug.com/796830
}
@@ -367,6 +371,7 @@ class BASE_EXPORT ScopedAllowBlocking {
friend class AdjustOOMScoreHelper;
friend class StackSamplingProfiler;
friend class android_webview::ScopedAllowInitGLBindings;
+ friend class blink::DiskDataAllocator;
friend class chromeos::MojoUtils; // http://crbug.com/1055467
friend class content::BrowserProcessSubThread;
friend class content::PepperPrintSettingsManagerImpl;
@@ -376,6 +381,7 @@ class BASE_EXPORT ScopedAllowBlocking {
friend class cronet::CronetPrefsManager;
friend class cronet::CronetURLRequestContext;
friend class memory_instrumentation::OSMetrics;
+ friend class metrics::AndroidMetricsServiceClient;
friend class module_installer::ScopedAllowModulePakLoad;
friend class mojo::CoreLibraryInitializer;
friend class printing::LocalPrinterHandlerDefault;
diff --git a/chromium/base/threading/thread_unittest.cc b/chromium/base/threading/thread_unittest.cc
index 68a7685fb72..4f0c46f3507 100644
--- a/chromium/base/threading/thread_unittest.cc
+++ b/chromium/base/threading/thread_unittest.cc
@@ -12,6 +12,7 @@
#include "base/bind.h"
#include "base/debug/leak_annotations.h"
+#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "base/run_loop.h"
diff --git a/chromium/base/time/time.cc b/chromium/base/time/time.cc
index d7620831555..54e2035f5ff 100644
--- a/chromium/base/time/time.cc
+++ b/chromium/base/time/time.cc
@@ -13,6 +13,7 @@
#include "base/logging.h"
#include "base/macros.h"
#include "base/no_destructor.h"
+#include "base/notreached.h"
#include "base/strings/stringprintf.h"
#include "base/third_party/nspr/prtime.h"
#include "base/time/time_override.h"
diff --git a/chromium/base/time/time.h b/chromium/base/time/time.h
index b6bc17703ce..731189bfe40 100644
--- a/chromium/base/time/time.h
+++ b/chromium/base/time/time.h
@@ -58,8 +58,8 @@
#include <limits>
#include "base/base_export.h"
+#include "base/check_op.h"
#include "base/compiler_specific.h"
-#include "base/logging.h"
#include "base/numerics/safe_math.h"
#include "build/build_config.h"
diff --git a/chromium/base/time/time_mac.cc b/chromium/base/time/time_mac.cc
index 676d90e3e60..eda4f20a367 100644
--- a/chromium/base/time/time_mac.cc
+++ b/chromium/base/time/time_mac.cc
@@ -18,6 +18,7 @@
#include "base/mac/mach_logging.h"
#include "base/mac/scoped_cftyperef.h"
#include "base/mac/scoped_mach_port.h"
+#include "base/notreached.h"
#include "base/numerics/safe_conversions.h"
#include "base/stl_util.h"
#include "base/time/time_override.h"
diff --git a/chromium/base/trace_event/base_tracing.h b/chromium/base/trace_event/base_tracing.h
new file mode 100644
index 00000000000..c5831f237d2
--- /dev/null
+++ b/chromium/base/trace_event/base_tracing.h
@@ -0,0 +1,28 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_BASE_TRACING_H_
+#define BASE_TRACE_EVENT_BASE_TRACING_H_
+
+// Proxy header that provides tracing instrumentation for //base code. When
+// tracing support is disabled via the gn flag enable_base_tracing, this header
+// provides a mock implementation of the relevant trace macros instead, which
+// causes the instrumentation in //base to be compiled into no-ops.
+
+#include "base/tracing_buildflags.h"
+
+#if BUILDFLAG(ENABLE_BASE_TRACING)
+// Update the check in //base/PRESUBMIT.py when adding new headers here.
+// TODO(crbug/1006541): Switch to perfetto for trace event implementation.
+#include "base/trace_event/blame_context.h"
+#include "base/trace_event/memory_allocator_dump_guid.h"
+#include "base/trace_event/memory_dump_provider.h"
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/traced_value.h"
+#include "base/trace_event/typed_macros.h"
+#else // BUILDFLAG(ENABLE_BASE_TRACING)
+#include "base/trace_event/trace_event_stub.h"
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
+
+#endif // BASE_TRACE_EVENT_BASE_TRACING_H_
diff --git a/chromium/base/trace_event/builtin_categories.h b/chromium/base/trace_event/builtin_categories.h
index 7ce21d4711b..31f563dd4d5 100644
--- a/chromium/base/trace_event/builtin_categories.h
+++ b/chromium/base/trace_event/builtin_categories.h
@@ -83,6 +83,7 @@
X("GAMEPAD") \
X("gpu") \
X("gpu.capture") \
+ X("gpu.memory") \
X("headless") \
X("hwoverlays") \
X("identity") \
@@ -148,9 +149,11 @@
X("test_gpu") \
X("test_tracing") \
X("toplevel") \
+ X("toplevel.flow") \
X("ui") \
X("v8") \
X("v8.execute") \
+ X("v8.wasm") \
X("ValueStoreFrontend::Backend") \
X("views") \
X("views.frame") \
@@ -214,6 +217,7 @@
X(TRACE_DISABLED_BY_DEFAULT("power")) \
X(TRACE_DISABLED_BY_DEFAULT("renderer.scheduler")) \
X(TRACE_DISABLED_BY_DEFAULT("renderer.scheduler.debug")) \
+ X(TRACE_DISABLED_BY_DEFAULT("sandbox")) \
X(TRACE_DISABLED_BY_DEFAULT("sequence_manager")) \
X(TRACE_DISABLED_BY_DEFAULT("sequence_manager.debug")) \
X(TRACE_DISABLED_BY_DEFAULT("sequence_manager.verbose_snapshots")) \
@@ -223,12 +227,10 @@
X(TRACE_DISABLED_BY_DEFAULT("SyncFileSystem")) \
X(TRACE_DISABLED_BY_DEFAULT("system_stats")) \
X(TRACE_DISABLED_BY_DEFAULT("thread_pool_diagnostics")) \
- X(TRACE_DISABLED_BY_DEFAULT("toplevel.flow")) \
X(TRACE_DISABLED_BY_DEFAULT("toplevel.ipc")) \
X(TRACE_DISABLED_BY_DEFAULT("user_action_samples")) \
X(TRACE_DISABLED_BY_DEFAULT("v8.compile")) \
X(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler")) \
- X(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler.hires")) \
X(TRACE_DISABLED_BY_DEFAULT("v8.gc")) \
X(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats")) \
X(TRACE_DISABLED_BY_DEFAULT("v8.ic_stats")) \
@@ -236,7 +238,7 @@
X(TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats")) \
X(TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats_sampling")) \
X(TRACE_DISABLED_BY_DEFAULT("v8.turbofan")) \
- X(TRACE_DISABLED_BY_DEFAULT("v8.wasm")) \
+ X(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed")) \
X(TRACE_DISABLED_BY_DEFAULT("video_and_image_capture")) \
X(TRACE_DISABLED_BY_DEFAULT("viz.debug.overlay_planes")) \
X(TRACE_DISABLED_BY_DEFAULT("viz.hit_testing_flow")) \
@@ -246,6 +248,7 @@
X(TRACE_DISABLED_BY_DEFAULT("viz.surface_lifetime")) \
X(TRACE_DISABLED_BY_DEFAULT("viz.triangles")) \
X(TRACE_DISABLED_BY_DEFAULT("webaudio.audionode")) \
+ X(TRACE_DISABLED_BY_DEFAULT("webrtc")) \
X(TRACE_DISABLED_BY_DEFAULT("worker.scheduler"))
#define INTERNAL_TRACE_INIT_CATEGORY_NAME(name) name,
diff --git a/chromium/base/trace_event/category_registry.cc b/chromium/base/trace_event/category_registry.cc
index 691336f8707..27c77740358 100644
--- a/chromium/base/trace_event/category_registry.cc
+++ b/chromium/base/trace_event/category_registry.cc
@@ -6,6 +6,7 @@
#include <string.h>
+#include <ostream>
#include <type_traits>
#include "base/check.h"
diff --git a/chromium/base/trace_event/category_registry.h b/chromium/base/trace_event/category_registry.h
index a6439d94595..cd95ba8f547 100644
--- a/chromium/base/trace_event/category_registry.h
+++ b/chromium/base/trace_event/category_registry.h
@@ -10,7 +10,7 @@
#include "base/atomicops.h"
#include "base/base_export.h"
-#include "base/logging.h"
+#include "base/check_op.h"
#include "base/stl_util.h"
#include "base/trace_event/builtin_categories.h"
#include "base/trace_event/common/trace_event_common.h"
diff --git a/chromium/base/trace_event/etw_manifest/BUILD.gn b/chromium/base/trace_event/etw_manifest/BUILD.gn
deleted file mode 100644
index a66fef9e3c8..00000000000
--- a/chromium/base/trace_event/etw_manifest/BUILD.gn
+++ /dev/null
@@ -1,27 +0,0 @@
-# Copyright 2015 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import("//build/win/message_compiler.gni")
-
-assert(is_win, "This only runs on Windows.")
-
-message_compiler("chrome_events_win") {
- visibility = [
- "//base/*",
- "//chrome:chrome_dll",
- ]
-
- sources = [ "chrome_events_win.man" ]
-
- user_mode_logging = true
-
- # The only code generated from chrome_events_win.man is a header file that
- # is included by trace_event_etw_export_win.cc, so there is no need to
- # compile any generated code. The other thing which compile_generated_code
- # controls in this context is linking in the .res file generated from the
- # manifest. However this is only needed for ETW provider registration which
- # is done by UIforETW (https://github.com/google/UIforETW) and therefore the
- # manifest resource can be skipped in Chrome.
- compile_generated_code = false
-}
diff --git a/chromium/base/trace_event/etw_manifest/chrome_events_win.man b/chromium/base/trace_event/etw_manifest/chrome_events_win.man
deleted file mode 100644
index 489d16720aa..00000000000
--- a/chromium/base/trace_event/etw_manifest/chrome_events_win.man
+++ /dev/null
@@ -1,95 +0,0 @@
-<?xml version='1.0' encoding='utf-8' standalone='yes'?>
-<assembly
- xmlns="urn:schemas-microsoft-com:asm.v3"
- xmlns:xsd="http://www.w3.org/2001/XMLSchema"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- manifestVersion="1.0"
- >
- <assemblyIdentity
- buildType="$(build.buildType)"
- language="neutral"
- name="Chrome.ETW"
- processorArchitecture="$(build.arch)"
- publicKeyToken="$(Build.WindowsPublicKeyToken)"
- version="$(build.version)"
- versionScope="nonSxS"
- />
- <instrumentation
- xmlns:win="http://manifests.microsoft.com/win/2004/08/windows/events"
- buildFilter="not build.isWow"
- >
- <events xmlns="http://schemas.microsoft.com/win/2004/08/events">
- <provider
- guid="{D2D578D9-2936-45B6-A09f-30E32715F42D}"
- messageFileName="chrome.dll"
- name="Chrome"
- resourceFileName="chrome.dll"
- symbol="CHROME"
- >
- <channels>
- <importChannel
- chid="SYSTEM"
- name="System"
- />
- </channels>
- <templates>
- <template tid="tid_chrome_event">
- <data
- inType="win:AnsiString"
- name="Name"
- />
- <data
- inType="win:AnsiString"
- name="Phase"
- />
- <data
- inType="win:AnsiString"
- name="Arg Name 1"
- />
- <data
- inType="win:AnsiString"
- name="Arg Value 1"
- />
- <data
- inType="win:AnsiString"
- name="Arg Name 2"
- />
- <data
- inType="win:AnsiString"
- name="Arg Value 2"
- />
- <data
- inType="win:AnsiString"
- name="Arg Name 3"
- />
- <data
- inType="win:AnsiString"
- name="Arg Value 3"
- />
- </template>
- </templates>
- <events>
- <event
- channel="SYSTEM"
- level="win:Informational"
- message="$(string.ChromeEvent.EventMessage)"
- opcode="win:Info"
- symbol="ChromeEvent"
- template="tid_chrome_event"
- value="1"
- />
- </events>
- </provider>
- </events>
- </instrumentation>
- <localization>
- <resources culture="en-US">
- <stringTable>
- <string
- id="ChromeEvent.EventMessage"
- value="Chrome Event: %1 (%2)"
- />
- </stringTable>
- </resources>
- </localization>
-</assembly>
diff --git a/chromium/base/trace_event/features.gni b/chromium/base/trace_event/features.gni
deleted file mode 100644
index 7d6bb2a8509..00000000000
--- a/chromium/base/trace_event/features.gni
+++ /dev/null
@@ -1,12 +0,0 @@
-# Copyright 2020 The Chromium Authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-# Features used by //base/trace_event and //services/tracing.
-declare_args() {
- # Switches the TRACE_EVENT instrumentation from base's TraceLog implementation
- # to //third_party/perfetto's client library. Not implemented yet, currently a
- # no-op to set up trybot infrastructure.
- # TODO(eseckler): Implement.
- use_perfetto_client_library = false
-}
diff --git a/chromium/base/trace_event/memory_allocator_dump.h b/chromium/base/trace_event/memory_allocator_dump.h
index 4999e85f560..b11e239b2a1 100644
--- a/chromium/base/trace_event/memory_allocator_dump.h
+++ b/chromium/base/trace_event/memory_allocator_dump.h
@@ -13,7 +13,6 @@
#include "base/base_export.h"
#include "base/gtest_prod_util.h"
-#include "base/logging.h"
#include "base/macros.h"
#include "base/optional.h"
#include "base/trace_event/memory_allocator_dump_guid.h"
diff --git a/chromium/base/trace_event/memory_dump_manager.cc b/chromium/base/trace_event/memory_dump_manager.cc
index 0490e364a46..240b7af3bb0 100644
--- a/chromium/base/trace_event/memory_dump_manager.cc
+++ b/chromium/base/trace_event/memory_dump_manager.cc
@@ -16,6 +16,7 @@
#include "base/command_line.h"
#include "base/debug/alias.h"
#include "base/debug/stack_trace.h"
+#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/sequenced_task_runner.h"
#include "base/strings/string_util.h"
diff --git a/chromium/base/trace_event/memory_infra_background_allowlist.cc b/chromium/base/trace_event/memory_infra_background_allowlist.cc
index 7d1e5744c15..dfaf6271f1f 100644
--- a/chromium/base/trace_event/memory_infra_background_allowlist.cc
+++ b/chromium/base/trace_event/memory_infra_background_allowlist.cc
@@ -26,6 +26,7 @@ const char* const kDumpProviderAllowlist[] = {
"BlinkGC",
"BlinkObjectCounters",
"BlobStorageContext",
+ "Canvas",
"ClientDiscardableSharedMemoryManager",
"DevTools",
"DiscardableSharedMemoryManager",
@@ -95,6 +96,8 @@ const char* const kAllocatorDumpNameAllowlist[] = {
"blink_objects/WorkerGlobalScope",
"blink_objects/UACSSResource",
"blink_objects/ResourceFetcher",
+ "canvas/ResourceProvider/SkSurface",
+ "canvas/ResourceProvider/SkSurface/0x?",
"components/download/controller_0x?",
"devtools/file_watcher_0x?",
"discardable",
diff --git a/chromium/base/trace_event/process_memory_dump.cc b/chromium/base/trace_event/process_memory_dump.cc
index 021e9862e21..2b095661230 100644
--- a/chromium/base/trace_event/process_memory_dump.cc
+++ b/chromium/base/trace_event/process_memory_dump.cc
@@ -8,6 +8,7 @@
#include <vector>
+#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/memory/shared_memory_tracker.h"
#include "base/process/process_metrics.h"
diff --git a/chromium/base/trace_event/trace_config.cc b/chromium/base/trace_event/trace_config.cc
index 5b4493f1bd6..14a670647fa 100644
--- a/chromium/base/trace_event/trace_config.cc
+++ b/chromium/base/trace_event/trace_config.cc
@@ -11,6 +11,7 @@
#include "base/json/json_reader.h"
#include "base/json/json_writer.h"
+#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/strings/string_split.h"
#include "base/trace_event/memory_dump_manager.h"
diff --git a/chromium/base/trace_event/trace_event_etw_export_win.cc b/chromium/base/trace_event/trace_event_etw_export_win.cc
index 680cdbdc027..cf4383fe031 100644
--- a/chromium/base/trace_event/trace_event_etw_export_win.cc
+++ b/chromium/base/trace_event/trace_event_etw_export_win.cc
@@ -4,44 +4,23 @@
#include "base/trace_event/trace_event_etw_export_win.h"
+#include <evntrace.h>
+#include <guiddef.h>
#include <stddef.h>
+#include <stdlib.h>
+#include <windows.h>
#include "base/at_exit.h"
#include "base/check_op.h"
#include "base/command_line.h"
+#include "base/logging.h"
#include "base/memory/singleton.h"
#include "base/strings/string_tokenizer.h"
#include "base/strings/utf_string_conversions.h"
#include "base/threading/platform_thread.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_impl.h"
-
-#include <windows.h>
-
-// The GetProcAddress technique is borrowed from
-// https://github.com/google/UIforETW/tree/master/ETWProviders
-//
-// EVNTAPI is used in evntprov.h which is included by chrome_events_win.h.
-// We define EVNTAPI without the DECLSPEC_IMPORT specifier so that we can
-// implement these functions locally instead of using the import library, and
-// can therefore still run on Windows XP.
-#define EVNTAPI __stdcall
-// Include the event register/write/unregister macros compiled from the manifest
-// file. Note that this includes evntprov.h which requires a Vista+ Windows SDK.
-//
-// In SHARED_INTERMEDIATE_DIR.
-
-// Headers generated by mc.exe have a ';' at the end of extern "C" {} blocks.
-#if defined(__clang__)
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wextra-semi"
-#endif
-
-#include "base/trace_event/etw_manifest/chrome_events_win.h" // NOLINT
-
-#if defined(__clang__)
-#pragma clang diagnostic pop
-#endif
+#include "base/trace_event/trace_logging_minimal_win.h"
namespace {
@@ -51,28 +30,62 @@ namespace {
// one bit per category. We can therefore enable a particular category by
// setting its corresponding bit in the keyword. For events that are not present
// in |kFilteredEventGroupNames|, we have two bits that control their
-// behaviour. When bit 61 is enabled, any event that is not disabled by default
+// behaviour. When bit 46 is enabled, any event that is not disabled by default
// (ie. doesn't start with disabled-by-default-) will be exported. Likewise,
-// when bit 62 is enabled, any event that is disabled by default will be
+// when bit 47 is enabled, any event that is disabled by default will be
// exported.
//
-// Note that bit 63 (MSB) must always be set, otherwise tracing will be disabled
-// by ETW. Therefore, the keyword will always be greater than
-// 0x8000000000000000.
-//
// Examples of passing keywords to the provider using xperf:
// # This exports "benchmark" and "cc" events
-// xperf -start chrome -on Chrome:0x8000000000000009
+// xperf -start chrome -on Chrome:0x9
//
// # This exports "gpu", "netlog" and all other events that are not disabled by
// # default
-// xperf -start chrome -on Chrome:0xA0000000000000A0
+// xperf -start chrome -on Chrome:0x4000000000A0
//
// More info about starting a trace and keyword can be obtained by using the
// help section of xperf (xperf -help start). Note that xperf documentation
// refers to keywords as flags and there are two ways to enable them, using
// group names or the hex representation. We only support the latter. Also, we
// ignore the level.
+//
+// To avoid continually having to bump MSEdge values to next higher bits, we
+// are putting MSEdge values at the high end of the bit range and will grow
+// 'down' to lower bits for future MSEdge entries.
+//
+// As the writing of this comment, we have 4 values:
+// "navigation", // 0x40000000000
+// "ServiceWorker", // 0x80000000000
+// "edge_webview", // 0x100000000000
+// "diagnostic_event", // 0x200000000000
+//
+// This means the next value added should be:
+// "the_next_value", // 0x20000000000
+// "navigation", // 0x40000000000
+// "ServiceWorker", // 0x80000000000
+// "edge_webview", // 0x100000000000
+// "diagnostic_event", // 0x200000000000
+//
+// The addition of the "unused_bit_nn" entries keeps the existing code execution
+// routines working (ex. TraceEventETWExport::UpdateEnabledCategories()) and
+// enables others to see which bits are available.
+//
+// Example: For some new category group...
+// "latency", // 0x8000
+// "blink.user_timing", // 0x10000
+// "unused_bit_18", // 0x20000
+// "unused_bit_19", // 0x40000
+// "unused_bit_20", // 0x80000
+// ...
+// becomes:
+// "latency", // 0x8000
+// "blink.user_timing", // 0x10000
+// "new_upstream_value", // 0x20000
+// "unused_bit_19", // 0x40000
+// "unused_bit_20", // 0x80000
+//
+// The high 16 bits of the keyword have special semantics and should not be
+// set for enabling individual categories as they are reserved by winmeta.xml.
const char* const kFilteredEventGroupNames[] = {
"benchmark", // 0x1
"blink", // 0x2
@@ -93,13 +106,44 @@ const char* const kFilteredEventGroupNames[] = {
"blink.user_timing", // 0x10000
"media", // 0x20000
"loading", // 0x40000
+ "unused_bit_19", // 0x80000
+ "unused_bit_20", // 0x100000
+ "unused_bit_21", // 0x200000
+ "unused_bit_22", // 0x400000
+ "unused_bit_23", // 0x800000
+ "unused_bit_24", // 0x1000000
+ "unused_bit_25", // 0x2000000
+ "unused_bit_26", // 0x4000000
+ "unused_bit_27", // 0x8000000
+ "unused_bit_28", // 0x10000000
+ "unused_bit_29", // 0x20000000
+ "unused_bit_30", // 0x40000000
+ "unused_bit_31", // 0x80000000
+ "unused_bit_32", // 0x100000000
+ "unused_bit_33", // 0x200000000
+ "unused_bit_34", // 0x400000000
+ "unused_bit_35", // 0x800000000
+ "unused_bit_36", // 0x1000000000
+ "unused_bit_37", // 0x2000000000
+ "unused_bit_38", // 0x4000000000
+ "unused_bit_39", // 0x8000000000
+ "unused_bit_40", // 0x10000000000
+ "unused_bit_41", // 0x20000000000
+ "navigation", // 0x40000000000
+ "ServiceWorker", // 0x80000000000
+ "edge_webview", // 0x100000000000
+ "diagnostic_event", // 0x200000000000
+ "__OTHER_EVENTS", // 0x400000000000 See below
+ "__DISABLED_OTHER_EVENTS", // 0x800000000000 See below
};
-const char kOtherEventsGroupName[] = "__OTHER_EVENTS"; // 0x2000000000000000
-const char kDisabledOtherEventsGroupName[] =
- "__DISABLED_OTHER_EVENTS"; // 0x4000000000000000
-const uint64_t kOtherEventsKeywordBit = 1ULL << 61;
-const uint64_t kDisabledOtherEventsKeywordBit = 1ULL << 62;
-const size_t kNumberOfCategories = ARRAYSIZE(kFilteredEventGroupNames) + 2U;
+
+// These must be kept as the last two entries in the above array.
+constexpr uint8_t kOtherEventsGroupNameIndex = 46;
+constexpr uint8_t kDisabledOtherEventsGroupNameIndex = 47;
+
+// Max number of available keyword bits.
+constexpr size_t kMaxNumberOfGroupNames = 48;
+uint64_t g_callback_match_any_keyword = 0;
static void __stdcall EtwEnableCallback(LPCGUID SourceId,
ULONG ControlCode,
@@ -108,10 +152,19 @@ static void __stdcall EtwEnableCallback(LPCGUID SourceId,
ULONGLONG MatchAllKeyword,
PEVENT_FILTER_DESCRIPTOR FilterData,
PVOID CallbackContext) {
- // Invoke the default callback, which updates the information inside
- // CHROME_Context.
- McGenControlCallbackV2(SourceId, ControlCode, Level, MatchAnyKeyword,
- MatchAllKeyword, FilterData, CallbackContext);
+ // This callback is called in the context of an ETW OS thread to
+ // inform the process of the global state of the level and keyword
+ // across all sessions for this provider. We need to update the
+ // local keywords so we log the corresponding events. Protect the
+ // upper 16 bits reserved by winmeta.xml as they should not be used
+ // but older logging code and tools incorrectly used them.
+ g_callback_match_any_keyword = MatchAnyKeyword;
+ g_callback_match_any_keyword &= ~0xFFFF000000000000;
+
+ DVLOG(1) << "ETW Keyword"
+ << " Bits enabled in global context: " << std::hex << MatchAnyKeyword
+ << " Bits enabled in our code: " << std::hex
+ << g_callback_match_any_keyword;
base::trace_event::TraceEventETWExport::OnETWEnableUpdate();
}
@@ -123,31 +176,41 @@ namespace trace_event {
bool TraceEventETWExport::is_registration_complete_ = false;
-TraceEventETWExport::TraceEventETWExport() : etw_match_any_keyword_(0ULL) {
- // Register the ETW provider. If registration fails then the event logging
- // calls will fail. We're essentially doing the same operation as
- // EventRegisterChrome (which was auto generated for our provider by the
- // ETW manifest compiler), but instead we're passing our own callback.
+TraceEventETWExport::TraceEventETWExport() {
+ // Construct the ETW provider. If construction fails then the event logging
+ // calls will fail. We're passing a callback function as part of registration.
// This allows us to detect changes to enable/disable/keyword changes.
- // ChromeHandle and the other parameters to EventRegister are all generated
- // globals from chrome_events_win.h
- DCHECK(!ChromeHandle);
- EventRegister(&CHROME, &EtwEnableCallback, &CHROME_Context, &ChromeHandle);
+
+ // This GUID is used to identify the Chrome provider and is used whenever
+ // ETW is enabled via tracing tools and cannot change without updating tools
+ // that collect Chrome ETW data.
+ static const GUID Chrome_GUID = {
+ 0xD2D578D9,
+ 0x2936,
+ 0x45B6,
+ {0xA0, 0x9F, 0x30, 0xE3, 0x27, 0x15, 0xF4, 0x2D}};
+
+ etw_provider_ = std::make_unique<TlmProvider>("Google.Chrome", Chrome_GUID,
+ &EtwEnableCallback);
TraceEventETWExport::is_registration_complete_ = true;
// Make sure to initialize the map with all the group names. Subsequent
// modifications will be made by the background thread and only affect the
// values of the keys (no key addition/deletion). Therefore, the map does not
// require a lock for access.
- for (size_t i = 0; i < ARRAYSIZE(kFilteredEventGroupNames); i++)
+ // Also set up the map from category name to keyword.
+ for (size_t i = 0; i < ARRAYSIZE(kFilteredEventGroupNames); i++) {
+ uint64_t keyword = 1ULL << i;
categories_status_[kFilteredEventGroupNames[i]] = false;
- categories_status_[kOtherEventsGroupName] = false;
- categories_status_[kDisabledOtherEventsGroupName] = false;
- DCHECK_EQ(kNumberOfCategories, categories_status_.size());
+ categories_keyword_[kFilteredEventGroupNames[i]] = keyword;
+ }
+ // Make sure we stay at 48 entries, the maximum number of bits available
+ // for keyword use.
+ static_assert(ARRAYSIZE(kFilteredEventGroupNames) <= kMaxNumberOfGroupNames,
+ "Exceeded max ETW keyword bits");
}
TraceEventETWExport::~TraceEventETWExport() {
- EventUnregisterChrome();
is_registration_complete_ = false;
}
@@ -163,6 +226,52 @@ void TraceEventETWExport::EnableETWExport() {
}
// static
+uint64_t TraceEventETWExport::CategoryGroupToKeyword(
+ const uint8_t* category_state) {
+ uint64_t keyword = 0;
+
+ // To enable multiple sessions with this provider enabled we need to log the
+ // level and keyword with the event so that if the sessions differ in the
+ // level or keywords enabled we log the right events and allow ETW to
+ // route the data to the appropriate session.
+ // TODO(joel@microsoft.com) Explore better methods in future integration
+ // with perfetto.
+
+ auto* instance = GetInstance();
+ if (!instance)
+ return keyword;
+
+ // Add in the keyword for the special bits if they are set.
+ if (instance->categories_status_
+ [kFilteredEventGroupNames[kOtherEventsGroupNameIndex]]) {
+ keyword |= instance->categories_keyword_
+ [kFilteredEventGroupNames[kOtherEventsGroupNameIndex]];
+ }
+ if (instance->categories_status_
+ [kFilteredEventGroupNames[kDisabledOtherEventsGroupNameIndex]]) {
+ keyword |=
+ instance->categories_keyword_
+ [kFilteredEventGroupNames[kDisabledOtherEventsGroupNameIndex]];
+ }
+ // Add in the keyword for the categories specified at the logging site.
+ const TraceCategory* category = TraceCategory::FromStatePtr(category_state);
+ StringPiece category_group_name = category->name();
+
+ CStringTokenizer category_group_tokens(category_group_name.begin(),
+ category_group_name.end(), ",");
+ while (category_group_tokens.GetNext()) {
+ StringPiece category_group_token = category_group_tokens.token_piece();
+
+    // Lookup the keyword for this part of the category_group_name
+    // and OR it into the accumulated keyword.
+ auto it = instance->categories_keyword_.find(category_group_token);
+ if (it != instance->categories_keyword_.end())
+ keyword |= it->second;
+ }
+ return keyword;
+}
+
+// static
void TraceEventETWExport::AddEvent(char phase,
const unsigned char* category_group_enabled,
const char* name,
@@ -170,10 +279,14 @@ void TraceEventETWExport::AddEvent(char phase,
const TraceArguments* args) {
// We bail early in case exporting is disabled or no consumer is listening.
auto* instance = GetInstance();
- if (!instance || !EventEnabledChromeEvent())
+ uint64_t keyword = CategoryGroupToKeyword(category_group_enabled);
+ if (!instance ||
+ !instance->etw_provider_->IsEnabled(TRACE_LEVEL_NONE, keyword)) {
return;
+ }
const char* phase_string = nullptr;
+
// Space to store the phase identifier and null-terminator, when needed.
char phase_buffer[2];
switch (phase) {
@@ -257,30 +370,54 @@ void TraceEventETWExport::AddEvent(char phase,
}
}
- EventWriteChromeEvent(
- name, phase_string, num_args > 0 ? args->names()[0] : "",
- arg_values_string[0].c_str(), num_args > 1 ? args->names()[1] : "",
- arg_values_string[1].c_str(), "", "");
+ // Log the event and include the info needed to decode it via TraceLogging
+ if (num_args == 0) {
+ instance->etw_provider_->WriteEvent(
+ name, TlmEventDescriptor(0, keyword),
+ TlmMbcsStringField("Phase", phase_string));
+ } else if (num_args == 1) {
+ instance->etw_provider_->WriteEvent(
+ name, TlmEventDescriptor(0, keyword),
+ TlmMbcsStringField("Phase", phase_string),
+ TlmMbcsStringField((args->names()[0]), (arg_values_string[0].c_str())));
+ } else if (num_args == 2) {
+ instance->etw_provider_->WriteEvent(
+ name, TlmEventDescriptor(0, keyword),
+ TlmMbcsStringField("Phase", phase_string),
+ TlmMbcsStringField((args->names()[0]), (arg_values_string[0].c_str())),
+ TlmMbcsStringField((args->names()[1]), (arg_values_string[1].c_str())));
+ } else {
+ NOTREACHED();
+ }
}
// static
-void TraceEventETWExport::AddCompleteEndEvent(const char* name) {
+void TraceEventETWExport::AddCompleteEndEvent(
+ const unsigned char* category_group_enabled,
+ const char* name) {
auto* instance = GetInstance();
- if (!instance || !EventEnabledChromeEvent())
+ uint64_t keyword = CategoryGroupToKeyword(category_group_enabled);
+ if (!instance ||
+ !instance->etw_provider_->IsEnabled(TRACE_LEVEL_NONE, keyword)) {
return;
+ }
- EventWriteChromeEvent(name, "Complete End", "", "", "", "", "", "");
+ // Log the event and include the info needed to decode it via TraceLogging
+ instance->etw_provider_->WriteEvent(
+ name, TlmEventDescriptor(0, keyword),
+ TlmMbcsStringField("Phase", "Complete End"));
}
// static
bool TraceEventETWExport::IsCategoryGroupEnabled(
StringPiece category_group_name) {
DCHECK(!category_group_name.empty());
+
auto* instance = GetInstanceIfExists();
if (instance == nullptr)
return false;
- if (!EventEnabledChromeEvent())
+ if (!instance->etw_provider_->IsEnabled())
return false;
CStringTokenizer category_group_tokens(category_group_name.begin(),
@@ -295,14 +432,15 @@ bool TraceEventETWExport::IsCategoryGroupEnabled(
}
bool TraceEventETWExport::UpdateEnabledCategories() {
- if (etw_match_any_keyword_ == CHROME_Context.MatchAnyKeyword)
+ if (etw_match_any_keyword_ == g_callback_match_any_keyword)
return false;
- // If the keyword has changed, update each category.
- // Chrome_Context.MatchAnyKeyword is set by UIforETW (or other ETW trace
- // recording tools) using the ETW infrastructure. This value will be set in
- // all Chrome processes that have registered their ETW provider.
- etw_match_any_keyword_ = CHROME_Context.MatchAnyKeyword;
+ // If the global keyword has changed, update each category. The global
+ // context is set by UIforETW (or other ETW trace recording tools)
+ // using the ETW infrastructure. When the global context changes the
+ // callback will be called to set the updated keyword bits in each
+ // browser process that has registered their ETW provider.
+ etw_match_any_keyword_ = g_callback_match_any_keyword;
for (size_t i = 0; i < ARRAYSIZE(kFilteredEventGroupNames); i++) {
if (etw_match_any_keyword_ & (1ULL << i)) {
categories_status_[kFilteredEventGroupNames[i]] = true;
@@ -311,20 +449,6 @@ bool TraceEventETWExport::UpdateEnabledCategories() {
}
}
- // Also update the two default categories.
- if (etw_match_any_keyword_ & kOtherEventsKeywordBit) {
- categories_status_[kOtherEventsGroupName] = true;
- } else {
- categories_status_[kOtherEventsGroupName] = false;
- }
- if (etw_match_any_keyword_ & kDisabledOtherEventsKeywordBit) {
- categories_status_[kDisabledOtherEventsGroupName] = true;
- } else {
- categories_status_[kDisabledOtherEventsGroupName] = false;
- }
-
- DCHECK_EQ(kNumberOfCategories, categories_status_.size());
-
// Update the categories in TraceLog.
TraceLog::GetInstance()->UpdateETWCategoryGroupEnabledFlags();
@@ -332,7 +456,6 @@ bool TraceEventETWExport::UpdateEnabledCategories() {
}
bool TraceEventETWExport::IsCategoryEnabled(StringPiece category_name) const {
- DCHECK_EQ(kNumberOfCategories, categories_status_.size());
// Try to find the category and return its status if found
auto it = categories_status_.find(category_name);
if (it != categories_status_.end())
@@ -341,13 +464,19 @@ bool TraceEventETWExport::IsCategoryEnabled(StringPiece category_name) const {
// Otherwise return the corresponding default status by first checking if the
// category is disabled by default.
if (category_name.starts_with("disabled-by-default")) {
- DCHECK(categories_status_.find(kDisabledOtherEventsGroupName) !=
+ DCHECK(categories_status_.find(
+ kFilteredEventGroupNames[kDisabledOtherEventsGroupNameIndex]) !=
categories_status_.end());
- return categories_status_.find(kDisabledOtherEventsGroupName)->second;
+ return categories_status_
+ .find(kFilteredEventGroupNames[kDisabledOtherEventsGroupNameIndex])
+ ->second;
} else {
- DCHECK(categories_status_.find(kOtherEventsGroupName) !=
+ DCHECK(categories_status_.find(
+ kFilteredEventGroupNames[kOtherEventsGroupNameIndex]) !=
categories_status_.end());
- return categories_status_.find(kOtherEventsGroupName)->second;
+ return categories_status_
+ .find(kFilteredEventGroupNames[kOtherEventsGroupNameIndex])
+ ->second;
}
}
diff --git a/chromium/base/trace_event/trace_event_etw_export_win.h b/chromium/base/trace_event/trace_event_etw_export_win.h
index 0f853533d3a..8f5c8849860 100644
--- a/chromium/base/trace_event/trace_event_etw_export_win.h
+++ b/chromium/base/trace_event/trace_event_etw_export_win.h
@@ -7,13 +7,16 @@
#define BASE_TRACE_EVENT_TRACE_EVENT_ETW_EXPORT_WIN_H_
#include <stdint.h>
+#include <windows.h>
#include <map>
+#include <memory>
#include "base/base_export.h"
#include "base/macros.h"
#include "base/strings/string_piece.h"
#include "base/trace_event/trace_event_impl.h"
+#include "base/trace_event/trace_logging_minimal_win.h"
namespace base {
@@ -49,7 +52,8 @@ class BASE_EXPORT TraceEventETWExport {
const TraceArguments* args);
// Exports an ETW event that marks the end of a complete event.
- static void AddCompleteEndEvent(const char* name);
+ static void AddCompleteEndEvent(const unsigned char* category_group_enabled,
+ const char* name);
// Returns true if any category in the group is enabled.
static bool IsCategoryGroupEnabled(StringPiece category_group_name);
@@ -61,23 +65,31 @@ class BASE_EXPORT TraceEventETWExport {
private:
// Ensure only the provider can construct us.
friend struct StaticMemorySingletonTraits<TraceEventETWExport>;
-
TraceEventETWExport();
// Updates the list of enabled categories by consulting the ETW keyword.
// Returns true if there was a change, false otherwise.
bool UpdateEnabledCategories();
+ static uint64_t CategoryGroupToKeyword(const uint8_t* category_state);
+
// Returns true if the category is enabled.
bool IsCategoryEnabled(StringPiece category_name) const;
static bool is_registration_complete_;
+ // The keywords that were enabled last time the callback was made.
+ uint64_t etw_match_any_keyword_ = 0;
+
+ // The provider is set based on channel for MSEdge, in other Chromium
+ // based browsers all channels use the same GUID/provider.
+ std::unique_ptr<TlmProvider> etw_provider_;
+
// Maps category names to their status (enabled/disabled).
std::map<StringPiece, bool> categories_status_;
- // Local copy of the ETW keyword.
- uint64_t etw_match_any_keyword_;
+ // Maps category names to their keyword.
+ std::map<StringPiece, uint64_t> categories_keyword_;
DISALLOW_COPY_AND_ASSIGN(TraceEventETWExport);
};
diff --git a/chromium/base/trace_event/trace_event_impl.cc b/chromium/base/trace_event/trace_event_impl.cc
index c74d71c5dd1..dd902bd85d3 100644
--- a/chromium/base/trace_event/trace_event_impl.cc
+++ b/chromium/base/trace_event/trace_event_impl.cc
@@ -6,6 +6,8 @@
#include <stddef.h>
+#include <sstream>
+
#include "base/format_macros.h"
#include "base/json/string_escape.h"
#include "base/memory/ptr_util.h"
diff --git a/chromium/base/trace_event/trace_event_stub.cc b/chromium/base/trace_event/trace_event_stub.cc
new file mode 100644
index 00000000000..2b49d9c3ac4
--- /dev/null
+++ b/chromium/base/trace_event/trace_event_stub.cc
@@ -0,0 +1,21 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+
+#include <string>
+
+#include "base/trace_event/trace_event_stub.h"
+
+namespace base {
+namespace trace_event {
+
+ConvertableToTraceFormat::~ConvertableToTraceFormat() = default;
+
+void TracedValue::AppendAsTraceFormat(std::string* out) const {}
+
+MemoryDumpProvider::~MemoryDumpProvider() = default;
+
+} // namespace trace_event
+} // namespace base
diff --git a/chromium/base/trace_event/trace_event_stub.h b/chromium/base/trace_event/trace_event_stub.h
new file mode 100644
index 00000000000..b10e9498e61
--- /dev/null
+++ b/chromium/base/trace_event/trace_event_stub.h
@@ -0,0 +1,176 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_EVENT_STUB_H_
+#define BASE_TRACE_EVENT_TRACE_EVENT_STUB_H_
+
+#include <stddef.h>
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/strings/string_piece.h"
+#include "base/trace_event/common/trace_event_common.h"
+#include "base/trace_event/memory_allocator_dump_guid.h"
+#include "base/values.h"
+
+#define TRACE_STR_COPY(str) str
+#define TRACE_ID_WITH_SCOPE(scope, ...) 0
+#define TRACE_ID_GLOBAL(id) 0
+#define TRACE_ID_LOCAL(id) 0
+
+namespace trace_event_internal {
+
+const unsigned long long kNoId = 0;
+
+template <typename... Args>
+void Ignore(Args&&... args) {}
+
+struct IgnoredValue {
+ template <typename... Args>
+ IgnoredValue(Args&&... args) {}
+};
+
+} // namespace trace_event_internal
+
+#define INTERNAL_TRACE_IGNORE(...) \
+ (false ? trace_event_internal::Ignore(__VA_ARGS__) : (void)0)
+
+#define INTERNAL_TRACE_EVENT_ADD(...) INTERNAL_TRACE_IGNORE(__VA_ARGS__)
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED(...) INTERNAL_TRACE_IGNORE(__VA_ARGS__)
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID(...) INTERNAL_TRACE_IGNORE(__VA_ARGS__)
+#define INTERNAL_TRACE_TASK_EXECUTION(...) INTERNAL_TRACE_IGNORE(__VA_ARGS__)
+#define INTERNAL_TRACE_LOG_MESSAGE(...) INTERNAL_TRACE_IGNORE(__VA_ARGS__)
+#define INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(...) \
+ INTERNAL_TRACE_IGNORE(__VA_ARGS__)
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP(...) \
+ INTERNAL_TRACE_IGNORE(__VA_ARGS__)
+#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMPS(...) \
+ INTERNAL_TRACE_IGNORE(__VA_ARGS__)
+
+#define TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION \
+ trace_event_internal::IgnoredValue
+
+#define TRACE_ID_MANGLE(val) (val)
+
+#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(cat) INTERNAL_TRACE_IGNORE(cat);
+#define INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE() false
+
+#define TRACE_EVENT_API_CURRENT_THREAD_ID 0
+
+// Typed macros. For these, we have to erase the extra args entirely, as they
+// may include a lambda that refers to protozero message types (which aren't
+// available in the stub). This may trigger "unused variable" errors at the
+// callsite, which have to be addressed at the callsite (e.g. via
+// ignore_result()).
+#define TRACE_EVENT_BEGIN(category, name, ...) \
+ INTERNAL_TRACE_IGNORE(category, name)
+#define TRACE_EVENT_END(category, ...) INTERNAL_TRACE_IGNORE(category)
+#define TRACE_EVENT(category, name, ...) INTERNAL_TRACE_IGNORE(category, name)
+#define TRACE_EVENT_INSTANT(category, name, scope, ...) \
+ INTERNAL_TRACE_IGNORE(category, name, scope)
+
+namespace base {
+namespace trace_event {
+
+class BASE_EXPORT ConvertableToTraceFormat {
+ public:
+ ConvertableToTraceFormat() = default;
+ virtual ~ConvertableToTraceFormat();
+
+ // Append the class info to the provided |out| string. The appended
+ // data must be a valid JSON object. Strings must be properly quoted, and
+ // escaped. There is no processing applied to the content after it is
+ // appended.
+ virtual void AppendAsTraceFormat(std::string* out) const = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ConvertableToTraceFormat);
+};
+
+class BASE_EXPORT TracedValue : public ConvertableToTraceFormat {
+ public:
+ explicit TracedValue(size_t capacity = 0) {}
+
+ void EndDictionary() {}
+ void EndArray() {}
+
+ void SetInteger(const char* name, int value) {}
+ void SetDouble(const char* name, double value) {}
+ void SetBoolean(const char* name, bool value) {}
+ void SetString(const char* name, base::StringPiece value) {}
+ void SetValue(const char* name, TracedValue* value) {}
+ void BeginDictionary(const char* name) {}
+ void BeginArray(const char* name) {}
+
+ void SetIntegerWithCopiedName(base::StringPiece name, int value) {}
+ void SetDoubleWithCopiedName(base::StringPiece name, double value) {}
+ void SetBooleanWithCopiedName(base::StringPiece name, bool value) {}
+ void SetStringWithCopiedName(base::StringPiece name,
+ base::StringPiece value) {}
+ void SetValueWithCopiedName(base::StringPiece name, TracedValue* value) {}
+ void BeginDictionaryWithCopiedName(base::StringPiece name) {}
+ void BeginArrayWithCopiedName(base::StringPiece name) {}
+
+ void AppendInteger(int) {}
+ void AppendDouble(double) {}
+ void AppendBoolean(bool) {}
+ void AppendString(base::StringPiece) {}
+ void BeginArray() {}
+ void BeginDictionary() {}
+
+ void AppendAsTraceFormat(std::string* out) const override;
+};
+
+class BASE_EXPORT TracedValueJSON : public TracedValue {
+ public:
+ explicit TracedValueJSON(size_t capacity = 0) : TracedValue(capacity) {}
+
+ std::unique_ptr<base::Value> ToBaseValue() const { return nullptr; }
+ std::string ToJSON() const { return ""; }
+ std::string ToFormattedJSON() const { return ""; }
+};
+
+class BASE_EXPORT BlameContext {
+ public:
+ BlameContext(const char* category,
+ const char* name,
+ const char* type,
+ const char* scope,
+ int64_t id,
+ const BlameContext* parent_context) {}
+
+ void Initialize() {}
+ void Enter() {}
+ void Leave() {}
+ void TakeSnapshot() {}
+
+ const char* category() const { return nullptr; }
+ const char* name() const { return nullptr; }
+ const char* type() const { return nullptr; }
+ const char* scope() const { return nullptr; }
+ int64_t id() const { return 0; }
+};
+
+struct MemoryDumpArgs;
+class ProcessMemoryDump;
+
+class BASE_EXPORT MemoryDumpProvider {
+ public:
+ virtual ~MemoryDumpProvider();
+
+ virtual bool OnMemoryDump(const MemoryDumpArgs& args,
+ ProcessMemoryDump* pmd) = 0;
+
+ protected:
+ MemoryDumpProvider() = default;
+
+ DISALLOW_COPY_AND_ASSIGN(MemoryDumpProvider);
+};
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_TRACE_EVENT_STUB_H_
diff --git a/chromium/base/trace_event/trace_event_unittest.cc b/chromium/base/trace_event/trace_event_unittest.cc
index 7cdfe504b8c..70f1e1fa260 100644
--- a/chromium/base/trace_event/trace_event_unittest.cc
+++ b/chromium/base/trace_event/trace_event_unittest.cc
@@ -17,6 +17,7 @@
#include "base/json/json_reader.h"
#include "base/json/json_writer.h"
#include "base/location.h"
+#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "base/memory/ref_counted_memory.h"
@@ -2317,7 +2318,7 @@ bool IsArgNameWhitelisted(const char* arg_name) {
return base::MatchPattern(arg_name, "granular_arg_whitelisted");
}
-bool IsTraceEventArgsWhitelisted(const char* category_group_name,
+bool IsTraceEventArgsAllowlisted(const char* category_group_name,
const char* event_name,
ArgumentNameFilterPredicate* arg_filter) {
if (base::MatchPattern(category_group_name, "toplevel") &&
@@ -2338,7 +2339,7 @@ bool IsTraceEventArgsWhitelisted(const char* category_group_name,
TEST_F(TraceEventTestFixture, ArgsWhitelisting) {
TraceLog::GetInstance()->SetArgumentFilterPredicate(
- base::BindRepeating(&IsTraceEventArgsWhitelisted));
+ base::BindRepeating(&IsTraceEventArgsAllowlisted));
TraceLog::GetInstance()->SetEnabled(
TraceConfig(kRecordAllCategoryFilter, "enable-argument-filter"),
diff --git a/chromium/base/trace_event/trace_log.cc b/chromium/base/trace_event/trace_log.cc
index c0d85ea90f1..ac2bc911840 100644
--- a/chromium/base/trace_event/trace_log.cc
+++ b/chromium/base/trace_event/trace_log.cc
@@ -16,6 +16,7 @@
#include "base/command_line.h"
#include "base/debug/leak_annotations.h"
#include "base/location.h"
+#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "base/memory/ref_counted_memory.h"
@@ -172,7 +173,7 @@ void ForEachCategoryFilter(const unsigned char* category_group_enabled,
}
// The fallback arguments filtering function will filter away every argument.
-bool DefaultIsTraceEventArgsWhitelisted(
+bool DefaultIsTraceEventArgsAllowlisted(
const char* category_group_name,
const char* event_name,
base::trace_event::ArgumentNameFilterPredicate* arg_name_filter) {
@@ -993,7 +994,7 @@ void TraceLog::FinishFlush(int generation, bool discard_events) {
// use the safe default filtering predicate.
if (argument_filter_predicate_.is_null()) {
argument_filter_predicate =
- base::BindRepeating(&DefaultIsTraceEventArgsWhitelisted);
+ base::BindRepeating(&DefaultIsTraceEventArgsAllowlisted);
} else {
argument_filter_predicate = argument_filter_predicate_;
}
@@ -1488,7 +1489,7 @@ void TraceLog::UpdateTraceEventDurationExplicit(
#if defined(OS_WIN)
// Generate an ETW event that marks the end of a complete event.
if (category_group_enabled_local & TraceCategory::ENABLED_FOR_ETW_EXPORT)
- TraceEventETWExport::AddCompleteEndEvent(name);
+ TraceEventETWExport::AddCompleteEndEvent(category_group_enabled, name);
#endif // OS_WIN
if (category_group_enabled_local & TraceCategory::ENABLED_FOR_RECORDING) {
diff --git a/chromium/base/trace_event/trace_logging_minimal_win.cc b/chromium/base/trace_event/trace_logging_minimal_win.cc
new file mode 100644
index 00000000000..6a346a0e281
--- /dev/null
+++ b/chromium/base/trace_event/trace_logging_minimal_win.cc
@@ -0,0 +1,351 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/trace_logging_minimal_win.h"
+
+#include <evntrace.h>
+
+#include "base/check_op.h"
+
+/*
+EventSetInformation configuration macros:
+
+TraceLogging works best if the EventSetInformation API can be used to notify
+ETW that the provider uses TraceLogging event encoding.
+
+The EventSetInformation API is available on Windows 8 and later. (It is also
+available on fully-patched Windows 7, but not on Windows 7 RTM).
+
+The TLM_HAVE_EVENT_SET_INFORMATION and TLM_EVENT_SET_INFORMATION macros can
+be set before compiling this file to control how the TlmProvider class deals
+with the EventSetInformation API.
+
+If these macros are not set, the default behavior is to check the WINVER
+macro at compile time:
+
+- If WINVER is set to Windows 7 or before, TlmProvider will use GetProcAddress
+ to locate EventSetInformation, and then invoke it if present. This is less
+ efficient, but works on older versions of Windows.
+- If WINVER is set to Windows 8 or later, TlmProvider will directly invoke
+ EventSetInformation. This is more efficient, but the resulting application
+ will only work correctly on newer versions of Windows.
+
+If you need to run on Windows 7 RTM, but for some reason need to set WINVER to
+Windows 8 or higher, you can override the default behavior by defining
+TLM_HAVE_EVENT_SET_INFORMATION=2 when compiling this file.
+
+Details:
+- The TLM_EVENT_SET_INFORMATION macro can be set to the name of a replacement
+ function that TlmProvider should use instead of EventSetInformation.
+- The TLM_HAVE_EVENT_SET_INFORMATION macro can be set to 0 (disable the use of
+ EventSetInformation), 1 (directly invoke EventSetInformation), or 2 (try to
+ locate EventSetInformation via GetProcAddress, and invoke if found).
+*/
+
+// This code needs to run on Windows 7 and this is magic which
+// removes static linking to EventSetInformation
+#define TLM_HAVE_EVENT_SET_INFORMATION 2
+
+#ifndef TLM_EVENT_SET_INFORMATION
+#define TLM_EVENT_SET_INFORMATION EventSetInformation
+#ifndef TLM_HAVE_EVENT_SET_INFORMATION
+#if WINVER < 0x0602 || !defined(EVENT_FILTER_TYPE_SCHEMATIZED)
+// Find "EventSetInformation" via GetModuleHandleExW+GetProcAddress
+#define TLM_HAVE_EVENT_SET_INFORMATION 2
+#else
+// Directly invoke TLM_EVENT_SET_INFORMATION(...)
+#define TLM_HAVE_EVENT_SET_INFORMATION 1
+#endif
+#endif
+#elif !defined(TLM_HAVE_EVENT_SET_INFORMATION)
+// Directly invoke TLM_EVENT_SET_INFORMATION(...)
+#define TLM_HAVE_EVENT_SET_INFORMATION 1
+#endif
+
+TlmProvider::~TlmProvider() {
+ Unregister();
+}
+
+TlmProvider::TlmProvider(const char* provider_name,
+ const GUID& provider_guid,
+ PENABLECALLBACK enable_callback,
+ void* enable_callback_context) noexcept {
+ int32_t status = Register(provider_name, provider_guid, enable_callback,
+ enable_callback_context);
+ DCHECK_EQ(status, ERROR_SUCCESS);
+}
+
+// Appends a nul-terminated string to a metadata block.
+// Returns the new metadata_index value, or uint16_t(-1) (0xFFFF) on overflow.
+uint16_t TlmProvider::AppendNameToMetadata(char* metadata,
+ uint16_t metadata_size,
+ uint16_t metadata_index,
+ const char* name) const noexcept {
+ uint16_t index = metadata_index;
+ DCHECK_LE(index, metadata_size);
+
+ const size_t cch = strlen(name) + 1;
+ if (cch > static_cast<unsigned>(metadata_size - index)) {
+ index = -1;
+ } else {
+ memcpy(metadata + index, name, cch);
+ index += static_cast<uint16_t>(cch);
+ }
+
+ return index;
+}
+
+void TlmProvider::Unregister() noexcept {
+ if (reg_handle_ == 0)
+ return;
+
+ int32_t status = EventUnregister(reg_handle_);
+ DCHECK_EQ(status, ERROR_SUCCESS);
+ reg_handle_ = 0;
+ level_plus1_ = 0;
+}
+
+int32_t TlmProvider::Register(const char* provider_name,
+ const GUID& provider_guid,
+ PENABLECALLBACK enable_callback,
+ void* enable_callback_context) noexcept {
+ // Calling Register when already registered is a fatal error.
+ CHECK_EQ(reg_handle_, 0ULL);
+
+ // provider_metadata_ for tracelogging has the following format:
+ // UINT16 metadata_size;
+ // char NullTerminatedUtf8ProviderName[];
+ // ( + optional extension data, not used here)
+
+ // Append the provider name starting at offset 2 (skip MetadataSize).
+ provider_metadata_size_ = AppendNameToMetadata(
+ provider_metadata_, kMaxProviderMetadataSize, 2, provider_name);
+ if (provider_metadata_size_ > kMaxProviderMetadataSize) {
+ DCHECK_GT(provider_metadata_size_, kMaxProviderMetadataSize);
+ return ERROR_BUFFER_OVERFLOW;
+ }
+
+ // Fill in MetadataSize field at offset 0.
+ *reinterpret_cast<uint16_t*>(provider_metadata_) = provider_metadata_size_;
+
+ enable_callback_ = enable_callback;
+ enable_callback_context_ = enable_callback_context;
+ int32_t status =
+ EventRegister(&provider_guid, StaticEnableCallback, this, &reg_handle_);
+ if (status != ERROR_SUCCESS)
+ return status;
+
+#if TLM_HAVE_EVENT_SET_INFORMATION == 1
+
+ // Best-effort, ignore failure.
+ status =
+ TLM_EVENT_SET_INFORMATION(reg_handle_, EventProviderSetTraits,
+ provider_metadata_, provider_metadata_size_);
+
+#elif TLM_HAVE_EVENT_SET_INFORMATION == 2
+
+ HMODULE eventing_lib;
+ if (GetModuleHandleExW(0, L"api-ms-win-eventing-provider-l1-1-0.dll",
+ &eventing_lib) ||
+ GetModuleHandleExW(0, L"advapi32.dll", &eventing_lib)) {
+ typedef ULONG(WINAPI * PFEventSetInformation)(
+ REGHANDLE reg_handle, EVENT_INFO_CLASS information_class,
+ PVOID event_information, ULONG information_length);
+ PFEventSetInformation event_set_information_ptr =
+ reinterpret_cast<decltype(&::EventSetInformation)>(
+ GetProcAddress(eventing_lib, "EventSetInformation"));
+ if (event_set_information_ptr) {
+ // Best-effort, ignore failure.
+ status = event_set_information_ptr(reg_handle_, EventProviderSetTraits,
+ provider_metadata_,
+ provider_metadata_size_);
+ DCHECK_EQ(status, ERROR_SUCCESS);
+ }
+
+ FreeLibrary(eventing_lib);
+ }
+
+#else // TLM_HAVE_EVENT_SET_INFORMATION == 0
+
+ // Make no attempt to invoke EventSetInformation.
+
+#endif // TLM_HAVE_EVENT_SET_INFORMATION
+
+ return status;
+}
+
+bool TlmProvider::IsEnabled() const noexcept {
+ return 0 < level_plus1_;
+}
+
+bool TlmProvider::IsEnabled(uint8_t level) const noexcept {
+ return level < level_plus1_;
+}
+
+bool TlmProvider::IsEnabled(uint8_t level, uint64_t keyword) const noexcept {
+ return level < level_plus1_ && KeywordEnabled(keyword);
+}
+
+bool TlmProvider::IsEnabled(
+ const EVENT_DESCRIPTOR& event_descriptor) const noexcept {
+ return event_descriptor.Level < level_plus1_ &&
+ KeywordEnabled(event_descriptor.Keyword);
+}
+
+void TlmProvider::StaticEnableCallback(const GUID* source_id,
+ ULONG is_enabled,
+ UCHAR level,
+ ULONGLONG match_any_keyword,
+ ULONGLONG match_all_keyword,
+ PEVENT_FILTER_DESCRIPTOR filter_data,
+ PVOID callback_context) {
+ if (!callback_context)
+ return;
+
+ TlmProvider* pProvider = static_cast<TlmProvider*>(callback_context);
+ switch (is_enabled) {
+ case EVENT_CONTROL_CODE_DISABLE_PROVIDER:
+ pProvider->level_plus1_ = 0;
+ break;
+ case EVENT_CONTROL_CODE_ENABLE_PROVIDER:
+ pProvider->level_plus1_ =
+ level != 0 ? static_cast<unsigned>(level) + 1u : 256u;
+ pProvider->keyword_any_ = match_any_keyword;
+ pProvider->keyword_all_ = match_all_keyword;
+ break;
+ }
+
+ if (pProvider->enable_callback_) {
+ pProvider->enable_callback_(source_id, is_enabled, level, match_any_keyword,
+ match_all_keyword, filter_data,
+ pProvider->enable_callback_context_);
+ }
+}
+
+uint16_t TlmProvider::EventBegin(char* metadata,
+ const char* event_name) const noexcept {
+ // EventMetadata for tracelogging has the following format
+ // UINT16 MetadataSize;
+ // BYTE SpecialFlags[]; // Not used, so always size 1.
+ // char NullTerminatedUtf8EventName[];
+ // ( + field definitions)
+
+ uint16_t index = 2; // Skip MetadataSize field.
+
+ metadata[index] = 0; // Set SpecialFlags[0] = 0.
+ index++; // sizeof(SpecialFlags) == 1.
+
+ index =
+ AppendNameToMetadata(metadata, kMaxEventMetadataSize, index, event_name);
+ return index;
+}
+
+char TlmProvider::EventAddField(char* metadata,
+ uint16_t* metadata_index,
+ uint8_t in_type,
+ uint8_t out_type,
+ const char* field_name) const noexcept {
+ DCHECK_LT(in_type, 0x80);
+ DCHECK_LT(out_type, 0x80);
+
+ // FieldDefinition =
+ // char NullTerminatedUtf8FieldName[];
+ // BYTE InType;
+ // BYTE OutType; // Only present if high bit set in InType.
+ // ( + optional extension data not used here)
+
+ if (*metadata_index >= kMaxEventMetadataSize)
+ return 0;
+
+ *metadata_index = AppendNameToMetadata(metadata, kMaxEventMetadataSize,
+ *metadata_index, field_name);
+ if (*metadata_index >= kMaxEventMetadataSize)
+ return 0;
+
+ if (out_type == 0) {
+ // 1-byte encoding: inType + TlgOutNULL.
+ if (1 > kMaxEventMetadataSize - *metadata_index) {
+ *metadata_index = -1;
+ return 0;
+ }
+
+ metadata[*metadata_index] = in_type;
+ *metadata_index += 1;
+ return 0;
+ }
+ // 2-byte encoding: in_type + out_type.
+ if (kMaxEventMetadataSize - *metadata_index < 2) {
+ *metadata_index = -1;
+ return 0;
+ }
+
+ // Set high bit to indicate presence of OutType.
+ metadata[*metadata_index] = in_type | 0x80;
+ *metadata_index += 1;
+ metadata[*metadata_index] = out_type;
+ *metadata_index += 1;
+ return 0;
+}
+
+int32_t TlmProvider::EventEnd(
+ char* metadata,
+ uint16_t meta_data_index,
+ EVENT_DATA_DESCRIPTOR* descriptors,
+ uint32_t descriptors_index,
+ const EVENT_DESCRIPTOR& event_descriptor) const noexcept {
+ if (meta_data_index > kMaxEventMetadataSize) {
+ return ERROR_BUFFER_OVERFLOW;
+ }
+
+ // Fill in EventMetadata's MetadataSize field.
+ *reinterpret_cast<uint16_t*>(metadata) = meta_data_index;
+
+ descriptors[0].Ptr = reinterpret_cast<ULONG_PTR>(provider_metadata_);
+ descriptors[0].Size = provider_metadata_size_;
+ descriptors[0].Reserved = EVENT_DATA_DESCRIPTOR_TYPE_PROVIDER_METADATA;
+
+ descriptors[1].Ptr = reinterpret_cast<ULONG_PTR>(metadata);
+ descriptors[1].Size = meta_data_index;
+ descriptors[1].Reserved = EVENT_DATA_DESCRIPTOR_TYPE_EVENT_METADATA;
+
+ return EventWrite(reg_handle_, &event_descriptor, descriptors_index,
+ descriptors);
+}
+
+bool TlmProvider::KeywordEnabled(uint64_t keyword) const noexcept {
+ return keyword == 0 ||
+ ((keyword & keyword_any_) && (keyword & keyword_all_) == keyword_all_);
+}
+
+TlmMbcsStringField::TlmMbcsStringField(const char* name,
+ const char* value) noexcept
+ : TlmFieldBase(name), value_(value) {
+ DCHECK_NE(Name(), nullptr);
+ DCHECK_NE(value_, nullptr);
+}
+
+const char* TlmMbcsStringField::Value() const noexcept {
+ return value_;
+}
+
+void TlmMbcsStringField::FillEventDescriptor(
+ EVENT_DATA_DESCRIPTOR* descriptors) const noexcept {
+ EventDataDescCreate(&descriptors[0], value_, strlen(value_) + 1);
+}
+
+TlmUtf8StringField::TlmUtf8StringField(const char* name,
+ const char* value) noexcept
+ : TlmFieldBase(name), value_(value) {
+ DCHECK_NE(Name(), nullptr);
+ DCHECK_NE(value_, nullptr);
+}
+
+const char* TlmUtf8StringField::Value() const noexcept {
+ return value_;
+}
+
+void TlmUtf8StringField::FillEventDescriptor(
+ EVENT_DATA_DESCRIPTOR* descriptors) const noexcept {
+ EventDataDescCreate(&descriptors[0], value_, strlen(value_) + 1);
+}
diff --git a/chromium/base/trace_event/trace_logging_minimal_win.h b/chromium/base/trace_event/trace_logging_minimal_win.h
new file mode 100644
index 00000000000..e7b640e9f21
--- /dev/null
+++ b/chromium/base/trace_event/trace_logging_minimal_win.h
@@ -0,0 +1,393 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TRACE_LOGGING_MINIMAL_WIN_H_
+#define BASE_TRACE_EVENT_TRACE_LOGGING_MINIMAL_WIN_H_
+
+/*
+ * TraceLogging minimal dynamic provider
+ *
+ * TlmProvider is a simple class that implements an Event Tracing for Windows
+ * (ETW) provider that generates TraceLogging events with string fields. Unlike
+ * the Windows SDK's TraceLoggingProvider.h, this provider class supports
+ * runtime-variable settings for event name, level, keyword, and field name.
+ *
+ * Note that this approach is not recommended for general use. Support for
+ * runtime-variable settings is not normally needed, and it requires extra
+ * buffering as compared to the approach used by TraceLoggingProvider.h. It is
+ * needed in this case because we're trying to feed data from the existing call
+ * sites (which use a runtime-variable function-call syntax) into ETW. If this
+ * were new code, it would be better to update each call site to use a syntax
+ * compatible with compile-time event settings compatible with structured
+ * logging like TraceLoggingProvider.h.
+ */
+
+#include <stdint.h>
+#include <windows.h>
+// Evntprov.h must come after windows.h.
+#include <evntprov.h>
+// TODO(joel@microsoft.com) Update headers and use defined constants instead
+// of magic numbers after crbug.com/1089996 is resolved.
+
+/*
+ * An instance of TlmProvider represents a logger through which data can be
+ * sent to Event Tracing for Windows (ETW). This logger generates
+ * TraceLogging-encoded events (compatible with the events generated by the
+ * Windows SDK's TraceLoggingProvider.h header). In most cases, a developer
+ * would prefer using TraceLoggingProvider.h over TlmProvider
+ * (TraceLoggingProvider.h is more efficient and more full-featured), but
+ * TlmProvider allows for configuring the event parameters (event name,
+ * level, keyword, field names) at runtime (TraceLoggingProvider.h requires
+ * these to be set at compile time).
+ *
+ * Note that the Register/Unregister operations are relatively expensive, so
+ * the TlmProvider instance should be a long-lived variable (i.e. global
+ * variable, static variable, or field of a long-lived object), not a local
+ * variable and not a field of a short-lived object.
+ *
+ * Note that provider name and provider GUID are a tightly-bound pair, i.e.
+ * they should each uniquely map to each other. Once a provider name and
+ * provider GUID have been used together, no other GUID should be used with
+ * that name and no other name should be used with that GUID. Normally this
+ * goal is achieved by using a hashing algorithm to generate the GUID from
+ * a hash of the name.
+ *
+ * Note that each event should use a non-zero level and a non-zero keyword.
+ * Predefined level constants are defined in <evntrace.h>: 0=Always,
+ * 1=Critical, 2=Error, 3=Warning, 4=Info, 5=Verbose (other level values can
+ * be used but are not well-defined and are not generally useful). A keyword
+ * is a bitmask of "category" bits, where each bit indicates whether or not
+ * the event belongs in a particular category of event. The low 48 bits are
+ * user-defined and the upper 16 bits are Microsoft-defined (in <winmeta.h>).
+ *
+ * General usage:
+ *
+ * // During component initialization (main or DllMain), call Register().
+ * // Note that there is an overload of the TlmProvider constructor that
+ * // calls Register(), but it's often convenient to do this manually
+ * // (i.e. to control the timing of the call to Register).
+ * my_provider.Register(
+ * "MyCompany.MyComponentName",
+ * MyComponentGuid);
+ *
+ * // To log an event with minimal code:
+ * my_provider.WriteEvent("MyEventName",
+ * TlmEventDescriptor(
+ * TRACE_LEVEL_VERBOSE, // Level defined in <evntrace.h>
+ * 0x20), // Keyword bits are user-defined.
+ * // Value must not be null for the string fields.
+ * TlmUtf8StringField("MyUtf8Field", GetValue1()),
+ * TlmMbcsStringField("MyAsciiField", GetValue2()));
+ *
+ * // Note that the minimal-code example has a bit of overhead, as it
+ * // will make the calls to GetValue1(), GetValue2(), and WriteEvent()
+ * // even if nobody is listening to the event. WriteEvent() will return
+ * // immediately if nobody is listening, but there is still some
+ * // overhead. To minimize the overhead when nobody is listening,
+ * // add an extra IF condition:
+ * static const auto MyEventDescriptor = TlmEventDescriptor(
+ * TRACE_LEVEL_VERBOSE, // Level defined in <evntrace.h>
+ * 0x20); // Keyword bits are user-defined.
+ * if (my_provider.IsEnabled(MyEventDescriptor))
+ * {
+ * // The IF condition is primarily to prevent unnecessary
+ * // calls to GetValue1() and GetValue2().
+ * my_provider.WriteEvent("MyEventName",
+ * MyEventDescriptor,
+ * // Value must not be null for the string fields.
+ * TlmUtf8StringField("MyUtf8Field", GetValue1()),
+ * TlmMbcsStringField("MyAsciiField", GetValue2()));
+ * }
+ *
+ * // During component shutdown (main or DllMain), call Unregister().
+ * // Note that the TlmProvider destructor will also call
+ * // Unregister(), but it's often convenient to do this manually
+ * // (i.e. to control the timing of the call to Unregister).
+ * my_provider.Unregister();
+ */
+
+class TlmProvider {
+ public:
+ // Initialize a provider in the unregistered state.
+ // Note that WriteEvent and Unregister operations on an unregistered
+ // provider are safe no-ops.
+ constexpr TlmProvider() noexcept = default;
+
+ // Initializes a provider and attempts to register it.
+ // If there is an error, provider will be left unregistered.
+ // Note that WriteEvent and Unregister operations on an unregistered
+ // provider are safe no-ops.
+ TlmProvider(const char* provider_name,
+ const GUID& provider_guid,
+ PENABLECALLBACK enable_callback = nullptr,
+ void* enable_callback_context = nullptr) noexcept;
+
+ // If provider is registered, unregisters provider.
+ ~TlmProvider();
+
+ // Disable copy operations.
+ TlmProvider(const TlmProvider&) = delete;
+ TlmProvider& operator=(const TlmProvider&) = delete;
+
+ // Unregisters this provider.
+ // Calling Unregister on an unregistered provider is a safe no-op.
+ // Not thread safe - caller must ensure serialization between calls to
+ // Register() and calls to Unregister().
+ void Unregister() noexcept;
+
+ // Registers this provider. Returns Win32 error code or 0 for success.
+ // Error code is primarily for debugging and should generally be ignored
+ // in production (failure to register means Unregister and WriteEvent are
+ // safe no-ops.)
+ // Calling Register on an already-registered provider is a fatal error.
+ // Not thread safe - caller must ensure serialization between calls to
+ // Register() and calls to Unregister().
+ int32_t Register(const char* provider_name,
+ const GUID& provider_guid,
+ PENABLECALLBACK enable_callback = nullptr,
+ void* enable_callback_context = nullptr) noexcept;
+
+ // Returns true if any active trace listeners are interested in any events
+ // from this provider.
+ // Equivalent to IsEnabled(0, 0).
+ bool IsEnabled() const noexcept;
+
+ // Returns true if any active trace listeners are interested in events
+ // from this provider with the specified level.
+ // Equivalent to IsEnabled(level, 0).
+ bool IsEnabled(uint8_t level) const noexcept;
+
+ // Returns true if any active trace listeners are interested in events
+ // from this provider with the specified level and keyword.
+ bool IsEnabled(uint8_t level, uint64_t keyword) const noexcept;
+
+ // Returns true if any active trace listeners are interested in events
+ // from this provider with the specified level and keyword.
+ // Equivalent to IsEnabled(event_descriptor.level, event_descriptor.keyword).
+ bool IsEnabled(const EVENT_DESCRIPTOR& event_descriptor) const noexcept;
+
+ // If any active trace listeners are interested in events from this provider
+ // with the specified level and keyword, packs the data into an event and
+ // sends it to ETW. Returns Win32 error code or 0 for success.
+ template <class... FieldTys>
+ int32_t WriteEvent(const char* event_name,
+ const EVENT_DESCRIPTOR& event_descriptor,
+ const FieldTys&... event_fields) const noexcept {
+ if (!IsEnabled(event_descriptor)) {
+ // If nobody is listening, report success.
+ return 0;
+ }
+ // Pack the event metadata.
+ char metadata[kMaxEventMetadataSize];
+ uint16_t metadata_index;
+ metadata_index = EventBegin(metadata, event_name);
+ { // scope for dummy array (simulates a C++17 comma-fold expression)
+ bool dummy[sizeof...(FieldTys) == 0 ? 1 : sizeof...(FieldTys)] = {
+ EventAddField(metadata, &metadata_index, event_fields.in_type_,
+ event_fields.out_type_, event_fields.Name())...};
+ DCHECK(dummy);
+ }
+
+ // Pack the event data.
+ constexpr uint8_t kDescriptorsCount =
+ 2 + DataDescCountSum<FieldTys...>::value;
+ EVENT_DATA_DESCRIPTOR descriptors[kDescriptorsCount];
+ uint8_t descriptors_index = 2;
+ { // scope for dummy array (simulates a C++17 comma-fold expression)
+ bool dummy[sizeof...(FieldTys) == 0 ? 1 : sizeof...(FieldTys)] = {
+ EventDescriptorFill(descriptors, &descriptors_index,
+ event_fields)...};
+ DCHECK(dummy);
+ }
+
+ // Finalize event and call EventWrite.
+ return EventEnd(metadata, metadata_index, descriptors, descriptors_index,
+ event_descriptor);
+ }
+
+ private:
+ // Size of the buffer used for provider metadata (field within the
+ // TlmProvider object). Provider metadata consists of the nul-terminated
+ // provider name plus a few sizes and flags, so this buffer needs to be
+ // just a few bytes larger than the largest expected provider name.
+ static constexpr uint16_t kMaxProviderMetadataSize = 128;
+
+ // Size of the buffer used for event metadata (stack-allocated in the
+ // WriteEvent method). Event metadata consists of nul-terminated event
+ // name, nul-terminated field names, field types (1 or 2 bytes per field),
+ // and a few bytes for sizes and flags.
+ static constexpr uint16_t kMaxEventMetadataSize = 256;
+
+ template <class... FieldTys>
+ struct DataDescCountSum; // undefined
+
+ template <>
+ struct DataDescCountSum<> {
+ static constexpr uint8_t value = 0;
+ };
+
+ template <class FieldTy1, class... FieldTyRest>
+ struct DataDescCountSum<FieldTy1, FieldTyRest...> {
+ static constexpr uint8_t value =
+ FieldTy1::data_desc_count_ + DataDescCountSum<FieldTyRest...>::value;
+ };
+
+ template <class FieldTy>
+ static char EventDescriptorFill(EVENT_DATA_DESCRIPTOR* descriptors,
+ uint8_t* pdescriptors_index,
+ const FieldTy& event_field) noexcept {
+ event_field.FillEventDescriptor(&descriptors[*pdescriptors_index]);
+ *pdescriptors_index += FieldTy::data_desc_count_;
+ return 0;
+ }
+
+ // This is called from the OS, so use the required call type.
+ static void __stdcall StaticEnableCallback(
+ const GUID* source_id,
+ ULONG is_enabled,
+ UCHAR level,
+ ULONGLONG match_any_keyword,
+ ULONGLONG match_all_keyword,
+ PEVENT_FILTER_DESCRIPTOR FilterData,
+ PVOID callback_context);
+
+ // Returns initial value of metadata_index.
+ uint16_t EventBegin(char* metadata, const char* event_name) const noexcept;
+
+ char EventAddField(char* metadata,
+ uint16_t* metadata_index,
+ uint8_t in_type,
+ uint8_t out_type,
+ const char* field_name) const noexcept;
+
+ // Returns Win32 error code, or 0 for success.
+ int32_t EventEnd(char* metadata,
+ uint16_t metadata_index,
+ EVENT_DATA_DESCRIPTOR* descriptors,
+ uint32_t descriptors_index,
+ const EVENT_DESCRIPTOR& event_descriptor) const noexcept;
+
+ bool KeywordEnabled(uint64_t keyword) const noexcept;
+
+ uint16_t AppendNameToMetadata(char* metadata,
+ uint16_t metadata_size,
+ uint16_t metadata_index,
+ const char* name) const noexcept;
+
+ uint32_t level_plus1_ = 0;
+ uint32_t provider_metadata_size_ = 0;
+ uint64_t keyword_any_ = 0;
+ uint64_t keyword_all_ = 0;
+ uint64_t reg_handle_ = 0;
+ PENABLECALLBACK enable_callback_ = nullptr;
+ void* enable_callback_context_ = nullptr;
+ char provider_metadata_[kMaxProviderMetadataSize] = {};
+};
+
+// Base class for field types.
+template <uint8_t data_desc_count,
+ uint8_t in_type,
+ uint8_t out_type = 0> // Default out_type is TlgOutNULL
+class TlmFieldBase {
+ public:
+ constexpr const char* Name() const noexcept { return name_; }
+
+ protected:
+ explicit constexpr TlmFieldBase(const char* name) noexcept : name_(name) {}
+
+ private:
+ friend class TlmProvider;
+
+ static constexpr uint8_t data_desc_count_ = data_desc_count;
+ static constexpr uint8_t in_type_ = in_type;
+ static constexpr uint8_t out_type_ = out_type;
+
+ const char* name_;
+};
+
+// Class that represents an event field containing nul-terminated MBCS data.
+class TlmMbcsStringField
+ : public TlmFieldBase<1, 2> // 1 data descriptor, Type = TlgInANSISTRING
+{
+ public:
+ // name is a utf-8 nul-terminated string.
+ // value is MBCS nul-terminated string (assumed to be in system's default code
+ // page).
+ TlmMbcsStringField(const char* name, const char* value) noexcept;
+
+ const char* Value() const noexcept;
+
+ void FillEventDescriptor(EVENT_DATA_DESCRIPTOR* descriptors) const noexcept;
+
+ private:
+ const char* value_;
+};
+
+// Class that represents an event field containing nul-terminated UTF-8 data.
+class TlmUtf8StringField
+ : public TlmFieldBase<1, 2, 35> // 1 data descriptor, Type =
+ // TlgInANSISTRING + TlgOutUTF8
+{
+ public:
+ // name and value are utf-8 nul-terminated strings.
+ TlmUtf8StringField(const char* name, const char* value) noexcept;
+
+ const char* Value() const noexcept;
+
+ void FillEventDescriptor(EVENT_DATA_DESCRIPTOR* descriptors) const noexcept;
+
+ private:
+ const char* value_;
+};
+
+// Helper for creating event descriptors for use with WriteEvent.
+constexpr EVENT_DESCRIPTOR TlmEventDescriptor(uint8_t level,
+ uint64_t keyword) noexcept {
+ return {
+
+ // Id
+ // TraceLogging generally uses the event's Name instead of Id+Version,
+ // so Id is normally set to 0 for TraceLogging events.
+ 0,
+
+ // Version
+ // TraceLogging generally uses the event's Name instead of Id+Version,
+ // so Version is normally set to 0 for TraceLogging events.
+ 0,
+
+ // Channel (WINEVENT_CHANNEL_*)
+ // Setting Channel = 11 allows TraceLogging events to be decoded
+ // correctly even if they were collected on older operating systems.
+ // If a TraceLogging event sets channel to a value other than 11, the
+ // event will only decode correctly if it was collected on an
+ // operating system that has built-in TraceLogging support, i.e.
+ // Windows 7 SP1 + patch, Windows 8.1 + patch, or Windows 10+.
+ 11, // = WINEVENT_CHANNEL_TRACELOGGING
+
+ // Level (WINEVENT_LEVEL_*)
+ // 0=always, 1=fatal, 2=error, 3=warning, 4=info, 5=verbose.
+ // Levels higher than 5 are for user-defined debug levels.
+ level,
+
+ // Opcode (WINEVENT_OPCODE_*)
+ // Set Opcode for special semantics such as starting/ending an
+ // activity.
+ 0, // = WINEVENT_OPCODE_INFO
+
+ // Task
+ // Set Task for user-defined semantics.
+ 0, // = WINEVENT_TASK_NONE
+
+ // Keyword
+ // A keyword is a 64-bit value used for filtering events. Each bit of
+ // the keyword indicates whether the event belongs to a particular
+ // category of events. The top 16 bits of keyword have
+ // Microsoft-defined semantics and should be set to 0. The low 48 bits
+ // of keyword have user-defined semantics. All events should use a
+ // nonzero keyword to support effective event filtering (events with
+ // keyword set to 0 always pass keyword filtering).
+ keyword};
+}
+
+#endif // BASE_TRACE_EVENT_TRACE_LOGGING_MINIMAL_WIN_H_ \ No newline at end of file
diff --git a/chromium/base/trace_event/typed_macros.h b/chromium/base/trace_event/typed_macros.h
new file mode 100644
index 00000000000..8b792c4d52b
--- /dev/null
+++ b/chromium/base/trace_event/typed_macros.h
@@ -0,0 +1,74 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TYPED_MACROS_H_
+#define BASE_TRACE_EVENT_TYPED_MACROS_H_
+
+#include "base/base_export.h"
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/typed_macros_internal.h"
+#include "build/build_config.h"
+
+// Needed not for this file, but for every user of the TRACE_EVENT macros for
+// the lambda definition. So included here for convenience.
+#include "third_party/perfetto/include/perfetto/tracing/event_context.h"
+#include "third_party/perfetto/protos/perfetto/trace/track_event/track_event.pbzero.h"
+
+#if defined(TRACE_EVENT_BEGIN)
+#error "Another copy of perfetto tracing macros have been included"
+#endif
+
+// This file implements typed event macros [1] that will be provided by the
+// Perfetto client library in the future, as a stop-gap to support typed trace
+// events in Chrome until we are ready to switch to the client library's
+// implementation of trace macros.
+// [1] https://perfetto.dev/docs/instrumentation/track-events
+// TODO(crbug/1006541): Replace this file with the Perfetto client library.
+
+// Begin a thread-scoped slice under |category| with the title |name|. Both
+// strings must be static constants. The track event is only recorded if
+// |category| is enabled for a tracing session.
+//
+// Rest of parameters can contain: a perfetto::Track object for asynchronous
+// events and a lambda used to fill typed event. Should be passed in that exact
+// order when both are used.
+//
+// When lambda is passed as an argument, it is executed synchronously.
+//
+// TODO(nuskos): Give a simple example once we have a typed event that doesn't
+// need interning.
+// TRACE_EVENT_BEGIN("log", "LogMessage",
+// [](perfetto::EventContext ctx) {
+// auto* event = ctx.event();
+// // Fill in some field in track_event.
+// });
+#define TRACE_EVENT_BEGIN(category, name, ...) \
+ TRACING_INTERNAL_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_BEGIN, category, name, \
+ TRACE_EVENT_FLAG_NONE, ##__VA_ARGS__)
+
+// End a thread-scoped slice under |category|.
+#define TRACE_EVENT_END(category, ...) \
+ TRACING_INTERNAL_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_END, category, \
+ trace_event_internal::kTraceEventEndName, \
+ TRACE_EVENT_FLAG_NONE, ##__VA_ARGS__)
+
+// Begin a thread-scoped slice which gets automatically closed when going out
+// of scope.
+//
+// BEWARE: similarly to TRACE_EVENT_BEGIN, this macro does accept a track, but
+// it does not work properly and should not be used.
+// TODO(b/154583431): figure out how to fix or disallow that and update the
+// comment.
+//
+// Similarly to TRACE_EVENT_BEGIN, when lambda is passed as an argument, it is
+// executed synchronously.
+#define TRACE_EVENT(category, name, ...) \
+ TRACING_INTERNAL_SCOPED_ADD_TRACE_EVENT(category, name, ##__VA_ARGS__)
+
+// Emit a single event called "name" immediately, with zero duration.
+#define TRACE_EVENT_INSTANT(category, name, scope, ...) \
+ TRACING_INTERNAL_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_INSTANT, category, name, \
+ scope, ##__VA_ARGS__)
+
+#endif // BASE_TRACE_EVENT_TYPED_MACROS_H_
diff --git a/chromium/base/trace_event/typed_macros_embedder_support.h b/chromium/base/trace_event/typed_macros_embedder_support.h
new file mode 100644
index 00000000000..d06c5b8a359
--- /dev/null
+++ b/chromium/base/trace_event/typed_macros_embedder_support.h
@@ -0,0 +1,65 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TYPED_MACROS_EMBEDDER_SUPPORT_H_
+#define BASE_TRACE_EVENT_TYPED_MACROS_EMBEDDER_SUPPORT_H_
+
+#include "base/base_export.h"
+#include "base/trace_event/trace_event.h"
+#include "third_party/perfetto/protos/perfetto/trace/track_event/track_event.pbzero.h"
+
+namespace base {
+namespace trace_event {
+
+class BASE_EXPORT TrackEventHandle {
+ public:
+ using TrackEvent = perfetto::protos::pbzero::TrackEvent;
+
+ class BASE_EXPORT CompletionListener {
+ public:
+ // Implemented in typed_macros_internal.h.
+ virtual ~CompletionListener();
+ virtual void OnTrackEventCompleted() = 0;
+ };
+
+ // Creates a handle to |event| which notifies |listener| on the handle's
+ // destruction, i.e. after the event lambda has emitted any typed arguments
+ // into the event. Note that |listener| must outlive the TRACE_EVENT call,
+ // i.e. cannot be destroyed until OnTrackEventCompleted() is called. Ownership
+ // of both TrackEvent and the listener remains with the caller.
+ TrackEventHandle(TrackEvent* event, CompletionListener* listener)
+ : event_(event), listener_(listener) {}
+
+ // Creates an invalid handle.
+ TrackEventHandle() : TrackEventHandle(nullptr, nullptr) {}
+
+ ~TrackEventHandle() {
+ if (listener_)
+ listener_->OnTrackEventCompleted();
+ }
+
+ explicit operator bool() const { return event_; }
+ TrackEvent& operator*() const { return *event_; }
+ TrackEvent* operator->() const { return event_; }
+ TrackEvent* get() const { return event_; }
+
+ private:
+ TrackEvent* event_;
+ CompletionListener* listener_;
+};
+
+using PrepareTrackEventFunction = TrackEventHandle (*)(TraceEvent*);
+
+// Embedder should call this (only once) to set the callback invoked when a
+// typed event should be emitted. The callback function may be executed on any
+// thread. Implemented in typed_macros_internal.h.
+BASE_EXPORT void EnableTypedTraceEvents(
+ PrepareTrackEventFunction typed_event_callback);
+
+BASE_EXPORT void ResetTypedTraceEventsForTesting();
+
+} // namespace trace_event
+} // namespace base
+
+#endif // BASE_TRACE_EVENT_TYPED_MACROS_EMBEDDER_SUPPORT_H_
diff --git a/chromium/base/trace_event/typed_macros_internal.cc b/chromium/base/trace_event/typed_macros_internal.cc
new file mode 100644
index 00000000000..89825fde5cb
--- /dev/null
+++ b/chromium/base/trace_event/typed_macros_internal.cc
@@ -0,0 +1,95 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/typed_macros_internal.h"
+
+#include "base/optional.h"
+#include "base/time/time.h"
+#include "base/trace_event/thread_instruction_count.h"
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/typed_macros.h"
+
+namespace {
+
+base::ThreadTicks ThreadNow() {
+ return base::ThreadTicks::IsSupported()
+ ? base::subtle::ThreadTicksNowIgnoringOverride()
+ : base::ThreadTicks();
+}
+
+base::trace_event::ThreadInstructionCount ThreadInstructionNow() {
+ return base::trace_event::ThreadInstructionCount::IsSupported()
+ ? base::trace_event::ThreadInstructionCount::Now()
+ : base::trace_event::ThreadInstructionCount();
+}
+
+base::trace_event::PrepareTrackEventFunction g_typed_event_callback = nullptr;
+
+} // namespace
+
+namespace base {
+namespace trace_event {
+
+void EnableTypedTraceEvents(PrepareTrackEventFunction typed_event_callback) {
+ g_typed_event_callback = typed_event_callback;
+}
+
+void ResetTypedTraceEventsForTesting() {
+ g_typed_event_callback = nullptr;
+}
+
+TrackEventHandle::CompletionListener::~CompletionListener() = default;
+
+} // namespace trace_event
+} // namespace base
+
+namespace trace_event_internal {
+
+base::trace_event::TrackEventHandle CreateTrackEvent(
+ char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned int flags,
+ base::TimeTicks ts,
+ bool explicit_track) {
+ DCHECK(phase == TRACE_EVENT_PHASE_BEGIN || phase == TRACE_EVENT_PHASE_END ||
+ phase == TRACE_EVENT_PHASE_INSTANT);
+ DCHECK(category_group_enabled);
+
+ if (!g_typed_event_callback)
+ return base::trace_event::TrackEventHandle();
+
+ const int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
+ auto* trace_log = base::trace_event::TraceLog::GetInstance();
+ DCHECK(trace_log);
+ if (!trace_log->ShouldAddAfterUpdatingState(phase, category_group_enabled,
+ name, trace_event_internal::kNoId,
+ thread_id, nullptr)) {
+ return base::trace_event::TrackEventHandle();
+ }
+
+ if (ts.is_null()) {
+ ts = TRACE_TIME_TICKS_NOW();
+ } else {
+ flags |= TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP;
+ }
+
+ // Only emit thread time / instruction count for events on the default track
+ // without explicit timestamp.
+ base::ThreadTicks thread_now;
+ base::trace_event::ThreadInstructionCount thread_instruction_now;
+ if ((flags & TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP) == 0 && !explicit_track) {
+ thread_now = ThreadNow();
+ thread_instruction_now = ThreadInstructionNow();
+ }
+
+ base::trace_event::TraceEvent event(
+ thread_id, ts, thread_now, thread_instruction_now, phase,
+ category_group_enabled, name, trace_event_internal::kGlobalScope,
+ trace_event_internal::kNoId, trace_event_internal::kNoId, nullptr, flags);
+
+ return g_typed_event_callback(&event);
+}
+
+} // namespace trace_event_internal
diff --git a/chromium/base/trace_event/typed_macros_internal.h b/chromium/base/trace_event/typed_macros_internal.h
new file mode 100644
index 00000000000..51e91b7e56e
--- /dev/null
+++ b/chromium/base/trace_event/typed_macros_internal.h
@@ -0,0 +1,176 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TRACE_EVENT_TYPED_MACROS_INTERNAL_H_
+#define BASE_TRACE_EVENT_TYPED_MACROS_INTERNAL_H_
+
+#include "base/base_export.h"
+#include "base/optional.h"
+#include "base/time/time.h"
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/typed_macros_embedder_support.h"
+#include "build/build_config.h"
+#include "third_party/perfetto/include/perfetto/protozero/message_handle.h"
+#include "third_party/perfetto/include/perfetto/tracing/event_context.h"
+#include "third_party/perfetto/include/perfetto/tracing/track.h"
+#include "third_party/perfetto/protos/perfetto/trace/track_event/track_event.pbzero.h"
+
+// These macros should not be called directly. They are intended to be used by
+// macros in //base/trace_event/typed_macros.h only.
+
+#define TRACING_INTERNAL_CONCAT2(a, b) a##b
+#define TRACING_INTERNAL_CONCAT(a, b) TRACING_INTERNAL_CONCAT2(a, b)
+#define TRACING_INTERNAL_UID(prefix) TRACING_INTERNAL_CONCAT(prefix, __LINE__)
+
+#define TRACING_INTERNAL_ADD_TRACE_EVENT(phase, category, name, flags, ...) \
+ do { \
+ INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category); \
+ if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) { \
+ trace_event_internal::AddTraceEvent( \
+ phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
+ flags, ##__VA_ARGS__); \
+ } \
+ } while (false)
+
+#define TRACING_INTERNAL_SCOPED_ADD_TRACE_EVENT(category, name, ...) \
+ struct { \
+ struct ScopedTraceEvent { \
+ /* The parameter is an implementation detail. It allows the */ \
+ /* anonymous struct to use aggregate initialization to invoke the */ \
+ /* lambda to emit the begin event with the proper reference capture */ \
+ /* for any TrackEventArgumentFunction in |__VA_ARGS__|. This is */ \
+ /* required so that the scoped event is exactly ONE line and can't */ \
+ /* escape the scope if used in a single line if statement. */ \
+ ScopedTraceEvent(...) {} \
+ ~ScopedTraceEvent() { \
+ /* TODO(nuskos): Remove the empty string passed as the |name| */ \
+ /* field. As described in macros.h we shouldn't need it in our */ \
+ /* end state. */ \
+ TRACING_INTERNAL_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_END, category, "", \
+ TRACE_EVENT_FLAG_NONE, \
+ [](perfetto::EventContext) {}); \
+ } \
+ } event; \
+ } TRACING_INTERNAL_UID(scoped_event){[&]() { \
+ TRACING_INTERNAL_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_BEGIN, category, name, \
+ TRACE_EVENT_FLAG_NONE, ##__VA_ARGS__); \
+ return 0; \
+ }()};
+
+namespace trace_event_internal {
+
+// Copy of function with the same name from Perfetto client library.
+template <typename T>
+static constexpr bool IsValidTraceLambdaImpl(
+ typename std::enable_if<static_cast<bool>(
+ sizeof(std::declval<T>()(std::declval<perfetto::EventContext>()),
+ 0))>::type* = nullptr) {
+ return true;
+}
+
+template <typename T>
+static constexpr bool IsValidTraceLambdaImpl(...) {
+ return false;
+}
+
+template <typename T>
+static constexpr bool IsValidTraceLambda() {
+ return IsValidTraceLambdaImpl<T>(nullptr);
+}
+
+// The perfetto client library does not use event names for
+// TRACE_EVENT_PHASE_END. However, TraceLog expects all TraceEvents to have
+// event names. So, until we move over to the client library, we will use this
+// empty name for all TRACE_EVENT_PHASE_END typed events.
+constexpr char kTraceEventEndName[] = "";
+
+base::trace_event::TrackEventHandle BASE_EXPORT
+CreateTrackEvent(char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned int flags,
+ base::TimeTicks timestamp,
+ bool explicit_track);
+
+template <
+ typename TrackEventArgumentFunction = void (*)(perfetto::EventContext),
+ typename ArgumentFunctionCheck = typename std::enable_if<
+ IsValidTraceLambda<TrackEventArgumentFunction>()>::type>
+static inline void AddTraceEvent(char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned int flags,
+ const perfetto::Track& track,
+ base::TimeTicks timestamp,
+ TrackEventArgumentFunction argument_func) {
+ base::trace_event::TrackEventHandle track_event =
+ CreateTrackEvent(phase, category_group_enabled, name, flags, timestamp,
+ track.uuid != perfetto::Track().uuid);
+ if (!track_event)
+ return;
+
+ if (track)
+ track_event->set_track_uuid(track.uuid);
+
+ argument_func(perfetto::EventContext(track_event.get()));
+}
+
+template <
+ typename TrackEventArgumentFunction = void (*)(perfetto::EventContext),
+ typename ArgumentFunctionCheck = typename std::enable_if<
+ IsValidTraceLambda<TrackEventArgumentFunction>()>::type,
+ typename TrackType,
+ typename TrackTypeCheck = typename std::enable_if<
+ std::is_convertible<TrackType, perfetto::Track>::value>::type>
+static inline void AddTraceEvent(char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned int flags,
+ const TrackType& track,
+ TrackEventArgumentFunction argument_func) {
+ AddTraceEvent(phase, category_group_enabled, name, flags, track,
+ base::TimeTicks(), argument_func);
+}
+
+template <
+ typename TrackEventArgumentFunction = void (*)(perfetto::EventContext),
+ typename ArgumentFunctionCheck = typename std::enable_if<
+ IsValidTraceLambda<TrackEventArgumentFunction>()>::type>
+static inline void AddTraceEvent(char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned int flags,
+ TrackEventArgumentFunction argument_func) {
+ AddTraceEvent(phase, category_group_enabled, name, flags, perfetto::Track(),
+ base::TimeTicks(), argument_func);
+}
+
+template <typename TrackType,
+ typename TrackTypeCheck = typename std::enable_if<
+ std::is_convertible<TrackType, perfetto::Track>::value>::type>
+inline void AddTraceEvent(char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned int flags,
+ const TrackType& track) {
+ AddTraceEvent(phase, category_group_enabled, name, flags, track,
+ base::TimeTicks(), [](perfetto::EventContext ctx) {});
+}
+
+template <typename TrackType,
+ typename TrackTypeCheck = typename std::enable_if<
+ std::is_convertible<TrackType, perfetto::Track>::value>::type>
+inline void AddTraceEvent(char phase,
+ const unsigned char* category_group_enabled,
+ const char* name,
+ unsigned int flags,
+ const TrackType& track,
+ base::TimeTicks timestamp) {
+ AddTraceEvent(phase, category_group_enabled, name, flags, track, timestamp,
+ [](perfetto::EventContext ctx) {});
+}
+
+} // namespace trace_event_internal
+
+#endif // BASE_TRACE_EVENT_TYPED_MACROS_INTERNAL_H_
diff --git a/chromium/base/trace_event/typed_macros_unittest.cc b/chromium/base/trace_event/typed_macros_unittest.cc
new file mode 100644
index 00000000000..741b43fba3f
--- /dev/null
+++ b/chromium/base/trace_event/typed_macros_unittest.cc
@@ -0,0 +1,111 @@
+// Copyright (c) 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/trace_event/typed_macros.h"
+
+#include "base/synchronization/waitable_event.h"
+#include "base/trace_event/trace_log.h"
+#include "base/trace_event/typed_macros_embedder_support.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/perfetto/include/perfetto/protozero/scattered_heap_buffer.h"
+
+#include "third_party/perfetto/protos/perfetto/trace/track_event/log_message.pbzero.h"
+
+namespace base {
+namespace trace_event {
+
+namespace {
+
+constexpr const char kRecordAllCategoryFilter[] = "*";
+
+void CancelTraceAsync(WaitableEvent* flush_complete_event) {
+ TraceLog::GetInstance()->CancelTracing(base::BindRepeating(
+ [](WaitableEvent* complete_event,
+ const scoped_refptr<base::RefCountedString>&, bool has_more_events) {
+ if (!has_more_events)
+ complete_event->Signal();
+ },
+ base::Unretained(flush_complete_event)));
+}
+
+void CancelTrace() {
+ WaitableEvent flush_complete_event(WaitableEvent::ResetPolicy::AUTOMATIC,
+ WaitableEvent::InitialState::NOT_SIGNALED);
+ CancelTraceAsync(&flush_complete_event);
+ flush_complete_event.Wait();
+}
+
+struct TestTrackEvent;
+TestTrackEvent* g_test_track_event;
+
+struct TestTrackEvent : public TrackEventHandle::CompletionListener {
+ public:
+ TestTrackEvent() {
+ CHECK_EQ(g_test_track_event, nullptr)
+ << "Another instance of TestTrackEvent is already active";
+ g_test_track_event = this;
+ }
+
+ ~TestTrackEvent() override { g_test_track_event = nullptr; }
+
+ void OnTrackEventCompleted() override {
+ EXPECT_FALSE(event_completed);
+ event_completed = true;
+ }
+
+ protozero::HeapBuffered<perfetto::protos::pbzero::TrackEvent> event;
+ bool prepare_called = false;
+ bool event_completed = false;
+};
+
+TrackEventHandle PrepareTrackEvent(TraceEvent*) {
+ CHECK_NE(g_test_track_event, nullptr) << "TestTrackEvent not set yet";
+ g_test_track_event->prepare_called = true;
+ return TrackEventHandle(g_test_track_event->event.get(), g_test_track_event);
+}
+
+class TypedTraceEventTest : public testing::Test {
+ public:
+ TypedTraceEventTest() { EnableTypedTraceEvents(&PrepareTrackEvent); }
+
+ ~TypedTraceEventTest() override { ResetTypedTraceEventsForTesting(); }
+
+ protected:
+ TestTrackEvent event_;
+};
+
+} // namespace
+
+TEST_F(TypedTraceEventTest, CallbackExecutedWhenTracingEnabled) {
+ TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
+ TraceLog::RECORDING_MODE);
+
+ TRACE_EVENT("cat", "Name", [this](perfetto::EventContext ctx) {
+ EXPECT_EQ(ctx.event(), event_.event.get());
+ perfetto::protos::pbzero::LogMessage* log = ctx.event()->set_log_message();
+ log->set_body_iid(1);
+ });
+
+ EXPECT_TRUE(event_.prepare_called);
+ EXPECT_FALSE(event_.event.empty());
+ EXPECT_TRUE(event_.event_completed);
+
+ CancelTrace();
+}
+
+TEST_F(TypedTraceEventTest, CallbackNotExecutedWhenTracingDisabled) {
+ TRACE_EVENT("cat", "Name", [this](perfetto::EventContext ctx) {
+ EXPECT_EQ(ctx.event(), event_.event.get());
+ perfetto::protos::pbzero::LogMessage* log = ctx.event()->set_log_message();
+ log->set_body_iid(1);
+ });
+
+ EXPECT_FALSE(event_.prepare_called);
+ EXPECT_TRUE(event_.event.empty());
+ EXPECT_FALSE(event_.event_completed);
+}
+
+} // namespace trace_event
+} // namespace base
diff --git a/chromium/base/unguessable_token.h b/chromium/base/unguessable_token.h
index 895dbc46c4c..15577f8f32a 100644
--- a/chromium/base/unguessable_token.h
+++ b/chromium/base/unguessable_token.h
@@ -11,8 +11,8 @@
#include <tuple>
#include "base/base_export.h"
+#include "base/check.h"
#include "base/hash/hash.h"
-#include "base/logging.h"
#include "base/token.h"
namespace base {
diff --git a/chromium/base/unguessable_token_unittest.cc b/chromium/base/unguessable_token_unittest.cc
index 80f50fae91a..5d4ecc98634 100644
--- a/chromium/base/unguessable_token_unittest.cc
+++ b/chromium/base/unguessable_token_unittest.cc
@@ -8,7 +8,6 @@
#include <sstream>
#include <type_traits>
-#include "base/value_conversions.h"
#include "base/values.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -69,15 +68,6 @@ TEST(UnguessableTokenTest, VerifySerialization) {
EXPECT_EQ(token, Deserialized);
}
-TEST(UnguessableTokenTest, VerifyValueSerialization) {
- UnguessableToken token = UnguessableToken::Create();
- Value value = CreateUnguessableTokenValue(token);
-
- UnguessableToken deserialized;
- EXPECT_TRUE(GetValueAsUnguessableToken(value, &deserialized));
- EXPECT_EQ(token, deserialized);
-}
-
// Common case (~88% of the time) - no leading zeroes in high_ nor low_.
TEST(UnguessableTokenTest, VerifyToString1) {
UnguessableToken token =
diff --git a/chromium/base/util/BUILD.gn b/chromium/base/util/BUILD.gn
index d7a680b87fe..7905e238cd9 100644
--- a/chromium/base/util/BUILD.gn
+++ b/chromium/base/util/BUILD.gn
@@ -7,6 +7,7 @@ import("//testing/test.gni")
test("base_util_unittests") {
deps = [
"memory_pressure:unittests",
+ "ranges:unittests",
"timer:unittests",
"type_safety:tests",
"values:unittests",
diff --git a/chromium/base/util/memory_pressure/memory_pressure_voter.cc b/chromium/base/util/memory_pressure/memory_pressure_voter.cc
index 56c2d70b24d..2c7e29d9f82 100644
--- a/chromium/base/util/memory_pressure/memory_pressure_voter.cc
+++ b/chromium/base/util/memory_pressure/memory_pressure_voter.cc
@@ -7,7 +7,7 @@
#include <numeric>
#include "base/stl_util.h"
-#include "base/trace_event/trace_event.h"
+#include "base/trace_event/base_tracing.h"
namespace util {
diff --git a/chromium/base/util/memory_pressure/multi_source_memory_pressure_monitor.cc b/chromium/base/util/memory_pressure/multi_source_memory_pressure_monitor.cc
index 74f966381a4..576a83742b6 100644
--- a/chromium/base/util/memory_pressure/multi_source_memory_pressure_monitor.cc
+++ b/chromium/base/util/memory_pressure/multi_source_memory_pressure_monitor.cc
@@ -9,7 +9,7 @@
#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_macros.h"
#include "base/time/time.h"
-#include "base/trace_event/trace_event.h"
+#include "base/trace_event/base_tracing.h"
#include "base/util/memory_pressure/system_memory_pressure_evaluator.h"
namespace util {
diff --git a/chromium/base/util/memory_pressure/system_memory_pressure_evaluator.cc b/chromium/base/util/memory_pressure/system_memory_pressure_evaluator.cc
index a13c40dc7aa..c624b928cb0 100644
--- a/chromium/base/util/memory_pressure/system_memory_pressure_evaluator.cc
+++ b/chromium/base/util/memory_pressure/system_memory_pressure_evaluator.cc
@@ -7,6 +7,8 @@
#include "build/build_config.h"
#if defined(OS_CHROMEOS)
+#include "base/logging.h"
+#include "base/system/sys_info.h"
#include "base/util/memory_pressure/system_memory_pressure_evaluator_chromeos.h"
#elif defined(OS_FUCHSIA)
#include "base/util/memory_pressure/system_memory_pressure_evaluator_fuchsia.h"
@@ -28,8 +30,9 @@ SystemMemoryPressureEvaluator::CreateDefaultSystemEvaluator(
return std::make_unique<util::chromeos::SystemMemoryPressureEvaluator>(
monitor->CreateVoter());
}
- LOG(ERROR) << "No MemoryPressureMonitor created because the kernel does "
- "not support notifications.";
+ LOG_IF(ERROR, base::SysInfo::IsRunningOnChromeOS())
+ << "No MemoryPressureMonitor created because the kernel does not have "
+ "support.";
#elif defined(OS_FUCHSIA)
return std::make_unique<util::SystemMemoryPressureEvaluatorFuchsia>(
monitor->CreateVoter());
diff --git a/chromium/base/util/memory_pressure/system_memory_pressure_evaluator_chromeos.cc b/chromium/base/util/memory_pressure/system_memory_pressure_evaluator_chromeos.cc
index f8b3791b2b0..4acac008e83 100644
--- a/chromium/base/util/memory_pressure/system_memory_pressure_evaluator_chromeos.cc
+++ b/chromium/base/util/memory_pressure/system_memory_pressure_evaluator_chromeos.cc
@@ -10,6 +10,7 @@
#include "base/bind.h"
#include "base/files/file_util.h"
+#include "base/logging.h"
#include "base/metrics/histogram_macros.h"
#include "base/no_destructor.h"
#include "base/posix/eintr_wrapper.h"
@@ -76,8 +77,11 @@ GetMemoryPressureLevelFromAvailable(int available_mb,
uint64_t ReadFileToUint64(const base::FilePath& file) {
std::string file_contents;
- if (!ReadFileToString(file, &file_contents))
+ if (!base::ReadFileToStringNonBlocking(file, &file_contents)) {
+ PLOG_IF(ERROR, base::SysInfo::IsRunningOnChromeOS())
+ << "Unable to read uint64 from: " << file;
return 0;
+ }
TrimWhitespaceASCII(file_contents, base::TRIM_ALL, &file_contents);
uint64_t file_contents_uint64 = 0;
if (!base::StringToUint64(file_contents, &file_contents_uint64))
@@ -211,7 +215,8 @@ std::vector<int> SystemMemoryPressureEvaluator::GetMarginFileParts(
const std::string& file) {
std::vector<int> margin_values;
std::string margin_contents;
- if (base::ReadFileToString(base::FilePath(file), &margin_contents)) {
+ if (base::ReadFileToStringNonBlocking(base::FilePath(file),
+ &margin_contents)) {
std::vector<std::string> margins =
base::SplitString(margin_contents, base::kWhitespaceASCII,
base::TRIM_WHITESPACE, base::SPLIT_WANT_NONEMPTY);
@@ -226,7 +231,8 @@ std::vector<int> SystemMemoryPressureEvaluator::GetMarginFileParts(
margin_values.push_back(value);
}
} else {
- LOG(ERROR) << "Unable to read margin file: " << kMarginMemFile;
+ PLOG_IF(ERROR, base::SysInfo::IsRunningOnChromeOS())
+ << "Unable to read margin file: " << kMarginMemFile;
}
return margin_values;
}
@@ -305,8 +311,9 @@ uint64_t SystemMemoryPressureEvaluator::CalculateReservedFreeKB(
uint64_t SystemMemoryPressureEvaluator::GetReservedMemoryKB() {
std::string file_contents;
- if (!ReadFileToString(base::FilePath("/proc/zoneinfo"), &file_contents)) {
- LOG(ERROR) << "Couldn't get /proc/zoneinfo";
+ if (!base::ReadFileToStringNonBlocking(base::FilePath("/proc/zoneinfo"),
+ &file_contents)) {
+ PLOG(ERROR) << "Couldn't get /proc/zoneinfo";
return 0;
}
diff --git a/chromium/base/util/memory_pressure/system_memory_pressure_evaluator_chromeos_unittest.cc b/chromium/base/util/memory_pressure/system_memory_pressure_evaluator_chromeos_unittest.cc
index 10dd507b5fc..617ec32499a 100644
--- a/chromium/base/util/memory_pressure/system_memory_pressure_evaluator_chromeos_unittest.cc
+++ b/chromium/base/util/memory_pressure/system_memory_pressure_evaluator_chromeos_unittest.cc
@@ -251,7 +251,7 @@ TEST(ChromeOSSystemMemoryPressureEvaluatorTest, CheckMemoryPressure) {
std::vector<base::MemoryPressureListener::MemoryPressureLevel>
pressure_events;
auto listener = std::make_unique<base::MemoryPressureListener>(
- base::BindRepeating(&OnMemoryPressure, &pressure_events));
+ FROM_HERE, base::BindRepeating(&OnMemoryPressure, &pressure_events));
MultiSourceMemoryPressureMonitor monitor;
monitor.ResetSystemEvaluatorForTesting();
diff --git a/chromium/base/util/memory_pressure/system_memory_pressure_evaluator_fuchsia.cc b/chromium/base/util/memory_pressure/system_memory_pressure_evaluator_fuchsia.cc
index 8e4a9a98197..4ab74247683 100644
--- a/chromium/base/util/memory_pressure/system_memory_pressure_evaluator_fuchsia.cc
+++ b/chromium/base/util/memory_pressure/system_memory_pressure_evaluator_fuchsia.cc
@@ -6,8 +6,8 @@
#include <lib/sys/cpp/component_context.h>
-#include "base/fuchsia/default_context.h"
#include "base/fuchsia/fuchsia_logging.h"
+#include "base/fuchsia/process_context.h"
#include "base/util/memory_pressure/memory_pressure_voter.h"
namespace util {
@@ -34,13 +34,11 @@ SystemMemoryPressureEvaluatorFuchsia::SystemMemoryPressureEvaluatorFuchsia(
std::unique_ptr<util::MemoryPressureVoter> voter)
: util::SystemMemoryPressureEvaluator(std::move(voter)), binding_(this) {
binding_.set_error_handler([](zx_status_t status) {
- // TODO(https://crbug.com/1020698): Update fuchsia.web docs to make this a
- // required service, and make this a FATAL log.
- ZX_LOG(WARNING, status) << "fuchsia.memorypressure.Provider disconnected.";
+ ZX_LOG(FATAL, status) << "fuchsia.memorypressure.Provider disconnected";
});
DVLOG(1) << "Registering for memory pressure updates.";
- auto provider = base::fuchsia::ComponentContextForCurrentProcess()
+ auto provider = base::ComponentContextForProcess()
->svc()
->Connect<fuchsia::memorypressure::Provider>();
provider->RegisterWatcher(binding_.NewBinding());
diff --git a/chromium/base/util/memory_pressure/system_memory_pressure_evaluator_fuchsia_unittest.cc b/chromium/base/util/memory_pressure/system_memory_pressure_evaluator_fuchsia_unittest.cc
index 545a404bd92..3cd3aae600f 100644
--- a/chromium/base/util/memory_pressure/system_memory_pressure_evaluator_fuchsia_unittest.cc
+++ b/chromium/base/util/memory_pressure/system_memory_pressure_evaluator_fuchsia_unittest.cc
@@ -60,13 +60,18 @@ class SystemMemoryPressureEvaluatorFuchsiaTest
fuchsia::memorypressure::WatcherPtr watcher_;
};
-TEST_F(SystemMemoryPressureEvaluatorFuchsiaTest, ProviderUnavailable) {
+using SystemMemoryPressureEvaluatorFuchsiaDeathTest =
+ SystemMemoryPressureEvaluatorFuchsiaTest;
+
+TEST_F(SystemMemoryPressureEvaluatorFuchsiaDeathTest, ProviderUnavailable) {
auto voter = std::make_unique<MockMemoryPressureVoter>();
SystemMemoryPressureEvaluatorFuchsia evaluator(std::move(voter));
// Spin the loop to allow the evaluator to notice that the Provider is not
- // available, to verify that that doesn't trigger a fatal failure.
- base::RunLoop().RunUntilIdle();
+ // available and verify that this causes a fatal failure.
+ ASSERT_DEATH(base::RunLoop().RunUntilIdle(),
+ "fuchsia\\.memorypressure\\.Provider disconnected: "
+ "ZX_ERR_PEER_CLOSED \\(-24\\)");
}
TEST_F(SystemMemoryPressureEvaluatorFuchsiaTest, Basic) {
diff --git a/chromium/base/util/memory_pressure/system_memory_pressure_evaluator_win_unittest.cc b/chromium/base/util/memory_pressure/system_memory_pressure_evaluator_win_unittest.cc
index 3e0297366b6..4fc5591373f 100644
--- a/chromium/base/util/memory_pressure/system_memory_pressure_evaluator_win_unittest.cc
+++ b/chromium/base/util/memory_pressure/system_memory_pressure_evaluator_win_unittest.cc
@@ -214,6 +214,7 @@ TEST_F(WinSystemMemoryPressureEvaluatorTest, CheckMemoryPressure) {
true, monitor.CreateVoter());
base::MemoryPressureListener listener(
+ FROM_HERE,
base::BindRepeating(&TestSystemMemoryPressureEvaluator::OnMemoryPressure,
base::Unretained(&evaluator)));
diff --git a/chromium/base/util/ranges/BUILD.gn b/chromium/base/util/ranges/BUILD.gn
new file mode 100644
index 00000000000..455c47d6877
--- /dev/null
+++ b/chromium/base/util/ranges/BUILD.gn
@@ -0,0 +1,26 @@
+# Copyright 2020 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+source_set("ranges") {
+ sources = [
+ "algorithm.h",
+ "functional.h",
+ "iterator.h",
+ ]
+}
+
+source_set("unittests") {
+ testonly = true
+ sources = [
+ "algorithm_unittest.cc",
+ "functional_unittest.cc",
+ "iterator_unittest.cc",
+ ]
+
+ deps = [
+ ":ranges",
+ "//testing/gmock",
+ "//testing/gtest",
+ ]
+}
diff --git a/chromium/base/util/ranges/OWNERS b/chromium/base/util/ranges/OWNERS
new file mode 100644
index 00000000000..766efe6c4af
--- /dev/null
+++ b/chromium/base/util/ranges/OWNERS
@@ -0,0 +1,2 @@
+jdoerrie@chromium.org
+pkasting@chromium.org
diff --git a/chromium/base/util/ranges/README.md b/chromium/base/util/ranges/README.md
new file mode 100644
index 00000000000..141a734defe
--- /dev/null
+++ b/chromium/base/util/ranges/README.md
@@ -0,0 +1,146 @@
+# `util::ranges`
+
+This directory aims to implement a C++14 version of the new `std::ranges`
+algorithms that were introduced in C++20. These implementations are added to the
+`::util::ranges` namespace, and callers can access them by including
+[`base/util/ranges/algorithm.h`](https://source.chromium.org/chromium/chromium/src/+/master:base/util/ranges/algorithm.h).
+
+## Similarities with C++20:
+
+### Automatically deducing `begin()` and `end()`
+As probably one of the most important changes for readability and usability, all
+algorithms in `util::ranges` have overloads for ranges of elements, which allow
+callers to no longer specify `begin()` and `end()` iterators themselves.
+
+Before:
+```c++
+bool HasEvens(const std::vector<int>& vec) {
+ return std::any_of(vec.begin(), vec.end(), [](int i) { return i % 2 == 0; });
+}
+```
+
+After:
+```c++
+bool HasEvens(const std::vector<int>& vec) {
+ return util::ranges::any_of(vec, [](int i) { return i % 2 == 0; });
+}
+```
+
+Furthermore, these overloads also support binding to temporaries, so that
+applying algorithms to return values is easier:
+
+```c++
+std::vector<int> GetNums();
+```
+
+Before:
+
+```c++
+bool HasEvens() {
+ std::vector<int> nums = GetNums();
+ return std::any_of(nums.begin(), nums.end(),
+ [](int i) { return i % 2 == 0; });
+}
+```
+
+After:
+```c++
+bool HasEvens() {
+ return util::ranges::any_of(GetNums(), [](int i) { return i % 2 == 0; });
+}
+```
+
+### Support for Projections
+In addition to supporting automatically deducing the `begin()` and `end()`
+iterator for ranges, the `util::ranges::` algorithms also support projections,
+that can be applied to arguments prior to passing it to supplied transformations
+or predicates. This is especially useful when ordering a collection of classes
+by a specific data member of the class. Example:
+
+Before:
+```cpp
+std::sort(suggestions->begin(), suggestions->end(),
+ [](const autofill::Suggestion& a, const autofill::Suggestion& b) {
+ return a.match < b.match;
+ });
+```
+
+After:
+```cpp
+util::ranges::sort(*suggestions, /*comp=*/{}, &autofill::Suggestion::match);
+```
+
+Anything that is callable can be used as a projection. This includes
+`FunctionObjects` like function pointers or functors, but also pointers to
+member function and pointers to data members, as shown above. When not specified
+a projection defaults to `util::ranges::identity`, which simply perfectly
+forwards its argument.
+
+Projections are supported in both range and iterator-pair overloads of the
+`util::ranges::` algorithms, for example `util::ranges::all_of` has the
+following signatures:
+
+```cpp
+template <typename InputIterator, typename Pred, typename Proj = identity>
+bool all_of(InputIterator first, InputIterator last, Pred pred, Proj proj = {});
+
+template <typename Range, typename Pred, typename Proj = identity>
+bool all_of(Range&& range, Pred pred, Proj proj = {});
+```
+
+## Differences from C++20:
+To simplify the implementation of the `util::ranges::` algorithms, they dispatch
+to the `std::` algorithms found in C++14. This leads to the following list of
+differences from C++20. Since most of these differences are differences in the
+library and not in the language, they could be addressed in the future by adding
+corresponding implementations.
+
+### Lack of Constraints
+Due to the lack of support for concepts in the language, the algorithms in
+`util::ranges` do not have the constraints that are present on the algorithms in
+`std::ranges`. Instead, they support any type, much like C++14's `std::`
+algorithms. In the future this might be addressed by adding corresponding
+constraints via SFINAE, should the need arise.
+
+### Lack of Range Primitives
+Due to C++14's lack of `std::ranges` concepts like sentinels and other range
+primitives, algorithms taking a `[first, last)` pair rather than a complete
+range, do not support different types for `first` and `last`. Since they rely on
+C++14's implementation, the type must be the same. This could be addressed in
+the future by implementing support for sentinel types ourselves.
+
+### Lack of `constexpr`
+The `util::ranges` algorithms can only be used in a `constexpr` context when
+they call underlying `std::` algorithms that are themselves `constexpr`. Before
+C++20, only `std::min`, `std::max` and `std::minmax` are annotated
+appropriately, so code like `constexpr bool foo = util::ranges::any_of(...);`
+will fail because the compiler will not find a `constexpr std::any_of`. This
+could be addressed by either upgrading Chromium's STL to C++20, or implementing
+`constexpr` versions of some of these algorithms ourselves.
+
+### Lack of post C++14 algorithms
+Since all algorithms in `util::ranges` dispatch to their C++14 equivalent,
+`std::` algorithms that are not present in C++14 have no implementation in
+`util::ranges`. This list of algorithms includes the following:
+
+- [`std::for_each_n`](https://en.cppreference.com/w/cpp/algorithm/for_each_n) (added in C++17)
+- [`std::sample`](https://en.cppreference.com/w/cpp/algorithm/sample) (added in C++17)
+- [`std::clamp`](https://en.cppreference.com/w/cpp/algorithm/clamp) (added in C++17)
+
+### Return Types
+Some of the algorithms in `std::ranges::` have different return types than their
+equivalent in `std::`. For example, while `std::for_each` returns the passed-in
+`Function`, `std::ranges::for_each` returns a `std::ranges::for_each_result`,
+consisting of the `last` iterator and the function.
+
+In the cases where the return type differs, `util::ranges::` algorithms will
+continue to return the old return type.
+
+### No blocking of ADL
+The algorithms defined in `std::ranges` are not found by ADL, and inhibit ADL
+when found by [unqualified name lookup][1]. This is done to be able to enforce
+the constraints specified by those algorithms and commonly implemented by using
+function objects instead of regular functions. Since we don't support
+constrained algorithms yet, we don't implement the blocking of ADL either.
+
+[1]: https://wg21.link/algorithms.requirements#2
diff --git a/chromium/base/util/ranges/algorithm.h b/chromium/base/util/ranges/algorithm.h
new file mode 100644
index 00000000000..4f9c1a6e745
--- /dev/null
+++ b/chromium/base/util/ranges/algorithm.h
@@ -0,0 +1,1352 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_UTIL_RANGES_ALGORITHM_H_
+#define BASE_UTIL_RANGES_ALGORITHM_H_
+
+#include <algorithm>
+#include <iterator>
+#include <utility>
+
+#include "base/util/ranges/functional.h"
+#include "base/util/ranges/iterator.h"
+
+namespace util {
+namespace ranges {
+
+namespace internal {
+
+// Returns a transformed version of the unary predicate `pred` applying `proj`
+// to its argument before invoking `pred` on it.
+// Ensures that the return type of `invoke(pred, ...)` is convertible to bool.
+// Note: `pred` and `proj` are captured by reference, so the returned lambda
+// must not outlive them.
+template <typename Pred, typename Proj>
+constexpr auto ProjectedUnaryPredicate(Pred& pred, Proj& proj) noexcept {
+ return [&pred, &proj](auto&& arg) -> bool {
+ return invoke(pred, invoke(proj, std::forward<decltype(arg)>(arg)));
+ };
+}
+
+// Returns a transformed version of the binary predicate `pred` applying `proj1`
+// and `proj2` to its arguments before invoking `pred` on them.
+// Ensures that the return type of `invoke(pred, ...)` is convertible to bool.
+// Note: `pred`, `proj1` and `proj2` are captured by reference, so the returned
+// lambda must not outlive them.
+template <typename Pred, typename Proj1, typename Proj2>
+constexpr auto ProjectedBinaryPredicate(Pred& pred,
+ Proj1& proj1,
+ Proj2& proj2) noexcept {
+ return [&pred, &proj1, &proj2](auto&& lhs, auto&& rhs) -> bool {
+ return invoke(pred, invoke(proj1, std::forward<decltype(lhs)>(lhs)),
+ invoke(proj2, std::forward<decltype(rhs)>(rhs)));
+ };
+}
+
+// This alias is used below to restrict iterator based APIs to types for which
+// `iterator_category` is defined. This is required in situations where
+// otherwise an undesired overload would be chosen, e.g. copy_if. In spirit this
+// is similar to C++20's std::input_or_output_iterator, a concept that each
+// iterator should satisfy.
+template <typename Iter>
+using iterator_category_t =
+ typename std::iterator_traits<Iter>::iterator_category;
+
+} // namespace internal
+
+// [alg.nonmodifying] Non-modifying sequence operations
+// Reference: https://wg21.link/alg.nonmodifying
+
+// [alg.all.of] All of
+// Reference: https://wg21.link/alg.all.of
+
+// Let `E(i)` be `invoke(pred, invoke(proj, *i))`.
+//
+// Returns: `false` if `E(i)` is `false` for some iterator `i` in the range
+// `[first, last)`, and `true` otherwise.
+// Note: This implies an empty range yields `true` (vacuous truth).
+//
+// Complexity: At most `last - first` applications of the predicate and any
+// projection.
+//
+// Reference: https://wg21.link/alg.all.of#:~:text=ranges::all_of(I
+template <typename InputIterator,
+ typename Pred,
+ typename Proj = identity,
+ typename = internal::iterator_category_t<InputIterator>>
+constexpr bool all_of(InputIterator first,
+ InputIterator last,
+ Pred pred,
+ Proj proj = {}) {
+ return std::all_of(first, last,
+ internal::ProjectedUnaryPredicate(pred, proj));
+}
+
+// Let `E(i)` be `invoke(pred, invoke(proj, *i))`.
+//
+// Returns: `false` if `E(i)` is `false` for some iterator `i` in `range`, and
+// `true` otherwise.
+// Note: This implies an empty range yields `true` (vacuous truth).
+//
+// Complexity: At most `size(range)` applications of the predicate and any
+// projection.
+//
+// Reference: https://wg21.link/alg.all.of#:~:text=ranges::all_of(R
+template <typename Range, typename Pred, typename Proj = identity>
+constexpr bool all_of(Range&& range, Pred pred, Proj proj = {}) {
+ return ranges::all_of(ranges::begin(range), ranges::end(range),
+ std::move(pred), std::move(proj));
+}
+
+// [alg.any.of] Any of
+// Reference: https://wg21.link/alg.any.of
+
+// Let `E(i)` be `invoke(pred, invoke(proj, *i))`.
+//
+// Returns: `true` if `E(i)` is `true` for some iterator `i` in the range
+// `[first, last)`, and `false` otherwise.
+// Note: This implies an empty range yields `false`.
+//
+// Complexity: At most `last - first` applications of the predicate and any
+// projection.
+//
+// Reference: https://wg21.link/alg.any.of#:~:text=ranges::any_of(I
+template <typename InputIterator,
+ typename Pred,
+ typename Proj = identity,
+ typename = internal::iterator_category_t<InputIterator>>
+constexpr bool any_of(InputIterator first,
+ InputIterator last,
+ Pred pred,
+ Proj proj = {}) {
+ return std::any_of(first, last,
+ internal::ProjectedUnaryPredicate(pred, proj));
+}
+
+// Let `E(i)` be `invoke(pred, invoke(proj, *i))`.
+//
+// Returns: `true` if `E(i)` is `true` for some iterator `i` in `range`, and
+// `false` otherwise.
+// Note: This implies an empty range yields `false`.
+//
+// Complexity: At most `size(range)` applications of the predicate and any
+// projection.
+//
+// Reference: https://wg21.link/alg.any.of#:~:text=ranges::any_of(R
+template <typename Range, typename Pred, typename Proj = identity>
+constexpr bool any_of(Range&& range, Pred pred, Proj proj = {}) {
+ return ranges::any_of(ranges::begin(range), ranges::end(range),
+ std::move(pred), std::move(proj));
+}
+
+// [alg.none.of] None of
+// Reference: https://wg21.link/alg.none.of
+
+// Let `E(i)` be `invoke(pred, invoke(proj, *i))`.
+//
+// Returns: `false` if `E(i)` is `true` for some iterator `i` in the range
+// `[first, last)`, and `true` otherwise.
+// Note: This implies an empty range yields `true`.
+//
+// Complexity: At most `last - first` applications of the predicate and any
+// projection.
+//
+// Reference: https://wg21.link/alg.none.of#:~:text=ranges::none_of(I
+template <typename InputIterator,
+ typename Pred,
+ typename Proj = identity,
+ typename = internal::iterator_category_t<InputIterator>>
+constexpr bool none_of(InputIterator first,
+ InputIterator last,
+ Pred pred,
+ Proj proj = {}) {
+ return std::none_of(first, last,
+ internal::ProjectedUnaryPredicate(pred, proj));
+}
+
+// Let `E(i)` be `invoke(pred, invoke(proj, *i))`.
+//
+// Returns: `false` if `E(i)` is `true` for some iterator `i` in `range`, and
+// `true` otherwise.
+// Note: This implies an empty range yields `true`.
+//
+// Complexity: At most `size(range)` applications of the predicate and any
+// projection.
+//
+// Reference: https://wg21.link/alg.none.of#:~:text=ranges::none_of(R
+template <typename Range, typename Pred, typename Proj = identity>
+constexpr bool none_of(Range&& range, Pred pred, Proj proj = {}) {
+ return ranges::none_of(ranges::begin(range), ranges::end(range),
+ std::move(pred), std::move(proj));
+}
+
+// [alg.foreach] For each
+// Reference: https://wg21.link/alg.foreach
+
+// Effects: Calls `invoke(f, invoke(proj, *i))` for every iterator `i` in the
+// range `[first, last)`, starting from `first` and proceeding to `last - 1`.
+//
+// Returns: `f`
+// Note: std::ranges::for_each(I first,...) returns a for_each_result, rather
+// than an invocable. For simplicity we match std::for_each's return type
+// instead.
+//
+// Complexity: Applies `f` and `proj` exactly `last - first` times.
+//
+// Remarks: If `f` returns a result, the result is ignored.
+//
+// Reference: https://wg21.link/alg.foreach#:~:text=ranges::for_each(I
+template <typename InputIterator,
+ typename Fun,
+ typename Proj = identity,
+ typename = internal::iterator_category_t<InputIterator>>
+constexpr auto for_each(InputIterator first,
+ InputIterator last,
+ Fun f,
+ Proj proj = {}) {
+ std::for_each(first, last, [&f, &proj](auto&& arg) {
+ return invoke(f, invoke(proj, std::forward<decltype(arg)>(arg)));
+ });
+
+ return f;
+}
+
+// Effects: Calls `invoke(f, invoke(proj, *i))` for every iterator `i` in the
+// range `range`, starting from `begin(range)` and proceeding to `end(range) -
+// 1`.
+//
+// Returns: `f`
+// Note: std::ranges::for_each(R&& r,...) returns a for_each_result, rather
+// than an invocable. For simplicity we match std::for_each's return type
+// instead.
+//
+// Complexity: Applies `f` and `proj` exactly `size(range)` times.
+//
+// Remarks: If `f` returns a result, the result is ignored.
+//
+// Reference: https://wg21.link/alg.foreach#:~:text=ranges::for_each(R
+template <typename Range, typename Fun, typename Proj = identity>
+constexpr auto for_each(Range&& range, Fun f, Proj proj = {}) {
+ return ranges::for_each(ranges::begin(range), ranges::end(range),
+ std::move(f), std::move(proj));
+}
+
+// [alg.find] Find
+// Reference: https://wg21.link/alg.find
+
+// Let `E(i)` be `bool(invoke(proj, *i) == value)`.
+//
+// Returns: The first iterator `i` in the range `[first, last)` for which `E(i)`
+// is `true`. Returns `last` if no such iterator is found.
+//
+// Complexity: At most `last - first` applications of the corresponding
+// predicate and any projection.
+//
+// Reference: https://wg21.link/alg.find#:~:text=ranges::find(I
+template <typename InputIterator,
+ typename T,
+ typename Proj = identity,
+ typename = internal::iterator_category_t<InputIterator>>
+constexpr auto find(InputIterator first,
+ InputIterator last,
+ const T& value,
+ Proj proj = {}) {
+ // Note: In order to be able to apply `proj` to each element in [first, last)
+ // we are dispatching to std::find_if instead of std::find.
+ // The projected element is the left-hand operand of `==`, which matters for
+ // asymmetric comparison operators.
+ return std::find_if(first, last, [&proj, &value](auto&& lhs) {
+ return invoke(proj, std::forward<decltype(lhs)>(lhs)) == value;
+ });
+}
+
+// Let `E(i)` be `bool(invoke(proj, *i) == value)`.
+//
+// Returns: The first iterator `i` in `range` for which `E(i)` is `true`.
+// Returns `end(range)` if no such iterator is found.
+//
+// Complexity: At most `size(range)` applications of the corresponding predicate
+// and any projection.
+//
+// Reference: https://wg21.link/alg.find#:~:text=ranges::find(R
+template <typename Range, typename T, typename Proj = identity>
+constexpr auto find(Range&& range, const T& value, Proj proj = {}) {
+ return ranges::find(ranges::begin(range), ranges::end(range), value,
+ std::move(proj));
+}
+
+// Let `E(i)` be `bool(invoke(pred, invoke(proj, *i)))`.
+//
+// Returns: The first iterator `i` in the range `[first, last)` for which `E(i)`
+// is `true`. Returns `last` if no such iterator is found.
+//
+// Complexity: At most `last - first` applications of the corresponding
+// predicate and any projection.
+//
+// Reference: https://wg21.link/alg.find#:~:text=ranges::find_if(I
+template <typename InputIterator,
+ typename Pred,
+ typename Proj = identity,
+ typename = internal::iterator_category_t<InputIterator>>
+constexpr auto find_if(InputIterator first,
+ InputIterator last,
+ Pred pred,
+ Proj proj = {}) {
+ return std::find_if(first, last,
+ internal::ProjectedUnaryPredicate(pred, proj));
+}
+
+// Let `E(i)` be `bool(invoke(pred, invoke(proj, *i)))`.
+//
+// Returns: The first iterator `i` in `range` for which `E(i)` is `true`.
+// Returns `end(range)` if no such iterator is found.
+//
+// Complexity: At most `size(range)` applications of the corresponding predicate
+// and any projection.
+//
+// Reference: https://wg21.link/alg.find#:~:text=ranges::find_if(R
+template <typename Range, typename Pred, typename Proj = identity>
+constexpr auto find_if(Range&& range, Pred pred, Proj proj = {}) {
+ return ranges::find_if(ranges::begin(range), ranges::end(range),
+ std::move(pred), std::move(proj));
+}
+
+// Let `E(i)` be `bool(!invoke(pred, invoke(proj, *i)))`.
+// I.e. this is `find_if` with the predicate negated.
+//
+// Returns: The first iterator `i` in the range `[first, last)` for which `E(i)`
+// is `true`. Returns `last` if no such iterator is found.
+//
+// Complexity: At most `last - first` applications of the corresponding
+// predicate and any projection.
+//
+// Reference: https://wg21.link/alg.find#:~:text=ranges::find_if_not(I
+template <typename InputIterator,
+ typename Pred,
+ typename Proj = identity,
+ typename = internal::iterator_category_t<InputIterator>>
+constexpr auto find_if_not(InputIterator first,
+ InputIterator last,
+ Pred pred,
+ Proj proj = {}) {
+ return std::find_if_not(first, last,
+ internal::ProjectedUnaryPredicate(pred, proj));
+}
+
+// Let `E(i)` be `bool(!invoke(pred, invoke(proj, *i)))`.
+//
+// Returns: The first iterator `i` in `range` for which `E(i)` is `true`.
+// Returns `end(range)` if no such iterator is found.
+//
+// Complexity: At most `size(range)` applications of the corresponding predicate
+// and any projection.
+//
+// Reference: https://wg21.link/alg.find#:~:text=ranges::find_if_not(R
+template <typename Range, typename Pred, typename Proj = identity>
+constexpr auto find_if_not(Range&& range, Pred pred, Proj proj = {}) {
+ return ranges::find_if_not(ranges::begin(range), ranges::end(range),
+ std::move(pred), std::move(proj));
+}
+
+// [alg.find.end] Find end
+// Reference: https://wg21.link/alg.find.end
+
+// Let:
+// - `E(i,n)` be `invoke(pred, invoke(proj1, *(i + n)),
+// invoke(proj2, *(first2 + n)))`
+//
+// - `i` be `last1` if `[first2, last2)` is empty, or if
+// `(last2 - first2) > (last1 - first1)` is `true`, or if there is no iterator
+// in the range `[first1, last1 - (last2 - first2))` such that for every
+// non-negative integer `n < (last2 - first2)`, `E(i,n)` is `true`. Otherwise
+// `i` is the last such iterator in `[first1, last1 - (last2 - first2))`.
+//
+// Returns: `i`
+// Note: std::ranges::find_end(I1 first1,...) returns a range, rather than an
+// iterator. For simplicity we match std::find_end's return type instead.
+//
+// Complexity:
+// At most `(last2 - first2) * (last1 - first1 - (last2 - first2) + 1)`
+// applications of the corresponding predicate and any projections.
+//
+// Reference: https://wg21.link/alg.find.end#:~:text=ranges::find_end(I1
+template <typename ForwardIterator1,
+ typename ForwardIterator2,
+ typename Pred = ranges::equal_to,
+ typename Proj1 = identity,
+ typename Proj2 = identity,
+ typename = internal::iterator_category_t<ForwardIterator1>,
+ typename = internal::iterator_category_t<ForwardIterator2>>
+constexpr auto find_end(ForwardIterator1 first1,
+ ForwardIterator1 last1,
+ ForwardIterator2 first2,
+ ForwardIterator2 last2,
+ Pred pred = {},
+ Proj1 proj1 = {},
+ Proj2 proj2 = {}) {
+ return std::find_end(first1, last1, first2, last2,
+ internal::ProjectedBinaryPredicate(pred, proj1, proj2));
+}
+
+// Let:
+// - `E(i,n)` be `invoke(pred, invoke(proj1, *(i + n)),
+// invoke(proj2, *(first2 + n)))`
+//
+// - `i` be `end(range1)` if `range2` is empty, or if
+// `size(range2) > size(range1)` is `true`, or if there is no iterator in the
+// range `[begin(range1), end(range1) - size(range2))` such that for every
+// non-negative integer `n < size(range2)`, `E(i,n)` is `true`. Otherwise `i`
+// is the last such iterator in `[begin(range1), end(range1) - size(range2))`.
+//
+// Returns: `i`
+// Note: std::ranges::find_end(R1&& r1,...) returns a range, rather than an
+// iterator. For simplicity we match std::find_end's return type instead.
+//
+// Complexity: At most `size(range2) * (size(range1) - size(range2) + 1)`
+// applications of the corresponding predicate and any projections.
+//
+// Reference: https://wg21.link/alg.find.end#:~:text=ranges::find_end(R1
+template <typename Range1,
+ typename Range2,
+ typename Pred = ranges::equal_to,
+ typename Proj1 = identity,
+ typename Proj2 = identity>
+constexpr auto find_end(Range1&& range1,
+ Range2&& range2,
+ Pred pred = {},
+ Proj1 proj1 = {},
+ Proj2 proj2 = {}) {
+ return ranges::find_end(ranges::begin(range1), ranges::end(range1),
+ ranges::begin(range2), ranges::end(range2),
+ std::move(pred), std::move(proj1), std::move(proj2));
+}
+
+// [alg.find.first.of] Find first
+// Reference: https://wg21.link/alg.find.first.of
+
+// Let `E(i,j)` be `bool(invoke(pred, invoke(proj1, *i), invoke(proj2, *j)))`.
+//
+// Effects: Finds an element that matches one of a set of values.
+// Note: `pred` defaults to projected equality (`ranges::equal_to`).
+//
+// Returns: The first iterator `i` in the range `[first1, last1)` such that for
+// some iterator `j` in the range `[first2, last2)` `E(i,j)` holds. Returns
+// `last1` if `[first2, last2)` is empty or if no such iterator is found.
+//
+// Complexity: At most `(last1 - first1) * (last2 - first2)` applications of the
+// corresponding predicate and any projections.
+//
+// Reference:
+// https://wg21.link/alg.find.first.of#:~:text=ranges::find_first_of(I1
+template <typename ForwardIterator1,
+ typename ForwardIterator2,
+ typename Pred = ranges::equal_to,
+ typename Proj1 = identity,
+ typename Proj2 = identity,
+ typename = internal::iterator_category_t<ForwardIterator1>,
+ typename = internal::iterator_category_t<ForwardIterator2>>
+constexpr auto find_first_of(ForwardIterator1 first1,
+ ForwardIterator1 last1,
+ ForwardIterator2 first2,
+ ForwardIterator2 last2,
+ Pred pred = {},
+ Proj1 proj1 = {},
+ Proj2 proj2 = {}) {
+ return std::find_first_of(
+ first1, last1, first2, last2,
+ internal::ProjectedBinaryPredicate(pred, proj1, proj2));
+}
+
+// Let `E(i,j)` be `bool(invoke(pred, invoke(proj1, *i), invoke(proj2, *j)))`.
+//
+// Effects: Finds an element that matches one of a set of values.
+//
+// Returns: The first iterator `i` in `range1` such that for some iterator `j`
+// in `range2` `E(i,j)` holds. Returns `end(range1)` if `range2` is empty or if
+// no such iterator is found.
+//
+// Complexity: At most `size(range1) * size(range2)` applications of the
+// corresponding predicate and any projections.
+//
+// Reference:
+// https://wg21.link/alg.find.first.of#:~:text=ranges::find_first_of(R1
+template <typename Range1,
+ typename Range2,
+ typename Pred = ranges::equal_to,
+ typename Proj1 = identity,
+ typename Proj2 = identity>
+constexpr auto find_first_of(Range1&& range1,
+ Range2&& range2,
+ Pred pred = {},
+ Proj1 proj1 = {},
+ Proj2 proj2 = {}) {
+ return ranges::find_first_of(
+ ranges::begin(range1), ranges::end(range1), ranges::begin(range2),
+ ranges::end(range2), std::move(pred), std::move(proj1), std::move(proj2));
+}
+
+// [alg.adjacent.find] Adjacent find
+// Reference: https://wg21.link/alg.adjacent.find
+
+// Let `E(i)` be `bool(invoke(pred, invoke(proj, *i), invoke(proj, *(i + 1))))`.
+// Note: Both operands are projected with the same `proj`.
+//
+// Returns: The first iterator `i` such that both `i` and `i + 1` are in the
+// range `[first, last)` for which `E(i)` holds. Returns `last` if no such
+// iterator is found.
+//
+// Complexity: Exactly `min((i - first) + 1, (last - first) - 1)` applications
+// of the corresponding predicate, where `i` is `adjacent_find`'s return value.
+//
+// Reference:
+// https://wg21.link/alg.adjacent.find#:~:text=ranges::adjacent_find(I
+template <typename ForwardIterator,
+ typename Pred = ranges::equal_to,
+ typename Proj = identity,
+ typename = internal::iterator_category_t<ForwardIterator>>
+constexpr auto adjacent_find(ForwardIterator first,
+ ForwardIterator last,
+ Pred pred = {},
+ Proj proj = {}) {
+ return std::adjacent_find(
+ first, last, internal::ProjectedBinaryPredicate(pred, proj, proj));
+}
+
+// Let `E(i)` be `bool(invoke(pred, invoke(proj, *i), invoke(proj, *(i + 1))))`.
+//
+// Returns: The first iterator `i` such that both `i` and `i + 1` are in the
+// range `range` for which `E(i)` holds. Returns `end(range)` if no such
+// iterator is found.
+//
+// Complexity: Exactly `min((i - begin(range)) + 1, size(range) - 1)`
+// applications of the corresponding predicate, where `i` is `adjacent_find`'s
+// return value.
+//
+// Reference:
+// https://wg21.link/alg.adjacent.find#:~:text=ranges::adjacent_find(R
+template <typename Range,
+ typename Pred = ranges::equal_to,
+ typename Proj = identity>
+constexpr auto adjacent_find(Range&& range, Pred pred = {}, Proj proj = {}) {
+ return ranges::adjacent_find(ranges::begin(range), ranges::end(range),
+ std::move(pred), std::move(proj));
+}
+
+// [alg.count] Count
+// Reference: https://wg21.link/alg.count
+
+// Let `E(i)` be `invoke(proj, *i) == value`.
+//
+// Effects: Returns the number of iterators `i` in the range `[first, last)` for
+// which `E(i)` holds.
+//
+// Complexity: Exactly `last - first` applications of the corresponding
+// predicate and any projection.
+//
+// Reference: https://wg21.link/alg.count#:~:text=ranges::count(I
+template <typename InputIterator,
+ typename T,
+ typename Proj = identity,
+ typename = internal::iterator_category_t<InputIterator>>
+constexpr auto count(InputIterator first,
+ InputIterator last,
+ const T& value,
+ Proj proj = {}) {
+ // Note: In order to be able to apply `proj` to each element in [first, last)
+ // we are dispatching to std::count_if instead of std::count.
+ // The projected element is the left-hand operand of `==`, which matters for
+ // asymmetric comparison operators.
+ return std::count_if(first, last, [&proj, &value](auto&& lhs) {
+ return invoke(proj, std::forward<decltype(lhs)>(lhs)) == value;
+ });
+}
+
+// Let `E(i)` be `invoke(proj, *i) == value`.
+//
+// Effects: Returns the number of iterators `i` in `range` for which `E(i)`
+// holds.
+//
+// Complexity: Exactly `size(range)` applications of the corresponding predicate
+// and any projection.
+//
+// Reference: https://wg21.link/alg.count#:~:text=ranges::count(R
+template <typename Range, typename T, typename Proj = identity>
+constexpr auto count(Range&& range, const T& value, Proj proj = {}) {
+ return ranges::count(ranges::begin(range), ranges::end(range), value,
+ std::move(proj));
+}
+
+// Let `E(i)` be `bool(invoke(pred, invoke(proj, *i)))`.
+//
+// Effects: Returns the number of iterators `i` in the range `[first, last)` for
+// which `E(i)` holds.
+//
+// Complexity: Exactly `last - first` applications of the corresponding
+// predicate and any projection.
+//
+// Reference: https://wg21.link/alg.count#:~:text=ranges::count_if(I
+template <typename InputIterator,
+ typename Pred,
+ typename Proj = identity,
+ typename = internal::iterator_category_t<InputIterator>>
+constexpr auto count_if(InputIterator first,
+ InputIterator last,
+ Pred pred,
+ Proj proj = {}) {
+ return std::count_if(first, last,
+ internal::ProjectedUnaryPredicate(pred, proj));
+}
+
+// Let `E(i)` be `bool(invoke(pred, invoke(proj, *i)))`.
+//
+// Effects: Returns the number of iterators `i` in `range` for which `E(i)`
+// holds.
+//
+// Complexity: Exactly `size(range)` applications of the corresponding predicate
+// and any projection.
+//
+// Reference: https://wg21.link/alg.count#:~:text=ranges::count_if(R
+template <typename Range, typename Pred, typename Proj = identity>
+constexpr auto count_if(Range&& range, Pred pred, Proj proj = {}) {
+ return ranges::count_if(ranges::begin(range), ranges::end(range),
+ std::move(pred), std::move(proj));
+}
+
+// [mismatch] Mismatch
+// Reference: https://wg21.link/mismatch
+
+// Let `E(n)` be `!invoke(pred, invoke(proj1, *(first1 + n)),
+// invoke(proj2, *(first2 + n)))`.
+//
+// Let `N` be `min(last1 - first1, last2 - first2)`.
+//
+// Returns: `{ first1 + n, first2 + n }`, where `n` is the smallest integer in
+// `[0, N)` such that `E(n)` holds, or `N` if no such integer exists.
+//
+// Complexity: At most `N` applications of the corresponding predicate and any
+// projections.
+//
+// Note: Dispatches to the dual-range std::mismatch overload (C++14), which
+// handles ranges of different lengths.
+//
+// Reference: https://wg21.link/mismatch#:~:text=ranges::mismatch(I1
+template <typename ForwardIterator1,
+ typename ForwardIterator2,
+ typename Pred = ranges::equal_to,
+ typename Proj1 = identity,
+ typename Proj2 = identity,
+ typename = internal::iterator_category_t<ForwardIterator1>,
+ typename = internal::iterator_category_t<ForwardIterator2>>
+constexpr auto mismatch(ForwardIterator1 first1,
+ ForwardIterator1 last1,
+ ForwardIterator2 first2,
+ ForwardIterator2 last2,
+ Pred pred = {},
+ Proj1 proj1 = {},
+ Proj2 proj2 = {}) {
+ return std::mismatch(first1, last1, first2, last2,
+ internal::ProjectedBinaryPredicate(pred, proj1, proj2));
+}
+
+// Let `E(n)` be `!invoke(pred, invoke(proj1, *(begin(range1) + n)),
+// invoke(proj2, *(begin(range2) + n)))`.
+//
+// Let `N` be `min(size(range1), size(range2))`.
+//
+// Returns: `{ begin(range1) + n, begin(range2) + n }`, where `n` is the
+// smallest integer in `[0, N)` such that `E(n)` holds, or `N` if no such
+// integer exists.
+//
+// Complexity: At most `N` applications of the corresponding predicate and any
+// projections.
+//
+// Reference: https://wg21.link/mismatch#:~:text=ranges::mismatch(R1
+template <typename Range1,
+ typename Range2,
+ typename Pred = ranges::equal_to,
+ typename Proj1 = identity,
+ typename Proj2 = identity>
+constexpr auto mismatch(Range1&& range1,
+ Range2&& range2,
+ Pred pred = {},
+ Proj1 proj1 = {},
+ Proj2 proj2 = {}) {
+ return ranges::mismatch(ranges::begin(range1), ranges::end(range1),
+ ranges::begin(range2), ranges::end(range2),
+ std::move(pred), std::move(proj1), std::move(proj2));
+}
+
+// [alg.equal] Equal
+// Reference: https://wg21.link/alg.equal
+
+// Let `E(i)` be
+// `invoke(pred, invoke(proj1, *i), invoke(proj2, *(first2 + (i - first1))))`.
+//
+// Returns: If `last1 - first1 != last2 - first2`, return `false.` Otherwise
+// return `true` if `E(i)` holds for every iterator `i` in the range `[first1,
+// last1)`. Otherwise, returns `false`.
+//
+// Complexity: If the types of `first1`, `last1`, `first2`, and `last2` meet the
+// `RandomAccessIterator` requirements and `last1 - first1 != last2 - first2`,
+// then no applications of the corresponding predicate and each projection;
+// otherwise, at most `min(last1 - first1, last2 - first2)` applications of the
+// corresponding predicate and any projections.
+//
+// Note: Dispatches to the dual-range std::equal overload (C++14), which
+// performs the length check described above.
+//
+// Reference: https://wg21.link/alg.equal#:~:text=ranges::equal(I1
+template <typename ForwardIterator1,
+ typename ForwardIterator2,
+ typename Pred = ranges::equal_to,
+ typename Proj1 = identity,
+ typename Proj2 = identity,
+ typename = internal::iterator_category_t<ForwardIterator1>,
+ typename = internal::iterator_category_t<ForwardIterator2>>
+constexpr bool equal(ForwardIterator1 first1,
+ ForwardIterator1 last1,
+ ForwardIterator2 first2,
+ ForwardIterator2 last2,
+ Pred pred = {},
+ Proj1 proj1 = {},
+ Proj2 proj2 = {}) {
+ return std::equal(first1, last1, first2, last2,
+ internal::ProjectedBinaryPredicate(pred, proj1, proj2));
+}
+
+// Let `E(i)` be
+// `invoke(pred, invoke(proj1, *i),
+// invoke(proj2, *(begin(range2) + (i - begin(range1)))))`.
+//
+// Returns: If `size(range1) != size(range2)`, return `false.` Otherwise return
+// `true` if `E(i)` holds for every iterator `i` in `range1`. Otherwise, returns
+// `false`.
+//
+// Complexity: If the types of `begin(range1)`, `end(range1)`, `begin(range2)`,
+// and `end(range2)` meet the `RandomAccessIterator` requirements and
+// `size(range1) != size(range2)`, then no applications of the corresponding
+// predicate and each projection;
+// otherwise, at most `min(size(range1), size(range2))` applications of the
+// corresponding predicate and any projections.
+//
+// Reference: https://wg21.link/alg.equal#:~:text=ranges::equal(R1
+template <typename Range1,
+ typename Range2,
+ typename Pred = ranges::equal_to,
+ typename Proj1 = identity,
+ typename Proj2 = identity>
+constexpr bool equal(Range1&& range1,
+ Range2&& range2,
+ Pred pred = {},
+ Proj1 proj1 = {},
+ Proj2 proj2 = {}) {
+ return ranges::equal(ranges::begin(range1), ranges::end(range1),
+ ranges::begin(range2), ranges::end(range2),
+ std::move(pred), std::move(proj1), std::move(proj2));
+}
+
+// [alg.is.permutation] Is permutation
+// Reference: https://wg21.link/alg.is.permutation
+
+// Returns: If `last1 - first1 != last2 - first2`, return `false`. Otherwise
+// return `true` if there exists a permutation of the elements in the range
+// `[first2, last2)`, bounded by `[pfirst, plast)`, such that
+// `ranges::equal(first1, last1, pfirst, plast, pred, proj, proj)` returns
+// `true`; otherwise, returns `false`.
+//
+// Complexity: No applications of the corresponding predicate if
+// ForwardIterator1 and ForwardIterator2 meet the requirements of random access
+// iterators and `last1 - first1 != last2 - first2`. Otherwise, exactly
+// `last1 - first1` applications of the corresponding predicate and projections
+// if `ranges::equal(first1, last1, first2, last2, pred, proj, proj)` would
+// return true;
+// otherwise, at worst `O(N^2)`, where `N` has the value `last1 - first1`.
+//
+// Note: While std::ranges::is_permutation supports different projections for
+// the first and second range, this is currently not supported due to
+// dispatching to std::is_permutation, which demands that `pred` is an
+// equivalence relation.
+// TODO(https://crbug.com/1071094): Consider supporting different projections in
+// the future.
+//
+// Reference:
+// https://wg21.link/alg.is.permutation#:~:text=ranges::is_permutation(I1
+template <typename ForwardIterator1,
+ typename ForwardIterator2,
+ typename Pred = ranges::equal_to,
+ typename Proj = identity,
+ typename = internal::iterator_category_t<ForwardIterator1>,
+ typename = internal::iterator_category_t<ForwardIterator2>>
+constexpr bool is_permutation(ForwardIterator1 first1,
+ ForwardIterator1 last1,
+ ForwardIterator2 first2,
+ ForwardIterator2 last2,
+ Pred pred = {},
+ Proj proj = {}) {
+ return std::is_permutation(
+ first1, last1, first2, last2,
+ internal::ProjectedBinaryPredicate(pred, proj, proj));
+}
+
+// Returns: If `size(range1) != size(range2)`, return `false`. Otherwise return
+// `true` if there exists a permutation of the elements in `range2`, bounded by
+// `[pbegin, pend)`, such that
+// `ranges::equal(range1, [pbegin, pend), pred, proj, proj)` returns `true`;
+// otherwise, returns `false`.
+//
+// Complexity: No applications of the corresponding predicate if Range1 and
+// Range2 meet the requirements of random access ranges and
+// `size(range1) != size(range2)`. Otherwise, exactly `size(range1)`
+// applications of the corresponding predicate and projections if
+// `ranges::equal(range1, range2, pred, proj, proj)` would return true;
+// otherwise, at worst `O(N^2)`, where `N` has the value `size(range1)`.
+//
+// Note: While std::ranges::is_permutation supports different projections for
+// the first and second range, this is currently not supported due to
+// dispatching to std::is_permutation, which demands that `pred` is an
+// equivalence relation.
+// TODO(https://crbug.com/1071094): Consider supporting different projections in
+// the future.
+//
+// Reference:
+// https://wg21.link/alg.is.permutation#:~:text=ranges::is_permutation(R1
+template <typename Range1,
+ typename Range2,
+ typename Pred = ranges::equal_to,
+ typename Proj = identity>
+constexpr bool is_permutation(Range1&& range1,
+ Range2&& range2,
+ Pred pred = {},
+ Proj proj = {}) {
+ return ranges::is_permutation(ranges::begin(range1), ranges::end(range1),
+ ranges::begin(range2), ranges::end(range2),
+ std::move(pred), std::move(proj));
+}
+
+// [alg.search] Search
+// Reference: https://wg21.link/alg.search
+
+// Returns: `i`, where `i` is the first iterator in the range
+// `[first1, last1 - (last2 - first2))` such that for every non-negative integer
+// `n` less than `last2 - first2` the condition
+// `bool(invoke(pred, invoke(proj1, *(i + n)), invoke(proj2, *(first2 + n))))`
+// is `true`.
+// Returns `last1` if no such iterator exists.
+// Note: std::ranges::search(I1 first1,...) returns a range, rather than an
+// iterator. For simplicity we match std::search's return type instead.
+//
+// Complexity: At most `(last1 - first1) * (last2 - first2)` applications of the
+// corresponding predicate and projections.
+//
+// Reference: https://wg21.link/alg.search#:~:text=ranges::search(I1
+template <typename ForwardIterator1,
+ typename ForwardIterator2,
+ typename Pred = ranges::equal_to,
+ typename Proj1 = identity,
+ typename Proj2 = identity,
+ typename = internal::iterator_category_t<ForwardIterator1>,
+ typename = internal::iterator_category_t<ForwardIterator2>>
+constexpr auto search(ForwardIterator1 first1,
+ ForwardIterator1 last1,
+ ForwardIterator2 first2,
+ ForwardIterator2 last2,
+ Pred pred = {},
+ Proj1 proj1 = {},
+ Proj2 proj2 = {}) {
+ return std::search(first1, last1, first2, last2,
+ internal::ProjectedBinaryPredicate(pred, proj1, proj2));
+}
+
+// Returns: `i`, where `i` is the first iterator in the range
+// `[begin(range1), end(range1) - size(range2))` such that for every
+// non-negative integer `n` less than `size(range2)` the condition
+// `bool(invoke(pred, invoke(proj1, *(i + n)),
+// invoke(proj2, *(begin(range2) + n))))` is `true`.
+// Returns `end(range1)` if no such iterator exists.
+// Note: std::ranges::search(R1&& r1,...) returns a range, rather than an
+// iterator. For simplicity we match std::search's return type instead.
+//
+// Complexity: At most `size(range1) * size(range2)` applications of the
+// corresponding predicate and projections.
+//
+// Reference: https://wg21.link/alg.search#:~:text=ranges::search(R1
+template <typename Range1,
+ typename Range2,
+ typename Pred = ranges::equal_to,
+ typename Proj1 = identity,
+ typename Proj2 = identity>
+constexpr auto search(Range1&& range1,
+ Range2&& range2,
+ Pred pred = {},
+ Proj1 proj1 = {},
+ Proj2 proj2 = {}) {
+ return ranges::search(ranges::begin(range1), ranges::end(range1),
+ ranges::begin(range2), ranges::end(range2),
+ std::move(pred), std::move(proj1), std::move(proj2));
+}
+
+// Mandates: The type `Size` is convertible to an integral type.
+//
+// Returns: `i` where `i` is the first iterator in the range
+// `[first, last - count)` such that for every non-negative integer `n` less
+// than `count`, the following condition holds:
+// `invoke(pred, invoke(proj, *(i + n)), value)`.
+// Returns `last` if no such iterator is found.
+// Note: std::ranges::search_n(I1 first1,...) returns a range, rather than an
+// iterator. For simplicity we match std::search_n's return type instead.
+//
+// Complexity: At most `last - first` applications of the corresponding
+// predicate and projection.
+//
+// Reference: https://wg21.link/alg.search#:~:text=ranges::search_n(I
+template <typename ForwardIterator,
+ typename Size,
+ typename T,
+ typename Pred = ranges::equal_to,
+ typename Proj = identity,
+ typename = internal::iterator_category_t<ForwardIterator>>
+constexpr auto search_n(ForwardIterator first,
+ ForwardIterator last,
+ Size count,
+ const T& value,
+ Pred pred = {},
+ Proj proj = {}) {
+ // The second arg is guaranteed to be `value`, so we'll simply apply the
+ // identity projection.
+ identity value_proj;
+ return std::search_n(
+ first, last, count, value,
+ internal::ProjectedBinaryPredicate(pred, proj, value_proj));
+}
+
+// Mandates: The type `Size` is convertible to an integral type.
+//
+// Returns: `i` where `i` is the first iterator in the range
+// `[begin(range), end(range) - count)` such that for every non-negative integer
+// `n` less than `count`, the following condition holds:
+// `invoke(pred, invoke(proj, *(i + n)), value)`.
+// Returns `end(range)` if no such iterator is found.
+// Note: std::ranges::search_n(R1&& r1,...) returns a range, rather than an
+// iterator. For simplicity we match std::search_n's return type instead.
+//
+// Complexity: At most `size(range)` applications of the corresponding predicate
+// and projection.
+//
+// Reference: https://wg21.link/alg.search#:~:text=ranges::search_n(R
+template <typename Range,
+ typename Size,
+ typename T,
+ typename Pred = ranges::equal_to,
+ typename Proj = identity>
+constexpr auto search_n(Range&& range,
+ Size count,
+ const T& value,
+ Pred pred = {},
+ Proj proj = {}) {
+ return ranges::search_n(ranges::begin(range), ranges::end(range), count,
+ value, std::move(pred), std::move(proj));
+}
+
+// [alg.modifying.operations] Mutating sequence operations
+// Reference: https://wg21.link/alg.modifying.operations
+
+// [alg.copy] Copy
+// Reference: https://wg21.link/alg.copy
+
+// Let N be `last - first`.
+//
+// Preconditions: `result` is not in the range `[first, last)`.
+//
+// Effects: Copies elements in the range `[first, last)` into the range
+// `[result, result + N)` starting from `first` and proceeding to `last`. For
+// each non-negative integer `n < N` , performs `*(result + n) = *(first + n)`.
+//
+// Returns: `result + N`
+//
+// Complexity: Exactly `N` assignments.
+//
+// Reference: https://wg21.link/alg.copy#:~:text=ranges::copy(I
+template <typename InputIterator,
+ typename OutputIterator,
+ typename = internal::iterator_category_t<InputIterator>,
+ typename = internal::iterator_category_t<OutputIterator>>
+constexpr auto copy(InputIterator first,
+ InputIterator last,
+ OutputIterator result) {
+ return std::copy(first, last, result);
+}
+
+// Let N be `size(range)`.
+//
+// Preconditions: `result` is not in `range`.
+//
+// Effects: Copies elements in `range` into the range `[result, result + N)`
+// starting from `begin(range)` and proceeding to `end(range)`. For each
+// non-negative integer `n < N` , performs
+// *(result + n) = *(begin(range) + n)`.
+//
+// Returns: `result + N`
+//
+// Complexity: Exactly `N` assignments.
+//
+// Reference: https://wg21.link/alg.copy#:~:text=ranges::copy(R
+template <typename Range,
+ typename OutputIterator,
+ typename = internal::iterator_category_t<OutputIterator>>
+constexpr auto copy(Range&& range, OutputIterator result) {
+ return ranges::copy(ranges::begin(range), ranges::end(range), result);
+}
+
+// Let `N` be `max(0, n)`.
+//
+// Mandates: The type `Size` is convertible to an integral type.
+//
+// Effects: For each non-negative integer `i < N`, performs
+// `*(result + i) = *(first + i)`.
+//
+// Returns: `result + N`
+//
+// Complexity: Exactly `N` assignments.
+//
+// Reference: https://wg21.link/alg.copy#:~:text=ranges::copy_n
+template <typename InputIterator,
+ typename Size,
+ typename OutputIterator,
+ typename = internal::iterator_category_t<InputIterator>,
+ typename = internal::iterator_category_t<OutputIterator>>
+constexpr auto copy_n(InputIterator first, Size n, OutputIterator result) {
+ return std::copy_n(first, n, result);
+}
+
+// Let `E(i)` be `bool(invoke(pred, invoke(proj, *i)))`, and `N` be the number
+// of iterators `i` in the range `[first, last)` for which the condition `E(i)`
+// holds.
+//
+// Preconditions: The ranges `[first, last)` and
+// `[result, result + (last - first))` do not overlap.
+//
+// Effects: Copies all of the elements referred to by the iterator `i` in the
+// range `[first, last)` for which `E(i)` is true.
+//
+// Returns: `result + N`
+//
+// Complexity: Exactly `last - first` applications of the corresponding
+// predicate and any projection.
+//
+// Remarks: Stable.
+//
+// Reference: https://wg21.link/alg.copy#:~:text=ranges::copy_if(I
+template <typename InputIterator,
+ typename OutputIterator,
+ typename Pred,
+ typename Proj = identity,
+ typename = internal::iterator_category_t<InputIterator>,
+ typename = internal::iterator_category_t<OutputIterator>>
+constexpr auto copy_if(InputIterator first,
+ InputIterator last,
+ OutputIterator result,
+ Pred pred,
+ Proj proj = {}) {
+ return std::copy_if(first, last, result,
+ internal::ProjectedUnaryPredicate(pred, proj));
+}
+
+// Let `E(i)` be `bool(invoke(pred, invoke(proj, *i)))`, and `N` be the number
+// of iterators `i` in `range` for which the condition `E(i)` holds.
+//
+// Preconditions: `range` and `[result, result + size(range))` do not overlap.
+//
+// Effects: Copies all of the elements referred to by the iterator `i` in
+// `range` for which `E(i)` is true.
+//
+// Returns: `result + N`
+//
+// Complexity: Exactly `size(range)` applications of the corresponding predicate
+// and any projection.
+//
+// Remarks: Stable.
+//
+// Reference: https://wg21.link/alg.copy#:~:text=ranges::copy_if(R
+template <typename Range,
+ typename OutputIterator,
+ typename Pred,
+ typename Proj = identity,
+ typename = internal::iterator_category_t<OutputIterator>>
+constexpr auto copy_if(Range&& range,
+ OutputIterator result,
+ Pred pred,
+ Proj proj = {}) {
+ return ranges::copy_if(ranges::begin(range), ranges::end(range), result,
+ std::move(pred), std::move(proj));
+}
+
+// Let `N` be `last - first`.
+//
+// Preconditions: `result` is not in the range `(first, last]`.
+//
+// Effects: Copies elements in the range `[first, last)` into the range
+// `[result - N, result)` starting from `last - 1` and proceeding to `first`.
+// For each positive integer `n ≤ N`, performs `*(result - n) = *(last - n)`.
+//
+// Returns: `result - N`
+//
+// Complexity: Exactly `N` assignments.
+//
+// Reference: https://wg21.link/alg.copy#:~:text=ranges::copy_backward(I
+template <typename BidirectionalIterator1,
+ typename BidirectionalIterator2,
+ typename = internal::iterator_category_t<BidirectionalIterator1>,
+ typename = internal::iterator_category_t<BidirectionalIterator2>>
+constexpr auto copy_backward(BidirectionalIterator1 first,
+ BidirectionalIterator1 last,
+ BidirectionalIterator2 result) {
+ return std::copy_backward(first, last, result);
+}
+
+// Let `N` be `size(range)`.
+//
+// Preconditions: `result` is not in the range `(begin(range), end(range)]`.
+//
+// Effects: Copies elements in `range` into the range `[result - N, result)`
+// starting from `end(range) - 1` and proceeding to `begin(range)`. For each
+// positive integer `n ≤ N`, performs `*(result - n) = *(end(range) - n)`.
+//
+// Returns: `result - N`
+//
+// Complexity: Exactly `N` assignments.
+//
+// Reference: https://wg21.link/alg.copy#:~:text=ranges::copy_backward(R
+template <typename Range,
+ typename BidirectionalIterator,
+ typename = internal::iterator_category_t<BidirectionalIterator>>
+constexpr auto copy_backward(Range&& range, BidirectionalIterator result) {
+ return ranges::copy_backward(ranges::begin(range), ranges::end(range),
+ result);
+}
+
+// [alg.move] Move
+// Reference: https://wg21.link/alg.move
+
+// TODO(crbug.com/1071094): Implement.
+
+// [alg.swap] Swap
+// Reference: https://wg21.link/alg.swap
+
+// TODO(crbug.com/1071094): Implement.
+
+// [alg.transform] Transform
+// Reference: https://wg21.link/alg.transform
+
+// TODO(crbug.com/1071094): Implement.
+
+// [alg.replace] Replace
+// Reference: https://wg21.link/alg.replace
+
+// TODO(crbug.com/1071094): Implement.
+
+// [alg.fill] Fill
+// Reference: https://wg21.link/alg.fill
+
+// TODO(crbug.com/1071094): Implement.
+
+// [alg.generate] Generate
+// Reference: https://wg21.link/alg.generate
+
+// TODO(crbug.com/1071094): Implement.
+
+// [alg.remove] Remove
+// Reference: https://wg21.link/alg.remove
+
+// TODO(crbug.com/1071094): Implement.
+
+// [alg.unique] Unique
+// Reference: https://wg21.link/alg.unique
+
+// TODO(crbug.com/1071094): Implement.
+
+// [alg.reverse] Reverse
+// Reference: https://wg21.link/alg.reverse
+
+// TODO(crbug.com/1071094): Implement.
+
+// [alg.rotate] Rotate
+// Reference: https://wg21.link/alg.rotate
+
+// TODO(crbug.com/1071094): Implement.
+
+// [alg.random.shuffle] Shuffle
+// Reference: https://wg21.link/alg.random.shuffle
+
+// TODO(crbug.com/1071094): Implement.
+
+// [alg.nonmodifying] Sorting and related operations
+// Reference: https://wg21.link/alg.sorting
+
+// [alg.sort] Sorting
+// Reference: https://wg21.link/alg.sort
+
+// [sort] sort
+// Reference: https://wg21.link/sort
+
+// TODO(crbug.com/1071094): Implement.
+
+// [stable.sort] stable_sort
+// Reference: https://wg21.link/stable.sort
+
+// TODO(crbug.com/1071094): Implement.
+
+// [partial.sort] partial_sort
+// Reference: https://wg21.link/partial.sort
+
+// TODO(crbug.com/1071094): Implement.
+
+// [partial.sort.copy] partial_sort_copy
+// Reference: https://wg21.link/partial.sort.copy
+
+// TODO(crbug.com/1071094): Implement.
+
+// [is.sorted] is_sorted
+// Reference: https://wg21.link/is.sorted
+
+// TODO(crbug.com/1071094): Implement.
+
+// [alg.nth.element] Nth element
+// Reference: https://wg21.link/alg.nth.element
+
+// TODO(crbug.com/1071094): Implement.
+
+// [alg.binary.search] Binary search
+// Reference: https://wg21.link/alg.binary.search
+
+// [lower.bound] lower_bound
+// Reference: https://wg21.link/lower.bound
+
+// Preconditions: The elements `e` of `[first, last)` are partitioned with
+// respect to the expression `bool(invoke(comp, invoke(proj, e), value))`.
+//
+// Returns: The furthermost iterator `i` in the range `[first, last]` such that
+// for every iterator `j` in the range `[first, i)`,
+// `bool(invoke(comp, invoke(proj, *j), value))` is true.
+//
+// Complexity: At most `log(last - first) + O(1)` comparisons and projections.
+//
+// Reference: https://wg21.link/lower.bound#:~:text=ranges::lower_bound(I
+template <typename ForwardIterator,
+ typename T,
+ typename Proj = identity,
+ typename Comp = ranges::less,
+ typename = internal::iterator_category_t<ForwardIterator>>
+constexpr auto lower_bound(ForwardIterator first,
+ ForwardIterator last,
+ const T& value,
+ Comp comp = {},
+ Proj proj = {}) {
+ // The second arg is guaranteed to be `value`, so we'll simply apply the
+ // identity projection.
+ identity value_proj;
+ return std::lower_bound(
+ first, last, value,
+ internal::ProjectedBinaryPredicate(comp, proj, value_proj));
+}
+
+// Preconditions: The elements `e` of `[first, last)` are partitioned with
+// respect to the expression `bool(invoke(comp, invoke(proj, e), value))`.
+//
+// Returns: The furthermost iterator `i` in the range
+// `[begin(range), end(range)]` such that for every iterator `j` in the range
+// `[begin(range), i)`, `bool(invoke(comp, invoke(proj, *j), value))` is true.
+//
+// Complexity: At most `log(size(range)) + O(1)` comparisons and projections.
+//
+// Reference: https://wg21.link/lower.bound#:~:text=ranges::lower_bound(R
+template <typename Range,
+ typename T,
+ typename Proj = identity,
+ typename Comp = ranges::less>
+constexpr auto lower_bound(Range&& range,
+ const T& value,
+ Comp comp = {},
+ Proj proj = {}) {
+ return ranges::lower_bound(ranges::begin(range), ranges::end(range), value,
+ std::move(comp), std::move(proj));
+}
+
+// [upper.bound] upper_bound
+// Reference: https://wg21.link/upper.bound
+
+// TODO(crbug.com/1071094): Implement.
+
+// [equal.range] equal_range
+// Reference: https://wg21.link/equal.range
+
+// TODO(crbug.com/1071094): Implement.
+
+// [binary.search] binary_search
+// Reference: https://wg21.link/binary.search
+
+// TODO(crbug.com/1071094): Implement.
+
+// [alg.partitions] Partitions
+// Reference: https://wg21.link/alg.partitions
+
+// TODO(crbug.com/1071094): Implement.
+
+// [alg.merge] Merge
+// Reference: https://wg21.link/alg.merge
+
+// TODO(crbug.com/1071094): Implement.
+
+// [alg.set.operations] Set operations on sorted structures
+// Reference: https://wg21.link/alg.set.operations
+
+// [includes] includes
+// Reference: https://wg21.link/includes
+
+// TODO(crbug.com/1071094): Implement.
+
+// [set.union] set_union
+// Reference: https://wg21.link/set.union
+
+// TODO(crbug.com/1071094): Implement.
+
+// [set.intersection] set_intersection
+// Reference: https://wg21.link/set.intersection
+
+// TODO(crbug.com/1071094): Implement.
+
+// [set.difference] set_difference
+// Reference: https://wg21.link/set.difference
+
+// TODO(crbug.com/1071094): Implement.
+
+// [set.symmetric.difference] set_symmetric_difference
+// Reference: https://wg21.link/set.symmetric.difference
+
+// TODO(crbug.com/1071094): Implement.
+
+// [alg.heap.operations] Heap operations
+// Reference: https://wg21.link/alg.heap.operations
+
+// [push.heap] push_heap
+// Reference: https://wg21.link/push.heap
+
+// TODO(crbug.com/1071094): Implement.
+
+// [pop.heap] pop_heap
+// Reference: https://wg21.link/pop.heap
+
+// TODO(crbug.com/1071094): Implement.
+
+// [make.heap] make_heap
+// Reference: https://wg21.link/make.heap
+
+// TODO(crbug.com/1071094): Implement.
+
+// [sort.heap] sort_heap
+// Reference: https://wg21.link/sort.heap
+
+// TODO(crbug.com/1071094): Implement.
+
+// [is.heap] is_heap
+// Reference: https://wg21.link/is.heap
+
+// TODO(crbug.com/1071094): Implement.
+
+// [alg.min.max] Minimum and maximum
+// Reference: https://wg21.link/alg.min.max
+
+// TODO(crbug.com/1071094): Implement.
+
+// [alg.lex.comparison] Lexicographical comparison
+// Reference: https://wg21.link/alg.lex.comparison
+
+// TODO(crbug.com/1071094): Implement.
+
+// [alg.permutation.generators] Permutation generators
+// Reference: https://wg21.link/alg.permutation.generators
+
+// TODO(crbug.com/1071094): Implement.
+
+} // namespace ranges
+
+} // namespace util
+
+#endif // BASE_UTIL_RANGES_ALGORITHM_H_
diff --git a/chromium/base/util/ranges/algorithm_unittest.cc b/chromium/base/util/ranges/algorithm_unittest.cc
new file mode 100644
index 00000000000..8b424a506b9
--- /dev/null
+++ b/chromium/base/util/ranges/algorithm_unittest.cc
@@ -0,0 +1,398 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/util/ranges/algorithm.h"
+
+#include <algorithm>
+#include <utility>
+
+#include "base/util/ranges/functional.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using ::testing::ElementsAre;
+using ::testing::Field;
+
+namespace util {
+
+namespace {
+
+struct Int {
+ int value = 0;
+};
+
+} // namespace
+
+TEST(RangesTest, AllOf) {
+ auto is_non_zero = [](int i) { return i != 0; };
+ int array[] = {0, 1, 2, 3, 4, 5};
+
+ EXPECT_TRUE(ranges::all_of(array + 1, array + 6, is_non_zero));
+ EXPECT_FALSE(ranges::all_of(array, is_non_zero));
+
+ Int values[] = {{0}, {2}, {4}, {5}};
+ EXPECT_TRUE(ranges::all_of(values + 1, ranges::end(values), is_non_zero,
+ &Int::value));
+ EXPECT_FALSE(ranges::all_of(values, is_non_zero, &Int::value));
+}
+
+TEST(RangesTest, AnyOf) {
+ auto is_even = [](int i) { return i % 2 == 0; };
+ int array[] = {0, 1, 2, 3, 4, 5};
+
+ EXPECT_FALSE(ranges::any_of(array + 5, array + 6, is_even));
+ EXPECT_TRUE(ranges::any_of(array, is_even));
+
+ Int values[] = {{0}, {2}, {4}, {5}};
+ EXPECT_FALSE(
+ ranges::any_of(values + 3, ranges::end(values), is_even, &Int::value));
+ EXPECT_TRUE(ranges::any_of(values, is_even, &Int::value));
+}
+
+TEST(RangesTest, NoneOf) {
+ auto is_zero = [](int i) { return i == 0; };
+ int array[] = {0, 1, 2, 3, 4, 5};
+
+ EXPECT_TRUE(ranges::none_of(array + 1, array + 6, is_zero));
+ EXPECT_FALSE(ranges::none_of(array, is_zero));
+
+ Int values[] = {{0}, {2}, {4}, {5}};
+ EXPECT_TRUE(
+ ranges::none_of(values + 1, ranges::end(values), is_zero, &Int::value));
+ EXPECT_FALSE(ranges::none_of(values, is_zero, &Int::value));
+}
+
+TEST(RangesTest, ForEach) {
+ auto times_two = [](int& i) { i *= 2; };
+ int array[] = {0, 1, 2, 3, 4, 5};
+
+ ranges::for_each(array, array + 3, times_two);
+ EXPECT_THAT(array, ElementsAre(0, 2, 4, 3, 4, 5));
+
+ ranges::for_each(array + 3, array + 6, times_two);
+ EXPECT_THAT(array, ElementsAre(0, 2, 4, 6, 8, 10));
+
+ EXPECT_EQ(times_two, ranges::for_each(array, times_two));
+ EXPECT_THAT(array, ElementsAre(0, 4, 8, 12, 16, 20));
+
+ Int values[] = {{0}, {2}, {4}, {5}};
+ EXPECT_EQ(times_two, ranges::for_each(values, times_two, &Int::value));
+ EXPECT_THAT(values,
+ ElementsAre(Field(&Int::value, 0), Field(&Int::value, 4),
+ Field(&Int::value, 8), Field(&Int::value, 10)));
+}
+
+TEST(RangesTest, Find) {
+ int array[] = {0, 1, 2, 3, 4, 5};
+
+ EXPECT_EQ(array + 6, ranges::find(array + 1, array + 6, 0));
+ EXPECT_EQ(array, ranges::find(array, 0));
+
+ Int values[] = {{0}, {2}, {4}, {5}};
+ EXPECT_EQ(values, ranges::find(values, values, 0, &Int::value));
+ EXPECT_EQ(ranges::end(values), ranges::find(values, 3, &Int::value));
+}
+
+TEST(RangesTest, FindIf) {
+ auto is_at_least_5 = [](int i) { return i >= 5; };
+ int array[] = {0, 1, 2, 3, 4, 5};
+
+ EXPECT_EQ(array + 5, ranges::find_if(array, array + 5, is_at_least_5));
+ EXPECT_EQ(array + 5, ranges::find_if(array, is_at_least_5));
+
+ auto is_odd = [](int i) { return i % 2 == 1; };
+ Int values[] = {{0}, {2}, {4}, {5}};
+ EXPECT_EQ(values + 3,
+ ranges::find_if(values, values + 3, is_odd, &Int::value));
+ EXPECT_EQ(values + 3, ranges::find_if(values, is_odd, &Int::value));
+}
+
+TEST(RangesTest, FindIfNot) {
+ auto is_less_than_5 = [](int i) { return i < 5; };
+ int array[] = {0, 1, 2, 3, 4, 5};
+
+ EXPECT_EQ(array + 5, ranges::find_if_not(array, array + 5, is_less_than_5));
+ EXPECT_EQ(array + 5, ranges::find_if_not(array, is_less_than_5));
+
+ auto is_even = [](int i) { return i % 2 == 0; };
+ Int values[] = {{0}, {2}, {4}, {5}};
+ EXPECT_EQ(values + 3,
+ ranges::find_if_not(values, values + 3, is_even, &Int::value));
+ EXPECT_EQ(values + 3, ranges::find_if_not(values, is_even, &Int::value));
+}
+
+TEST(RangesTest, FindEnd) {
+ int array1[] = {0, 1, 2};
+ int array2[] = {4, 5, 6};
+ int array3[] = {0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4,
+ 0, 1, 2, 3, 0, 1, 2, 0, 1, 0};
+
+ EXPECT_EQ(array3 + 15, ranges::find_end(array3, ranges::end(array3), array1,
+ ranges::end(array1)));
+ EXPECT_EQ(ranges::end(array3), ranges::find_end(array3, ranges::end(array3),
+ array2, ranges::end(array2)));
+ EXPECT_EQ(array3 + 4,
+ ranges::find_end(array3, ranges::end(array3), array2, array2 + 2));
+
+ Int ints1[] = {{0}, {1}, {2}};
+ Int ints2[] = {{4}, {5}, {6}};
+
+ EXPECT_EQ(array3 + 15, ranges::find_end(array3, ints1, ranges::equal_to{},
+ identity{}, &Int::value));
+ EXPECT_EQ(ranges::end(array3),
+ ranges::find_end(array3, ints2, ranges::equal_to{}, identity{},
+ &Int::value));
+}
+
+TEST(RangesTest, FindFirstOf) {
+ int array1[] = {1, 2, 3};
+ int array2[] = {7, 8, 9};
+ int array3[] = {0, 1, 2, 3, 4, 5, 0, 1, 2, 3};
+
+ EXPECT_EQ(array3 + 1, ranges::find_first_of(array3, ranges::end(array3),
+ array1, ranges::end(array1)));
+ EXPECT_EQ(ranges::end(array3),
+ ranges::find_first_of(array3, ranges::end(array3), array2,
+ ranges::end(array2)));
+ Int ints1[] = {{1}, {2}, {3}};
+ Int ints2[] = {{7}, {8}, {9}};
+
+ EXPECT_EQ(array3 + 1, ranges::find_first_of(array3, ints1, ranges::equal_to{},
+ identity{}, &Int::value));
+ EXPECT_EQ(ranges::end(array3),
+ ranges::find_first_of(array3, ints2, ranges::equal_to{}, identity{},
+ &Int::value));
+}
+
+TEST(RangesTest, AdjacentFind) {
+ int array[] = {1, 2, 3, 3};
+ EXPECT_EQ(array + 2, ranges::adjacent_find(array, ranges::end(array)));
+ EXPECT_EQ(array,
+ ranges::adjacent_find(array, ranges::end(array), ranges::less{}));
+
+ Int ints[] = {{6}, {6}, {5}, {4}};
+ EXPECT_EQ(ints, ranges::adjacent_find(ints, ranges::equal_to{}, &Int::value));
+ EXPECT_EQ(ranges::end(ints),
+ ranges::adjacent_find(ints, ranges::less{}, &Int::value));
+}
+
+TEST(RangesTest, Count) {
+ int array[] = {1, 2, 3, 3};
+ EXPECT_EQ(1, ranges::count(array, array + 4, 1));
+ EXPECT_EQ(1, ranges::count(array, array + 4, 2));
+ EXPECT_EQ(1, ranges::count(array, array + 3, 3));
+ EXPECT_EQ(2, ranges::count(array, array + 4, 3));
+
+ Int ints[] = {{1}, {2}, {3}, {3}};
+ EXPECT_EQ(1, ranges::count(ints, 1, &Int::value));
+ EXPECT_EQ(1, ranges::count(ints, 2, &Int::value));
+ EXPECT_EQ(2, ranges::count(ints, 3, &Int::value));
+}
+
+TEST(RangesTest, CountIf) {
+ auto is_even = [](int i) { return i % 2 == 0; };
+ int array[] = {1, 2, 3, 3};
+ EXPECT_EQ(0, ranges::count_if(array, array + 1, is_even));
+ EXPECT_EQ(1, ranges::count_if(array, array + 2, is_even));
+ EXPECT_EQ(1, ranges::count_if(array, array + 3, is_even));
+ EXPECT_EQ(1, ranges::count_if(array, array + 4, is_even));
+
+ auto is_odd = [](int i) { return i % 2 == 1; };
+ Int ints[] = {{1}, {2}, {3}, {3}};
+ EXPECT_EQ(1, ranges::count_if(ints, is_even, &Int::value));
+ EXPECT_EQ(3, ranges::count_if(ints, is_odd, &Int::value));
+}
+
+TEST(RangesTest, Mismatch) {
+ int array1[] = {1, 3, 6, 7};
+ int array2[] = {1, 3};
+ int array3[] = {1, 3, 5, 7};
+ EXPECT_EQ(std::make_pair(array1 + 2, array2 + 2),
+ ranges::mismatch(array1, array1 + 4, array2, array2 + 2));
+ EXPECT_EQ(std::make_pair(array1 + 2, array3 + 2),
+ ranges::mismatch(array1, array1 + 4, array3, array3 + 4));
+
+ EXPECT_EQ(std::make_pair(array1 + 2, array2 + 2),
+ ranges::mismatch(array1, array2));
+ EXPECT_EQ(std::make_pair(array1 + 2, array3 + 2),
+ ranges::mismatch(array1, array3));
+}
+
+TEST(RangesTest, Equal) {
+ int array1[] = {1, 3, 6, 7};
+ int array2[] = {1, 3, 5, 7};
+ EXPECT_TRUE(ranges::equal(array1, array1 + 2, array2, array2 + 2));
+ EXPECT_FALSE(ranges::equal(array1, array1 + 4, array2, array2 + 4));
+ EXPECT_FALSE(ranges::equal(array1, array1 + 2, array2, array2 + 3));
+
+ Int ints[] = {{1}, {3}, {5}, {7}};
+ EXPECT_TRUE(ranges::equal(ints, array2,
+ [](Int lhs, int rhs) { return lhs.value == rhs; }));
+ EXPECT_TRUE(
+ ranges::equal(array2, ints, ranges::equal_to{}, identity{}, &Int::value));
+}
+
+TEST(RangesTest, IsPermutation) {
+ int array1[] = {1, 3, 6, 7};
+ int array2[] = {7, 3, 1, 6};
+ int array3[] = {1, 3, 5, 7};
+
+ EXPECT_TRUE(ranges::is_permutation(array1, array1 + 4, array2, array2 + 4));
+ EXPECT_FALSE(ranges::is_permutation(array1, array1 + 4, array3, array3 + 4));
+
+ EXPECT_TRUE(ranges::is_permutation(array1, array2));
+ EXPECT_FALSE(ranges::is_permutation(array1, array3));
+
+ Int ints1[] = {{1}, {3}, {5}, {7}};
+ Int ints2[] = {{1}, {5}, {3}, {7}};
+ EXPECT_TRUE(ranges::is_permutation(
+ ints1, ints2, [](Int lhs, Int rhs) { return lhs.value == rhs.value; }));
+
+ EXPECT_TRUE(
+ ranges::is_permutation(ints1, ints2, ranges::equal_to{}, &Int::value));
+}
+
+TEST(RangesTest, Search) {
+ int array1[] = {0, 1, 2, 3};
+ int array2[] = {0, 1, 5, 3};
+ int array3[] = {0, 1, 2, 0, 1, 2, 3, 0, 1, 2, 3, 4};
+
+ EXPECT_EQ(array3 + 3,
+ ranges::search(array3, array3 + 12, array1, array1 + 4));
+ EXPECT_EQ(array3 + 12,
+ ranges::search(array3, array3 + 12, array2, array2 + 4));
+
+ EXPECT_EQ(array3 + 3, ranges::search(array3, array1));
+ EXPECT_EQ(array3 + 12, ranges::search(array3, array2));
+
+ Int ints1[] = {{0}, {1}, {2}, {3}};
+ Int ints2[] = {{0}, {1}, {5}, {3}};
+
+ EXPECT_EQ(ints1 + 4, ranges::search(ints1, ints2, ranges::equal_to{},
+ &Int::value, &Int::value));
+
+ EXPECT_EQ(array3 + 3, ranges::search(array3, ints1, {}, {}, &Int::value));
+ EXPECT_EQ(array3 + 12, ranges::search(array3, ints2, {}, {}, &Int::value));
+}
+
+TEST(RangesTest, SearchN) {
+ int array[] = {0, 0, 1, 1, 2, 2};
+
+ EXPECT_EQ(array, ranges::search_n(array, array + 6, 1, 0));
+ EXPECT_EQ(array + 2, ranges::search_n(array, array + 6, 1, 1));
+ EXPECT_EQ(array + 4, ranges::search_n(array, array + 6, 1, 2));
+ EXPECT_EQ(array + 6, ranges::search_n(array, array + 6, 1, 3));
+
+ EXPECT_EQ(array, ranges::search_n(array, array + 6, 2, 0));
+ EXPECT_EQ(array + 2, ranges::search_n(array, array + 6, 2, 1));
+ EXPECT_EQ(array + 4, ranges::search_n(array, array + 6, 2, 2));
+ EXPECT_EQ(array + 6, ranges::search_n(array, array + 6, 2, 3));
+
+ EXPECT_EQ(array + 6, ranges::search_n(array, array + 6, 3, 0));
+ EXPECT_EQ(array + 6, ranges::search_n(array, array + 6, 3, 1));
+ EXPECT_EQ(array + 6, ranges::search_n(array, array + 6, 3, 2));
+ EXPECT_EQ(array + 6, ranges::search_n(array, array + 6, 3, 3));
+
+ Int ints[] = {{0}, {0}, {1}, {1}, {2}, {2}};
+ EXPECT_EQ(ints, ranges::search_n(ints, 1, 0, {}, &Int::value));
+ EXPECT_EQ(ints + 2, ranges::search_n(ints, 1, 1, {}, &Int::value));
+ EXPECT_EQ(ints + 4, ranges::search_n(ints, 1, 2, {}, &Int::value));
+ EXPECT_EQ(ints + 6, ranges::search_n(ints, 1, 3, {}, &Int::value));
+
+ EXPECT_EQ(ints, ranges::search_n(ints, 2, 0, {}, &Int::value));
+ EXPECT_EQ(ints + 2, ranges::search_n(ints, 2, 1, {}, &Int::value));
+ EXPECT_EQ(ints + 4, ranges::search_n(ints, 2, 2, {}, &Int::value));
+ EXPECT_EQ(ints + 6, ranges::search_n(ints, 2, 3, {}, &Int::value));
+
+ EXPECT_EQ(ints + 6, ranges::search_n(ints, 3, 0, {}, &Int::value));
+ EXPECT_EQ(ints + 6, ranges::search_n(ints, 3, 1, {}, &Int::value));
+ EXPECT_EQ(ints + 6, ranges::search_n(ints, 3, 2, {}, &Int::value));
+ EXPECT_EQ(ints + 6, ranges::search_n(ints, 3, 3, {}, &Int::value));
+}
+
+TEST(RangesTest, Copy) {
+ int input[] = {1, 2, 3, 4, 5};
+ int output[] = {6, 6, 6, 6, 6, 6, 6};
+ auto equals_six = [](int i) { return i == 6; };
+
+ EXPECT_EQ(output + 3, ranges::copy(input, input + 3, output));
+ EXPECT_TRUE(std::equal(input, input + 3, output, output + 3));
+ EXPECT_TRUE(std::all_of(output + 3, output + 7, equals_six));
+
+ EXPECT_EQ(output + 5, ranges::copy(input, output));
+ EXPECT_TRUE(std::equal(input, input + 5, output, output + 5));
+ EXPECT_TRUE(std::all_of(output + 5, output + 7, equals_six));
+}
+
+TEST(RangesTest, CopyN) {
+ int input[] = {1, 2, 3, 4, 5};
+ int output[] = {6, 6, 6, 6, 6, 6, 6};
+ auto equals_six = [](int i) { return i == 6; };
+
+ EXPECT_EQ(output + 4, ranges::copy_n(input, 4, output));
+ EXPECT_TRUE(std::equal(input, input + 4, output, output + 4));
+ EXPECT_TRUE(std::all_of(output + 4, output + 7, equals_six));
+}
+
+TEST(RangesTest, CopyIf) {
+ int input[] = {2, 4, 6, 8, 6};
+ int output[] = {0, 0, 0, 0, 0, 0};
+ auto equals_six = [](int i) { return i == 6; };
+ auto equals_zero = [](int i) { return i == 0; };
+
+ EXPECT_EQ(output + 1, ranges::copy_if(input, input + 4, output, equals_six));
+ EXPECT_TRUE(std::all_of(output, output + 1, equals_six));
+ EXPECT_TRUE(std::all_of(output + 1, output + 6, equals_zero));
+
+ Int ints_in[] = {{2}, {4}, {6}, {8}, {6}};
+ Int ints_out[] = {{0}, {0}, {0}, {0}, {0}, {0}};
+ EXPECT_EQ(ints_out + 2,
+ ranges::copy_if(ints_in, ints_out, equals_six, &Int::value));
+
+ EXPECT_TRUE(ranges::all_of(ints_out, ints_out + 2, equals_six, &Int::value));
+ EXPECT_TRUE(
+ ranges::all_of(ints_out + 2, ints_out + 6, equals_zero, &Int::value));
+}
+
+TEST(RangesTest, CopyBackward) {
+ int input[] = {2, 4, 6, 8, 6};
+ int output[] = {0, 0, 0, 0, 0, 0};
+
+ EXPECT_EQ(output + 1, ranges::copy_backward(input, input + 5, output + 6));
+ EXPECT_THAT(output, ElementsAre(0, 2, 4, 6, 8, 6));
+
+ Int ints_in[] = {{2}, {4}, {6}, {8}, {6}};
+ Int ints_out[] = {{0}, {0}, {0}, {0}, {0}, {0}};
+
+ EXPECT_EQ(ints_out, ranges::copy_backward(ints_in, ints_out + 5));
+ EXPECT_TRUE(std::equal(ints_in, ints_in + 5, ints_out, ints_out + 5,
+ [](Int i, Int j) { return i.value == j.value; }));
+}
+
+TEST(RangesTest, LowerBound) {
+ int array[] = {0, 0, 1, 1, 2, 2};
+
+ EXPECT_EQ(array, ranges::lower_bound(array, array + 6, -1));
+ EXPECT_EQ(array, ranges::lower_bound(array, array + 6, 0));
+ EXPECT_EQ(array + 2, ranges::lower_bound(array, array + 6, 1));
+ EXPECT_EQ(array + 4, ranges::lower_bound(array, array + 6, 2));
+ EXPECT_EQ(array + 6, ranges::lower_bound(array, array + 6, 3));
+
+ Int ints[] = {{0}, {0}, {1}, {1}, {2}, {2}};
+
+ EXPECT_EQ(ints, ranges::lower_bound(ints, -1, {}, &Int::value));
+ EXPECT_EQ(ints, ranges::lower_bound(ints, 0, {}, &Int::value));
+ EXPECT_EQ(ints + 2, ranges::lower_bound(ints, 1, {}, &Int::value));
+ EXPECT_EQ(ints + 4, ranges::lower_bound(ints, 2, {}, &Int::value));
+ EXPECT_EQ(ints + 6, ranges::lower_bound(ints, 3, {}, &Int::value));
+
+ const auto proj = [](const auto& i) { return 2 - i.value; };
+ EXPECT_EQ(ints, ranges::lower_bound(ints, 3, ranges::greater{}, proj));
+ EXPECT_EQ(ints, ranges::lower_bound(ints, 2, ranges::greater{}, proj));
+ EXPECT_EQ(ints + 2, ranges::lower_bound(ints, 1, ranges::greater{}, proj));
+ EXPECT_EQ(ints + 4, ranges::lower_bound(ints, 0, ranges::greater{}, proj));
+ EXPECT_EQ(ints + 6, ranges::lower_bound(ints, -1, ranges::greater{}, proj));
+}
+
+} // namespace util
diff --git a/chromium/base/util/ranges/functional.h b/chromium/base/util/ranges/functional.h
new file mode 100644
index 00000000000..9d31a71a891
--- /dev/null
+++ b/chromium/base/util/ranges/functional.h
@@ -0,0 +1,71 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_UTIL_RANGES_FUNCTIONAL_H_
+#define BASE_UTIL_RANGES_FUNCTIONAL_H_
+
+#include <functional>
+#include <type_traits>
+#include <utility>
+
+namespace util {
+
+// Implementation of C++20's std::identity.
+//
+// Reference:
+// - https://en.cppreference.com/w/cpp/utility/functional/identity
+// - https://wg21.link/func.identity
+struct identity {
+ template <typename T>
+ constexpr T&& operator()(T&& t) const noexcept {
+ return std::forward<T>(t);
+ }
+
+ using is_transparent = void;
+};
+
+// Minimal implementation of C++17's std::invoke. Based on implementation
+// referenced in original std::invoke proposal.
+//
+// Note: Unlike C++20's std::invoke this implementation is not constexpr. A
+// constexpr version can be added in the future, but it won't be as concise,
+// since std::mem_fn is not constexpr prior to C++20.
+//
+// References:
+// - https://wg21.link/n4169#implementability
+// - https://en.cppreference.com/w/cpp/utility/functional/invoke
+// - https://wg21.link/func.invoke
+template <typename Functor,
+ typename... Args,
+ std::enable_if_t<
+ std::is_member_pointer<std::decay_t<Functor>>::value>* = nullptr>
+decltype(auto) invoke(Functor&& f, Args&&... args) {
+ return std::mem_fn(f)(std::forward<Args>(args)...);
+}
+
+template <typename Functor,
+ typename... Args,
+ std::enable_if_t<
+ !std::is_member_pointer<std::decay_t<Functor>>::value>* = nullptr>
+decltype(auto) invoke(Functor&& f, Args&&... args) {
+ return std::forward<Functor>(f)(std::forward<Args>(args)...);
+}
+
+// Simplified implementations of C++20's std::ranges comparison function
+// objects. As opposed to the std::ranges implementation, these versions do not
+// constrain the passed-in types.
+//
+// Reference: https://wg21.link/range.cmp
+namespace ranges {
+using equal_to = std::equal_to<>;
+using not_equal_to = std::not_equal_to<>;
+using greater = std::greater<>;
+using less = std::less<>;
+using greater_equal = std::greater_equal<>;
+using less_equal = std::less_equal<>;
+} // namespace ranges
+
+} // namespace util
+
+#endif // BASE_UTIL_RANGES_FUNCTIONAL_H_
diff --git a/chromium/base/util/ranges/functional_unittest.cc b/chromium/base/util/ranges/functional_unittest.cc
new file mode 100644
index 00000000000..c3fe5487aa7
--- /dev/null
+++ b/chromium/base/util/ranges/functional_unittest.cc
@@ -0,0 +1,52 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/util/ranges/functional.h"
+
+#include <vector>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace util {
+
+TEST(RangesTest, Identity) {
+ static constexpr identity id;
+
+ std::vector<int> v;
+ EXPECT_EQ(&v, &id(v));
+
+ constexpr int arr = {0};
+ static_assert(arr == id(arr), "");
+}
+
+TEST(RangesTest, Invoke) {
+ struct S {
+ int data_member = 123;
+
+ int member_function() { return 42; }
+ };
+
+ S s;
+ EXPECT_EQ(123, invoke(&S::data_member, s));
+ EXPECT_EQ(42, invoke(&S::member_function, s));
+
+ auto add_functor = [](int i, int j) { return i + j; };
+ EXPECT_EQ(3, invoke(add_functor, 1, 2));
+}
+
+TEST(RangesTest, EqualTo) {
+ ranges::equal_to eq;
+ EXPECT_TRUE(eq(0, 0));
+ EXPECT_FALSE(eq(0, 1));
+ EXPECT_FALSE(eq(1, 0));
+}
+
+TEST(RangesTest, Less) {
+ ranges::less lt;
+ EXPECT_FALSE(lt(0, 0));
+ EXPECT_TRUE(lt(0, 1));
+ EXPECT_FALSE(lt(1, 0));
+}
+
+} // namespace util
diff --git a/chromium/base/util/ranges/iterator.h b/chromium/base/util/ranges/iterator.h
new file mode 100644
index 00000000000..daaedbc285b
--- /dev/null
+++ b/chromium/base/util/ranges/iterator.h
@@ -0,0 +1,40 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_UTIL_RANGES_ITERATOR_H_
+#define BASE_UTIL_RANGES_ITERATOR_H_
+
+#include <iterator>
+
+namespace util {
+namespace ranges {
+
+// Simplified implementation of C++20's std::ranges::begin.
+// As opposed to std::ranges::begin, this implementation does not prefer a
+// member begin() over a free standing begin(), does not check whether begin()
+// returns an iterator, does not inhibit ADL and is not constexpr.
+//
+// Reference: https://wg21.link/range.access.begin
+template <typename Range>
+decltype(auto) begin(Range&& range) {
+ using std::begin;
+ return begin(std::forward<Range>(range));
+}
+
+// Simplified implementation of C++20's std::ranges::end.
+// As opposed to std::ranges::end, this implementation does not prefer a
+// member end() over a free standing end(), does not check whether end()
+// returns an iterator, does not inhibit ADL and is not constexpr.
+//
+// Reference: - https://wg21.link/range.access.end
+template <typename Range>
+decltype(auto) end(Range&& range) {
+ using std::end;
+ return end(std::forward<Range>(range));
+}
+
+} // namespace ranges
+} // namespace util
+
+#endif // BASE_UTIL_RANGES_ITERATOR_H_
diff --git a/chromium/base/util/ranges/iterator_unittest.cc b/chromium/base/util/ranges/iterator_unittest.cc
new file mode 100644
index 00000000000..d20391720cc
--- /dev/null
+++ b/chromium/base/util/ranges/iterator_unittest.cc
@@ -0,0 +1,49 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/util/ranges/iterator.h"
+
+#include <vector>
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace util {
+
+namespace {
+
+struct S {
+ std::vector<int> v;
+};
+
+auto begin(const S& s) {
+ return s.v.begin();
+}
+
+auto end(const S& s) {
+ return s.v.end();
+}
+
+} // namespace
+
+TEST(RangesTest, Begin) {
+ std::vector<int> vec;
+ int arr[1]{};
+ S s;
+
+ EXPECT_EQ(vec.begin(), ranges::begin(vec));
+ EXPECT_EQ(arr, ranges::begin(arr));
+ EXPECT_EQ(s.v.begin(), ranges::begin(s));
+}
+
+TEST(RangesTest, End) {
+ std::vector<int> vec;
+ int arr[1]{};
+ S s;
+
+ EXPECT_EQ(vec.end(), ranges::end(vec));
+ EXPECT_EQ(arr + 1, ranges::end(arr));
+ EXPECT_EQ(s.v.end(), ranges::end(s));
+}
+
+} // namespace util
diff --git a/chromium/base/util/timer/wall_clock_timer.cc b/chromium/base/util/timer/wall_clock_timer.cc
index 5420ec7190b..1407775872b 100644
--- a/chromium/base/util/timer/wall_clock_timer.cc
+++ b/chromium/base/util/timer/wall_clock_timer.cc
@@ -52,8 +52,10 @@ void WallClockTimer::OnResume() {
}
void WallClockTimer::AddObserver() {
- if (!observer_added_)
- observer_added_ = base::PowerMonitor::AddObserver(this);
+ if (!observer_added_) {
+ base::PowerMonitor::AddObserver(this);
+ observer_added_ = true;
+ }
}
void WallClockTimer::RemoveObserver() {
diff --git a/chromium/base/util/type_safety/pass_key.h b/chromium/base/util/type_safety/pass_key.h
index 987822ede8f..61add480648 100644
--- a/chromium/base/util/type_safety/pass_key.h
+++ b/chromium/base/util/type_safety/pass_key.h
@@ -24,7 +24,7 @@ namespace util {
// public:
// using PassKey = util::PassKey<Manager>;
// Manager() : foo_(blink::MakeGarbageCollected<Foo>(PassKey())) {}
-// void Trace(blink::Visitor* visitor) { visitor->Trace(foo_); }
+// void Trace(blink::Visitor* visitor) const { visitor->Trace(foo_); }
// Foo* GetFooSingleton() { foo_; }
//
// private:
diff --git a/chromium/base/util/values/BUILD.gn b/chromium/base/util/values/BUILD.gn
index 5048d86c208..f14d26426bd 100644
--- a/chromium/base/util/values/BUILD.gn
+++ b/chromium/base/util/values/BUILD.gn
@@ -8,7 +8,7 @@ source_set("values_util") {
"values_util.h",
]
- deps = [ "//base:base" ]
+ deps = [ "//base" ]
}
source_set("unittests") {
@@ -17,6 +17,7 @@ source_set("unittests") {
deps = [
":values_util",
+ "//base",
"//testing/gtest",
]
}
diff --git a/chromium/base/util/values/values_util.cc b/chromium/base/util/values/values_util.cc
index 43b317b3100..fc9341035ce 100644
--- a/chromium/base/util/values/values_util.cc
+++ b/chromium/base/util/values/values_util.cc
@@ -4,10 +4,33 @@
#include "base/util/values/values_util.h"
+#include "base/files/file_path.h"
#include "base/strings/string_number_conversions.h"
+#include "base/time/time.h"
+#include "base/unguessable_token.h"
+
+// Warning: The Values involved could be stored on persistent storage like files
+// on disks. Therefore, changes in implementation could lead to data corruption
+// and must be done with caution.
namespace util {
+namespace {
+
+// Helper to serialize/deserialize an UnguessableToken.
+//
+// It assumes a little-endian CPU, which is arguably a bug.
+union UnguessableTokenRepresentation {
+ struct Field {
+ uint64_t high;
+ uint64_t low;
+ } field;
+
+ uint8_t buffer[sizeof(Field)];
+};
+
+} // namespace
+
base::Value Int64ToValue(int64_t integer) {
return base::Value(base::NumberToString(integer));
}
@@ -57,4 +80,40 @@ base::Optional<base::Time> ValueToTime(const base::Value& value) {
return base::Time::FromDeltaSinceWindowsEpoch(*time_delta);
}
+base::Value FilePathToValue(base::FilePath file_path) {
+ return base::Value(file_path.AsUTF8Unsafe());
+}
+
+base::Optional<base::FilePath> ValueToFilePath(const base::Value* value) {
+ return value ? ValueToFilePath(*value) : base::nullopt;
+}
+
+base::Optional<base::FilePath> ValueToFilePath(const base::Value& value) {
+ if (!value.is_string())
+ return base::nullopt;
+ return base::FilePath::FromUTF8Unsafe(value.GetString());
+}
+
+base::Value UnguessableTokenToValue(base::UnguessableToken token) {
+ UnguessableTokenRepresentation repr;
+ repr.field.high = token.GetHighForSerialization();
+ repr.field.low = token.GetLowForSerialization();
+ return base::Value(base::HexEncode(repr.buffer, sizeof(repr.buffer)));
+}
+
+base::Optional<base::UnguessableToken> ValueToUnguessableToken(
+ const base::Value* value) {
+ return value ? ValueToUnguessableToken(*value) : base::nullopt;
+}
+
+base::Optional<base::UnguessableToken> ValueToUnguessableToken(
+ const base::Value& value) {
+ if (!value.is_string())
+ return base::nullopt;
+ UnguessableTokenRepresentation repr;
+ if (!base::HexStringToSpan(value.GetString(), repr.buffer))
+ return base::nullopt;
+ return base::UnguessableToken::Deserialize(repr.field.high, repr.field.low);
+}
+
} // namespace util
diff --git a/chromium/base/util/values/values_util.h b/chromium/base/util/values/values_util.h
index de9fd1b9dc4..2958ae01405 100644
--- a/chromium/base/util/values/values_util.h
+++ b/chromium/base/util/values/values_util.h
@@ -6,31 +6,57 @@
#define BASE_UTIL_VALUES_VALUES_UTIL_H_
#include "base/optional.h"
-#include "base/time/time.h"
#include "base/values.h"
+namespace base {
+class FilePath;
+class Time;
+class TimeDelta;
+class UnguessableToken;
+} // namespace base
+
namespace util {
-// Simple helper functions for converting int64_t, base::TimeDelta and
-// base::Time to numeric string base::Values.
-// Because base::TimeDelta and base::Time share the same internal representation
-// as int64_t they are stored using the exact same numeric string format.
+// Simple helper functions for converting between base::Value and other types.
+// The base::Value representation is stable, suitable for persistent storage
+// e.g. as JSON on disk.
+//
+// It is valid to pass nullptr to the ValueToEtc functions. They will just
+// return base::nullopt.
-// Stores the int64_t as a string.
+// Converts between an int64_t and a string-flavored base::Value (a human
+// readable string of that number).
base::Value Int64ToValue(int64_t integer);
base::Optional<int64_t> ValueToInt64(const base::Value* value);
base::Optional<int64_t> ValueToInt64(const base::Value& value);
-// Converts the TimeDelta to an int64_t of microseconds.
+// Converts between a base::TimeDelta (an int64_t number of microseconds) and a
+// string-flavored base::Value (a human readable string of that number).
base::Value TimeDeltaToValue(base::TimeDelta time_delta);
base::Optional<base::TimeDelta> ValueToTimeDelta(const base::Value* value);
base::Optional<base::TimeDelta> ValueToTimeDelta(const base::Value& value);
-// Converts the Time to a TimeDelta from the Windows epoch.
+// Converts between a base::Time (an int64_t number of microseconds since the
+// Windows epoch) and a string-flavored base::Value (a human readable string of
+// that number).
base::Value TimeToValue(base::Time time);
base::Optional<base::Time> ValueToTime(const base::Value* value);
base::Optional<base::Time> ValueToTime(const base::Value& value);
+// Converts between a base::FilePath (a std::string or base::string16) and a
+// string-flavored base::Value (the UTF-8 representation).
+base::Value FilePathToValue(base::FilePath file_path);
+base::Optional<base::FilePath> ValueToFilePath(const base::Value* value);
+base::Optional<base::FilePath> ValueToFilePath(const base::Value& value);
+
+// Converts between a base::UnguessableToken (128 bits) and a string-flavored
+// base::Value (32 hexadecimal digits).
+base::Value UnguessableTokenToValue(base::UnguessableToken token);
+base::Optional<base::UnguessableToken> ValueToUnguessableToken(
+ const base::Value* value);
+base::Optional<base::UnguessableToken> ValueToUnguessableToken(
+ const base::Value& value);
+
} // namespace util
#endif // BASE_UTIL_VALUES_VALUES_UTIL_H_
diff --git a/chromium/base/util/values/values_util_unittest.cc b/chromium/base/util/values/values_util_unittest.cc
index ee543c18d84..d2954a6c2fd 100644
--- a/chromium/base/util/values/values_util_unittest.cc
+++ b/chromium/base/util/values/values_util_unittest.cc
@@ -2,28 +2,32 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include <limits>
-
#include "base/util/values/values_util.h"
+#include <limits>
+
+#include "base/files/file_path.h"
+#include "base/strings/string_piece.h"
+#include "base/time/time.h"
+#include "base/unguessable_token.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace util {
namespace {
-TEST(ValuesUtilTest, BasicLimits) {
- struct {
+TEST(ValuesUtilTest, BasicInt64Limits) {
+ constexpr struct {
int64_t input;
- const char* expected;
- } test_cases[] = {
+ base::StringPiece expected;
+ } kTestCases[] = {
{0, "0"},
{-1234, "-1234"},
{5678, "5678"},
{std::numeric_limits<int64_t>::lowest(), "-9223372036854775808"},
{std::numeric_limits<int64_t>::max(), "9223372036854775807"},
};
- for (const auto& test_case : test_cases) {
+ for (const auto& test_case : kTestCases) {
int64_t input = test_case.input;
base::TimeDelta time_delta_input = base::TimeDelta::FromMicroseconds(input);
base::Time time_input =
@@ -43,8 +47,8 @@ TEST(ValuesUtilTest, BasicLimits) {
}
}
-TEST(ValuesUtilTest, InvalidValues) {
- std::unique_ptr<base::Value> test_cases[] = {
+TEST(ValuesUtilTest, InvalidInt64Values) {
+ const std::unique_ptr<base::Value> kTestCases[] = {
nullptr,
std::make_unique<base::Value>(),
std::make_unique<base::Value>(0),
@@ -59,13 +63,48 @@ TEST(ValuesUtilTest, InvalidValues) {
std::make_unique<base::Value>("1234a"),
std::make_unique<base::Value>("a1234"),
};
- for (const auto& test_case : test_cases) {
+ for (const auto& test_case : kTestCases) {
EXPECT_FALSE(ValueToInt64(test_case.get()));
EXPECT_FALSE(ValueToTimeDelta(test_case.get()));
EXPECT_FALSE(ValueToTime(test_case.get()));
}
}
+TEST(ValuesUtilTest, FilePath) {
+ // Ω is U+03A9 GREEK CAPITAL LETTER OMEGA, a non-ASCII character.
+ constexpr base::StringPiece kTestCases[] = {
+ "/unix/Ω/path.dat",
+ "C:\\windows\\Ω\\path.dat",
+ };
+ for (auto test_case : kTestCases) {
+ base::FilePath input = base::FilePath::FromUTF8Unsafe(test_case);
+ base::Value expected(test_case);
+ SCOPED_TRACE(testing::Message() << "test_case: " << test_case);
+
+ EXPECT_EQ(FilePathToValue(input), expected);
+ EXPECT_EQ(*ValueToFilePath(&expected), input);
+ }
+}
+
+TEST(ValuesUtilTest, UnguessableToken) {
+ constexpr struct {
+ uint64_t high;
+ uint64_t low;
+ base::StringPiece expected;
+ } kTestCases[] = {
+ {0x123456u, 0x9ABCu, "5634120000000000BC9A000000000000"},
+ };
+ for (const auto& test_case : kTestCases) {
+ base::UnguessableToken input =
+ base::UnguessableToken::Deserialize(test_case.high, test_case.low);
+ base::Value expected(test_case.expected);
+ SCOPED_TRACE(testing::Message() << "expected: " << test_case.expected);
+
+ EXPECT_EQ(UnguessableTokenToValue(input), expected);
+ EXPECT_EQ(*ValueToUnguessableToken(&expected), input);
+ }
+}
+
} // namespace
} // namespace util
diff --git a/chromium/base/value_conversions.cc b/chromium/base/value_conversions.cc
deleted file mode 100644
index 9d84e3601ae..00000000000
--- a/chromium/base/value_conversions.cc
+++ /dev/null
@@ -1,105 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/value_conversions.h"
-
-#include <stdint.h>
-
-#include <algorithm>
-#include <string>
-#include <vector>
-
-#include "base/files/file_path.h"
-#include "base/memory/ptr_util.h"
-#include "base/strings/string_number_conversions.h"
-#include "base/time/time.h"
-#include "base/unguessable_token.h"
-#include "base/values.h"
-
-namespace base {
-namespace {
-// Helper for serialize/deserialize UnguessableToken.
-union UnguessableTokenRepresentation {
- struct Field {
- uint64_t high;
- uint64_t low;
- } field;
-
- uint8_t buffer[sizeof(Field)];
-};
-} // namespace
-
-// |Value| internally stores strings in UTF-8, so we have to convert from the
-// system native code to UTF-8 and back.
-Value CreateFilePathValue(const FilePath& in_value) {
- return Value(in_value.AsUTF8Unsafe());
-}
-
-bool GetValueAsFilePath(const Value& value, FilePath* file_path) {
- std::string str;
- if (!value.GetAsString(&str))
- return false;
- if (file_path)
- *file_path = FilePath::FromUTF8Unsafe(str);
- return true;
-}
-
-// It is recommended in time.h to use ToDeltaSinceWindowsEpoch() and
-// FromDeltaSinceWindowsEpoch() for opaque serialization and
-// deserialization of time values.
-Value CreateTimeValue(const Time& time) {
- return CreateTimeDeltaValue(time.ToDeltaSinceWindowsEpoch());
-}
-
-bool GetValueAsTime(const Value& value, Time* time) {
- TimeDelta time_delta;
- if (!GetValueAsTimeDelta(value, &time_delta))
- return false;
-
- if (time)
- *time = Time::FromDeltaSinceWindowsEpoch(time_delta);
- return true;
-}
-
-// |Value| does not support 64-bit integers, and doubles do not have enough
-// precision, so we store the 64-bit time value as a string instead.
-Value CreateTimeDeltaValue(const TimeDelta& time_delta) {
- std::string string_value = base::NumberToString(time_delta.InMicroseconds());
- return Value(string_value);
-}
-
-bool GetValueAsTimeDelta(const Value& value, TimeDelta* time_delta) {
- std::string str;
- int64_t int_value;
- if (!value.GetAsString(&str) || !base::StringToInt64(str, &int_value))
- return false;
- if (time_delta)
- *time_delta = TimeDelta::FromMicroseconds(int_value);
- return true;
-}
-
-Value CreateUnguessableTokenValue(const UnguessableToken& token) {
- UnguessableTokenRepresentation representation;
- representation.field.high = token.GetHighForSerialization();
- representation.field.low = token.GetLowForSerialization();
-
- return Value(HexEncode(representation.buffer, sizeof(representation.buffer)));
-}
-
-bool GetValueAsUnguessableToken(const Value& value, UnguessableToken* token) {
- if (!value.is_string()) {
- return false;
- }
-
- UnguessableTokenRepresentation representation;
- if (!HexStringToSpan(value.GetString(), representation.buffer)) {
- return false;
- }
-
- *token = UnguessableToken::Deserialize(representation.field.high,
- representation.field.low);
- return true;
-}
-
-} // namespace base
diff --git a/chromium/base/value_conversions.h b/chromium/base/value_conversions.h
deleted file mode 100644
index 369767f1d10..00000000000
--- a/chromium/base/value_conversions.h
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_VALUE_CONVERSIONS_H_
-#define BASE_VALUE_CONVERSIONS_H_
-
-// This file contains methods to convert things to a |Value| and back.
-
-#include <memory>
-
-#include "base/base_export.h"
-#include "base/compiler_specific.h"
-
-namespace base {
-
-class FilePath;
-class Time;
-class TimeDelta;
-class UnguessableToken;
-class Value;
-
-// In GetValueAs*() functions, the caller owns the object pointed by the output
-// parameter, e.g. |file_path|. If false is returned, the value of the object
-// pointed by the output parameter is not changed. It is okay to pass nullptr to
-// the output parameter.
-
-// Warning: The Values involved could be stored on persistent storage like files
-// on disks. Therefore, changes in implementation could lead to data corruption
-// and must be done with caution.
-
-BASE_EXPORT Value CreateFilePathValue(const FilePath& in_value);
-BASE_EXPORT bool GetValueAsFilePath(const Value& value,
- FilePath* file_path) WARN_UNUSED_RESULT;
-
-BASE_EXPORT Value CreateTimeValue(const Time& time);
-BASE_EXPORT bool GetValueAsTime(const Value& value,
- Time* time) WARN_UNUSED_RESULT;
-
-BASE_EXPORT Value CreateTimeDeltaValue(const TimeDelta& time_delta);
-BASE_EXPORT bool GetValueAsTimeDelta(const Value& value,
- TimeDelta* time_delta) WARN_UNUSED_RESULT;
-
-BASE_EXPORT Value CreateUnguessableTokenValue(const UnguessableToken& token);
-BASE_EXPORT bool GetValueAsUnguessableToken(const Value& value,
- UnguessableToken* token)
- WARN_UNUSED_RESULT;
-
-} // namespace base
-
-#endif // BASE_VALUE_CONVERSIONS_H_
diff --git a/chromium/base/values.cc b/chromium/base/values.cc
index 4315dbc2068..5f5657935d9 100644
--- a/chromium/base/values.cc
+++ b/chromium/base/values.cc
@@ -21,7 +21,11 @@
#include "base/stl_util.h"
#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversions.h"
+#include "base/tracing_buildflags.h"
+
+#if BUILDFLAG(ENABLE_BASE_TRACING)
#include "base/trace_event/memory_usage_estimator.h"
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
namespace base {
@@ -1061,6 +1065,7 @@ bool Value::Equals(const Value* other) const {
size_t Value::EstimateMemoryUsage() const {
switch (type_) {
+#if BUILDFLAG(ENABLE_BASE_TRACING)
case Type::STRING:
return base::trace_event::EstimateMemoryUsage(string_value_);
case Type::BINARY:
@@ -1069,6 +1074,7 @@ size_t Value::EstimateMemoryUsage() const {
return base::trace_event::EstimateMemoryUsage(dict_);
case Type::LIST:
return base::trace_event::EstimateMemoryUsage(list_);
+#endif // BUILDFLAG(ENABLE_BASE_TRACING)
default:
return 0;
}
diff --git a/chromium/base/values.h b/chromium/base/values.h
index 8d1ede3fd5e..6f0e21a446e 100644
--- a/chromium/base/values.h
+++ b/chromium/base/values.h
@@ -533,8 +533,9 @@ class BASE_EXPORT Value {
// TODO(crbug.com/646113): Delete this and migrate callsites.
bool Equals(const Value* other) const;
- // Estimates dynamic memory usage.
- // See base/trace_event/memory_usage_estimator.h for more info.
+ // Estimates dynamic memory usage. Requires tracing support
+ // (enable_base_tracing gn flag), otherwise always returns 0. See
+ // base/trace_event/memory_usage_estimator.h for more info.
size_t EstimateMemoryUsage() const;
protected:
@@ -942,12 +943,42 @@ class BASE_EXPORT ValueDeserializer {
// This method deserializes the subclass-specific format into a Value object.
// If the return value is non-NULL, the caller takes ownership of returned
- // Value. If the return value is NULL, and if error_code is non-NULL,
- // error_code will be set with the underlying error.
- // If |error_message| is non-null, it will be filled in with a formatted
+ // Value.
+ //
+ // If the return value is nullptr, and if |error_code| is non-nullptr,
+ // |*error_code| will be set to an integer value representing the underlying
+ // error. See "enum ErrorCode" below for more detail about the integer value.
+ //
+ // If |error_message| is non-nullptr, it will be filled in with a formatted
// error message including the location of the error if appropriate.
virtual std::unique_ptr<Value> Deserialize(int* error_code,
- std::string* error_str) = 0;
+ std::string* error_message) = 0;
+
+ // The integer-valued error codes form four groups:
+ // - The value 0 means no error.
+ // - Values between 1 and 999 inclusive mean an error in the data (i.e.
+ // content). The bytes being deserialized are not in the right format.
+ // - Values 1000 and above mean an error in the metadata (i.e. context). The
+ // file could not be read, the network is down, etc.
+ // - Negative values are reserved.
+ enum ErrorCode {
+ kErrorCodeNoError = 0,
+ // kErrorCodeInvalidFormat is a generic error code for "the data is not in
+ // the right format". Subclasses of ValueDeserializer may return other
+ // values for more specific errors.
+ kErrorCodeInvalidFormat = 1,
+ // kErrorCodeFirstMetadataError is the minimum value (inclusive) of the
+ // range of metadata errors.
+ kErrorCodeFirstMetadataError = 1000,
+ };
+
+ // The |error_code| argument can be one of the ErrorCode values, but it is
+ // not restricted to only being 0, 1 or 1000. Subclasses of ValueDeserializer
+ // can define their own error code values.
+ static inline bool ErrorCodeIsDataError(int error_code) {
+ return (kErrorCodeInvalidFormat <= error_code) &&
+ (error_code < kErrorCodeFirstMetadataError);
+ }
};
// Stream operator so Values can be used in assertion statements. In order that
diff --git a/chromium/base/version_unittest.cc b/chromium/base/version_unittest.cc
index 12b806399d0..4d04083801e 100644
--- a/chromium/base/version_unittest.cc
+++ b/chromium/base/version_unittest.cc
@@ -196,4 +196,28 @@ TEST(VersionTest, IsValidWildcardString) {
}
}
+TEST(VersionTest, LeadingZeros) {
+ {
+ // Leading zeros in the first component are not allowed.
+ base::Version v("01.1");
+ EXPECT_FALSE(v.IsValid());
+ }
+
+ {
+ // Leading zeros in subsequent components are allowed (and this behavior is
+ // now important for compatibility with existing modules, like extensions),
+ // but are ignored because the value is parsed as an integer...
+ base::Version v1("1.01");
+ EXPECT_TRUE(v1.IsValid());
+ // ...and as a result, v1.01 == v1.1.
+ EXPECT_EQ("1.1", v1.GetString());
+ base::Version v2("1.1");
+ EXPECT_EQ(v1, v2);
+ }
+
+ // Similarly, since leading zeros are ignored, v1.02 > v1.1 (because
+ // v1.02 is translated to 1.2).
+ EXPECT_GT(base::Version("1.02"), base::Version("1.1"));
+}
+
} // namespace
diff --git a/chromium/base/win/async_operation_unittest.cc b/chromium/base/win/async_operation_unittest.cc
index b29e181db3f..2309c230623 100644
--- a/chromium/base/win/async_operation_unittest.cc
+++ b/chromium/base/win/async_operation_unittest.cc
@@ -56,6 +56,19 @@ struct DECLSPEC_UUID("9e49373c-200c-4715-abd7-4214ba669c81")
}
};
+#ifdef NTDDI_WIN10_VB // Windows 10.0.19041
+// Specialization templates that used to be in windows.foundation.h, removed in
+// the 10.0.19041.0 SDK, so placed here instead.
+template <>
+struct __declspec(uuid("968b9665-06ed-5774-8f53-8edeabd5f7b5"))
+ IAsyncOperation<int> : IAsyncOperation_impl<int> {};
+
+template <>
+struct __declspec(uuid("d60cae9d-88cb-59f1-8576-3fba44796be8"))
+ IAsyncOperationCompletedHandler<int>
+ : IAsyncOperationCompletedHandler_impl<int> {};
+#endif
+
} // namespace Foundation
} // namespace Windows
} // namespace ABI
diff --git a/chromium/base/win/atl.h b/chromium/base/win/atl.h
index f87b7f1aeec..464438ed11c 100644
--- a/chromium/base/win/atl.h
+++ b/chromium/base/win/atl.h
@@ -1,6 +1,7 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+
#ifndef BASE_WIN_ATL_H_
#define BASE_WIN_ATL_H_
@@ -9,10 +10,6 @@
// Undefine before windows header will make the poisonous defines
#include "base/win/windows_undefines.inc"
-#ifndef _ATL_NO_EXCEPTIONS
-#define _ATL_NO_EXCEPTIONS
-#endif
-
// atlwin.h relies on std::void_t, but libc++ doesn't define it unless
// _LIBCPP_STD_VER > 14. Workaround this by manually defining it.
#include <type_traits>
@@ -23,6 +20,9 @@ using void_t = void;
}
#endif
+// Declare our own exception thrower (atl_throw.h includes atldef.h).
+#include "base/win/atl_throw.h"
+
#include <atlbase.h>
#include <atlcom.h>
#include <atlctl.h>
diff --git a/chromium/base/win/atl_throw.cc b/chromium/base/win/atl_throw.cc
new file mode 100644
index 00000000000..2da52e85e68
--- /dev/null
+++ b/chromium/base/win/atl_throw.cc
@@ -0,0 +1,25 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/atl_throw.h"
+
+#include <winerror.h>
+
+#include "base/compiler_specific.h"
+#include "base/debug/alias.h"
+#include "base/immediate_crash.h"
+#include "base/process/memory.h"
+
+namespace base {
+namespace win {
+
+NOINLINE void __stdcall AtlThrowImpl(HRESULT hr) {
+ base::debug::Alias(&hr);
+ if (hr == E_OUTOFMEMORY)
+ base::TerminateBecauseOutOfMemory(0);
+ IMMEDIATE_CRASH();
+}
+
+} // namespace win
+} // namespace base
diff --git a/chromium/base/win/atl_throw.h b/chromium/base/win/atl_throw.h
new file mode 100644
index 00000000000..b90cb1f638b
--- /dev/null
+++ b/chromium/base/win/atl_throw.h
@@ -0,0 +1,43 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_ATL_THROW_H_
+#define BASE_WIN_ATL_THROW_H_
+
+#ifdef __ATLDEF_H__
+#error atl_throw.h must be included before atldef.h.
+#endif
+
+#include "base/base_export.h"
+#include "base/win/windows_types.h"
+
+// Defining _ATL_NO_EXCEPTIONS causes ATL to raise a structured exception
+// instead of throwing a CAtlException. While crashpad will eventually handle
+// this, the HRESULT that caused the problem is lost. So, in addition, define
+// our own custom AtlThrow function (_ATL_CUSTOM_THROW).
+#ifndef _ATL_NO_EXCEPTIONS
+#define _ATL_NO_EXCEPTIONS
+#endif
+
+#define _ATL_CUSTOM_THROW
+#define AtlThrow ::base::win::AtlThrowImpl
+
+namespace base {
+namespace win {
+
+// Crash the process forthwith in case of ATL errors.
+[[noreturn]] BASE_EXPORT void __stdcall AtlThrowImpl(HRESULT hr);
+
+} // namespace win
+} // namespace base
+
+#include <atldef.h>
+
+// atldef.h mistakenly leaves out the declaration of this function when
+// _ATL_CUSTOM_THROW is defined.
+namespace ATL {
+ATL_NOINLINE __declspec(noreturn) inline void WINAPI AtlThrowLastWin32();
+}
+
+#endif // BASE_WIN_ATL_THROW_H_
diff --git a/chromium/base/win/com_init_check_hook.cc b/chromium/base/win/com_init_check_hook.cc
index 034c6ec4b31..df24953a43a 100644
--- a/chromium/base/win/com_init_check_hook.cc
+++ b/chromium/base/win/com_init_check_hook.cc
@@ -10,6 +10,7 @@
#include <stdint.h>
#include <string.h>
+#include "base/notreached.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/lock.h"
#include "base/win/com_init_util.h"
diff --git a/chromium/base/win/com_init_check_hook.h b/chromium/base/win/com_init_check_hook.h
index f143adee869..28a0db30558 100644
--- a/chromium/base/win/com_init_check_hook.h
+++ b/chromium/base/win/com_init_check_hook.h
@@ -6,7 +6,7 @@
#define BASE_WIN_COM_INIT_CHECK_HOOK_H_
#include "base/base_export.h"
-#include "base/logging.h"
+#include "base/check_op.h"
#include "base/macros.h"
#include "build/build_config.h"
diff --git a/chromium/base/win/com_init_util.cc b/chromium/base/win/com_init_util.cc
index 4064a2a8899..9c1773cd395 100644
--- a/chromium/base/win/com_init_util.cc
+++ b/chromium/base/win/com_init_util.cc
@@ -8,6 +8,7 @@
#include <winternl.h>
#include "base/logging.h"
+#include "base/notreached.h"
namespace base {
namespace win {
diff --git a/chromium/base/win/com_init_util.h b/chromium/base/win/com_init_util.h
index be5a1b4c5b7..906244cf7e2 100644
--- a/chromium/base/win/com_init_util.h
+++ b/chromium/base/win/com_init_util.h
@@ -6,7 +6,7 @@
#define BASE_WIN_COM_INIT_UTIL_H_
#include "base/base_export.h"
-#include "base/logging.h"
+#include "base/check_op.h"
namespace base {
namespace win {
diff --git a/chromium/base/win/dispatch_stub.cc b/chromium/base/win/dispatch_stub.cc
new file mode 100644
index 00000000000..562f9526483
--- /dev/null
+++ b/chromium/base/win/dispatch_stub.cc
@@ -0,0 +1,40 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/dispatch_stub.h"
+
+namespace base {
+namespace win {
+namespace test {
+
+IFACEMETHODIMP DispatchStub::GetTypeInfoCount(UINT*) {
+ return E_NOTIMPL;
+}
+
+IFACEMETHODIMP DispatchStub::GetTypeInfo(UINT, LCID, ITypeInfo**) {
+ return E_NOTIMPL;
+}
+
+IFACEMETHODIMP DispatchStub::GetIDsOfNames(REFIID,
+ LPOLESTR*,
+ UINT,
+ LCID,
+ DISPID*) {
+ return E_NOTIMPL;
+}
+
+IFACEMETHODIMP DispatchStub::Invoke(DISPID,
+ REFIID,
+ LCID,
+ WORD,
+ DISPPARAMS*,
+ VARIANT*,
+ EXCEPINFO*,
+ UINT*) {
+ return E_NOTIMPL;
+}
+
+} // namespace test
+} // namespace win
+} // namespace base
diff --git a/chromium/base/win/dispatch_stub.h b/chromium/base/win/dispatch_stub.h
new file mode 100644
index 00000000000..85e4acc3db2
--- /dev/null
+++ b/chromium/base/win/dispatch_stub.h
@@ -0,0 +1,43 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_DISPATCH_STUB_H_
+#define BASE_WIN_DISPATCH_STUB_H_
+
+#include <wrl/client.h>
+#include <wrl/implements.h>
+
+namespace base {
+namespace win {
+namespace test {
+
+// An unimplemented IDispatch subclass for testing purposes.
+class DispatchStub
+ : public Microsoft::WRL::RuntimeClass<
+ Microsoft::WRL::RuntimeClassFlags<Microsoft::WRL::ClassicCom>,
+ IDispatch> {
+ public:
+ DispatchStub() = default;
+ DispatchStub(const DispatchStub&) = delete;
+ DispatchStub& operator=(const DispatchStub&) = delete;
+
+ // IDispatch:
+ IFACEMETHODIMP GetTypeInfoCount(UINT*) override;
+ IFACEMETHODIMP GetTypeInfo(UINT, LCID, ITypeInfo**) override;
+ IFACEMETHODIMP GetIDsOfNames(REFIID, LPOLESTR*, UINT, LCID, DISPID*) override;
+ IFACEMETHODIMP Invoke(DISPID,
+ REFIID,
+ LCID,
+ WORD,
+ DISPPARAMS*,
+ VARIANT*,
+ EXCEPINFO*,
+ UINT*) override;
+};
+
+} // namespace test
+} // namespace win
+} // namespace base
+
+#endif // BASE_WIN_DISPATCH_STUB_H_
diff --git a/chromium/base/win/map.h b/chromium/base/win/map.h
index ef995ecf027..cee80e65e0e 100644
--- a/chromium/base/win/map.h
+++ b/chromium/base/win/map.h
@@ -10,7 +10,8 @@
#include <map>
-#include "base/logging.h"
+#include "base/check_op.h"
+#include "base/notreached.h"
#include "base/stl_util.h"
#include "base/win/vector.h"
#include "base/win/winrt_foundation_helpers.h"
diff --git a/chromium/base/win/post_async_results.h b/chromium/base/win/post_async_results.h
index 5df42252d66..81fb18f81e4 100644
--- a/chromium/base/win/post_async_results.h
+++ b/chromium/base/win/post_async_results.h
@@ -16,6 +16,7 @@
#include "base/bind.h"
#include "base/callback.h"
#include "base/location.h"
+#include "base/logging.h"
#include "base/threading/thread_task_runner_handle.h"
namespace base {
diff --git a/chromium/base/win/reference_unittest.cc b/chromium/base/win/reference_unittest.cc
index 4116872cce6..a83f3f5ff90 100644
--- a/chromium/base/win/reference_unittest.cc
+++ b/chromium/base/win/reference_unittest.cc
@@ -9,6 +9,24 @@
#include "testing/gtest/include/gtest/gtest.h"
+#ifdef NTDDI_WIN10_VB // Windows 10.0.19041
+// Specialization templates that used to be in windows.foundation.h, removed in
+// the 10.0.19041.0 SDK, so placed here instead.
+namespace ABI {
+namespace Windows {
+namespace Foundation {
+template <>
+struct __declspec(uuid("3c00fd60-2950-5939-a21a-2d12c5a01b8a")) IReference<bool>
+ : IReference_impl<Internal::AggregateType<bool, boolean>> {};
+
+template <>
+struct __declspec(uuid("548cefbd-bc8a-5fa0-8df2-957440fc8bf4")) IReference<int>
+ : IReference_impl<int> {};
+} // namespace Foundation
+} // namespace Windows
+} // namespace ABI
+#endif
+
namespace base {
namespace win {
diff --git a/chromium/base/win/scoped_bstr.h b/chromium/base/win/scoped_bstr.h
index 2fc70e341c9..9dbe3b2d525 100644
--- a/chromium/base/win/scoped_bstr.h
+++ b/chromium/base/win/scoped_bstr.h
@@ -11,7 +11,7 @@
#include <stddef.h>
#include "base/base_export.h"
-#include "base/logging.h"
+#include "base/check.h"
#include "base/macros.h"
#include "base/strings/string_piece.h"
diff --git a/chromium/base/win/scoped_co_mem.h b/chromium/base/win/scoped_co_mem.h
index 8c4eac2e856..457d69fabba 100644
--- a/chromium/base/win/scoped_co_mem.h
+++ b/chromium/base/win/scoped_co_mem.h
@@ -7,7 +7,7 @@
#include <objbase.h>
-#include "base/logging.h"
+#include "base/check.h"
#include "base/macros.h"
namespace base {
diff --git a/chromium/base/win/scoped_devinfo.h b/chromium/base/win/scoped_devinfo.h
new file mode 100644
index 00000000000..37a3a0f765a
--- /dev/null
+++ b/chromium/base/win/scoped_devinfo.h
@@ -0,0 +1,24 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_SCOPED_DEVINFO_H_
+#define BASE_WIN_SCOPED_DEVINFO_H_
+
+#include <setupapi.h>
+
+#include "base/scoped_generic.h"
+
+namespace base {
+namespace win {
+
+struct DevInfoScopedTraits {
+ static HDEVINFO InvalidValue() { return INVALID_HANDLE_VALUE; }
+ static void Free(HDEVINFO h) { SetupDiDestroyDeviceInfoList(h); }
+};
+using ScopedDevInfo = base::ScopedGeneric<HDEVINFO, DevInfoScopedTraits>;
+
+} // namespace win
+} // namespace base
+
+#endif // BASE_WIN_SCOPED_DEVINFO_H_
diff --git a/chromium/base/win/scoped_handle.h b/chromium/base/win/scoped_handle.h
index 02c25336493..76a9b006751 100644
--- a/chromium/base/win/scoped_handle.h
+++ b/chromium/base/win/scoped_handle.h
@@ -8,10 +8,10 @@
#include "base/win/windows_types.h"
#include "base/base_export.h"
+#include "base/check_op.h"
#include "base/compiler_specific.h"
#include "base/gtest_prod_util.h"
#include "base/location.h"
-#include "base/logging.h"
#include "base/macros.h"
// TODO(rvargas): remove this with the rest of the verifier.
diff --git a/chromium/base/win/scoped_hdc.h b/chromium/base/win/scoped_hdc.h
index 4532d91bb2d..b5ca3c0a07d 100644
--- a/chromium/base/win/scoped_hdc.h
+++ b/chromium/base/win/scoped_hdc.h
@@ -7,8 +7,8 @@
#include <windows.h>
+#include "base/check.h"
#include "base/debug/gdi_debug_util_win.h"
-#include "base/logging.h"
#include "base/macros.h"
#include "base/win/scoped_handle.h"
diff --git a/chromium/base/win/scoped_propvariant.h b/chromium/base/win/scoped_propvariant.h
index 0f1d5c85417..78e3e728ef7 100644
--- a/chromium/base/win/scoped_propvariant.h
+++ b/chromium/base/win/scoped_propvariant.h
@@ -7,7 +7,7 @@
#include <propidl.h>
-#include "base/logging.h"
+#include "base/check_op.h"
#include "base/macros.h"
namespace base {
diff --git a/chromium/base/win/scoped_safearray.h b/chromium/base/win/scoped_safearray.h
index 2b52bf98673..e7f3d7d1415 100644
--- a/chromium/base/win/scoped_safearray.h
+++ b/chromium/base/win/scoped_safearray.h
@@ -8,7 +8,8 @@
#include <objbase.h>
#include "base/base_export.h"
-#include "base/logging.h"
+#include "base/check_op.h"
+#include "base/macros.h"
namespace base {
namespace win {
diff --git a/chromium/base/win/scoped_select_object.h b/chromium/base/win/scoped_select_object.h
index d4b1a816fc7..81b2f93cbfc 100644
--- a/chromium/base/win/scoped_select_object.h
+++ b/chromium/base/win/scoped_select_object.h
@@ -7,7 +7,7 @@
#include <windows.h>
-#include "base/logging.h"
+#include "base/check.h"
#include "base/macros.h"
namespace base {
diff --git a/chromium/base/win/scoped_variant.cc b/chromium/base/win/scoped_variant.cc
index 2b0d74094e7..b4ce8117ea5 100644
--- a/chromium/base/win/scoped_variant.cc
+++ b/chromium/base/win/scoped_variant.cc
@@ -4,6 +4,7 @@
#include "base/win/scoped_variant.h"
+#include "base/check.h"
#include "base/logging.h"
namespace base {
diff --git a/chromium/base/win/scoped_variant_unittest.cc b/chromium/base/win/scoped_variant_unittest.cc
index 31575846bc7..437efc2298b 100644
--- a/chromium/base/win/scoped_variant_unittest.cc
+++ b/chromium/base/win/scoped_variant_unittest.cc
@@ -8,9 +8,12 @@
#include <utility>
+#include "base/win/dispatch_stub.h"
#include "base/win/scoped_variant.h"
#include "testing/gtest/include/gtest/gtest.h"
+using base::win::test::DispatchStub;
+
namespace base {
namespace win {
@@ -28,41 +31,6 @@ void InitializeVariantWithBstr(VARIANT* var) {
V_BSTR(var) = ::SysAllocString(kTestString);
}
-// An unimplemented IDispatch subclass.
-class DispatchStub
- : public Microsoft::WRL::RuntimeClass<
- Microsoft::WRL::RuntimeClassFlags<Microsoft::WRL::ClassicCom>,
- IDispatch> {
- public:
- DispatchStub() = default;
- DispatchStub(const DispatchStub&) = delete;
- DispatchStub& operator=(const DispatchStub&) = delete;
-
- // IDispatch:
- IFACEMETHODIMP GetTypeInfoCount(UINT*) override { return E_NOTIMPL; }
- IFACEMETHODIMP GetTypeInfo(UINT, LCID, ITypeInfo**) override {
- return E_NOTIMPL;
- }
- IFACEMETHODIMP GetIDsOfNames(REFIID,
- LPOLESTR*,
- UINT,
- LCID,
- DISPID*) override {
- return E_NOTIMPL;
- }
-
- IFACEMETHODIMP Invoke(DISPID,
- REFIID,
- LCID,
- WORD,
- DISPPARAMS*,
- VARIANT*,
- EXCEPINFO*,
- UINT*) override {
- return E_NOTIMPL;
- }
-};
-
void ExpectRefCount(ULONG expected_refcount, IUnknown* object) {
// In general, code should not check the values of AddRef() and Release().
// However, tests need to validate that ScopedVariant safely owns a COM object
diff --git a/chromium/base/win/shortcut.h b/chromium/base/win/shortcut.h
index e1a271f90c6..57694dfc7f7 100644
--- a/chromium/base/win/shortcut.h
+++ b/chromium/base/win/shortcut.h
@@ -10,8 +10,8 @@
#include <stdint.h>
#include "base/base_export.h"
+#include "base/check.h"
#include "base/files/file_path.h"
-#include "base/logging.h"
namespace base {
namespace win {
diff --git a/chromium/base/win/typed_event_handler.h b/chromium/base/win/typed_event_handler.h
deleted file mode 100644
index fd62782f8a7..00000000000
--- a/chromium/base/win/typed_event_handler.h
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_WIN_TYPED_EVENT_HANDLER_H_
-#define BASE_WIN_TYPED_EVENT_HANDLER_H_
-
-#include <windows.foundation.collections.h>
-#include <wrl/implements.h>
-
-#include <utility>
-
-#include "base/callback.h"
-
-namespace base {
-namespace win {
-
-// This file provides an implementation of Windows::Foundation's
-// ITypedEventHandler. It serves as a thin wrapper around a RepeatingCallback,
-// that forwards the arguments to its |Invoke| method to the callback's |Run|
-// method.
-template <typename SenderT, typename ArgsT>
-class TypedEventHandler
- : public Microsoft::WRL::RuntimeClass<
- Microsoft::WRL::RuntimeClassFlags<Microsoft::WRL::ClassicCom>,
- ABI::Windows::Foundation::ITypedEventHandler<SenderT, ArgsT>> {
- public:
- using SenderAbiT =
- typename ABI::Windows::Foundation::Internal::GetAbiType<SenderT>::type;
- using ArgsAbiT =
- typename ABI::Windows::Foundation::Internal::GetAbiType<ArgsT>::type;
-
- using Handler = base::RepeatingCallback<HRESULT(SenderAbiT, ArgsAbiT)>;
-
- explicit TypedEventHandler(Handler handler) : handler_(std::move(handler)) {}
-
- // ABI::Windows::Foundation::ITypedEventHandler:
- IFACEMETHODIMP Invoke(SenderAbiT sender, ArgsAbiT args) override {
- return handler_.Run(std::move(sender), std::move(args));
- }
-
- private:
- Handler handler_;
-};
-
-} // namespace win
-} // namespace base
-
-#endif // BASE_WIN_TYPED_EVENT_HANDLER_H_
diff --git a/chromium/base/win/typed_event_handler_unittest.cc b/chromium/base/win/typed_event_handler_unittest.cc
deleted file mode 100644
index 76dba80cd97..00000000000
--- a/chromium/base/win/typed_event_handler_unittest.cc
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2018 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/win/typed_event_handler.h"
-
-#include <windows.foundation.h>
-
-#include "base/test/bind_test_util.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace base {
-namespace win {
-
-TEST(TypedEventHandlerTest, InvokeSuccess) {
- bool called_callback = false;
- TypedEventHandler<IInspectable*, IInspectable*> handler(
- base::BindLambdaForTesting([&](IInspectable* sender, IInspectable* args) {
- EXPECT_EQ(reinterpret_cast<IInspectable*>(0x01), sender);
- EXPECT_EQ(reinterpret_cast<IInspectable*>(0x02), args);
- called_callback = true;
- return S_OK;
- }));
-
- EXPECT_FALSE(called_callback);
- HRESULT hr = handler.Invoke(reinterpret_cast<IInspectable*>(0x01),
- reinterpret_cast<IInspectable*>(0x02));
- EXPECT_TRUE(called_callback);
- EXPECT_EQ(S_OK, hr);
-}
-
-TEST(TypedEventHandlerTest, InvokeFail) {
- bool called_callback = false;
- TypedEventHandler<IInspectable*, IInspectable*> handler(
- base::BindLambdaForTesting([&](IInspectable* sender, IInspectable* args) {
- EXPECT_EQ(nullptr, sender);
- EXPECT_EQ(nullptr, args);
- called_callback = true;
- return E_FAIL;
- }));
-
- EXPECT_FALSE(called_callback);
- HRESULT hr = handler.Invoke(nullptr, nullptr);
- EXPECT_TRUE(called_callback);
- EXPECT_EQ(E_FAIL, hr);
-}
-
-} // namespace win
-} // namespace base
diff --git a/chromium/base/win/variant_util.h b/chromium/base/win/variant_util.h
new file mode 100644
index 00000000000..24f60e58c63
--- /dev/null
+++ b/chromium/base/win/variant_util.h
@@ -0,0 +1,151 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_WIN_VARIANT_UTIL_H_
+#define BASE_WIN_VARIANT_UTIL_H_
+
+#include "base/logging.h"
+
+namespace base {
+namespace win {
+namespace internal {
+
+// Returns true if a VARIANT of type |self| can be assigned to a
+// variant of type |other|.
+// Does not allow converting unsigned <-> signed or converting between
+// different sized types, but does allow converting IDispatch* -> IUnknown*.
+constexpr bool VarTypeIsConvertibleTo(VARTYPE self, VARTYPE other) {
+ // IDispatch inherits from IUnknown, so it's safe to
+ // upcast a VT_DISPATCH into an IUnknown*.
+ return (self == other) || (self == VT_DISPATCH && other == VT_UNKNOWN);
+}
+
+// VartypeToNativeType contains the underlying |Type| and offset to the
+// VARIANT union member related to the |ElementVartype| for simple types.
+template <VARTYPE ElementVartype>
+struct VartypeToNativeType final {};
+
+template <>
+struct VartypeToNativeType<VT_BOOL> final {
+ using Type = VARIANT_BOOL;
+ static constexpr VARIANT_BOOL VARIANT::*kMemberOffset = &VARIANT::boolVal;
+};
+
+template <>
+struct VartypeToNativeType<VT_I1> final {
+ using Type = int8_t;
+ static constexpr CHAR VARIANT::*kMemberOffset = &VARIANT::cVal;
+};
+
+template <>
+struct VartypeToNativeType<VT_UI1> final {
+ using Type = uint8_t;
+ static constexpr BYTE VARIANT::*kMemberOffset = &VARIANT::bVal;
+};
+
+template <>
+struct VartypeToNativeType<VT_I2> final {
+ using Type = int16_t;
+ static constexpr SHORT VARIANT::*kMemberOffset = &VARIANT::iVal;
+};
+
+template <>
+struct VartypeToNativeType<VT_UI2> final {
+ using Type = uint16_t;
+ static constexpr USHORT VARIANT::*kMemberOffset = &VARIANT::uiVal;
+};
+
+template <>
+struct VartypeToNativeType<VT_I4> final {
+ using Type = int32_t;
+ static constexpr LONG VARIANT::*kMemberOffset = &VARIANT::lVal;
+};
+
+template <>
+struct VartypeToNativeType<VT_UI4> final {
+ using Type = uint32_t;
+ static constexpr ULONG VARIANT::*kMemberOffset = &VARIANT::ulVal;
+};
+
+template <>
+struct VartypeToNativeType<VT_I8> final {
+ using Type = int64_t;
+ static constexpr LONGLONG VARIANT::*kMemberOffset = &VARIANT::llVal;
+};
+
+template <>
+struct VartypeToNativeType<VT_UI8> final {
+ using Type = uint64_t;
+ static constexpr ULONGLONG VARIANT::*kMemberOffset = &VARIANT::ullVal;
+};
+
+template <>
+struct VartypeToNativeType<VT_R4> final {
+ using Type = float;
+ static constexpr FLOAT VARIANT::*kMemberOffset = &VARIANT::fltVal;
+};
+
+template <>
+struct VartypeToNativeType<VT_R8> final {
+ using Type = double;
+ static constexpr DOUBLE VARIANT::*kMemberOffset = &VARIANT::dblVal;
+};
+
+template <>
+struct VartypeToNativeType<VT_DATE> final {
+ using Type = DATE;
+ static constexpr DATE VARIANT::*kMemberOffset = &VARIANT::date;
+};
+
+template <>
+struct VartypeToNativeType<VT_BSTR> final {
+ using Type = BSTR;
+ static constexpr BSTR VARIANT::*kMemberOffset = &VARIANT::bstrVal;
+};
+
+template <>
+struct VartypeToNativeType<VT_UNKNOWN> final {
+ using Type = IUnknown*;
+ static constexpr IUnknown* VARIANT::*kMemberOffset = &VARIANT::punkVal;
+};
+
+template <>
+struct VartypeToNativeType<VT_DISPATCH> final {
+ using Type = IDispatch*;
+ static constexpr IDispatch* VARIANT::*kMemberOffset = &VARIANT::pdispVal;
+};
+
+// VariantUtil contains the underlying |Type| and helper methods
+// related to the |ElementVartype| for simple types.
+template <VARTYPE ElementVartype>
+struct VariantUtil final {
+ using Type = typename VartypeToNativeType<ElementVartype>::Type;
+ static constexpr bool IsConvertibleTo(VARTYPE vartype) {
+ return VarTypeIsConvertibleTo(ElementVartype, vartype);
+ }
+ static constexpr bool IsConvertibleFrom(VARTYPE vartype) {
+ return VarTypeIsConvertibleTo(vartype, ElementVartype);
+ }
+ // Get the associated VARIANT union member value.
+ // Returns the value owned by the VARIANT without affecting the lifetime
+ // of managed contents.
+ // e.g. Does not affect IUnknown* reference counts or allocate a BSTR.
+ static Type RawGet(const VARIANT& var) {
+ DCHECK(IsConvertibleFrom(V_VT(&var)));
+ return var.*VartypeToNativeType<ElementVartype>::kMemberOffset;
+ }
+ // Set the associated VARIANT union member value.
+ // The caller is responsible for handling the lifetime of managed contents.
+ // e.g. Incrementing IUnknown* reference counts or allocating a BSTR.
+ static void RawSet(VARIANT* var, Type value) {
+ DCHECK(IsConvertibleTo(V_VT(var)));
+ var->*VartypeToNativeType<ElementVartype>::kMemberOffset = value;
+ }
+};
+
+} // namespace internal
+} // namespace win
+} // namespace base
+
+#endif // BASE_WIN_VARIANT_UTIL_H_
diff --git a/chromium/base/win/variant_util_unittest.cc b/chromium/base/win/variant_util_unittest.cc
new file mode 100644
index 00000000000..f15634afe60
--- /dev/null
+++ b/chromium/base/win/variant_util_unittest.cc
@@ -0,0 +1,266 @@
+// Copyright 2020 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdint.h>
+#include <wrl/client.h>
+#include <wrl/implements.h>
+
+#include <set>
+#include <utility>
+
+#include "base/stl_util.h"
+#include "base/win/dispatch_stub.h"
+#include "base/win/scoped_bstr.h"
+#include "base/win/variant_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+using base::win::internal::VariantUtil;
+using base::win::test::DispatchStub;
+
+namespace base {
+namespace win {
+
+namespace {
+
+static constexpr VARTYPE kSupportedVartypes[] = {
+ VT_BOOL, VT_I1, VT_UI1, VT_I2, VT_UI2, VT_I4, VT_UI4, VT_I8,
+ VT_UI8, VT_R4, VT_R8, VT_DATE, VT_BSTR, VT_UNKNOWN, VT_DISPATCH};
+
+template <VARTYPE ElementVartype>
+static bool TestIsConvertibleTo(const std::set<VARTYPE>& allowed_vartypes) {
+ for (VARTYPE vartype : kSupportedVartypes) {
+ if (VariantUtil<ElementVartype>::IsConvertibleTo(vartype) !=
+ base::Contains(allowed_vartypes, vartype)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+template <VARTYPE ElementVartype>
+static bool TestIsConvertibleFrom(const std::set<VARTYPE>& allowed_vartypes) {
+ for (VARTYPE vartype : kSupportedVartypes) {
+ if (VariantUtil<ElementVartype>::IsConvertibleFrom(vartype) !=
+ base::Contains(allowed_vartypes, vartype)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+} // namespace
+
+TEST(VariantUtilTest, VariantTypeBool) {
+ VARIANT variant;
+ V_VT(&variant) = VT_BOOL;
+
+ VariantUtil<VT_BOOL>::RawSet(&variant, VARIANT_TRUE);
+ EXPECT_EQ(V_BOOL(&variant), VARIANT_TRUE);
+ EXPECT_EQ(VariantUtil<VT_BOOL>::RawGet(variant), VARIANT_TRUE);
+
+ const std::set<VARTYPE> allowed_vartypes = {VT_BOOL};
+ EXPECT_TRUE(TestIsConvertibleTo<VT_BOOL>(allowed_vartypes));
+ EXPECT_TRUE(TestIsConvertibleFrom<VT_BOOL>(allowed_vartypes));
+}
+
+TEST(VariantUtilTest, VariantTypeI1) {
+ VARIANT variant;
+ V_VT(&variant) = VT_I1;
+
+ VariantUtil<VT_I1>::RawSet(&variant, 34);
+ EXPECT_EQ(V_I1(&variant), 34);
+ EXPECT_EQ(VariantUtil<VT_I1>::RawGet(variant), 34);
+
+ const std::set<VARTYPE> allowed_vartypes = {VT_I1};
+ EXPECT_TRUE(TestIsConvertibleTo<VT_I1>(allowed_vartypes));
+ EXPECT_TRUE(TestIsConvertibleFrom<VT_I1>(allowed_vartypes));
+}
+
+TEST(VariantUtilTest, VariantTypeUI1) {
+ VARIANT variant;
+ V_VT(&variant) = VT_UI1;
+
+ VariantUtil<VT_UI1>::RawSet(&variant, 34U);
+ EXPECT_EQ(V_UI1(&variant), 34U);
+ EXPECT_EQ(VariantUtil<VT_UI1>::RawGet(variant), 34U);
+
+ const std::set<VARTYPE> allowed_vartypes = {VT_UI1};
+ EXPECT_TRUE(TestIsConvertibleTo<VT_UI1>(allowed_vartypes));
+ EXPECT_TRUE(TestIsConvertibleFrom<VT_UI1>(allowed_vartypes));
+}
+
+TEST(VariantUtilTest, VariantTypeI2) {
+ VARIANT variant;
+ V_VT(&variant) = VT_I2;
+
+ VariantUtil<VT_I2>::RawSet(&variant, 8738);
+ EXPECT_EQ(V_I2(&variant), 8738);
+ EXPECT_EQ(VariantUtil<VT_I2>::RawGet(variant), 8738);
+
+ const std::set<VARTYPE> allowed_vartypes = {VT_I2};
+ EXPECT_TRUE(TestIsConvertibleTo<VT_I2>(allowed_vartypes));
+ EXPECT_TRUE(TestIsConvertibleFrom<VT_I2>(allowed_vartypes));
+}
+
+TEST(VariantUtilTest, VariantTypeUI2) {
+ VARIANT variant;
+ V_VT(&variant) = VT_UI2;
+
+ VariantUtil<VT_UI2>::RawSet(&variant, 8738U);
+ EXPECT_EQ(V_UI2(&variant), 8738U);
+ EXPECT_EQ(VariantUtil<VT_UI2>::RawGet(variant), 8738U);
+
+ const std::set<VARTYPE> allowed_vartypes = {VT_UI2};
+ EXPECT_TRUE(TestIsConvertibleTo<VT_UI2>(allowed_vartypes));
+ EXPECT_TRUE(TestIsConvertibleFrom<VT_UI2>(allowed_vartypes));
+}
+
+TEST(VariantUtilTest, VariantTypeI4) {
+ VARIANT variant;
+ V_VT(&variant) = VT_I4;
+
+ VariantUtil<VT_I4>::RawSet(&variant, 572662306);
+ EXPECT_EQ(V_I4(&variant), 572662306);
+ EXPECT_EQ(VariantUtil<VT_I4>::RawGet(variant), 572662306);
+
+ const std::set<VARTYPE> allowed_vartypes = {VT_I4};
+ EXPECT_TRUE(TestIsConvertibleTo<VT_I4>(allowed_vartypes));
+ EXPECT_TRUE(TestIsConvertibleFrom<VT_I4>(allowed_vartypes));
+}
+
+TEST(VariantUtilTest, VariantTypeUI4) {
+ VARIANT variant;
+ V_VT(&variant) = VT_UI4;
+
+ VariantUtil<VT_UI4>::RawSet(&variant, 572662306U);
+ EXPECT_EQ(V_UI4(&variant), 572662306U);
+ EXPECT_EQ(VariantUtil<VT_UI4>::RawGet(variant), 572662306U);
+
+ const std::set<VARTYPE> allowed_vartypes = {VT_UI4};
+ EXPECT_TRUE(TestIsConvertibleTo<VT_UI4>(allowed_vartypes));
+ EXPECT_TRUE(TestIsConvertibleFrom<VT_UI4>(allowed_vartypes));
+}
+
+TEST(VariantUtilTest, VariantTypeI8) {
+ VARIANT variant;
+ V_VT(&variant) = VT_I8;
+
+ VariantUtil<VT_I8>::RawSet(&variant, 2459565876494606882);
+ EXPECT_EQ(V_I8(&variant), 2459565876494606882);
+ EXPECT_EQ(VariantUtil<VT_I8>::RawGet(variant), 2459565876494606882);
+
+ const std::set<VARTYPE> allowed_vartypes = {VT_I8};
+ EXPECT_TRUE(TestIsConvertibleTo<VT_I8>(allowed_vartypes));
+ EXPECT_TRUE(TestIsConvertibleFrom<VT_I8>(allowed_vartypes));
+}
+
+TEST(VariantUtilTest, VariantTypeUI8) {
+ VARIANT variant;
+ V_VT(&variant) = VT_UI8;
+
+ VariantUtil<VT_UI8>::RawSet(&variant, 2459565876494606882U);
+ EXPECT_EQ(V_UI8(&variant), 2459565876494606882U);
+ EXPECT_EQ(VariantUtil<VT_UI8>::RawGet(variant), 2459565876494606882U);
+
+ const std::set<VARTYPE> allowed_vartypes = {VT_UI8};
+ EXPECT_TRUE(TestIsConvertibleTo<VT_UI8>(allowed_vartypes));
+ EXPECT_TRUE(TestIsConvertibleFrom<VT_UI8>(allowed_vartypes));
+}
+
+TEST(VariantUtilTest, VariantTypeR4) {
+ VARIANT variant;
+ V_VT(&variant) = VT_R4;
+
+ VariantUtil<VT_R4>::RawSet(&variant, 3.14159f);
+ EXPECT_EQ(V_R4(&variant), 3.14159f);
+ EXPECT_EQ(VariantUtil<VT_R4>::RawGet(variant), 3.14159f);
+
+ const std::set<VARTYPE> allowed_vartypes = {VT_R4};
+ EXPECT_TRUE(TestIsConvertibleTo<VT_R4>(allowed_vartypes));
+ EXPECT_TRUE(TestIsConvertibleFrom<VT_R4>(allowed_vartypes));
+}
+
+TEST(VariantUtilTest, VariantTypeR8) {
+ VARIANT variant;
+ V_VT(&variant) = VT_R8;
+
+ VariantUtil<VT_R8>::RawSet(&variant, 3.14159);
+ EXPECT_EQ(V_R8(&variant), 3.14159);
+ EXPECT_EQ(VariantUtil<VT_R8>::RawGet(variant), 3.14159);
+
+ const std::set<VARTYPE> allowed_vartypes = {VT_R8};
+ EXPECT_TRUE(TestIsConvertibleTo<VT_R8>(allowed_vartypes));
+ EXPECT_TRUE(TestIsConvertibleFrom<VT_R8>(allowed_vartypes));
+}
+
+TEST(VariantUtilTest, VariantTypeDate) {
+ SYSTEMTIME sys_time;
+ ::GetSystemTime(&sys_time);
+ DATE date;
+ ::SystemTimeToVariantTime(&sys_time, &date);
+
+ VARIANT variant;
+ V_VT(&variant) = VT_DATE;
+
+ VariantUtil<VT_DATE>::RawSet(&variant, date);
+ EXPECT_EQ(V_DATE(&variant), date);
+ EXPECT_EQ(VariantUtil<VT_DATE>::RawGet(variant), date);
+
+ const std::set<VARTYPE> allowed_vartypes = {VT_DATE};
+ EXPECT_TRUE(TestIsConvertibleTo<VT_DATE>(allowed_vartypes));
+ EXPECT_TRUE(TestIsConvertibleFrom<VT_DATE>(allowed_vartypes));
+}
+
+TEST(VariantUtilTest, VariantTypeBstr) {
+ ScopedBstr scoped_bstr;
+ scoped_bstr.Allocate(L"some text");
+
+ VARIANT variant;
+ V_VT(&variant) = VT_BSTR;
+
+ VariantUtil<VT_BSTR>::RawSet(&variant, scoped_bstr.Get());
+ EXPECT_EQ(V_BSTR(&variant), scoped_bstr.Get());
+ EXPECT_EQ(VariantUtil<VT_BSTR>::RawGet(variant), scoped_bstr.Get());
+
+ const std::set<VARTYPE> allowed_vartypes = {VT_BSTR};
+ EXPECT_TRUE(TestIsConvertibleTo<VT_BSTR>(allowed_vartypes));
+ EXPECT_TRUE(TestIsConvertibleFrom<VT_BSTR>(allowed_vartypes));
+}
+
+TEST(VariantUtilTest, VariantTypeUnknown) {
+ Microsoft::WRL::ComPtr<IUnknown> unknown =
+ Microsoft::WRL::Make<DispatchStub>();
+
+ VARIANT variant;
+ V_VT(&variant) = VT_UNKNOWN;
+
+ VariantUtil<VT_UNKNOWN>::RawSet(&variant, unknown.Get());
+ EXPECT_EQ(V_UNKNOWN(&variant), unknown.Get());
+ EXPECT_EQ(VariantUtil<VT_UNKNOWN>::RawGet(variant), unknown.Get());
+
+ const std::set<VARTYPE> allow_convertible_to = {VT_UNKNOWN};
+ const std::set<VARTYPE> allow_convertible_from = {VT_UNKNOWN, VT_DISPATCH};
+ EXPECT_TRUE(TestIsConvertibleTo<VT_UNKNOWN>(allow_convertible_to));
+ EXPECT_TRUE(TestIsConvertibleFrom<VT_UNKNOWN>(allow_convertible_from));
+}
+
+TEST(VariantUtilTest, VariantTypeDispatch) {
+ Microsoft::WRL::ComPtr<IDispatch> dispatch =
+ Microsoft::WRL::Make<DispatchStub>();
+
+ VARIANT variant;
+ V_VT(&variant) = VT_DISPATCH;
+
+ VariantUtil<VT_DISPATCH>::RawSet(&variant, dispatch.Get());
+ EXPECT_EQ(V_DISPATCH(&variant), dispatch.Get());
+ EXPECT_EQ(VariantUtil<VT_DISPATCH>::RawGet(variant), dispatch.Get());
+
+ const std::set<VARTYPE> allow_convertible_to = {VT_UNKNOWN, VT_DISPATCH};
+ const std::set<VARTYPE> allow_convertible_from = {VT_DISPATCH};
+ EXPECT_TRUE(TestIsConvertibleTo<VT_DISPATCH>(allow_convertible_to));
+ EXPECT_TRUE(TestIsConvertibleFrom<VT_DISPATCH>(allow_convertible_from));
+}
+
+} // namespace win
+} // namespace base
diff --git a/chromium/base/win/vector.h b/chromium/base/win/vector.h
index 1d96710b95d..654061fa3ec 100644
--- a/chromium/base/win/vector.h
+++ b/chromium/base/win/vector.h
@@ -15,8 +15,8 @@
#include <vector>
#include "base/base_export.h"
+#include "base/check_op.h"
#include "base/containers/flat_map.h"
-#include "base/logging.h"
#include "base/win/winrt_foundation_helpers.h"
namespace base {
diff --git a/chromium/base/win/vector_unittest.cc b/chromium/base/win/vector_unittest.cc
index b2cce5228f8..459e54026f6 100644
--- a/chromium/base/win/vector_unittest.cc
+++ b/chromium/base/win/vector_unittest.cc
@@ -39,6 +39,43 @@ struct __declspec(uuid("050e4b78-71b2-43ff-bf7c-f6ba589aced9"))
VectorChangedEventHandler<Uri*>
: VectorChangedEventHandler_impl<UriPtrAggregate> {};
+#ifdef NTDDI_WIN10_VB // Windows 10.0.19041
+// Specialization templates that used to be in windows.foundation.h, removed in
+// the 10.0.19041.0 SDK, so placed here instead.
+template <>
+struct __declspec(uuid("b939af5b-b45d-5489-9149-61442c1905fe")) IVector<int>
+ : IVector_impl<int> {};
+
+template <>
+struct __declspec(uuid("8d720cdf-3934-5d3f-9a55-40e8063b086a")) IVectorView<int>
+ : IVectorView_impl<int> {};
+
+template <>
+struct __declspec(uuid("bfea7f78-50c2-5f1d-a6ea-9e978d2699ff")) IIterator<int>
+ : IIterator_impl<int> {};
+
+template <>
+struct __declspec(uuid("81a643fb-f51c-5565-83c4-f96425777b66")) IIterable<int>
+ : IIterable_impl<int> {};
+
+template <>
+struct __declspec(uuid("0d82bd8d-fe62-5d67-a7b9-7886dd75bc4e")) IVector<Uri*>
+ : IVector_impl<Internal::AggregateType<Uri*, IUriRuntimeClass*>> {};
+
+template <>
+struct __declspec(uuid("4b8385bd-a2cd-5ff1-bf74-7ea580423e50"))
+ IVectorView<Uri*>
+ : IVectorView_impl<Internal::AggregateType<Uri*, IUriRuntimeClass*>> {};
+
+template <>
+struct __declspec(uuid("1c157d0f-5efe-5cec-bbd6-0c6ce9af07a5")) IIterator<Uri*>
+ : IIterator_impl<Internal::AggregateType<Uri*, IUriRuntimeClass*>> {};
+
+template <>
+struct __declspec(uuid("b0d63b78-78ad-5e31-b6d8-e32a0e16c447")) IIterable<Uri*>
+ : IIterable_impl<Internal::AggregateType<Uri*, IUriRuntimeClass*>> {};
+#endif
+
} // namespace Collections
} // namespace Foundation
} // namespace Windows
diff --git a/chromium/base/win/windows_version.cc b/chromium/base/win/windows_version.cc
index 0905602ee96..1c4580feab9 100644
--- a/chromium/base/win/windows_version.cc
+++ b/chromium/base/win/windows_version.cc
@@ -245,6 +245,8 @@ OSInfo::WOW64Status OSInfo::GetWOW64StatusForProcess(HANDLE process_handle) {
// static
Version OSInfo::MajorMinorBuildToVersion(int major, int minor, int build) {
if (major == 10) {
+ if (build >= 19041)
+ return Version::WIN10_20H1;
if (build >= 18362)
return Version::WIN10_19H1;
if (build >= 17763)
diff --git a/chromium/base/win/windows_version.h b/chromium/base/win/windows_version.h
index 7b70619e27f..95bd9fca4c4 100644
--- a/chromium/base/win/windows_version.h
+++ b/chromium/base/win/windows_version.h
@@ -50,6 +50,7 @@ enum class Version {
WIN10_RS4 = 12, // Redstone 4: Version 1803, Build 17134.
WIN10_RS5 = 13, // Redstone 5: Version 1809, Build 17763.
WIN10_19H1 = 14, // 19H1: Version 1903, Build 18362.
+ WIN10_20H1 = 15, // 20H1: Version 2004, Build 19041.
// On edit, update tools\metrics\histograms\enums.xml "WindowsVersion" and
// "GpuBlacklistFeatureTestResultsWindows2".
WIN_LAST, // Indicates error condition.
diff --git a/chromium/base/win/windows_version_unittest.cc b/chromium/base/win/windows_version_unittest.cc
index bd9d5048944..b5ca8b46080 100644
--- a/chromium/base/win/windows_version_unittest.cc
+++ b/chromium/base/win/windows_version_unittest.cc
@@ -19,7 +19,9 @@ TEST(WindowsVersion, GetVersionExAndKernelVersionMatch) {
TEST(OSInfo, MajorMinorBuildToVersion) {
EXPECT_EQ(OSInfo::MajorMinorBuildToVersion(10, 0, 32767),
- Version::WIN10_19H1);
+ Version::WIN10_20H1);
+ EXPECT_EQ(OSInfo::MajorMinorBuildToVersion(10, 0, 19041),
+ Version::WIN10_20H1);
EXPECT_EQ(OSInfo::MajorMinorBuildToVersion(10, 0, 18362),
Version::WIN10_19H1);
EXPECT_EQ(OSInfo::MajorMinorBuildToVersion(10, 0, 17763), Version::WIN10_RS5);